/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2010-2016 Intel Corporation. All rights reserved.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <sys/queue.h>
#include <stdio.h>
#include <errno.h>
#include <stdint.h>
#include <stdarg.h>

#include <rte_common.h>
#include <rte_interrupts.h>
#include <rte_byteorder.h>
#include <rte_log.h>
#include <rte_debug.h>
#include <rte_pci.h>
#include <rte_ether.h>
#include <rte_ethdev.h>
#include <rte_ethdev_pci.h>
#include <rte_memory.h>
#include <rte_memzone.h>
#include <rte_eal.h>
#include <rte_atomic.h>
#include <rte_malloc.h>
#include <rte_dev.h>
#include <rte_flow.h>
#include <rte_flow_driver.h>

#include "e1000_logs.h"
#include "base/e1000_api.h"
#include "e1000_ethdev.h"
#define	NEXT_ITEM_OF_PATTERN(item, pattern, index)		\
	do {							\
		item = (pattern) + (index);			\
		while (item->type == RTE_FLOW_ITEM_TYPE_VOID) {	\
			(index)++;				\
			item = (pattern) + (index);		\
		}						\
	} while (0)

#define	NEXT_ITEM_OF_ACTION(act, actions, index)		\
	do {							\
		act = (actions) + (index);			\
		while (act->type == RTE_FLOW_ACTION_TYPE_VOID) {\
			(index)++;				\
			act = (actions) + (index);		\
		}						\
	} while (0)

#define	IGB_FLEX_RAW_NUM	12
/**
 * Please be aware there is an assumption for all the parsers:
 * rte_flow_item uses big endian, while rte_flow_attr and
 * rte_flow_action use CPU order.
 * Because the pattern is used to describe the packets,
 * the packets normally use network order.
 */
/**
 * Parse the rule to see if it is an n-tuple rule.
 * And get the n-tuple filter info BTW.
 * pattern:
 * The first not void item can be ETH or IPV4.
 * The second not void item must be IPV4 if the first one is ETH.
 * The third not void item must be UDP or TCP or SCTP.
 * The next not void item must be END.
 * action:
 * The first not void action should be QUEUE.
 * The next not void action should be END.
 * pattern example:
 * ITEM		Spec			Mask
 * ETH		NULL			NULL
 * IPV4		src_addr 192.168.1.20	0xFFFFFFFF
 *		dst_addr 192.167.3.50	0xFFFFFFFF
 *		next_proto_id	17	0xFF
 * UDP/TCP/	src_port	80	0xFFFF
 * SCTP		dst_port	80	0xFFFF
 * END
 * Other members in mask and spec should be set to 0x00.
 * item->last should be NULL.
 */
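/*
 * Illustrative sketch only (hypothetical application-side usage, not part of
 * this driver): one way to lay out a pattern/action array that the parser
 * below accepts. All variable names and literal values are examples.
 *
 *	struct rte_flow_item_ipv4 ip_spec = { .hdr = {
 *		.src_addr = rte_cpu_to_be_32(IPv4(192, 168, 1, 20)),
 *		.dst_addr = rte_cpu_to_be_32(IPv4(192, 167, 3, 50)),
 *		.next_proto_id = 17,	// UDP
 *	} };
 *	struct rte_flow_item_ipv4 ip_mask = { .hdr = {
 *		.src_addr = rte_cpu_to_be_32(UINT32_MAX),
 *		.dst_addr = rte_cpu_to_be_32(UINT32_MAX),
 *		.next_proto_id = 0xFF,
 *	} };
 *	struct rte_flow_item_udp udp_spec = { .hdr = {
 *		.src_port = rte_cpu_to_be_16(80),
 *		.dst_port = rte_cpu_to_be_16(80),
 *	} };
 *	struct rte_flow_item_udp udp_mask = { .hdr = {
 *		.src_port = 0xFFFF, .dst_port = 0xFFFF,
 *	} };
 *	struct rte_flow_item pattern[] = {
 *		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
 *		{ .type = RTE_FLOW_ITEM_TYPE_IPV4,
 *		  .spec = &ip_spec, .mask = &ip_mask },
 *		{ .type = RTE_FLOW_ITEM_TYPE_UDP,
 *		  .spec = &udp_spec, .mask = &udp_mask },
 *		{ .type = RTE_FLOW_ITEM_TYPE_END },
 *	};
 *	struct rte_flow_action_queue queue = { .index = 1 };
 *	struct rte_flow_action actions[] = {
 *		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
 *		{ .type = RTE_FLOW_ACTION_TYPE_END },
 *	};
 */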
static int
cons_parse_ntuple_filter(const struct rte_flow_attr *attr,
			 const struct rte_flow_item pattern[],
			 const struct rte_flow_action actions[],
			 struct rte_eth_ntuple_filter *filter,
			 struct rte_flow_error *error)
{
	const struct rte_flow_item *item;
	const struct rte_flow_action *act;
	const struct rte_flow_item_ipv4 *ipv4_spec;
	const struct rte_flow_item_ipv4 *ipv4_mask;
	const struct rte_flow_item_tcp *tcp_spec;
	const struct rte_flow_item_tcp *tcp_mask;
	const struct rte_flow_item_udp *udp_spec;
	const struct rte_flow_item_udp *udp_mask;
	const struct rte_flow_item_sctp *sctp_spec;
	const struct rte_flow_item_sctp *sctp_mask;
	uint32_t index;

	if (!pattern) {
		rte_flow_error_set(error,
			EINVAL, RTE_FLOW_ERROR_TYPE_ITEM_NUM,
			NULL, "NULL pattern.");
		return -rte_errno;
	}

	if (!actions) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ACTION_NUM,
			NULL, "NULL action.");
		return -rte_errno;
	}

	if (!attr) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR,
			NULL, "NULL attribute.");
		return -rte_errno;
	}

	/* parse pattern */
	index = 0;

	/* the first not void item can be MAC or IPv4 */
	NEXT_ITEM_OF_PATTERN(item, pattern, index);

	if (item->type != RTE_FLOW_ITEM_TYPE_ETH &&
	    item->type != RTE_FLOW_ITEM_TYPE_IPV4) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by ntuple filter");
		return -rte_errno;
	}

	/* Skip Ethernet */
	if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
		/* Not supported last point for range */
		if (item->last) {
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				item, "Not supported last point for range");
			return -rte_errno;
		}
		/* if the first item is MAC, the content should be NULL */
		if (item->spec || item->mask) {
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by ntuple filter");
			return -rte_errno;
		}
		/* check if the next not void item is IPv4 */
		index++;
		NEXT_ITEM_OF_PATTERN(item, pattern, index);
		if (item->type != RTE_FLOW_ITEM_TYPE_IPV4) {
			rte_flow_error_set(error,
				EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by ntuple filter");
			return -rte_errno;
		}
	}

	/* get the IPv4 info */
	if (!item->spec || !item->mask) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Invalid ntuple mask");
		return -rte_errno;
	}
	/* Not supported last point for range */
	if (item->last) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
			item, "Not supported last point for range");
		return -rte_errno;
	}

	ipv4_mask = (const struct rte_flow_item_ipv4 *)item->mask;
	/**
	 * Only support src & dst addresses, protocol,
	 * others should be masked.
	 */
	if (ipv4_mask->hdr.version_ihl ||
	    ipv4_mask->hdr.type_of_service ||
	    ipv4_mask->hdr.total_length ||
	    ipv4_mask->hdr.packet_id ||
	    ipv4_mask->hdr.fragment_offset ||
	    ipv4_mask->hdr.time_to_live ||
	    ipv4_mask->hdr.hdr_checksum) {
		rte_flow_error_set(error,
			EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by ntuple filter");
		return -rte_errno;
	}

	filter->dst_ip_mask = ipv4_mask->hdr.dst_addr;
	filter->src_ip_mask = ipv4_mask->hdr.src_addr;
	filter->proto_mask  = ipv4_mask->hdr.next_proto_id;

	ipv4_spec = (const struct rte_flow_item_ipv4 *)item->spec;
	filter->dst_ip = ipv4_spec->hdr.dst_addr;
	filter->src_ip = ipv4_spec->hdr.src_addr;
	filter->proto  = ipv4_spec->hdr.next_proto_id;

	/* check if the next not void item is TCP or UDP or SCTP */
	index++;
	NEXT_ITEM_OF_PATTERN(item, pattern, index);
	if (item->type != RTE_FLOW_ITEM_TYPE_TCP &&
	    item->type != RTE_FLOW_ITEM_TYPE_UDP &&
	    item->type != RTE_FLOW_ITEM_TYPE_SCTP) {
		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by ntuple filter");
		return -rte_errno;
	}

	/* Not supported last point for range */
	if (item->last) {
		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
			item, "Not supported last point for range");
		return -rte_errno;
	}

	/* get the TCP/UDP/SCTP info */
	if (item->type == RTE_FLOW_ITEM_TYPE_TCP) {
		if (item->spec && item->mask) {
			tcp_mask = (const struct rte_flow_item_tcp *)item->mask;

			/**
			 * Only support src & dst ports, tcp flags,
			 * others should be masked.
			 */
			if (tcp_mask->hdr.sent_seq ||
			    tcp_mask->hdr.recv_ack ||
			    tcp_mask->hdr.data_off ||
			    tcp_mask->hdr.rx_win ||
			    tcp_mask->hdr.cksum ||
			    tcp_mask->hdr.tcp_urp) {
				memset(filter, 0,
					sizeof(struct rte_eth_ntuple_filter));
				rte_flow_error_set(error, EINVAL,
					RTE_FLOW_ERROR_TYPE_ITEM,
					item, "Not supported by ntuple filter");
				return -rte_errno;
			}

			filter->dst_port_mask = tcp_mask->hdr.dst_port;
			filter->src_port_mask = tcp_mask->hdr.src_port;
			if (tcp_mask->hdr.tcp_flags == 0xFF) {
				filter->flags |= RTE_NTUPLE_FLAGS_TCP_FLAG;
			} else if (!tcp_mask->hdr.tcp_flags) {
				filter->flags &= ~RTE_NTUPLE_FLAGS_TCP_FLAG;
			} else {
				memset(filter, 0,
					sizeof(struct rte_eth_ntuple_filter));
				rte_flow_error_set(error, EINVAL,
					RTE_FLOW_ERROR_TYPE_ITEM,
					item, "Not supported by ntuple filter");
				return -rte_errno;
			}

			tcp_spec = (const struct rte_flow_item_tcp *)item->spec;
			filter->dst_port  = tcp_spec->hdr.dst_port;
			filter->src_port  = tcp_spec->hdr.src_port;
			filter->tcp_flags = tcp_spec->hdr.tcp_flags;
		}
	} else if (item->type == RTE_FLOW_ITEM_TYPE_UDP) {
		if (item->spec && item->mask) {
			udp_mask = (const struct rte_flow_item_udp *)item->mask;

			/**
			 * Only support src & dst ports,
			 * others should be masked.
			 */
			if (udp_mask->hdr.dgram_len ||
			    udp_mask->hdr.dgram_cksum) {
				memset(filter, 0,
					sizeof(struct rte_eth_ntuple_filter));
				rte_flow_error_set(error, EINVAL,
					RTE_FLOW_ERROR_TYPE_ITEM,
					item, "Not supported by ntuple filter");
				return -rte_errno;
			}

			filter->dst_port_mask = udp_mask->hdr.dst_port;
			filter->src_port_mask = udp_mask->hdr.src_port;

			udp_spec = (const struct rte_flow_item_udp *)item->spec;
			filter->dst_port = udp_spec->hdr.dst_port;
			filter->src_port = udp_spec->hdr.src_port;
		}
	} else {
		if (item->spec && item->mask) {
			sctp_mask = (const struct rte_flow_item_sctp *)
					item->mask;

			/**
			 * Only support src & dst ports,
			 * others should be masked.
			 */
			if (sctp_mask->hdr.tag ||
			    sctp_mask->hdr.cksum) {
				memset(filter, 0,
					sizeof(struct rte_eth_ntuple_filter));
				rte_flow_error_set(error, EINVAL,
					RTE_FLOW_ERROR_TYPE_ITEM,
					item, "Not supported by ntuple filter");
				return -rte_errno;
			}

			filter->dst_port_mask = sctp_mask->hdr.dst_port;
			filter->src_port_mask = sctp_mask->hdr.src_port;

			sctp_spec = (const struct rte_flow_item_sctp *)
					item->spec;
			filter->dst_port = sctp_spec->hdr.dst_port;
			filter->src_port = sctp_spec->hdr.src_port;
		}
	}

	/* check if the next not void item is END */
	index++;
	NEXT_ITEM_OF_PATTERN(item, pattern, index);
	if (item->type != RTE_FLOW_ITEM_TYPE_END) {
		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by ntuple filter");
		return -rte_errno;
	}

	/* parse action */
	index = 0;

	/**
	 * n-tuple only supports forwarding,
	 * check if the first not void action is QUEUE.
	 */
	NEXT_ITEM_OF_ACTION(act, actions, index);
	if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE) {
		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ACTION,
			act, "Not supported action.");
		return -rte_errno;
	}
	filter->queue =
		((const struct rte_flow_action_queue *)act->conf)->index;

	/* check if the next not void item is END */
	index++;
	NEXT_ITEM_OF_ACTION(act, actions, index);
	if (act->type != RTE_FLOW_ACTION_TYPE_END) {
		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ACTION,
			act, "Not supported action.");
		return -rte_errno;
	}

	/* parse attr */
	/* must be input direction */
	if (!attr->ingress) {
		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
			attr, "Only support ingress.");
		return -rte_errno;
	}

	/* not supported */
	if (attr->egress) {
		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
			attr, "Not support egress.");
		return -rte_errno;
	}

	if (attr->priority > 0xFFFF) {
		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
			attr, "Error priority.");
		return -rte_errno;
	}
	filter->priority = (uint16_t)attr->priority;

	return 0;
}
/* a specific function for igb because the flags is specific */
static int
igb_parse_ntuple_filter(struct rte_eth_dev *dev,
			const struct rte_flow_attr *attr,
			const struct rte_flow_item pattern[],
			const struct rte_flow_action actions[],
			struct rte_eth_ntuple_filter *filter,
			struct rte_flow_error *error)
{
	struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	int ret;

	MAC_TYPE_FILTER_SUP(hw->mac.type);

	ret = cons_parse_ntuple_filter(attr, pattern, actions, filter, error);

	if (ret)
		return ret;

	/* Igb doesn't support many priorities. */
	if (filter->priority > E1000_2TUPLE_MAX_PRI) {
		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			NULL, "Priority not supported by ntuple filter");
		return -rte_errno;
	}

	if (hw->mac.type == e1000_82576) {
		if (filter->queue >= IGB_MAX_RX_QUEUE_NUM_82576) {
			memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				NULL, "queue number not "
				"supported by ntuple filter");
			return -rte_errno;
		}
		filter->flags |= RTE_5TUPLE_FLAGS;
	} else {
		if (filter->src_ip_mask || filter->dst_ip_mask ||
		    filter->src_port_mask) {
			memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				NULL, "only two tuple are "
				"supported by this filter");
			return -rte_errno;
		}
		if (filter->queue >= IGB_MAX_RX_QUEUE_NUM) {
			memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				NULL, "queue number not "
				"supported by ntuple filter");
			return -rte_errno;
		}
		filter->flags |= RTE_2TUPLE_FLAGS;
	}

	return 0;
}
/**
 * Parse the rule to see if it is an ethertype rule.
 * And get the ethertype filter info BTW.
 * pattern:
 * The first not void item can be ETH.
 * The next not void item must be END.
 * action:
 * The first not void action should be QUEUE.
 * The next not void action should be END.
 * pattern example:
 * ITEM		Spec			Mask
 * ETH		type	0x0807		0xFFFF
 * END
 * Other members in mask and spec should be set to 0x00.
 * item->last should be NULL.
 */
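/*
 * Illustrative sketch only (hypothetical application-side usage, not part of
 * this driver): an ETH item that carries just an EtherType, as accepted by
 * the parser below.
 *
 *	struct rte_flow_item_eth eth_spec = {
 *		.type = rte_cpu_to_be_16(0x0807),
 *	};
 *	struct rte_flow_item_eth eth_mask = {
 *		.type = 0xFFFF,
 *	};
 *	struct rte_flow_item pattern[] = {
 *		{ .type = RTE_FLOW_ITEM_TYPE_ETH,
 *		  .spec = &eth_spec, .mask = &eth_mask },
 *		{ .type = RTE_FLOW_ITEM_TYPE_END },
 *	};
 */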
static int
cons_parse_ethertype_filter(const struct rte_flow_attr *attr,
			const struct rte_flow_item *pattern,
			const struct rte_flow_action *actions,
			struct rte_eth_ethertype_filter *filter,
			struct rte_flow_error *error)
{
	const struct rte_flow_item *item;
	const struct rte_flow_action *act;
	const struct rte_flow_item_eth *eth_spec;
	const struct rte_flow_item_eth *eth_mask;
	const struct rte_flow_action_queue *act_q;
	uint32_t index;

	if (!pattern) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM_NUM,
			NULL, "NULL pattern.");
		return -rte_errno;
	}

	if (!actions) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ACTION_NUM,
			NULL, "NULL action.");
		return -rte_errno;
	}

	if (!attr) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR,
			NULL, "NULL attribute.");
		return -rte_errno;
	}

	/* Parse pattern */
	index = 0;

	/* The first non-void item should be MAC. */
	NEXT_ITEM_OF_PATTERN(item, pattern, index);
	if (item->type != RTE_FLOW_ITEM_TYPE_ETH) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by ethertype filter");
		return -rte_errno;
	}

	/* Not supported last point for range */
	if (item->last) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
			item, "Not supported last point for range");
		return -rte_errno;
	}

	/* Get the MAC info. */
	if (!item->spec || !item->mask) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by ethertype filter");
		return -rte_errno;
	}

	eth_spec = (const struct rte_flow_item_eth *)item->spec;
	eth_mask = (const struct rte_flow_item_eth *)item->mask;

	/* Mask bits of source MAC address must be full of 0.
	 * Mask bits of destination MAC address must be full
	 * of 1 or full of 0.
	 */
	if (!is_zero_ether_addr(&eth_mask->src) ||
	    (!is_zero_ether_addr(&eth_mask->dst) &&
	     !is_broadcast_ether_addr(&eth_mask->dst))) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Invalid ether address mask");
		return -rte_errno;
	}

	if ((eth_mask->type & UINT16_MAX) != UINT16_MAX) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Invalid ethertype mask");
		return -rte_errno;
	}

	/* If mask bits of destination MAC address
	 * are full of 1, set RTE_ETHTYPE_FLAGS_MAC.
	 */
	if (is_broadcast_ether_addr(&eth_mask->dst)) {
		filter->mac_addr = eth_spec->dst;
		filter->flags |= RTE_ETHTYPE_FLAGS_MAC;
	} else {
		filter->flags &= ~RTE_ETHTYPE_FLAGS_MAC;
	}
	filter->ether_type = rte_be_to_cpu_16(eth_spec->type);

	/* Check if the next non-void item is END. */
	index++;
	NEXT_ITEM_OF_PATTERN(item, pattern, index);
	if (item->type != RTE_FLOW_ITEM_TYPE_END) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by ethertype filter.");
		return -rte_errno;
	}

	/* Parse action */
	index = 0;

	/* Check if the first non-void action is QUEUE or DROP. */
	NEXT_ITEM_OF_ACTION(act, actions, index);
	if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE &&
	    act->type != RTE_FLOW_ACTION_TYPE_DROP) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ACTION,
			act, "Not supported action.");
		return -rte_errno;
	}

	if (act->type == RTE_FLOW_ACTION_TYPE_QUEUE) {
		act_q = (const struct rte_flow_action_queue *)act->conf;
		filter->queue = act_q->index;
	} else {
		filter->flags |= RTE_ETHTYPE_FLAGS_DROP;
	}

	/* Check if the next non-void item is END */
	index++;
	NEXT_ITEM_OF_ACTION(act, actions, index);
	if (act->type != RTE_FLOW_ACTION_TYPE_END) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ACTION,
			act, "Not supported action.");
		return -rte_errno;
	}

	/* Parse attr */
	/* Must be input direction */
	if (!attr->ingress) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
			attr, "Only support ingress.");
		return -rte_errno;
	}

	/* Not supported */
	if (attr->egress) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
			attr, "Not support egress.");
		return -rte_errno;
	}

	/* Not supported */
	if (attr->priority) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
			attr, "Not support priority.");
		return -rte_errno;
	}

	/* Not supported */
	if (attr->group) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
			attr, "Not support group.");
		return -rte_errno;
	}

	return 0;
}
static int
igb_parse_ethertype_filter(struct rte_eth_dev *dev,
			const struct rte_flow_attr *attr,
			const struct rte_flow_item pattern[],
			const struct rte_flow_action actions[],
			struct rte_eth_ethertype_filter *filter,
			struct rte_flow_error *error)
{
	struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	int ret;

	MAC_TYPE_FILTER_SUP(hw->mac.type);

	ret = cons_parse_ethertype_filter(attr, pattern,
					actions, filter, error);

	if (ret)
		return ret;

	if (hw->mac.type == e1000_82576) {
		if (filter->queue >= IGB_MAX_RX_QUEUE_NUM_82576) {
			memset(filter, 0,
				sizeof(struct rte_eth_ethertype_filter));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				NULL, "queue number not supported "
				"by ethertype filter");
			return -rte_errno;
		}
	} else {
		if (filter->queue >= IGB_MAX_RX_QUEUE_NUM) {
			memset(filter, 0,
				sizeof(struct rte_eth_ethertype_filter));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				NULL, "queue number not supported "
				"by ethertype filter");
			return -rte_errno;
		}
	}

	if (filter->ether_type == ETHER_TYPE_IPv4 ||
	    filter->ether_type == ETHER_TYPE_IPv6) {
		memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			NULL, "IPv4/IPv6 not supported by ethertype filter");
		return -rte_errno;
	}

	if (filter->flags & RTE_ETHTYPE_FLAGS_MAC) {
		memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			NULL, "mac compare is unsupported");
		return -rte_errno;
	}

	if (filter->flags & RTE_ETHTYPE_FLAGS_DROP) {
		memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			NULL, "drop option is unsupported");
		return -rte_errno;
	}

	return 0;
}
/**
 * Parse the rule to see if it is a TCP SYN rule.
 * And get the TCP SYN filter info BTW.
 * pattern:
 * The first not void item must be ETH.
 * The second not void item must be IPV4 or IPV6.
 * The third not void item must be TCP.
 * The next not void item must be END.
 * action:
 * The first not void action should be QUEUE.
 * The next not void action should be END.
 * pattern example:
 * ITEM		Spec			Mask
 * ETH		NULL			NULL
 * IPV4/IPV6	NULL			NULL
 * TCP		tcp_flags	0x02	0xFF
 * END
 * Other members in mask and spec should be set to 0x00.
 * item->last should be NULL.
 */
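/*
 * Illustrative sketch only (hypothetical application-side usage, not part of
 * this driver): a TCP item matching the SYN flag. Note that the parser below
 * requires the tcp_flags mask to be exactly TCP_SYN_FLAG (0x02).
 *
 *	struct rte_flow_item_tcp tcp_spec = { .hdr = { .tcp_flags = 0x02 } };
 *	struct rte_flow_item_tcp tcp_mask = { .hdr = { .tcp_flags = 0x02 } };
 *	struct rte_flow_item pattern[] = {
 *		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
 *		{ .type = RTE_FLOW_ITEM_TYPE_IPV4 },
 *		{ .type = RTE_FLOW_ITEM_TYPE_TCP,
 *		  .spec = &tcp_spec, .mask = &tcp_mask },
 *		{ .type = RTE_FLOW_ITEM_TYPE_END },
 *	};
 */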
static int
cons_parse_syn_filter(const struct rte_flow_attr *attr,
			const struct rte_flow_item pattern[],
			const struct rte_flow_action actions[],
			struct rte_eth_syn_filter *filter,
			struct rte_flow_error *error)
{
	const struct rte_flow_item *item;
	const struct rte_flow_action *act;
	const struct rte_flow_item_tcp *tcp_spec;
	const struct rte_flow_item_tcp *tcp_mask;
	const struct rte_flow_action_queue *act_q;
	uint32_t index;

	if (!pattern) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM_NUM,
			NULL, "NULL pattern.");
		return -rte_errno;
	}

	if (!actions) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ACTION_NUM,
			NULL, "NULL action.");
		return -rte_errno;
	}

	if (!attr) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR,
			NULL, "NULL attribute.");
		return -rte_errno;
	}

	/* parse pattern */
	index = 0;

	/* the first not void item should be MAC or IPv4 or IPv6 or TCP */
	NEXT_ITEM_OF_PATTERN(item, pattern, index);
	if (item->type != RTE_FLOW_ITEM_TYPE_ETH &&
	    item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
	    item->type != RTE_FLOW_ITEM_TYPE_IPV6 &&
	    item->type != RTE_FLOW_ITEM_TYPE_TCP) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by syn filter");
		return -rte_errno;
	}

	/* Not supported last point for range */
	if (item->last) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
			item, "Not supported last point for range");
		return -rte_errno;
	}

	/* Skip Ethernet */
	if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
		/* if the item is MAC, the content should be NULL */
		if (item->spec || item->mask) {
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Invalid SYN address mask");
			return -rte_errno;
		}

		/* check if the next not void item is IPv4 or IPv6 */
		index++;
		NEXT_ITEM_OF_PATTERN(item, pattern, index);
		if (item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
		    item->type != RTE_FLOW_ITEM_TYPE_IPV6) {
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by syn filter");
			return -rte_errno;
		}
	}

	/* Skip IP */
	if (item->type == RTE_FLOW_ITEM_TYPE_IPV4 ||
	    item->type == RTE_FLOW_ITEM_TYPE_IPV6) {
		/* if the item is IP, the content should be NULL */
		if (item->spec || item->mask) {
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Invalid SYN mask");
			return -rte_errno;
		}

		/* check if the next not void item is TCP */
		index++;
		NEXT_ITEM_OF_PATTERN(item, pattern, index);
		if (item->type != RTE_FLOW_ITEM_TYPE_TCP) {
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by syn filter");
			return -rte_errno;
		}
	}

	/* Get the TCP info. Only support SYN. */
	if (!item->spec || !item->mask) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Invalid SYN mask");
		return -rte_errno;
	}
	/* Not supported last point for range */
	if (item->last) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
			item, "Not supported last point for range");
		return -rte_errno;
	}

	tcp_spec = (const struct rte_flow_item_tcp *)item->spec;
	tcp_mask = (const struct rte_flow_item_tcp *)item->mask;
	if (!(tcp_spec->hdr.tcp_flags & TCP_SYN_FLAG) ||
	    tcp_mask->hdr.src_port ||
	    tcp_mask->hdr.dst_port ||
	    tcp_mask->hdr.sent_seq ||
	    tcp_mask->hdr.recv_ack ||
	    tcp_mask->hdr.data_off ||
	    tcp_mask->hdr.tcp_flags != TCP_SYN_FLAG ||
	    tcp_mask->hdr.rx_win ||
	    tcp_mask->hdr.cksum ||
	    tcp_mask->hdr.tcp_urp) {
		memset(filter, 0, sizeof(struct rte_eth_syn_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by syn filter");
		return -rte_errno;
	}

	/* check if the next not void item is END */
	index++;
	NEXT_ITEM_OF_PATTERN(item, pattern, index);
	if (item->type != RTE_FLOW_ITEM_TYPE_END) {
		memset(filter, 0, sizeof(struct rte_eth_syn_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by syn filter");
		return -rte_errno;
	}

	/* parse action */
	index = 0;

	/* check if the first not void action is QUEUE. */
	NEXT_ITEM_OF_ACTION(act, actions, index);
	if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE) {
		memset(filter, 0, sizeof(struct rte_eth_syn_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ACTION,
			act, "Not supported action.");
		return -rte_errno;
	}

	act_q = (const struct rte_flow_action_queue *)act->conf;
	filter->queue = act_q->index;

	/* check if the next not void item is END */
	index++;
	NEXT_ITEM_OF_ACTION(act, actions, index);
	if (act->type != RTE_FLOW_ACTION_TYPE_END) {
		memset(filter, 0, sizeof(struct rte_eth_syn_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ACTION,
			act, "Not supported action.");
		return -rte_errno;
	}

	/* parse attr */
	/* must be input direction */
	if (!attr->ingress) {
		memset(filter, 0, sizeof(struct rte_eth_syn_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
			attr, "Only support ingress.");
		return -rte_errno;
	}

	/* not supported */
	if (attr->egress) {
		memset(filter, 0, sizeof(struct rte_eth_syn_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
			attr, "Not support egress.");
		return -rte_errno;
	}

	/* Support 2 priorities, the lowest or highest. */
	if (!attr->priority) {
		filter->hig_pri = 0;
	} else if (attr->priority == (uint32_t)~0U) {
		filter->hig_pri = 1;
	} else {
		memset(filter, 0, sizeof(struct rte_eth_syn_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
			attr, "Not support priority.");
		return -rte_errno;
	}

	return 0;
}
static int
igb_parse_syn_filter(struct rte_eth_dev *dev,
			const struct rte_flow_attr *attr,
			const struct rte_flow_item pattern[],
			const struct rte_flow_action actions[],
			struct rte_eth_syn_filter *filter,
			struct rte_flow_error *error)
{
	struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	int ret;

	MAC_TYPE_FILTER_SUP(hw->mac.type);

	ret = cons_parse_syn_filter(attr, pattern,
					actions, filter, error);

	if (hw->mac.type == e1000_82576) {
		if (filter->queue >= IGB_MAX_RX_QUEUE_NUM_82576) {
			memset(filter, 0, sizeof(struct rte_eth_syn_filter));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				NULL, "queue number not "
				"supported by syn filter");
			return -rte_errno;
		}
	} else {
		if (filter->queue >= IGB_MAX_RX_QUEUE_NUM) {
			memset(filter, 0, sizeof(struct rte_eth_syn_filter));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				NULL, "queue number not "
				"supported by syn filter");
			return -rte_errno;
		}
	}

	if (ret)
		return ret;

	return 0;
}
/**
 * Parse the rule to see if it is a flex byte rule.
 * And get the flex byte filter info BTW.
 * pattern:
 * The first not void item must be RAW.
 * The second not void item can be RAW or END.
 * The third not void item can be RAW or END.
 * The last not void item must be END.
 * action:
 * The first not void action should be QUEUE.
 * The next not void action should be END.
 * pattern example:
 * ITEM		Spec				Mask
 * RAW		relative	0		0x1
 *		offset		0		0xFFFFFFFF
 *		pattern	{0x08, 0x06}		{0xFF, 0xFF}
 * RAW		relative	1		0x1
 *		offset		100		0xFFFFFFFF
 *		pattern	{0x11, 0x22, 0x33}	{0xFF, 0xFF, 0xFF}
 * END
 * Other members in mask and spec should be set to 0x00.
 * item->last should be NULL.
 */
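/*
 * Worked example for the byte-mask bitmap built below (illustrative only):
 * a non-relative RAW item of length 2 at offset 10 copies its two bytes to
 * filter->bytes[10] and filter->bytes[11], and marks them valid with
 * filter->mask[10 / CHAR_BIT] |= 0x80 >> (10 % CHAR_BIT), i.e.
 * mask[1] |= 0x20 for the first byte and then mask[1] |= 0x10 for the second.
 */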
static int
cons_parse_flex_filter(const struct rte_flow_attr *attr,
			const struct rte_flow_item pattern[],
			const struct rte_flow_action actions[],
			struct rte_eth_flex_filter *filter,
			struct rte_flow_error *error)
{
	const struct rte_flow_item *item;
	const struct rte_flow_action *act;
	const struct rte_flow_item_raw *raw_spec;
	const struct rte_flow_item_raw *raw_mask;
	const struct rte_flow_action_queue *act_q;
	uint32_t index, i, offset, total_offset;
	uint32_t max_offset = 0;
	int32_t shift, j, raw_index = 0;
	int32_t relative[IGB_FLEX_RAW_NUM] = {0};
	int32_t raw_offset[IGB_FLEX_RAW_NUM] = {0};

	if (!pattern) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM_NUM,
			NULL, "NULL pattern.");
		return -rte_errno;
	}

	if (!actions) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ACTION_NUM,
			NULL, "NULL action.");
		return -rte_errno;
	}

	if (!attr) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR,
			NULL, "NULL attribute.");
		return -rte_errno;
	}

	/* parse pattern */
	index = 0;

item_loop:

	/* the first not void item should be RAW */
	NEXT_ITEM_OF_PATTERN(item, pattern, index);
	if (item->type != RTE_FLOW_ITEM_TYPE_RAW) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by flex filter");
		return -rte_errno;
	}

	/* Not supported last point for range */
	if (item->last) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
			item, "Not supported last point for range");
		return -rte_errno;
	}

	raw_spec = (const struct rte_flow_item_raw *)item->spec;
	raw_mask = (const struct rte_flow_item_raw *)item->mask;

	if (!raw_mask->length ||
	    !raw_mask->relative) {
		memset(filter, 0, sizeof(struct rte_eth_flex_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by flex filter");
		return -rte_errno;
	}

	if (raw_mask->offset)
		offset = raw_spec->offset;
	else
		offset = 0;

	for (j = 0; j < raw_spec->length; j++) {
		if (raw_mask->pattern[j] != 0xFF) {
			memset(filter, 0, sizeof(struct rte_eth_flex_filter));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by flex filter");
			return -rte_errno;
		}
	}

	total_offset = 0;

	if (raw_spec->relative) {
		for (j = raw_index; j > 0; j--) {
			total_offset += raw_offset[j - 1];
			if (!relative[j - 1])
				break;
		}
		if (total_offset + raw_spec->length + offset > max_offset)
			max_offset = total_offset + raw_spec->length + offset;
	} else {
		if (raw_spec->length + offset > max_offset)
			max_offset = raw_spec->length + offset;
	}

	if ((raw_spec->length + offset + total_offset) >
			RTE_FLEX_FILTER_MAXLEN) {
		memset(filter, 0, sizeof(struct rte_eth_flex_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by flex filter");
		return -rte_errno;
	}

	if (raw_spec->relative == 0) {
		for (j = 0; j < raw_spec->length; j++)
			filter->bytes[offset + j] =
				raw_spec->pattern[j];
		j = offset / CHAR_BIT;
		shift = offset % CHAR_BIT;
	} else {
		for (j = 0; j < raw_spec->length; j++)
			filter->bytes[total_offset + offset + j] =
				raw_spec->pattern[j];
		j = (total_offset + offset) / CHAR_BIT;
		shift = (total_offset + offset) % CHAR_BIT;
	}

	i = 0;

	for ( ; shift < CHAR_BIT; shift++) {
		filter->mask[j] |= (0x80 >> shift);
		i++;
		if (i == raw_spec->length)
			break;
		if (shift == (CHAR_BIT - 1)) {
			j++;
			shift = -1;
		}
	}

	relative[raw_index] = raw_spec->relative;
	raw_offset[raw_index] = offset + raw_spec->length;
	raw_index++;

	/* check if the next not void item is RAW */
	index++;
	NEXT_ITEM_OF_PATTERN(item, pattern, index);
	if (item->type != RTE_FLOW_ITEM_TYPE_RAW &&
	    item->type != RTE_FLOW_ITEM_TYPE_END) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by flex filter");
		return -rte_errno;
	}

	/* go back to parser */
	if (item->type == RTE_FLOW_ITEM_TYPE_RAW) {
		/* if the item is RAW, the content should be parsed */
		goto item_loop;
	}

	filter->len = RTE_ALIGN(max_offset, 8);

	/* parse action */
	index = 0;

	/* check if the first not void action is QUEUE. */
	NEXT_ITEM_OF_ACTION(act, actions, index);
	if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE) {
		memset(filter, 0, sizeof(struct rte_eth_flex_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ACTION,
			act, "Not supported action.");
		return -rte_errno;
	}

	act_q = (const struct rte_flow_action_queue *)act->conf;
	filter->queue = act_q->index;

	/* check if the next not void item is END */
	index++;
	NEXT_ITEM_OF_ACTION(act, actions, index);
	if (act->type != RTE_FLOW_ACTION_TYPE_END) {
		memset(filter, 0, sizeof(struct rte_eth_flex_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ACTION,
			act, "Not supported action.");
		return -rte_errno;
	}

	/* parse attr */
	/* must be input direction */
	if (!attr->ingress) {
		memset(filter, 0, sizeof(struct rte_eth_flex_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
			attr, "Only support ingress.");
		return -rte_errno;
	}

	/* not supported */
	if (attr->egress) {
		memset(filter, 0, sizeof(struct rte_eth_flex_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
			attr, "Not support egress.");
		return -rte_errno;
	}

	if (attr->priority > 0xFFFF) {
		memset(filter, 0, sizeof(struct rte_eth_flex_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
			attr, "Error priority.");
		return -rte_errno;
	}

	filter->priority = (uint16_t)attr->priority;

	return 0;
}
static int
igb_parse_flex_filter(struct rte_eth_dev *dev,
			const struct rte_flow_attr *attr,
			const struct rte_flow_item pattern[],
			const struct rte_flow_action actions[],
			struct rte_eth_flex_filter *filter,
			struct rte_flow_error *error)
{
	struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	int ret;

	MAC_TYPE_FILTER_SUP_EXT(hw->mac.type);

	ret = cons_parse_flex_filter(attr, pattern,
					actions, filter, error);

	if (filter->queue >= IGB_MAX_RX_QUEUE_NUM) {
		memset(filter, 0, sizeof(struct rte_eth_flex_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			NULL, "queue number not supported by flex filter");
		return -rte_errno;
	}

	if (filter->len == 0 || filter->len > E1000_MAX_FLEX_FILTER_LEN ||
	    filter->len % sizeof(uint64_t) != 0) {
		PMD_DRV_LOG(ERR, "filter's length is out of range");
		return -EINVAL;
	}

	if (filter->priority > E1000_MAX_FLEX_FILTER_PRI) {
		PMD_DRV_LOG(ERR, "filter's priority is out of range");
		return -EINVAL;
	}

	if (ret)
		return ret;

	return 0;
}
/**
 * Create a flow rule.
 * Theoretically one rule can match more than one filter type.
 * We will let it use the first filter type it hits,
 * so the order of the attempts below matters.
 */
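/*
 * Illustrative sketch only (hypothetical application-side usage): the parsers
 * above are reached through the generic rte_flow API, e.g.
 *
 *	struct rte_flow_error err;
 *	struct rte_flow *f = NULL;
 *
 *	if (rte_flow_validate(port_id, &attr, pattern, actions, &err) == 0)
 *		f = rte_flow_create(port_id, &attr, pattern, actions, &err);
 */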
static struct rte_flow *
igb_flow_create(struct rte_eth_dev *dev,
		  const struct rte_flow_attr *attr,
		  const struct rte_flow_item pattern[],
		  const struct rte_flow_action actions[],
		  struct rte_flow_error *error)
{
	int ret;
	struct rte_eth_ntuple_filter ntuple_filter;
	struct rte_eth_ethertype_filter ethertype_filter;
	struct rte_eth_syn_filter syn_filter;
	struct rte_eth_flex_filter flex_filter;
	struct rte_flow *flow = NULL;
	struct igb_ntuple_filter_ele *ntuple_filter_ptr;
	struct igb_ethertype_filter_ele *ethertype_filter_ptr;
	struct igb_eth_syn_filter_ele *syn_filter_ptr;
	struct igb_flex_filter_ele *flex_filter_ptr;
	struct igb_flow_mem *igb_flow_mem_ptr;

	flow = rte_zmalloc("igb_rte_flow", sizeof(struct rte_flow), 0);
	if (!flow) {
		PMD_DRV_LOG(ERR, "failed to allocate memory");
		return (struct rte_flow *)flow;
	}
	igb_flow_mem_ptr = rte_zmalloc("igb_flow_mem",
			sizeof(struct igb_flow_mem), 0);
	if (!igb_flow_mem_ptr) {
		PMD_DRV_LOG(ERR, "failed to allocate memory");
		rte_free(flow);
		return NULL;
	}
	igb_flow_mem_ptr->flow = flow;
	igb_flow_mem_ptr->dev = dev;
	TAILQ_INSERT_TAIL(&igb_flow_list,
				igb_flow_mem_ptr, entries);

	memset(&ntuple_filter, 0, sizeof(struct rte_eth_ntuple_filter));
	ret = igb_parse_ntuple_filter(dev, attr, pattern,
			actions, &ntuple_filter, error);
	if (!ret) {
		ret = igb_add_del_ntuple_filter(dev, &ntuple_filter, TRUE);
		if (!ret) {
			ntuple_filter_ptr = rte_zmalloc("igb_ntuple_filter",
				sizeof(struct igb_ntuple_filter_ele), 0);
			(void)rte_memcpy(&ntuple_filter_ptr->filter_info,
				&ntuple_filter,
				sizeof(struct rte_eth_ntuple_filter));
			TAILQ_INSERT_TAIL(&igb_filter_ntuple_list,
				ntuple_filter_ptr, entries);
			flow->rule = ntuple_filter_ptr;
			flow->filter_type = RTE_ETH_FILTER_NTUPLE;
			return flow;
		}
		goto out;
	}

	memset(&ethertype_filter, 0, sizeof(struct rte_eth_ethertype_filter));
	ret = igb_parse_ethertype_filter(dev, attr, pattern,
				actions, &ethertype_filter, error);
	if (!ret) {
		ret = igb_add_del_ethertype_filter(dev,
				&ethertype_filter, TRUE);
		if (!ret) {
			ethertype_filter_ptr = rte_zmalloc(
				"igb_ethertype_filter",
				sizeof(struct igb_ethertype_filter_ele), 0);
			(void)rte_memcpy(&ethertype_filter_ptr->filter_info,
				&ethertype_filter,
				sizeof(struct rte_eth_ethertype_filter));
			TAILQ_INSERT_TAIL(&igb_filter_ethertype_list,
				ethertype_filter_ptr, entries);
			flow->rule = ethertype_filter_ptr;
			flow->filter_type = RTE_ETH_FILTER_ETHERTYPE;
			return flow;
		}
		goto out;
	}

	memset(&syn_filter, 0, sizeof(struct rte_eth_syn_filter));
	ret = igb_parse_syn_filter(dev, attr, pattern,
				actions, &syn_filter, error);
	if (!ret) {
		ret = eth_igb_syn_filter_set(dev, &syn_filter, TRUE);
		if (!ret) {
			syn_filter_ptr = rte_zmalloc("igb_syn_filter",
				sizeof(struct igb_eth_syn_filter_ele), 0);
			(void)rte_memcpy(&syn_filter_ptr->filter_info,
				&syn_filter,
				sizeof(struct rte_eth_syn_filter));
			TAILQ_INSERT_TAIL(&igb_filter_syn_list,
				syn_filter_ptr,
				entries);
			flow->rule = syn_filter_ptr;
			flow->filter_type = RTE_ETH_FILTER_SYN;
			return flow;
		}
		goto out;
	}

	memset(&flex_filter, 0, sizeof(struct rte_eth_flex_filter));
	ret = igb_parse_flex_filter(dev, attr, pattern,
					actions, &flex_filter, error);
	if (!ret) {
		ret = eth_igb_add_del_flex_filter(dev, &flex_filter, TRUE);
		if (!ret) {
			flex_filter_ptr = rte_zmalloc("igb_flex_filter",
				sizeof(struct igb_flex_filter_ele), 0);
			(void)rte_memcpy(&flex_filter_ptr->filter_info,
				&flex_filter,
				sizeof(struct rte_eth_flex_filter));
			TAILQ_INSERT_TAIL(&igb_filter_flex_list,
				flex_filter_ptr, entries);
			flow->rule = flex_filter_ptr;
			flow->filter_type = RTE_ETH_FILTER_FLEXIBLE;
			return flow;
		}
	}

out:
	TAILQ_REMOVE(&igb_flow_list,
		igb_flow_mem_ptr, entries);
	rte_flow_error_set(error, -ret,
			   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
			   "Failed to create flow.");
	rte_free(igb_flow_mem_ptr);
	rte_free(flow);
	return NULL;
}
/**
 * Check if the flow rule is supported by igb.
 * It only checks the format; it doesn't guarantee that the rule can be
 * programmed into the HW, because there may not be enough room for it.
 */
static int
igb_flow_validate(__rte_unused struct rte_eth_dev *dev,
		const struct rte_flow_attr *attr,
		const struct rte_flow_item pattern[],
		const struct rte_flow_action actions[],
		struct rte_flow_error *error)
{
	struct rte_eth_ntuple_filter ntuple_filter;
	struct rte_eth_ethertype_filter ethertype_filter;
	struct rte_eth_syn_filter syn_filter;
	struct rte_eth_flex_filter flex_filter;
	int ret;

	memset(&ntuple_filter, 0, sizeof(struct rte_eth_ntuple_filter));
	ret = igb_parse_ntuple_filter(dev, attr, pattern,
				actions, &ntuple_filter, error);
	if (!ret)
		return 0;

	memset(&ethertype_filter, 0, sizeof(struct rte_eth_ethertype_filter));
	ret = igb_parse_ethertype_filter(dev, attr, pattern,
				actions, &ethertype_filter, error);
	if (!ret)
		return 0;

	memset(&syn_filter, 0, sizeof(struct rte_eth_syn_filter));
	ret = igb_parse_syn_filter(dev, attr, pattern,
				actions, &syn_filter, error);
	if (!ret)
		return 0;

	memset(&flex_filter, 0, sizeof(struct rte_eth_flex_filter));
	ret = igb_parse_flex_filter(dev, attr, pattern,
				actions, &flex_filter, error);

	return ret;
}
/* Destroy a flow rule on igb. */
static int
igb_flow_destroy(struct rte_eth_dev *dev,
		struct rte_flow *flow,
		struct rte_flow_error *error)
{
	int ret;
	struct rte_flow *pmd_flow = flow;
	enum rte_filter_type filter_type = pmd_flow->filter_type;
	struct igb_ntuple_filter_ele *ntuple_filter_ptr;
	struct igb_ethertype_filter_ele *ethertype_filter_ptr;
	struct igb_eth_syn_filter_ele *syn_filter_ptr;
	struct igb_flex_filter_ele *flex_filter_ptr;
	struct igb_flow_mem *igb_flow_mem_ptr;

	switch (filter_type) {
	case RTE_ETH_FILTER_NTUPLE:
		ntuple_filter_ptr = (struct igb_ntuple_filter_ele *)
					pmd_flow->rule;
		ret = igb_add_del_ntuple_filter(dev,
				&ntuple_filter_ptr->filter_info, FALSE);
		if (!ret) {
			TAILQ_REMOVE(&igb_filter_ntuple_list,
				ntuple_filter_ptr, entries);
			rte_free(ntuple_filter_ptr);
		}
		break;
	case RTE_ETH_FILTER_ETHERTYPE:
		ethertype_filter_ptr = (struct igb_ethertype_filter_ele *)
					pmd_flow->rule;
		ret = igb_add_del_ethertype_filter(dev,
				&ethertype_filter_ptr->filter_info, FALSE);
		if (!ret) {
			TAILQ_REMOVE(&igb_filter_ethertype_list,
				ethertype_filter_ptr, entries);
			rte_free(ethertype_filter_ptr);
		}
		break;
	case RTE_ETH_FILTER_SYN:
		syn_filter_ptr = (struct igb_eth_syn_filter_ele *)
				pmd_flow->rule;
		ret = eth_igb_syn_filter_set(dev,
				&syn_filter_ptr->filter_info, FALSE);
		if (!ret) {
			TAILQ_REMOVE(&igb_filter_syn_list,
				syn_filter_ptr, entries);
			rte_free(syn_filter_ptr);
		}
		break;
	case RTE_ETH_FILTER_FLEXIBLE:
		flex_filter_ptr = (struct igb_flex_filter_ele *)
				pmd_flow->rule;
		ret = eth_igb_add_del_flex_filter(dev,
				&flex_filter_ptr->filter_info, FALSE);
		if (!ret) {
			TAILQ_REMOVE(&igb_filter_flex_list,
				flex_filter_ptr, entries);
			rte_free(flex_filter_ptr);
		}
		break;
	default:
		PMD_DRV_LOG(WARNING, "Filter type (%d) not supported",
			    filter_type);
		ret = -EINVAL;
		break;
	}

	if (ret) {
		rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_HANDLE,
				NULL, "Failed to destroy flow");
		return ret;
	}

	TAILQ_FOREACH(igb_flow_mem_ptr, &igb_flow_list, entries) {
		if (igb_flow_mem_ptr->flow == pmd_flow) {
			TAILQ_REMOVE(&igb_flow_list,
				igb_flow_mem_ptr, entries);
			rte_free(igb_flow_mem_ptr);
		}
	}
	rte_free(flow);

	return ret;
}
/* remove all the n-tuple filters */
static void
igb_clear_all_ntuple_filter(struct rte_eth_dev *dev)
{
	struct e1000_filter_info *filter_info =
		E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
	struct e1000_5tuple_filter *p_5tuple;
	struct e1000_2tuple_filter *p_2tuple;

	while ((p_5tuple = TAILQ_FIRST(&filter_info->fivetuple_list)))
		igb_delete_5tuple_filter_82576(dev, p_5tuple);

	while ((p_2tuple = TAILQ_FIRST(&filter_info->twotuple_list)))
		igb_delete_2tuple_filter(dev, p_2tuple);
}
/* remove all the ether type filters */
static void
igb_clear_all_ethertype_filter(struct rte_eth_dev *dev)
{
	struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct e1000_filter_info *filter_info =
		E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
	int i;

	for (i = 0; i < E1000_MAX_ETQF_FILTERS; i++) {
		if (filter_info->ethertype_mask & (1 << i)) {
			(void)igb_ethertype_filter_remove(filter_info,
							  (uint8_t)i);
			E1000_WRITE_REG(hw, E1000_ETQF(i), 0);
			E1000_WRITE_FLUSH(hw);
		}
	}
}
/* remove the SYN filter */
static void
igb_clear_syn_filter(struct rte_eth_dev *dev)
{
	struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct e1000_filter_info *filter_info =
		E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);

	if (filter_info->syn_info & E1000_SYN_FILTER_ENABLE) {
		filter_info->syn_info = 0;
		E1000_WRITE_REG(hw, E1000_SYNQF(0), 0);
		E1000_WRITE_FLUSH(hw);
	}
}
/* remove all the flex filters */
static void
igb_clear_all_flex_filter(struct rte_eth_dev *dev)
{
	struct e1000_filter_info *filter_info =
		E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
	struct e1000_flex_filter *flex_filter;

	while ((flex_filter = TAILQ_FIRST(&filter_info->flex_list)))
		igb_remove_flex_filter(dev, flex_filter);
}
void
igb_filterlist_flush(struct rte_eth_dev *dev)
{
	struct igb_ntuple_filter_ele *ntuple_filter_ptr;
	struct igb_ethertype_filter_ele *ethertype_filter_ptr;
	struct igb_eth_syn_filter_ele *syn_filter_ptr;
	struct igb_flex_filter_ele *flex_filter_ptr;
	struct igb_flow_mem *igb_flow_mem_ptr;
	enum rte_filter_type filter_type;
	struct rte_flow *pmd_flow;

	TAILQ_FOREACH(igb_flow_mem_ptr, &igb_flow_list, entries) {
		if (igb_flow_mem_ptr->dev == dev) {
			pmd_flow = igb_flow_mem_ptr->flow;
			filter_type = pmd_flow->filter_type;

			switch (filter_type) {
			case RTE_ETH_FILTER_NTUPLE:
				ntuple_filter_ptr =
					(struct igb_ntuple_filter_ele *)
						pmd_flow->rule;
				TAILQ_REMOVE(&igb_filter_ntuple_list,
						ntuple_filter_ptr, entries);
				rte_free(ntuple_filter_ptr);
				break;
			case RTE_ETH_FILTER_ETHERTYPE:
				ethertype_filter_ptr =
					(struct igb_ethertype_filter_ele *)
						pmd_flow->rule;
				TAILQ_REMOVE(&igb_filter_ethertype_list,
						ethertype_filter_ptr, entries);
				rte_free(ethertype_filter_ptr);
				break;
			case RTE_ETH_FILTER_SYN:
				syn_filter_ptr =
					(struct igb_eth_syn_filter_ele *)
						pmd_flow->rule;
				TAILQ_REMOVE(&igb_filter_syn_list,
						syn_filter_ptr, entries);
				rte_free(syn_filter_ptr);
				break;
			case RTE_ETH_FILTER_FLEXIBLE:
				flex_filter_ptr =
					(struct igb_flex_filter_ele *)
						pmd_flow->rule;
				TAILQ_REMOVE(&igb_filter_flex_list,
						flex_filter_ptr, entries);
				rte_free(flex_filter_ptr);
				break;
			default:
				PMD_DRV_LOG(WARNING, "Filter type "
					"(%d) not supported", filter_type);
				break;
			}
			TAILQ_REMOVE(&igb_flow_list,
				 igb_flow_mem_ptr,
				 entries);
			rte_free(igb_flow_mem_ptr->flow);
			rte_free(igb_flow_mem_ptr);
		}
	}
}
/* Destroy all flow rules associated with a port on igb. */
static int
igb_flow_flush(struct rte_eth_dev *dev,
		__rte_unused struct rte_flow_error *error)
{
	igb_clear_all_ntuple_filter(dev);
	igb_clear_all_ethertype_filter(dev);
	igb_clear_syn_filter(dev);
	igb_clear_all_flex_filter(dev);
	igb_filterlist_flush(dev);

	return 0;
}

const struct rte_flow_ops igb_flow_ops = {
	.validate = igb_flow_validate,
	.create = igb_flow_create,
	.destroy = igb_flow_destroy,
	.flush = igb_flow_flush,
};
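/*
 * Note (assumption based on the rest of the e1000 PMD, not shown here): this
 * ops table is handed back to the ethdev layer from the driver's filter_ctrl
 * callback in e1000_ethdev.c for RTE_ETH_FILTER_GENERIC queries, which is how
 * the generic rte_flow calls reach the handlers above.
 */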