net: add rte prefix to ether defines
diff --git a/drivers/net/e1000/igb_flow.c b/drivers/net/e1000/igb_flow.c
index ce48c0d..3429861 100644
--- a/drivers/net/e1000/igb_flow.c
+++ b/drivers/net/e1000/igb_flow.c
@@ -1,34 +1,5 @@
-/*-
- *   BSD LICENSE
- *
- *   Copyright(c) 2010-2016 Intel Corporation. All rights reserved.
- *   All rights reserved.
- *
- *   Redistribution and use in source and binary forms, with or without
- *   modification, are permitted provided that the following conditions
- *   are met:
- *
- *     * Redistributions of source code must retain the above copyright
- *       notice, this list of conditions and the following disclaimer.
- *     * Redistributions in binary form must reproduce the above copyright
- *       notice, this list of conditions and the following disclaimer in
- *       the documentation and/or other materials provided with the
- *       distribution.
- *     * Neither the name of Intel Corporation nor the names of its
- *       contributors may be used to endorse or promote products derived
- *       from this software without specific prior written permission.
- *
- *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2016 Intel Corporation
  */
 
 #include <sys/queue.h>
 #include <rte_debug.h>
 #include <rte_pci.h>
 #include <rte_ether.h>
-#include <rte_ethdev.h>
+#include <rte_ethdev_driver.h>
 #include <rte_ethdev_pci.h>
 #include <rte_memory.h>
-#include <rte_memzone.h>
 #include <rte_eal.h>
 #include <rte_atomic.h>
 #include <rte_malloc.h>
@@ -77,6 +47,8 @@
                }                                               \
        } while (0)
 
+#define        IGB_FLEX_RAW_NUM        12
+
 /**
  * Please be aware there's an assumption for all the parsers.
  * rte_flow_item is using big endian, rte_flow_attr and
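
The big-endian assumption above is easiest to see from the caller's side. A minimal sketch of filling an item spec/mask in network byte order (the helper name and address are illustrative, not part of this patch):

#include <rte_byteorder.h>
#include <rte_flow.h>

/* Pattern item fields carry network (big-endian) byte order, matching
 * the parsers' assumption; attributes and actions stay in host order. */
static void
fill_ipv4_spec(struct rte_flow_item_ipv4 *spec,
	       struct rte_flow_item_ipv4 *mask)
{
	spec->hdr.src_addr = rte_cpu_to_be_32(0xC0A80001); /* 192.168.0.1 */
	mask->hdr.src_addr = rte_cpu_to_be_32(0xFFFFFFFF);
}
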
@@ -203,7 +175,7 @@ cons_parse_ntuple_filter(const struct rte_flow_attr *attr,
                return -rte_errno;
        }
 
-       ipv4_mask = (const struct rte_flow_item_ipv4 *)item->mask;
+       ipv4_mask = item->mask;
        /**
         * Only support src & dst addresses, protocol,
         * others should be masked.
@@ -226,7 +198,7 @@ cons_parse_ntuple_filter(const struct rte_flow_attr *attr,
        filter->src_ip_mask = ipv4_mask->hdr.src_addr;
        filter->proto_mask  = ipv4_mask->hdr.next_proto_id;
 
-       ipv4_spec = (const struct rte_flow_item_ipv4 *)item->spec;
+       ipv4_spec = item->spec;
        filter->dst_ip = ipv4_spec->hdr.dst_addr;
        filter->src_ip = ipv4_spec->hdr.src_addr;
        filter->proto  = ipv4_spec->hdr.next_proto_id;
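
The casts dropped in this and the following hunks were redundant: spec/mask/last in struct rte_flow_item are const void *, which C converts implicitly to any const object pointer. A standalone sketch (helper name is illustrative):

#include <rte_flow.h>

static const struct rte_flow_item_ipv4 *
ipv4_mask_of(const struct rte_flow_item *item)
{
	return item->mask;	/* implicit const void * conversion, no cast */
}
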
@@ -256,7 +228,7 @@ cons_parse_ntuple_filter(const struct rte_flow_attr *attr,
        /* get the TCP/UDP/SCTP info */
        if (item->type == RTE_FLOW_ITEM_TYPE_TCP) {
                if (item->spec && item->mask) {
-                       tcp_mask = (const struct rte_flow_item_tcp *)item->mask;
+                       tcp_mask = item->mask;
 
                        /**
                         * Only support src & dst ports, tcp flags,
@@ -291,14 +263,14 @@ cons_parse_ntuple_filter(const struct rte_flow_attr *attr,
                                return -rte_errno;
                        }
 
-                       tcp_spec = (const struct rte_flow_item_tcp *)item->spec;
+                       tcp_spec = item->spec;
                        filter->dst_port  = tcp_spec->hdr.dst_port;
                        filter->src_port  = tcp_spec->hdr.src_port;
                        filter->tcp_flags = tcp_spec->hdr.tcp_flags;
                }
        } else if (item->type == RTE_FLOW_ITEM_TYPE_UDP) {
                if (item->spec && item->mask) {
-                       udp_mask = (const struct rte_flow_item_udp *)item->mask;
+                       udp_mask = item->mask;
 
                        /**
                         * Only support src & dst ports,
@@ -317,14 +289,13 @@ cons_parse_ntuple_filter(const struct rte_flow_attr *attr,
                        filter->dst_port_mask = udp_mask->hdr.dst_port;
                        filter->src_port_mask = udp_mask->hdr.src_port;
 
-                       udp_spec = (const struct rte_flow_item_udp *)item->spec;
+                       udp_spec = item->spec;
                        filter->dst_port = udp_spec->hdr.dst_port;
                        filter->src_port = udp_spec->hdr.src_port;
                }
        } else {
                if (item->spec && item->mask) {
-                       sctp_mask = (const struct rte_flow_item_sctp *)
-                                       item->mask;
+                       sctp_mask = item->mask;
 
                        /**
                         * Only support src & dst ports,
@@ -408,6 +379,15 @@ cons_parse_ntuple_filter(const struct rte_flow_attr *attr,
                return -rte_errno;
        }
 
+       /* not supported */
+       if (attr->transfer) {
+               memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
+               rte_flow_error_set(error, EINVAL,
+                                  RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER,
+                                  attr, "No support for transfer.");
+               return -rte_errno;
+       }
+
        if (attr->priority > 0xFFFF) {
                memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
                rte_flow_error_set(error, EINVAL,
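
From the caller's side, the check added here rejects any rule carrying the transfer attribute. A hedged sketch of probing this via rte_flow_validate() (helper name and parameters are hypothetical):

#include <rte_flow.h>

static int
probe_transfer(uint16_t port_id,
	       const struct rte_flow_item pattern[],
	       const struct rte_flow_action actions[])
{
	struct rte_flow_attr attr = {
		.ingress = 1,
		.transfer = 1,	/* rejected by the check added above */
	};
	struct rte_flow_error err;

	/* expected on igb: negative return, err.type set to
	 * RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER */
	return rte_flow_validate(port_id, &attr, pattern, actions, &err);
}
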
@@ -561,16 +541,16 @@ cons_parse_ethertype_filter(const struct rte_flow_attr *attr,
                return -rte_errno;
        }
 
-       eth_spec = (const struct rte_flow_item_eth *)item->spec;
-       eth_mask = (const struct rte_flow_item_eth *)item->mask;
+       eth_spec = item->spec;
+       eth_mask = item->mask;
 
        /* Mask bits of source MAC address must be full of 0.
         * Mask bits of destination MAC address must be full
         * of 1 or full of 0.
         */
-       if (!is_zero_ether_addr(&eth_mask->src) ||
-           (!is_zero_ether_addr(&eth_mask->dst) &&
-            !is_broadcast_ether_addr(&eth_mask->dst))) {
+       if (!rte_is_zero_ether_addr(&eth_mask->src) ||
+           (!rte_is_zero_ether_addr(&eth_mask->dst) &&
+            !rte_is_broadcast_ether_addr(&eth_mask->dst))) {
                rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ITEM,
                                item, "Invalid ether address mask");
@@ -587,7 +567,7 @@ cons_parse_ethertype_filter(const struct rte_flow_attr *attr,
        /* If mask bits of destination MAC address
         * are full of 1, set RTE_ETHTYPE_FLAGS_MAC.
         */
-       if (is_broadcast_ether_addr(&eth_mask->dst)) {
+       if (rte_is_broadcast_ether_addr(&eth_mask->dst)) {
                filter->mac_addr = eth_spec->dst;
                filter->flags |= RTE_ETHTYPE_FLAGS_MAC;
        } else {
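
The address helpers gain the rte_ prefix along with struct rte_ether_addr (renamed in the same series); behavior is unchanged. A small sketch of the mask rule the comment above describes (helper name is illustrative):

#include <rte_ether.h>

static int
dst_mask_ok(const struct rte_ether_addr *dst)
{
	/* destination MAC mask must be all-zero or all-one */
	return rte_is_zero_ether_addr(dst) ||
	       rte_is_broadcast_ether_addr(dst);
}
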
@@ -652,6 +632,14 @@ cons_parse_ethertype_filter(const struct rte_flow_attr *attr,
                return -rte_errno;
        }
 
+       /* Not supported */
+       if (attr->transfer) {
+               rte_flow_error_set(error, EINVAL,
+                               RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER,
+                               attr, "No support for transfer.");
+               return -rte_errno;
+       }
+
        /* Not supported */
        if (attr->priority) {
                rte_flow_error_set(error, EINVAL,
@@ -692,7 +680,8 @@ igb_parse_ethertype_filter(struct rte_eth_dev *dev,
 
        if (hw->mac.type == e1000_82576) {
                if (filter->queue >= IGB_MAX_RX_QUEUE_NUM_82576) {
-                       memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
+                       memset(filter, 0, sizeof(
+                                       struct rte_eth_ethertype_filter));
                        rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ITEM,
                                NULL, "queue number not supported "
@@ -701,7 +690,8 @@ igb_parse_ethertype_filter(struct rte_eth_dev *dev,
                }
        } else {
                if (filter->queue >= IGB_MAX_RX_QUEUE_NUM) {
-                       memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
+                       memset(filter, 0, sizeof(
+                                       struct rte_eth_ethertype_filter));
                        rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ITEM,
                                NULL, "queue number not supported "
@@ -710,8 +700,8 @@ igb_parse_ethertype_filter(struct rte_eth_dev *dev,
                }
        }
 
-       if (filter->ether_type == ETHER_TYPE_IPv4 ||
-               filter->ether_type == ETHER_TYPE_IPv6) {
+       if (filter->ether_type == RTE_ETHER_TYPE_IPv4 ||
+               filter->ether_type == RTE_ETHER_TYPE_IPv6) {
                memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_ITEM,
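
This hunk is the rename the commit title describes: the EtherType defines gain the rte_ prefix while keeping their values (0x0800 for IPv4, 0x86DD for IPv6). A sketch of updated caller code (helper name is illustrative):

#include <rte_ether.h>

static int
is_ip_ethertype(uint16_t ether_type)
{
	return ether_type == RTE_ETHER_TYPE_IPv4 ||
	       ether_type == RTE_ETHER_TYPE_IPv6;
}
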
@@ -874,8 +864,8 @@ cons_parse_syn_filter(const struct rte_flow_attr *attr,
                return -rte_errno;
        }
 
-       tcp_spec = (const struct rte_flow_item_tcp *)item->spec;
-       tcp_mask = (const struct rte_flow_item_tcp *)item->mask;
+       tcp_spec = item->spec;
+       tcp_mask = item->mask;
        if (!(tcp_spec->hdr.tcp_flags & TCP_SYN_FLAG) ||
            tcp_mask->hdr.src_port ||
            tcp_mask->hdr.dst_port ||
@@ -950,6 +940,15 @@ cons_parse_syn_filter(const struct rte_flow_attr *attr,
                return -rte_errno;
        }
 
+       /* not supported */
+       if (attr->transfer) {
+               memset(filter, 0, sizeof(struct rte_eth_syn_filter));
+               rte_flow_error_set(error, EINVAL,
+                       RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER,
+                       attr, "No support for transfer.");
+               return -rte_errno;
+       }
+
        /* Support 2 priorities, the lowest or highest. */
        if (!attr->priority) {
                filter->hig_pri = 0;
@@ -1043,8 +1042,11 @@ cons_parse_flex_filter(const struct rte_flow_attr *attr,
        const struct rte_flow_item_raw *raw_spec;
        const struct rte_flow_item_raw *raw_mask;
        const struct rte_flow_action_queue *act_q;
-       uint32_t index, i, offset, total_offset = 0;
-       int32_t shift;
+       uint32_t index, i, offset, total_offset;
+       uint32_t max_offset = 0;
+       int32_t shift, j, raw_index = 0;
+       int32_t relative[IGB_FLEX_RAW_NUM] = {0};
+       int32_t raw_offset[IGB_FLEX_RAW_NUM] = {0};
 
        if (!pattern) {
                rte_flow_error_set(error, EINVAL,
@@ -1088,8 +1090,8 @@ item_loop:
                return -rte_errno;
        }
 
-       raw_spec = (const struct rte_flow_item_raw *)item->spec;
-       raw_mask = (const struct rte_flow_item_raw *)item->mask;
+       raw_spec = item->spec;
+       raw_mask = item->mask;
 
        if (!raw_mask->length ||
            !raw_mask->relative) {
@@ -1105,8 +1107,8 @@ item_loop:
        else
                offset = 0;
 
-       for (index = 0; index < raw_spec->length; index++) {
-               if (raw_mask->pattern[index] != 0xFF) {
+       for (j = 0; j < raw_spec->length; j++) {
+               if (raw_mask->pattern[j] != 0xFF) {
                        memset(filter, 0, sizeof(struct rte_eth_flex_filter));
                        rte_flow_error_set(error, EINVAL,
                                        RTE_FLOW_ERROR_TYPE_ITEM,
@@ -1115,6 +1117,21 @@ item_loop:
                }
        }
 
+       total_offset = 0;
+
+       if (raw_spec->relative) {
+               for (j = raw_index; j > 0; j--) {
+                       total_offset += raw_offset[j - 1];
+                       if (!relative[j - 1])
+                               break;
+               }
+               if (total_offset + raw_spec->length + offset > max_offset)
+                       max_offset = total_offset + raw_spec->length + offset;
+       } else {
+               if (raw_spec->length + offset > max_offset)
+                       max_offset = raw_spec->length + offset;
+       }
+
        if ((raw_spec->length + offset + total_offset) >
                        RTE_FLEX_FILTER_MAXLEN) {
                memset(filter, 0, sizeof(struct rte_eth_flex_filter));
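
The accumulation above is easier to follow with concrete numbers. A standalone sketch of the same walk-back, with hypothetical item values (two items already parsed, the second one relative, now parsing a third relative item):

#include <stdint.h>
#include <stdio.h>

#define IGB_FLEX_RAW_NUM 12

int
main(void)
{
	int32_t relative[IGB_FLEX_RAW_NUM] = {0, 1};    /* item 1 was relative */
	int32_t raw_offset[IGB_FLEX_RAW_NUM] = {16, 8}; /* offset + length */
	uint32_t max_offset = 0, total_offset = 0;
	uint32_t offset = 4, length = 8;                /* current item */
	int32_t j, raw_index = 2;

	/* walk back through earlier items until a non-relative one */
	for (j = raw_index; j > 0; j--) {
		total_offset += raw_offset[j - 1];
		if (!relative[j - 1])
			break;
	}
	if (total_offset + length + offset > max_offset)
		max_offset = total_offset + length + offset;

	printf("total_offset=%u max_offset=%u\n", total_offset, max_offset);
	return 0;	/* prints total_offset=24 max_offset=36 */
}
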
@@ -1125,30 +1142,35 @@ item_loop:
        }
 
        if (raw_spec->relative == 0) {
-               for (index = 0; index < raw_spec->length; index++)
-                       filter->bytes[index] = raw_spec->pattern[index];
-               index = offset / CHAR_BIT;
+               for (j = 0; j < raw_spec->length; j++)
+                       filter->bytes[offset + j] =
+                               raw_spec->pattern[j];
+               j = offset / CHAR_BIT;
+               shift = offset % CHAR_BIT;
        } else {
-               for (index = 0; index < raw_spec->length; index++)
-                       filter->bytes[total_offset + index] =
-                               raw_spec->pattern[index];
-               index = (total_offset + offset) / CHAR_BIT;
+               for (j = 0; j < raw_spec->length; j++)
+                       filter->bytes[total_offset + offset + j] =
+                               raw_spec->pattern[j];
+               j = (total_offset + offset) / CHAR_BIT;
+               shift = (total_offset + offset) % CHAR_BIT;
        }
 
        i = 0;
 
-       for (shift = offset % CHAR_BIT; shift < CHAR_BIT; shift++) {
-               filter->mask[index] |= (0x80 >> shift);
+       for ( ; shift < CHAR_BIT; shift++) {
+               filter->mask[j] |= (0x80 >> shift);
                i++;
                if (i == raw_spec->length)
                        break;
                if (shift == (CHAR_BIT - 1)) {
-                       index++;
+                       j++;
                        shift = -1;
                }
        }
 
-       total_offset += offset + raw_spec->length;
+       relative[raw_index] = raw_spec->relative;
+       raw_offset[raw_index] = offset + raw_spec->length;
+       raw_index++;
 
        /* check if the next not void item is RAW */
        index++;
@@ -1167,7 +1189,7 @@ item_loop:
                goto item_loop;
        }
 
-       filter->len = RTE_ALIGN(total_offset, 8);
+       filter->len = RTE_ALIGN(max_offset, 8);
 
        /* parse action */
        index = 0;
@@ -1215,6 +1237,15 @@ item_loop:
                return -rte_errno;
        }
 
+       /* not supported */
+       if (attr->transfer) {
+               memset(filter, 0, sizeof(struct rte_eth_flex_filter));
+               rte_flow_error_set(error, EINVAL,
+                       RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER,
+                       attr, "No support for transfer.");
+               return -rte_errno;
+       }
+
        if (attr->priority > 0xFFFF) {
                memset(filter, 0, sizeof(struct rte_eth_flex_filter));
                rte_flow_error_set(error, EINVAL,
@@ -1269,6 +1300,126 @@ igb_parse_flex_filter(struct rte_eth_dev *dev,
        return 0;
 }
 
+static int
+igb_parse_rss_filter(struct rte_eth_dev *dev,
+                       const struct rte_flow_attr *attr,
+                       const struct rte_flow_action actions[],
+                       struct igb_rte_flow_rss_conf *rss_conf,
+                       struct rte_flow_error *error)
+{
+       struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       const struct rte_flow_action *act;
+       const struct rte_flow_action_rss *rss;
+       uint16_t n, index;
+
+       /**
+        * rss only supports forwarding,
+        * check if the first not void action is RSS.
+        */
+       index = 0;
+       NEXT_ITEM_OF_ACTION(act, actions, index);
+       if (act->type != RTE_FLOW_ACTION_TYPE_RSS) {
+               memset(rss_conf, 0, sizeof(struct igb_rte_flow_rss_conf));
+               rte_flow_error_set(error, EINVAL,
+                       RTE_FLOW_ERROR_TYPE_ACTION,
+                       act, "Not supported action.");
+               return -rte_errno;
+       }
+
+       rss = act->conf;
+
+       if (!rss || !rss->queue_num) {
+               rte_flow_error_set(error, EINVAL,
+                               RTE_FLOW_ERROR_TYPE_ACTION,
+                               act,
+                               "no valid queues");
+               return -rte_errno;
+       }
+
+       for (n = 0; n < rss->queue_num; n++) {
+               if (rss->queue[n] >= dev->data->nb_rx_queues) {
+                       rte_flow_error_set(error, EINVAL,
+                                  RTE_FLOW_ERROR_TYPE_ACTION,
+                                  act,
+                                  "queue id > max number of queues");
+                       return -rte_errno;
+               }
+       }
+
+       if (rss->func != RTE_ETH_HASH_FUNCTION_DEFAULT)
+               return rte_flow_error_set
+                       (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, act,
+                        "non-default RSS hash functions are not supported");
+       if (rss->level)
+               return rte_flow_error_set
+                       (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, act,
+                        "a nonzero RSS encapsulation level is not supported");
+       if (rss->key_len && rss->key_len != RTE_DIM(rss_conf->key))
+               return rte_flow_error_set
+                       (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, act,
+                        "RSS hash key must be exactly 40 bytes");
+       if (((hw->mac.type == e1000_82576) &&
+            (rss->queue_num > IGB_MAX_RX_QUEUE_NUM_82576)) ||
+           ((hw->mac.type != e1000_82576) &&
+            (rss->queue_num > IGB_MAX_RX_QUEUE_NUM)))
+               return rte_flow_error_set
+                       (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, act,
+                        "too many queues for RSS context");
+       if (igb_rss_conf_init(dev, rss_conf, rss))
+               return rte_flow_error_set
+                       (error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION, act,
+                        "RSS context initialization failure");
+
+       /* check if the next not void item is END */
+       index++;
+       NEXT_ITEM_OF_ACTION(act, actions, index);
+       if (act->type != RTE_FLOW_ACTION_TYPE_END) {
+               memset(rss_conf, 0, sizeof(struct igb_rte_flow_rss_conf));
+               rte_flow_error_set(error, EINVAL,
+                       RTE_FLOW_ERROR_TYPE_ACTION,
+                       act, "Not supported action.");
+               return -rte_errno;
+       }
+
+       /* parse attr */
+       /* must be input direction */
+       if (!attr->ingress) {
+               memset(rss_conf, 0, sizeof(struct igb_rte_flow_rss_conf));
+               rte_flow_error_set(error, EINVAL,
+                                  RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
+                                  attr, "Only support ingress.");
+               return -rte_errno;
+       }
+
+       /* not supported */
+       if (attr->egress) {
+               memset(rss_conf, 0, sizeof(struct igb_rte_flow_rss_conf));
+               rte_flow_error_set(error, EINVAL,
+                                  RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
+                                  attr, "Not support egress.");
+               return -rte_errno;
+       }
+
+       /* not supported */
+       if (attr->transfer) {
+               memset(rss_conf, 0, sizeof(struct igb_rte_flow_rss_conf));
+               rte_flow_error_set(error, EINVAL,
+                                  RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER,
+                                  attr, "No support for transfer.");
+               return -rte_errno;
+       }
+
+       if (attr->priority > 0xFFFF) {
+               memset(rss_conf, 0, sizeof(struct igb_rte_flow_rss_conf));
+               rte_flow_error_set(error, EINVAL,
+                                  RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
+                                  attr, "Error priority.");
+               return -rte_errno;
+       }
+
+       return 0;
+}
+
 /**
  * Create a flow rule.
  * Theoretically one rule can match more than one filter.
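
The large hunk above adds igb_parse_rss_filter(). For reference, a sketch of an RSS action list it accepts: default hash function, level 0, no key override, and queues within the device's Rx queue count (queue ids and hash types are illustrative):

#include <rte_common.h>
#include <rte_ethdev.h>
#include <rte_flow.h>

static const uint16_t rss_queues[] = { 0, 1, 2, 3 };

static const struct rte_flow_action_rss rss_act = {
	.func = RTE_ETH_HASH_FUNCTION_DEFAULT,	/* only default accepted */
	.level = 0,				/* outermost headers only */
	.types = ETH_RSS_IP,
	.key_len = 0,				/* keep the device's key */
	.queue_num = RTE_DIM(rss_queues),
	.queue = rss_queues,
};

static const struct rte_flow_action rss_actions[] = {
	{ .type = RTE_FLOW_ACTION_TYPE_RSS, .conf = &rss_act },
	{ .type = RTE_FLOW_ACTION_TYPE_END },
};
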
@@ -1287,11 +1438,13 @@ igb_flow_create(struct rte_eth_dev *dev,
        struct rte_eth_ethertype_filter ethertype_filter;
        struct rte_eth_syn_filter syn_filter;
        struct rte_eth_flex_filter flex_filter;
+       struct igb_rte_flow_rss_conf rss_conf;
        struct rte_flow *flow = NULL;
        struct igb_ntuple_filter_ele *ntuple_filter_ptr;
        struct igb_ethertype_filter_ele *ethertype_filter_ptr;
        struct igb_eth_syn_filter_ele *syn_filter_ptr;
        struct igb_flex_filter_ele *flex_filter_ptr;
+       struct igb_rss_conf_ele *rss_filter_ptr;
        struct igb_flow_mem *igb_flow_mem_ptr;
 
        flow = rte_zmalloc("igb_rte_flow", sizeof(struct rte_flow), 0);
@@ -1319,7 +1472,12 @@ igb_flow_create(struct rte_eth_dev *dev,
                if (!ret) {
                        ntuple_filter_ptr = rte_zmalloc("igb_ntuple_filter",
                                sizeof(struct igb_ntuple_filter_ele), 0);
-                       (void)rte_memcpy(&ntuple_filter_ptr->filter_info,
+                       if (!ntuple_filter_ptr) {
+                               PMD_DRV_LOG(ERR, "failed to allocate memory");
+                               goto out;
+                       }
+
+                       rte_memcpy(&ntuple_filter_ptr->filter_info,
                                &ntuple_filter,
                                sizeof(struct rte_eth_ntuple_filter));
                        TAILQ_INSERT_TAIL(&igb_filter_ntuple_list,
@@ -1341,7 +1499,12 @@ igb_flow_create(struct rte_eth_dev *dev,
                        ethertype_filter_ptr = rte_zmalloc(
                                "igb_ethertype_filter",
                                sizeof(struct igb_ethertype_filter_ele), 0);
-                       (void)rte_memcpy(&ethertype_filter_ptr->filter_info,
+                       if (!ethertype_filter_ptr) {
+                               PMD_DRV_LOG(ERR, "failed to allocate memory");
+                               goto out;
+                       }
+
+                       rte_memcpy(&ethertype_filter_ptr->filter_info,
                                &ethertype_filter,
                                sizeof(struct rte_eth_ethertype_filter));
                        TAILQ_INSERT_TAIL(&igb_filter_ethertype_list,
@@ -1361,7 +1524,12 @@ igb_flow_create(struct rte_eth_dev *dev,
                if (!ret) {
                        syn_filter_ptr = rte_zmalloc("igb_syn_filter",
                                sizeof(struct igb_eth_syn_filter_ele), 0);
-                       (void)rte_memcpy(&syn_filter_ptr->filter_info,
+                       if (!syn_filter_ptr) {
+                               PMD_DRV_LOG(ERR, "failed to allocate memory");
+                               goto out;
+                       }
+
+                       rte_memcpy(&syn_filter_ptr->filter_info,
                                &syn_filter,
                                sizeof(struct rte_eth_syn_filter));
                        TAILQ_INSERT_TAIL(&igb_filter_syn_list,
@@ -1382,7 +1550,12 @@ igb_flow_create(struct rte_eth_dev *dev,
                if (!ret) {
                        flex_filter_ptr = rte_zmalloc("igb_flex_filter",
                                sizeof(struct igb_flex_filter_ele), 0);
-                       (void)rte_memcpy(&flex_filter_ptr->filter_info,
+                       if (!flex_filter_ptr) {
+                               PMD_DRV_LOG(ERR, "failed to allocate memory");
+                               goto out;
+                       }
+
+                       rte_memcpy(&flex_filter_ptr->filter_info,
                                &flex_filter,
                                sizeof(struct rte_eth_flex_filter));
                        TAILQ_INSERT_TAIL(&igb_filter_flex_list,
@@ -1393,6 +1566,28 @@ igb_flow_create(struct rte_eth_dev *dev,
                }
        }
 
+       memset(&rss_conf, 0, sizeof(struct igb_rte_flow_rss_conf));
+       ret = igb_parse_rss_filter(dev, attr,
+                                       actions, &rss_conf, error);
+       if (!ret) {
+               ret = igb_config_rss_filter(dev, &rss_conf, TRUE);
+               if (!ret) {
+                       rss_filter_ptr = rte_zmalloc("igb_rss_filter",
+                               sizeof(struct igb_rss_conf_ele), 0);
+                       if (!rss_filter_ptr) {
+                               PMD_DRV_LOG(ERR, "failed to allocate memory");
+                               goto out;
+                       }
+                       igb_rss_conf_init(dev, &rss_filter_ptr->filter_info,
+                                         &rss_conf.conf);
+                       TAILQ_INSERT_TAIL(&igb_filter_rss_list,
+                               rss_filter_ptr, entries);
+                       flow->rule = rss_filter_ptr;
+                       flow->filter_type = RTE_ETH_FILTER_HASH;
+                       return flow;
+               }
+       }
+
 out:
        TAILQ_REMOVE(&igb_flow_list,
                igb_flow_mem_ptr, entries);
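
End to end, the new RSS branch is reached through rte_flow_create() once the other parsers reject the rule. A hedged sketch using an action list like the one above (helper name is hypothetical):

#include <rte_flow.h>

static struct rte_flow *
create_rss_flow(uint16_t port_id, const struct rte_flow_action actions[])
{
	struct rte_flow_attr attr = { .ingress = 1 };
	struct rte_flow_item pattern[] = {
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};
	struct rte_flow_error err;

	/* on success the flow is tagged RTE_ETH_FILTER_HASH and linked
	 * into igb_filter_rss_list by the branch above */
	return rte_flow_create(port_id, &attr, pattern, actions, &err);
}
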
@@ -1420,6 +1615,7 @@ igb_flow_validate(__rte_unused struct rte_eth_dev *dev,
        struct rte_eth_ethertype_filter ethertype_filter;
        struct rte_eth_syn_filter syn_filter;
        struct rte_eth_flex_filter flex_filter;
+       struct igb_rte_flow_rss_conf rss_conf;
        int ret;
 
        memset(&ntuple_filter, 0, sizeof(struct rte_eth_ntuple_filter));
@@ -1443,6 +1639,12 @@ igb_flow_validate(__rte_unused struct rte_eth_dev *dev,
        memset(&flex_filter, 0, sizeof(struct rte_eth_flex_filter));
        ret = igb_parse_flex_filter(dev, attr, pattern,
                                actions, &flex_filter, error);
+       if (!ret)
+               return 0;
+
+       memset(&rss_conf, 0, sizeof(struct igb_rte_flow_rss_conf));
+       ret = igb_parse_rss_filter(dev, attr,
+                                       actions, &rss_conf, error);
 
        return ret;
 }
@@ -1461,6 +1663,7 @@ igb_flow_destroy(struct rte_eth_dev *dev,
        struct igb_eth_syn_filter_ele *syn_filter_ptr;
        struct igb_flex_filter_ele *flex_filter_ptr;
        struct igb_flow_mem *igb_flow_mem_ptr;
+       struct igb_rss_conf_ele *rss_filter_ptr;
 
        switch (filter_type) {
        case RTE_ETH_FILTER_NTUPLE:
@@ -1507,6 +1710,17 @@ igb_flow_destroy(struct rte_eth_dev *dev,
                        rte_free(flex_filter_ptr);
                }
                break;
+       case RTE_ETH_FILTER_HASH:
+               rss_filter_ptr = (struct igb_rss_conf_ele *)
+                               pmd_flow->rule;
+               ret = igb_config_rss_filter(dev,
+                                       &rss_filter_ptr->filter_info, FALSE);
+               if (!ret) {
+                       TAILQ_REMOVE(&igb_filter_rss_list,
+                               rss_filter_ptr, entries);
+                       rte_free(rss_filter_ptr);
+               }
+               break;
        default:
                PMD_DRV_LOG(WARNING, "Filter type (%d) not supported",
                            filter_type);
@@ -1595,6 +1809,17 @@ igb_clear_all_flex_filter(struct rte_eth_dev *dev)
                igb_remove_flex_filter(dev, flex_filter);
 }
 
+/* remove the rss filter */
+static void
+igb_clear_rss_filter(struct rte_eth_dev *dev)
+{
+       struct e1000_filter_info *filter =
+               E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
+
+       if (filter->rss_info.conf.queue_num)
+               igb_config_rss_filter(dev, &filter->rss_info, FALSE);
+}
+
 void
 igb_filterlist_flush(struct rte_eth_dev *dev)
 {
@@ -1602,6 +1827,7 @@ igb_filterlist_flush(struct rte_eth_dev *dev)
        struct igb_ethertype_filter_ele *ethertype_filter_ptr;
        struct igb_eth_syn_filter_ele *syn_filter_ptr;
        struct igb_flex_filter_ele *flex_filter_ptr;
+       struct igb_rss_conf_ele  *rss_filter_ptr;
        struct igb_flow_mem *igb_flow_mem_ptr;
        enum rte_filter_type filter_type;
        struct rte_flow *pmd_flow;
@@ -1644,6 +1870,14 @@ igb_filterlist_flush(struct rte_eth_dev *dev)
                                                flex_filter_ptr, entries);
                                rte_free(flex_filter_ptr);
                                break;
+                       case RTE_ETH_FILTER_HASH:
+                               rss_filter_ptr =
+                                       (struct igb_rss_conf_ele *)
+                                               pmd_flow->rule;
+                               TAILQ_REMOVE(&igb_filter_rss_list,
+                                               rss_filter_ptr, entries);
+                               rte_free(rss_filter_ptr);
+                               break;
                        default:
                                PMD_DRV_LOG(WARNING, "Filter type "
                                        "(%d) not supported", filter_type);
@@ -1667,15 +1901,15 @@ igb_flow_flush(struct rte_eth_dev *dev,
        igb_clear_all_ethertype_filter(dev);
        igb_clear_syn_filter(dev);
        igb_clear_all_flex_filter(dev);
+       igb_clear_rss_filter(dev);
        igb_filterlist_flush(dev);
 
        return 0;
 }
 
 const struct rte_flow_ops igb_flow_ops = {
-       igb_flow_validate,
-       igb_flow_create,
-       igb_flow_destroy,
-       igb_flow_flush,
-       NULL,
+       .validate = igb_flow_validate,
+       .create = igb_flow_create,
+       .destroy = igb_flow_destroy,
+       .flush = igb_flow_flush,
 };
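
The switch to designated initializers binds each callback to its field by name, so the table stays correct if struct rte_flow_ops gains or reorders members; the old positional form (with a trailing NULL) silently depended on field order. A toy illustration:

struct toy_ops {
	int (*validate)(void);
	int (*create)(void);
};

static int toy_validate(void) { return 0; }

/* name-bound: unaffected by member order or later additions */
static const struct toy_ops toy_table = {
	.validate = toy_validate,
};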