diff --git a/drivers/net/ixgbe/ixgbe_flow.c b/drivers/net/ixgbe/ixgbe_flow.c
index dcbfb38..511b612 100644
--- a/drivers/net/ixgbe/ixgbe_flow.c
+++ b/drivers/net/ixgbe/ixgbe_flow.c
@@ -25,7 +25,7 @@
 #include <rte_eal.h>
 #include <rte_alarm.h>
 #include <rte_ether.h>
-#include <rte_ethdev_driver.h>
+#include <ethdev_driver.h>
 #include <rte_malloc.h>
 #include <rte_random.h>
 #include <rte_dev.h>
@@ -72,7 +72,7 @@ struct ixgbe_fdir_rule_ele {
 /* l2_tunnel filter list structure */
 struct ixgbe_eth_l2_tunnel_conf_ele {
        TAILQ_ENTRY(ixgbe_eth_l2_tunnel_conf_ele) entries;
-       struct rte_eth_l2_tunnel_conf filter_info;
+       struct ixgbe_l2_tunnel_conf filter_info;
 };
 /* rss filter list structure */
 struct ixgbe_rss_conf_ele {
@@ -215,7 +215,7 @@ cons_parse_ntuple_filter(const struct rte_flow_attr *attr,
        memset(&eth_null, 0, sizeof(struct rte_flow_item_eth));
        memset(&vlan_null, 0, sizeof(struct rte_flow_item_vlan));
 
-#ifdef RTE_LIBRTE_SECURITY
+#ifdef RTE_LIB_SECURITY
        /**
         *  Special case for flow action type RTE_FLOW_ACTION_TYPE_SECURITY
         */
@@ -264,8 +264,8 @@ cons_parse_ntuple_filter(const struct rte_flow_attr *attr,
        }
        /* Skip Ethernet */
        if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
-               eth_spec = (const struct rte_flow_item_eth *)item->spec;
-               eth_mask = (const struct rte_flow_item_eth *)item->mask;
+               eth_spec = item->spec;
+               eth_mask = item->mask;
                /*Not supported last point for range*/
                if (item->last) {
                        rte_flow_error_set(error,
@@ -298,8 +298,8 @@ cons_parse_ntuple_filter(const struct rte_flow_attr *attr,
        }
 
        if (item->type == RTE_FLOW_ITEM_TYPE_VLAN) {
-               vlan_spec = (const struct rte_flow_item_vlan *)item->spec;
-               vlan_mask = (const struct rte_flow_item_vlan *)item->mask;
+               vlan_spec = item->spec;
+               vlan_mask = item->mask;
                /*Not supported last point for range*/
                if (item->last) {
                        rte_flow_error_set(error,
@@ -346,7 +346,7 @@ cons_parse_ntuple_filter(const struct rte_flow_attr *attr,
                        return -rte_errno;
                }
 
-               ipv4_mask = (const struct rte_flow_item_ipv4 *)item->mask;
+               ipv4_mask = item->mask;
                /**
                 * Only support src & dst addresses, protocol,
                 * others should be masked.
@@ -363,12 +363,23 @@ cons_parse_ntuple_filter(const struct rte_flow_attr *attr,
                                item, "Not supported by ntuple filter");
                        return -rte_errno;
                }
+               if ((ipv4_mask->hdr.src_addr != 0 &&
+                       ipv4_mask->hdr.src_addr != UINT32_MAX) ||
+                       (ipv4_mask->hdr.dst_addr != 0 &&
+                       ipv4_mask->hdr.dst_addr != UINT32_MAX) ||
+                       (ipv4_mask->hdr.next_proto_id != UINT8_MAX &&
+                       ipv4_mask->hdr.next_proto_id != 0)) {
+                       rte_flow_error_set(error,
+                               EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
+                               item, "Not supported by ntuple filter");
+                       return -rte_errno;
+               }
 
                filter->dst_ip_mask = ipv4_mask->hdr.dst_addr;
                filter->src_ip_mask = ipv4_mask->hdr.src_addr;
                filter->proto_mask  = ipv4_mask->hdr.next_proto_id;
 
-               ipv4_spec = (const struct rte_flow_item_ipv4 *)item->spec;
+               ipv4_spec = item->spec;
                filter->dst_ip = ipv4_spec->hdr.dst_addr;
                filter->src_ip = ipv4_spec->hdr.src_addr;
                filter->proto  = ipv4_spec->hdr.next_proto_id;
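Annotation: the three field checks added above enforce an all-or-nothing mask
rule, and the TCP and UDP hunks below add the same rule for ports: the ntuple
filter can only match a field exactly or ignore it, so any partial mask is
rejected. A minimal standalone sketch of the invariant, with hypothetical
helper names that do not exist in the driver:

    #include <stdbool.h>
    #include <stdint.h>

    /* Hypothetical helpers: a mask is acceptable only if it is empty
     * (field ignored) or full (field matched exactly). */
    static bool mask_all_or_none_u32(uint32_t m)
    {
            return m == 0 || m == UINT32_MAX;
    }

    static bool mask_all_or_none_u8(uint8_t m)
    {
            return m == 0 || m == UINT8_MAX;
    }

    /* The IPv4 check above is then equivalent to rejecting the item when
     * !mask_all_or_none_u32(src_addr) || !mask_all_or_none_u32(dst_addr)
     * || !mask_all_or_none_u8(next_proto_id). */
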
@@ -413,7 +424,7 @@ cons_parse_ntuple_filter(const struct rte_flow_attr *attr,
        }
 
        if (item->type == RTE_FLOW_ITEM_TYPE_TCP) {
-               tcp_mask = (const struct rte_flow_item_tcp *)item->mask;
+               tcp_mask = item->mask;
 
                /**
                 * Only support src & dst ports, tcp flags,
@@ -432,6 +443,15 @@ cons_parse_ntuple_filter(const struct rte_flow_attr *attr,
                                item, "Not supported by ntuple filter");
                        return -rte_errno;
                }
+               if ((tcp_mask->hdr.src_port != 0 &&
+                       tcp_mask->hdr.src_port != UINT16_MAX) ||
+                       (tcp_mask->hdr.dst_port != 0 &&
+                       tcp_mask->hdr.dst_port != UINT16_MAX)) {
+                       rte_flow_error_set(error,
+                               EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
+                               item, "Not supported by ntuple filter");
+                       return -rte_errno;
+               }
 
                filter->dst_port_mask  = tcp_mask->hdr.dst_port;
                filter->src_port_mask  = tcp_mask->hdr.src_port;
@@ -447,12 +467,12 @@ cons_parse_ntuple_filter(const struct rte_flow_attr *attr,
                        return -rte_errno;
                }
 
-               tcp_spec = (const struct rte_flow_item_tcp *)item->spec;
+               tcp_spec = item->spec;
                filter->dst_port  = tcp_spec->hdr.dst_port;
                filter->src_port  = tcp_spec->hdr.src_port;
                filter->tcp_flags = tcp_spec->hdr.tcp_flags;
        } else if (item->type == RTE_FLOW_ITEM_TYPE_UDP) {
-               udp_mask = (const struct rte_flow_item_udp *)item->mask;
+               udp_mask = item->mask;
 
                /**
                 * Only support src & dst ports,
@@ -467,15 +487,24 @@ cons_parse_ntuple_filter(const struct rte_flow_attr *attr,
                                item, "Not supported by ntuple filter");
                        return -rte_errno;
                }
+               if ((udp_mask->hdr.src_port != 0 &&
+                       udp_mask->hdr.src_port != UINT16_MAX) ||
+                       (udp_mask->hdr.dst_port != 0 &&
+                       udp_mask->hdr.dst_port != UINT16_MAX)) {
+                       rte_flow_error_set(error,
+                               EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
+                               item, "Not supported by ntuple filter");
+                       return -rte_errno;
+               }
 
                filter->dst_port_mask = udp_mask->hdr.dst_port;
                filter->src_port_mask = udp_mask->hdr.src_port;
 
-               udp_spec = (const struct rte_flow_item_udp *)item->spec;
+               udp_spec = item->spec;
                filter->dst_port = udp_spec->hdr.dst_port;
                filter->src_port = udp_spec->hdr.src_port;
        } else if (item->type == RTE_FLOW_ITEM_TYPE_SCTP) {
-               sctp_mask = (const struct rte_flow_item_sctp *)item->mask;
+               sctp_mask = item->mask;
 
                /**
                 * Only support src & dst ports,
@@ -494,7 +523,7 @@ cons_parse_ntuple_filter(const struct rte_flow_attr *attr,
                filter->dst_port_mask = sctp_mask->hdr.dst_port;
                filter->src_port_mask = sctp_mask->hdr.src_port;
 
-               sctp_spec = (const struct rte_flow_item_sctp *)item->spec;
+               sctp_spec = item->spec;
                filter->dst_port = sctp_spec->hdr.dst_port;
                filter->src_port = sctp_spec->hdr.src_port;
        } else {
@@ -557,6 +586,15 @@ action:
                return -rte_errno;
        }
 
+       /* not supported */
+       if (attr->transfer) {
+               memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
+               rte_flow_error_set(error, EINVAL,
+                                  RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER,
+                                  attr, "No support for transfer.");
+               return -rte_errno;
+       }
+
        if (attr->priority > 0xFFFF) {
                memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
                rte_flow_error_set(error, EINVAL,
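Annotation: this is the first of several identical additions; every parser in
this file (ntuple here, then ethertype, SYN, L2 tunnel, FDIR and RSS below)
now rejects the transfer attribute with RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER.
From the application side the failure mode looks like the following sketch;
port_id, pattern[] and actions[] are placeholders, not part of this patch:

    #include <stdio.h>
    #include <rte_flow.h>

    static int
    try_transfer_rule(uint16_t port_id,
                      const struct rte_flow_item pattern[],
                      const struct rte_flow_action actions[])
    {
            /* .transfer = 1 is what the new checks reject. */
            struct rte_flow_attr attr = { .ingress = 1, .transfer = 1 };
            struct rte_flow_error err = { 0 };

            if (rte_flow_validate(port_id, &attr, pattern, actions, &err)) {
                    printf("rejected: %s\n",
                           err.message ? err.message : "(no message)");
                    return -1;
            }
            return 0;
    }
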
@@ -591,7 +629,7 @@ ixgbe_parse_ntuple_filter(struct rte_eth_dev *dev,
        if (ret)
                return ret;
 
-#ifdef RTE_LIBRTE_SECURITY
+#ifdef RTE_LIB_SECURITY
        /* ESP flow not really a flow*/
        if (filter->proto == IPPROTO_ESP)
                return 0;
@@ -699,16 +737,16 @@ cons_parse_ethertype_filter(const struct rte_flow_attr *attr,
                return -rte_errno;
        }
 
-       eth_spec = (const struct rte_flow_item_eth *)item->spec;
-       eth_mask = (const struct rte_flow_item_eth *)item->mask;
+       eth_spec = item->spec;
+       eth_mask = item->mask;
 
        /* Mask bits of source MAC address must be full of 0.
         * Mask bits of destination MAC address must be full
         * of 1 or full of 0.
         */
-       if (!is_zero_ether_addr(&eth_mask->src) ||
-           (!is_zero_ether_addr(&eth_mask->dst) &&
-            !is_broadcast_ether_addr(&eth_mask->dst))) {
+       if (!rte_is_zero_ether_addr(&eth_mask->src) ||
+           (!rte_is_zero_ether_addr(&eth_mask->dst) &&
+            !rte_is_broadcast_ether_addr(&eth_mask->dst))) {
                rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ITEM,
                                item, "Invalid ether address mask");
@@ -725,7 +763,7 @@ cons_parse_ethertype_filter(const struct rte_flow_attr *attr,
        /* If mask bits of destination MAC address
         * are full of 1, set RTE_ETHTYPE_FLAGS_MAC.
         */
-       if (is_broadcast_ether_addr(&eth_mask->dst)) {
+       if (rte_is_broadcast_ether_addr(&eth_mask->dst)) {
                filter->mac_addr = eth_spec->dst;
                filter->flags |= RTE_ETHTYPE_FLAGS_MAC;
        } else {
@@ -786,6 +824,14 @@ cons_parse_ethertype_filter(const struct rte_flow_attr *attr,
                return -rte_errno;
        }
 
+       /* Not supported */
+       if (attr->transfer) {
+               rte_flow_error_set(error, EINVAL,
+                               RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER,
+                               attr, "No support for transfer.");
+               return -rte_errno;
+       }
+
        /* Not supported */
        if (attr->priority) {
                rte_flow_error_set(error, EINVAL,
@@ -824,15 +870,6 @@ ixgbe_parse_ethertype_filter(struct rte_eth_dev *dev,
        if (ret)
                return ret;
 
-       /* Ixgbe doesn't support MAC address. */
-       if (filter->flags & RTE_ETHTYPE_FLAGS_MAC) {
-               memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
-               rte_flow_error_set(error, EINVAL,
-                       RTE_FLOW_ERROR_TYPE_ITEM,
-                       NULL, "Not supported by ethertype filter");
-               return -rte_errno;
-       }
-
        if (filter->queue >= dev->data->nb_rx_queues) {
                memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
                rte_flow_error_set(error, EINVAL,
@@ -841,8 +878,8 @@ ixgbe_parse_ethertype_filter(struct rte_eth_dev *dev,
                return -rte_errno;
        }
 
-       if (filter->ether_type == ETHER_TYPE_IPv4 ||
-               filter->ether_type == ETHER_TYPE_IPv6) {
+       if (filter->ether_type == RTE_ETHER_TYPE_IPV4 ||
+               filter->ether_type == RTE_ETHER_TYPE_IPV6) {
                memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_ITEM,
@@ -1000,15 +1037,15 @@ cons_parse_syn_filter(const struct rte_flow_attr *attr,
                return -rte_errno;
        }
 
-       tcp_spec = (const struct rte_flow_item_tcp *)item->spec;
-       tcp_mask = (const struct rte_flow_item_tcp *)item->mask;
-       if (!(tcp_spec->hdr.tcp_flags & TCP_SYN_FLAG) ||
+       tcp_spec = item->spec;
+       tcp_mask = item->mask;
+       if (!(tcp_spec->hdr.tcp_flags & RTE_TCP_SYN_FLAG) ||
            tcp_mask->hdr.src_port ||
            tcp_mask->hdr.dst_port ||
            tcp_mask->hdr.sent_seq ||
            tcp_mask->hdr.recv_ack ||
            tcp_mask->hdr.data_off ||
-           tcp_mask->hdr.tcp_flags != TCP_SYN_FLAG ||
+           tcp_mask->hdr.tcp_flags != RTE_TCP_SYN_FLAG ||
            tcp_mask->hdr.rx_win ||
            tcp_mask->hdr.cksum ||
            tcp_mask->hdr.tcp_urp) {
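Annotation: beyond the RTE_TCP_SYN_FLAG rename, the condition above pins down
what the SYN filter accepts: the spec must have the SYN bit set and the mask
must cover exactly the flags byte, with every other TCP header field left
unmasked. A pattern item an application would build to satisfy it (a sketch,
not part of this patch):

    #include <rte_flow.h>
    #include <rte_tcp.h>

    /* Spec: SYN bit set. Mask: only tcp_flags is significant, and it
     * must equal RTE_TCP_SYN_FLAG exactly per the check above. */
    static const struct rte_flow_item_tcp syn_spec = {
            .hdr.tcp_flags = RTE_TCP_SYN_FLAG,
    };
    static const struct rte_flow_item_tcp syn_mask = {
            .hdr.tcp_flags = RTE_TCP_SYN_FLAG,
    };
    static const struct rte_flow_item syn_item = {
            .type = RTE_FLOW_ITEM_TYPE_TCP,
            .spec = &syn_spec,
            .mask = &syn_mask,
    };
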
@@ -1078,6 +1115,15 @@ cons_parse_syn_filter(const struct rte_flow_attr *attr,
                return -rte_errno;
        }
 
+       /* not supported */
+       if (attr->transfer) {
+               memset(filter, 0, sizeof(struct rte_eth_syn_filter));
+               rte_flow_error_set(error, EINVAL,
+                       RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER,
+                       attr, "No support for transfer.");
+               return -rte_errno;
+       }
+
        /* Support 2 priorities, the lowest or highest. */
        if (!attr->priority) {
                filter->hig_pri = 0;
@@ -1142,7 +1188,7 @@ cons_parse_l2_tn_filter(struct rte_eth_dev *dev,
                        const struct rte_flow_attr *attr,
                        const struct rte_flow_item pattern[],
                        const struct rte_flow_action actions[],
-                       struct rte_eth_l2_tunnel_conf *filter,
+                       struct ixgbe_l2_tunnel_conf *filter,
                        struct rte_flow_error *error)
 {
        const struct rte_flow_item *item;
@@ -1176,7 +1222,7 @@ cons_parse_l2_tn_filter(struct rte_eth_dev *dev,
        /* The first not void item should be e-tag. */
        item = next_no_void_pattern(pattern, NULL);
        if (item->type != RTE_FLOW_ITEM_TYPE_E_TAG) {
-               memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
+               memset(filter, 0, sizeof(struct ixgbe_l2_tunnel_conf));
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_ITEM,
                        item, "Not supported by L2 tunnel filter");
@@ -1184,7 +1230,7 @@ cons_parse_l2_tn_filter(struct rte_eth_dev *dev,
        }
 
        if (!item->spec || !item->mask) {
-               memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
+               memset(filter, 0, sizeof(struct ixgbe_l2_tunnel_conf));
                rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
                        item, "Not supported by L2 tunnel filter");
                return -rte_errno;
@@ -1198,15 +1244,15 @@ cons_parse_l2_tn_filter(struct rte_eth_dev *dev,
                return -rte_errno;
        }
 
-       e_tag_spec = (const struct rte_flow_item_e_tag *)item->spec;
-       e_tag_mask = (const struct rte_flow_item_e_tag *)item->mask;
+       e_tag_spec = item->spec;
+       e_tag_mask = item->mask;
 
        /* Only care about GRP and E cid base. */
        if (e_tag_mask->epcp_edei_in_ecid_b ||
            e_tag_mask->in_ecid_e ||
            e_tag_mask->ecid_e ||
            e_tag_mask->rsvd_grp_ecid_b != rte_cpu_to_be_16(0x3FFF)) {
-               memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
+               memset(filter, 0, sizeof(struct ixgbe_l2_tunnel_conf));
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_ITEM,
                        item, "Not supported by L2 tunnel filter");
@@ -1223,7 +1269,7 @@ cons_parse_l2_tn_filter(struct rte_eth_dev *dev,
        /* check if the next not void item is END */
        item = next_no_void_pattern(pattern, item);
        if (item->type != RTE_FLOW_ITEM_TYPE_END) {
-               memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
+               memset(filter, 0, sizeof(struct ixgbe_l2_tunnel_conf));
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_ITEM,
                        item, "Not supported by L2 tunnel filter");
@@ -1233,7 +1279,7 @@ cons_parse_l2_tn_filter(struct rte_eth_dev *dev,
        /* parse attr */
        /* must be input direction */
        if (!attr->ingress) {
-               memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
+               memset(filter, 0, sizeof(struct ixgbe_l2_tunnel_conf));
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
                        attr, "Only support ingress.");
@@ -1242,16 +1288,25 @@ cons_parse_l2_tn_filter(struct rte_eth_dev *dev,
 
        /* not supported */
        if (attr->egress) {
-               memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
+               memset(filter, 0, sizeof(struct ixgbe_l2_tunnel_conf));
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
                        attr, "Not support egress.");
                return -rte_errno;
        }
 
+       /* not supported */
+       if (attr->transfer) {
+               memset(filter, 0, sizeof(struct ixgbe_l2_tunnel_conf));
+               rte_flow_error_set(error, EINVAL,
+                       RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER,
+                       attr, "No support for transfer.");
+               return -rte_errno;
+       }
+
        /* not supported */
        if (attr->priority) {
-               memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
+               memset(filter, 0, sizeof(struct ixgbe_l2_tunnel_conf));
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
                        attr, "Not support priority.");
@@ -1262,7 +1317,7 @@ cons_parse_l2_tn_filter(struct rte_eth_dev *dev,
        act = next_no_void_action(actions, NULL);
        if (act->type != RTE_FLOW_ACTION_TYPE_VF &&
                        act->type != RTE_FLOW_ACTION_TYPE_PF) {
-               memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
+               memset(filter, 0, sizeof(struct ixgbe_l2_tunnel_conf));
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_ACTION,
                        act, "Not supported action.");
@@ -1279,7 +1334,7 @@ cons_parse_l2_tn_filter(struct rte_eth_dev *dev,
        /* check if the next not void item is END */
        act = next_no_void_action(actions, act);
        if (act->type != RTE_FLOW_ACTION_TYPE_END) {
-               memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
+               memset(filter, 0, sizeof(struct ixgbe_l2_tunnel_conf));
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_ACTION,
                        act, "Not supported action.");
@@ -1294,7 +1349,7 @@ ixgbe_parse_l2_tn_filter(struct rte_eth_dev *dev,
                        const struct rte_flow_attr *attr,
                        const struct rte_flow_item pattern[],
                        const struct rte_flow_action actions[],
-                       struct rte_eth_l2_tunnel_conf *l2_tn_filter,
+                       struct ixgbe_l2_tunnel_conf *l2_tn_filter,
                        struct rte_flow_error *error)
 {
        int ret = 0;
@@ -1308,7 +1363,7 @@ ixgbe_parse_l2_tn_filter(struct rte_eth_dev *dev,
        if (hw->mac.type != ixgbe_mac_X550 &&
                hw->mac.type != ixgbe_mac_X550EM_x &&
                hw->mac.type != ixgbe_mac_X550EM_a) {
-               memset(l2_tn_filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
+               memset(l2_tn_filter, 0, sizeof(struct ixgbe_l2_tunnel_conf));
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_ITEM,
                        NULL, "Not supported by L2 tunnel filter");
@@ -1353,6 +1408,15 @@ ixgbe_parse_fdir_act_attr(const struct rte_flow_attr *attr,
                return -rte_errno;
        }
 
+       /* not supported */
+       if (attr->transfer) {
+               memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
+               rte_flow_error_set(error, EINVAL,
+                       RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER,
+                       attr, "No support for transfer.");
+               return -rte_errno;
+       }
+
        /* not supported */
        if (attr->priority) {
                memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
@@ -1447,12 +1511,9 @@ static inline uint8_t signature_match(const struct rte_flow_item pattern[])
                        break;
 
                if (item->type == RTE_FLOW_ITEM_TYPE_FUZZY) {
-                       spec =
-                       (const struct rte_flow_item_fuzzy *)item->spec;
-                       last =
-                       (const struct rte_flow_item_fuzzy *)item->last;
-                       mask =
-                       (const struct rte_flow_item_fuzzy *)item->mask;
+                       spec = item->spec;
+                       last = item->last;
+                       mask = item->mask;
 
                        if (!spec || !mask)
                                return 0;
@@ -1632,10 +1693,10 @@ ixgbe_parse_fdir_filter_normal(struct rte_eth_dev *dev,
 
                if (item->spec) {
                        rule->b_spec = TRUE;
-                       eth_spec = (const struct rte_flow_item_eth *)item->spec;
+                       eth_spec = item->spec;
 
                        /* Get the dst MAC. */
-                       for (j = 0; j < ETHER_ADDR_LEN; j++) {
+                       for (j = 0; j < RTE_ETHER_ADDR_LEN; j++) {
                                rule->ixgbe_fdir.formatted.inner_mac[j] =
                                        eth_spec->dst.addr_bytes[j];
                        }
@@ -1645,7 +1706,7 @@ ixgbe_parse_fdir_filter_normal(struct rte_eth_dev *dev,
                if (item->mask) {
 
                        rule->b_mask = TRUE;
-                       eth_mask = (const struct rte_flow_item_eth *)item->mask;
+                       eth_mask = item->mask;
 
                        /* Ether type should be masked. */
                        if (eth_mask->type ||
@@ -1664,7 +1725,7 @@ ixgbe_parse_fdir_filter_normal(struct rte_eth_dev *dev,
                         * src MAC address must be masked,
                         * and don't support dst MAC address mask.
                         */
-                       for (j = 0; j < ETHER_ADDR_LEN; j++) {
+                       for (j = 0; j < RTE_ETHER_ADDR_LEN; j++) {
                                if (eth_mask->src.addr_bytes[j] ||
                                        eth_mask->dst.addr_bytes[j] != 0xFF) {
                                        memset(rule, 0,
@@ -1698,7 +1759,8 @@ ixgbe_parse_fdir_filter_normal(struct rte_eth_dev *dev,
                                return -rte_errno;
                        }
                } else {
-                       if (item->type != RTE_FLOW_ITEM_TYPE_IPV4) {
+                       if (item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
+                                       item->type != RTE_FLOW_ITEM_TYPE_VLAN) {
                                memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
                                rte_flow_error_set(error, EINVAL,
                                        RTE_FLOW_ERROR_TYPE_ITEM,
@@ -1725,8 +1787,8 @@ ixgbe_parse_fdir_filter_normal(struct rte_eth_dev *dev,
                        return -rte_errno;
                }
 
-               vlan_spec = (const struct rte_flow_item_vlan *)item->spec;
-               vlan_mask = (const struct rte_flow_item_vlan *)item->mask;
+               vlan_spec = item->spec;
+               vlan_mask = item->mask;
 
                rule->ixgbe_fdir.formatted.vlan_id = vlan_spec->tci;
 
@@ -1772,8 +1834,7 @@ ixgbe_parse_fdir_filter_normal(struct rte_eth_dev *dev,
                        return -rte_errno;
                }
                rule->b_mask = TRUE;
-               ipv4_mask =
-                       (const struct rte_flow_item_ipv4 *)item->mask;
+               ipv4_mask = item->mask;
                if (ipv4_mask->hdr.version_ihl ||
                    ipv4_mask->hdr.type_of_service ||
                    ipv4_mask->hdr.total_length ||
@@ -1793,8 +1854,7 @@ ixgbe_parse_fdir_filter_normal(struct rte_eth_dev *dev,
 
                if (item->spec) {
                        rule->b_spec = TRUE;
-                       ipv4_spec =
-                               (const struct rte_flow_item_ipv4 *)item->spec;
+                       ipv4_spec = item->spec;
                        rule->ixgbe_fdir.formatted.dst_ip[0] =
                                ipv4_spec->hdr.dst_addr;
                        rule->ixgbe_fdir.formatted.src_ip[0] =
@@ -1844,8 +1904,7 @@ ixgbe_parse_fdir_filter_normal(struct rte_eth_dev *dev,
                }
 
                rule->b_mask = TRUE;
-               ipv6_mask =
-                       (const struct rte_flow_item_ipv6 *)item->mask;
+               ipv6_mask = item->mask;
                if (ipv6_mask->hdr.vtc_flow ||
                    ipv6_mask->hdr.payload_len ||
                    ipv6_mask->hdr.proto ||
@@ -1885,8 +1944,7 @@ ixgbe_parse_fdir_filter_normal(struct rte_eth_dev *dev,
 
                if (item->spec) {
                        rule->b_spec = TRUE;
-                       ipv6_spec =
-                               (const struct rte_flow_item_ipv6 *)item->spec;
+                       ipv6_spec = item->spec;
                        rte_memcpy(rule->ixgbe_fdir.formatted.src_ip,
                                   ipv6_spec->hdr.src_addr, 16);
                        rte_memcpy(rule->ixgbe_fdir.formatted.dst_ip,
@@ -1938,7 +1996,7 @@ ixgbe_parse_fdir_filter_normal(struct rte_eth_dev *dev,
                        return -rte_errno;
                }
                rule->b_mask = TRUE;
-               tcp_mask = (const struct rte_flow_item_tcp *)item->mask;
+               tcp_mask = item->mask;
                if (tcp_mask->hdr.sent_seq ||
                    tcp_mask->hdr.recv_ack ||
                    tcp_mask->hdr.data_off ||
@@ -1957,7 +2015,7 @@ ixgbe_parse_fdir_filter_normal(struct rte_eth_dev *dev,
 
                if (item->spec) {
                        rule->b_spec = TRUE;
-                       tcp_spec = (const struct rte_flow_item_tcp *)item->spec;
+                       tcp_spec = item->spec;
                        rule->ixgbe_fdir.formatted.src_port =
                                tcp_spec->hdr.src_port;
                        rule->ixgbe_fdir.formatted.dst_port =
@@ -2003,7 +2061,7 @@ ixgbe_parse_fdir_filter_normal(struct rte_eth_dev *dev,
                        return -rte_errno;
                }
                rule->b_mask = TRUE;
-               udp_mask = (const struct rte_flow_item_udp *)item->mask;
+               udp_mask = item->mask;
                if (udp_mask->hdr.dgram_len ||
                    udp_mask->hdr.dgram_cksum) {
                        memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
@@ -2017,7 +2075,7 @@ ixgbe_parse_fdir_filter_normal(struct rte_eth_dev *dev,
 
                if (item->spec) {
                        rule->b_spec = TRUE;
-                       udp_spec = (const struct rte_flow_item_udp *)item->spec;
+                       udp_spec = item->spec;
                        rule->ixgbe_fdir.formatted.src_port =
                                udp_spec->hdr.src_port;
                        rule->ixgbe_fdir.formatted.dst_port =
@@ -2068,8 +2126,7 @@ ixgbe_parse_fdir_filter_normal(struct rte_eth_dev *dev,
                                return -rte_errno;
                        }
                        rule->b_mask = TRUE;
-                       sctp_mask =
-                               (const struct rte_flow_item_sctp *)item->mask;
+                       sctp_mask = item->mask;
                        if (sctp_mask->hdr.tag ||
                                sctp_mask->hdr.cksum) {
                                memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
@@ -2083,8 +2140,7 @@ ixgbe_parse_fdir_filter_normal(struct rte_eth_dev *dev,
 
                        if (item->spec) {
                                rule->b_spec = TRUE;
-                               sctp_spec =
-                               (const struct rte_flow_item_sctp *)item->spec;
+                               sctp_spec = item->spec;
                                rule->ixgbe_fdir.formatted.src_port =
                                        sctp_spec->hdr.src_port;
                                rule->ixgbe_fdir.formatted.dst_port =
@@ -2092,8 +2148,7 @@ ixgbe_parse_fdir_filter_normal(struct rte_eth_dev *dev,
                        }
                /* others even sctp port is not supported */
                } else {
-                       sctp_mask =
-                               (const struct rte_flow_item_sctp *)item->mask;
+                       sctp_mask = item->mask;
                        if (sctp_mask &&
                                (sctp_mask->hdr.src_port ||
                                 sctp_mask->hdr.dst_port ||
@@ -2136,7 +2191,7 @@ ixgbe_parse_fdir_filter_normal(struct rte_eth_dev *dev,
                        return -rte_errno;
                }
 
-               raw_mask = (const struct rte_flow_item_raw *)item->mask;
+               raw_mask = item->mask;
 
                /* check mask */
                if (raw_mask->relative != 0x1 ||
@@ -2152,7 +2207,7 @@ ixgbe_parse_fdir_filter_normal(struct rte_eth_dev *dev,
                        return -rte_errno;
                }
 
-               raw_spec = (const struct rte_flow_item_raw *)item->spec;
+               raw_spec = item->spec;
 
                /* check spec */
                if (raw_spec->relative != 0 ||
@@ -2403,7 +2458,7 @@ ixgbe_parse_fdir_filter_tunnel(const struct rte_flow_attr *attr,
        /* Get the VxLAN info */
        if (item->type == RTE_FLOW_ITEM_TYPE_VXLAN) {
                rule->ixgbe_fdir.formatted.tunnel_type =
-                       RTE_FDIR_TUNNEL_TYPE_VXLAN;
+                               IXGBE_FDIR_VXLAN_TUNNEL_TYPE;
 
                /* Only care about VNI, others should be masked. */
                if (!item->mask) {
@@ -2425,8 +2480,7 @@ ixgbe_parse_fdir_filter_tunnel(const struct rte_flow_attr *attr,
                /* Tunnel type is always meaningful. */
                rule->mask.tunnel_type_mask = 1;
 
-               vxlan_mask =
-                       (const struct rte_flow_item_vxlan *)item->mask;
+               vxlan_mask = item->mask;
                if (vxlan_mask->flags) {
                        memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
                        rte_flow_error_set(error, EINVAL,
@@ -2452,20 +2506,17 @@ ixgbe_parse_fdir_filter_tunnel(const struct rte_flow_attr *attr,
 
                if (item->spec) {
                        rule->b_spec = TRUE;
-                       vxlan_spec = (const struct rte_flow_item_vxlan *)
-                                       item->spec;
+                       vxlan_spec = item->spec;
                        rte_memcpy(((uint8_t *)
-                               &rule->ixgbe_fdir.formatted.tni_vni + 1),
+                               &rule->ixgbe_fdir.formatted.tni_vni),
                                vxlan_spec->vni, RTE_DIM(vxlan_spec->vni));
-                       rule->ixgbe_fdir.formatted.tni_vni = rte_be_to_cpu_32(
-                               rule->ixgbe_fdir.formatted.tni_vni);
                }
        }
 
        /* Get the NVGRE info */
        if (item->type == RTE_FLOW_ITEM_TYPE_NVGRE) {
                rule->ixgbe_fdir.formatted.tunnel_type =
-                       RTE_FDIR_TUNNEL_TYPE_NVGRE;
+                               IXGBE_FDIR_NVGRE_TUNNEL_TYPE;
 
                /**
                 * Only care about flags0, flags1, protocol and TNI,
@@ -2490,8 +2541,7 @@ ixgbe_parse_fdir_filter_tunnel(const struct rte_flow_attr *attr,
                /* Tunnel type is always meaningful. */
                rule->mask.tunnel_type_mask = 1;
 
-               nvgre_mask =
-                       (const struct rte_flow_item_nvgre *)item->mask;
+               nvgre_mask = item->mask;
                if (nvgre_mask->flow_id) {
                        memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
                        rte_flow_error_set(error, EINVAL,
@@ -2534,8 +2584,7 @@ ixgbe_parse_fdir_filter_tunnel(const struct rte_flow_attr *attr,
 
                if (item->spec) {
                        rule->b_spec = TRUE;
-                       nvgre_spec =
-                               (const struct rte_flow_item_nvgre *)item->spec;
+                       nvgre_spec = item->spec;
                        if (nvgre_spec->c_k_s_rsvd0_ver !=
                            rte_cpu_to_be_16(0x2000) &&
                                nvgre_mask->c_k_s_rsvd0_ver) {
@@ -2557,7 +2606,6 @@ ixgbe_parse_fdir_filter_tunnel(const struct rte_flow_attr *attr,
                        /* tni is a 24-bits bit field */
                        rte_memcpy(&rule->ixgbe_fdir.formatted.tni_vni,
                        nvgre_spec->tni, RTE_DIM(nvgre_spec->tni));
-                       rule->ixgbe_fdir.formatted.tni_vni <<= 8;
                }
        }
 
@@ -2591,7 +2639,7 @@ ixgbe_parse_fdir_filter_tunnel(const struct rte_flow_attr *attr,
                return -rte_errno;
        }
        rule->b_mask = TRUE;
-       eth_mask = (const struct rte_flow_item_eth *)item->mask;
+       eth_mask = item->mask;
 
        /* Ether type should be masked. */
        if (eth_mask->type) {
@@ -2603,7 +2651,7 @@ ixgbe_parse_fdir_filter_tunnel(const struct rte_flow_attr *attr,
        }
 
        /* src MAC address should be masked. */
-       for (j = 0; j < ETHER_ADDR_LEN; j++) {
+       for (j = 0; j < RTE_ETHER_ADDR_LEN; j++) {
                if (eth_mask->src.addr_bytes[j]) {
                        memset(rule, 0,
                               sizeof(struct ixgbe_fdir_rule));
@@ -2614,7 +2662,7 @@ ixgbe_parse_fdir_filter_tunnel(const struct rte_flow_attr *attr,
                }
        }
        rule->mask.mac_addr_byte_mask = 0;
-       for (j = 0; j < ETHER_ADDR_LEN; j++) {
+       for (j = 0; j < RTE_ETHER_ADDR_LEN; j++) {
                /* It's a per byte mask. */
                if (eth_mask->dst.addr_bytes[j] == 0xFF) {
                        rule->mask.mac_addr_byte_mask |= 0x1 << j;
@@ -2632,10 +2680,10 @@ ixgbe_parse_fdir_filter_tunnel(const struct rte_flow_attr *attr,
 
        if (item->spec) {
                rule->b_spec = TRUE;
-               eth_spec = (const struct rte_flow_item_eth *)item->spec;
+               eth_spec = item->spec;
 
                /* Get the dst MAC. */
-               for (j = 0; j < ETHER_ADDR_LEN; j++) {
+               for (j = 0; j < RTE_ETHER_ADDR_LEN; j++) {
                        rule->ixgbe_fdir.formatted.inner_mac[j] =
                                eth_spec->dst.addr_bytes[j];
                }
@@ -2671,8 +2719,8 @@ ixgbe_parse_fdir_filter_tunnel(const struct rte_flow_attr *attr,
                        return -rte_errno;
                }
 
-               vlan_spec = (const struct rte_flow_item_vlan *)item->spec;
-               vlan_mask = (const struct rte_flow_item_vlan *)item->mask;
+               vlan_spec = item->spec;
+               vlan_mask = item->mask;
 
                rule->ixgbe_fdir.formatted.vlan_id = vlan_spec->tci;
 
@@ -2775,7 +2823,7 @@ ixgbe_parse_rss_filter(struct rte_eth_dev *dev,
 
        rss = (const struct rte_flow_action_rss *)act->conf;
 
-       if (!rss || !rss->num) {
+       if (!rss || !rss->queue_num) {
                rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ACTION,
                                act,
@@ -2783,7 +2831,7 @@ ixgbe_parse_rss_filter(struct rte_eth_dev *dev,
                return -rte_errno;
        }
 
-       for (n = 0; n < rss->num; n++) {
+       for (n = 0; n < rss->queue_num; n++) {
                if (rss->queue[n] >= dev->data->nb_rx_queues) {
                        rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ACTION,
@@ -2792,19 +2840,32 @@ ixgbe_parse_rss_filter(struct rte_eth_dev *dev,
                        return -rte_errno;
                }
        }
-       if (rss->rss_conf)
-               rss_conf->rss_conf = *rss->rss_conf;
-       else
-               rss_conf->rss_conf.rss_hf = IXGBE_RSS_OFFLOAD_ALL;
 
-       for (n = 0; n < rss->num; ++n)
-               rss_conf->queue[n] = rss->queue[n];
-       rss_conf->num = rss->num;
+       if (rss->func != RTE_ETH_HASH_FUNCTION_DEFAULT)
+               return rte_flow_error_set
+                       (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, act,
+                        "non-default RSS hash functions are not supported");
+       if (rss->level)
+               return rte_flow_error_set
+                       (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, act,
+                        "a nonzero RSS encapsulation level is not supported");
+       if (rss->key_len && rss->key_len != RTE_DIM(rss_conf->key))
+               return rte_flow_error_set
+                       (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, act,
+                        "RSS hash key must be exactly 40 bytes");
+       if (rss->queue_num > RTE_DIM(rss_conf->queue))
+               return rte_flow_error_set
+                       (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, act,
+                        "too many queues for RSS context");
+       if (ixgbe_rss_conf_init(rss_conf, rss))
+               return rte_flow_error_set
+                       (error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION, act,
+                        "RSS context initialization failure");
 
        /* check if the next not void item is END */
        act = next_no_void_action(actions, act);
        if (act->type != RTE_FLOW_ACTION_TYPE_END) {
-               memset(rss_conf, 0, sizeof(struct rte_eth_rss_conf));
+               memset(rss_conf, 0, sizeof(struct ixgbe_rte_flow_rss_conf));
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_ACTION,
                        act, "Not supported action.");
@@ -2830,6 +2891,15 @@ ixgbe_parse_rss_filter(struct rte_eth_dev *dev,
                return -rte_errno;
        }
 
+       /* not supported */
+       if (attr->transfer) {
+               memset(rss_conf, 0, sizeof(struct ixgbe_rte_flow_rss_conf));
+               rte_flow_error_set(error, EINVAL,
+                                  RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER,
+                                  attr, "No support for transfer.");
+               return -rte_errno;
+       }
+
        if (attr->priority > 0xFFFF) {
                memset(rss_conf, 0, sizeof(struct ixgbe_rte_flow_rss_conf));
                rte_flow_error_set(error, EINVAL,
@@ -2848,7 +2918,7 @@ ixgbe_clear_rss_filter(struct rte_eth_dev *dev)
        struct ixgbe_filter_info *filter_info =
                IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
 
-       if (filter_info->rss_info.num)
+       if (filter_info->rss_info.conf.queue_num)
                ixgbe_config_rss_filter(dev, &filter_info->rss_info, FALSE);
 }
 
@@ -2944,7 +3014,7 @@ ixgbe_flow_create(struct rte_eth_dev *dev,
        struct rte_eth_ethertype_filter ethertype_filter;
        struct rte_eth_syn_filter syn_filter;
        struct ixgbe_fdir_rule fdir_rule;
-       struct rte_eth_l2_tunnel_conf l2_tn_filter;
+       struct ixgbe_l2_tunnel_conf l2_tn_filter;
        struct ixgbe_hw_fdir_info *fdir_info =
                IXGBE_DEV_PRIVATE_TO_FDIR_INFO(dev->data->dev_private);
        struct ixgbe_rte_flow_rss_conf rss_conf;
@@ -2978,7 +3048,7 @@ ixgbe_flow_create(struct rte_eth_dev *dev,
        ret = ixgbe_parse_ntuple_filter(dev, attr, pattern,
                        actions, &ntuple_filter, error);
 
-#ifdef RTE_LIBRTE_SECURITY
+#ifdef RTE_LIB_SECURITY
        /* ESP flow not really a flow*/
        if (ntuple_filter.proto == IPPROTO_ESP)
                return flow;
@@ -3067,13 +3137,13 @@ ixgbe_flow_create(struct rte_eth_dev *dev,
                                rte_memcpy(&fdir_info->mask,
                                        &fdir_rule.mask,
                                        sizeof(struct ixgbe_hw_fdir_mask));
-                               fdir_info->flex_bytes_offset =
-                                       fdir_rule.flex_bytes_offset;
 
-                               if (fdir_rule.mask.flex_bytes_mask)
-                                       ixgbe_fdir_set_flexbytes_offset(dev,
+                               if (fdir_rule.mask.flex_bytes_mask) {
+                                       ret = ixgbe_fdir_set_flexbytes_offset(dev,
                                                fdir_rule.flex_bytes_offset);
-
+                                       if (ret)
+                                               goto out;
+                               }
                                ret = ixgbe_fdir_set_input_mask(dev);
                                if (ret)
                                        goto out;
@@ -3091,8 +3161,9 @@ ixgbe_flow_create(struct rte_eth_dev *dev,
                                if (ret)
                                        goto out;
 
-                               if (fdir_info->flex_bytes_offset !=
-                                               fdir_rule.flex_bytes_offset)
+                               if (fdir_rule.mask.flex_bytes_mask &&
+                                   fdir_info->flex_bytes_offset !=
+                                   fdir_rule.flex_bytes_offset)
                                        goto out;
                        }
                }
@@ -3132,7 +3203,7 @@ ixgbe_flow_create(struct rte_eth_dev *dev,
                goto out;
        }
 
-       memset(&l2_tn_filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
+       memset(&l2_tn_filter, 0, sizeof(struct ixgbe_l2_tunnel_conf));
        ret = ixgbe_parse_l2_tn_filter(dev, attr, pattern,
                                        actions, &l2_tn_filter, error);
        if (!ret) {
@@ -3146,7 +3217,7 @@ ixgbe_flow_create(struct rte_eth_dev *dev,
                        }
                        rte_memcpy(&l2_tn_filter_ptr->filter_info,
                                &l2_tn_filter,
-                               sizeof(struct rte_eth_l2_tunnel_conf));
+                               sizeof(struct ixgbe_l2_tunnel_conf));
                        TAILQ_INSERT_TAIL(&filter_l2_tunnel_list,
                                l2_tn_filter_ptr, entries);
                        flow->rule = l2_tn_filter_ptr;
@@ -3167,9 +3238,8 @@ ixgbe_flow_create(struct rte_eth_dev *dev,
                                PMD_DRV_LOG(ERR, "failed to allocate memory");
                                goto out;
                        }
-                       rte_memcpy(&rss_filter_ptr->filter_info,
-                               &rss_conf,
-                               sizeof(struct ixgbe_rte_flow_rss_conf));
+                       ixgbe_rss_conf_init(&rss_filter_ptr->filter_info,
+                                           &rss_conf.conf);
                        TAILQ_INSERT_TAIL(&filter_rss_list,
                                rss_filter_ptr, entries);
                        flow->rule = rss_filter_ptr;
@@ -3204,7 +3274,7 @@ ixgbe_flow_validate(struct rte_eth_dev *dev,
        struct rte_eth_ntuple_filter ntuple_filter;
        struct rte_eth_ethertype_filter ethertype_filter;
        struct rte_eth_syn_filter syn_filter;
-       struct rte_eth_l2_tunnel_conf l2_tn_filter;
+       struct ixgbe_l2_tunnel_conf l2_tn_filter;
        struct ixgbe_fdir_rule fdir_rule;
        struct ixgbe_rte_flow_rss_conf rss_conf;
        int ret;
@@ -3233,7 +3303,7 @@ ixgbe_flow_validate(struct rte_eth_dev *dev,
        if (!ret)
                return 0;
 
-       memset(&l2_tn_filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
+       memset(&l2_tn_filter, 0, sizeof(struct ixgbe_l2_tunnel_conf));
        ret = ixgbe_parse_l2_tn_filter(dev, attr, pattern,
                                actions, &l2_tn_filter, error);
        if (!ret)
@@ -3259,7 +3329,7 @@ ixgbe_flow_destroy(struct rte_eth_dev *dev,
        struct rte_eth_ethertype_filter ethertype_filter;
        struct rte_eth_syn_filter syn_filter;
        struct ixgbe_fdir_rule fdir_rule;
-       struct rte_eth_l2_tunnel_conf l2_tn_filter;
+       struct ixgbe_l2_tunnel_conf l2_tn_filter;
        struct ixgbe_ntuple_filter_ele *ntuple_filter_ptr;
        struct ixgbe_ethertype_filter_ele *ethertype_filter_ptr;
        struct ixgbe_eth_syn_filter_ele *syn_filter_ptr;
@@ -3329,7 +3399,7 @@ ixgbe_flow_destroy(struct rte_eth_dev *dev,
                l2_tn_filter_ptr = (struct ixgbe_eth_l2_tunnel_conf_ele *)
                                pmd_flow->rule;
                rte_memcpy(&l2_tn_filter, &l2_tn_filter_ptr->filter_info,
-                       sizeof(struct rte_eth_l2_tunnel_conf));
+                       sizeof(struct ixgbe_l2_tunnel_conf));
                ret = ixgbe_dev_l2_tunnel_filter_del(dev, &l2_tn_filter);
                if (!ret) {
                        TAILQ_REMOVE(&filter_l2_tunnel_list,
@@ -3367,6 +3437,7 @@ ixgbe_flow_destroy(struct rte_eth_dev *dev,
                        TAILQ_REMOVE(&ixgbe_flow_list,
                                ixgbe_flow_mem_ptr, entries);
                        rte_free(ixgbe_flow_mem_ptr);
+                       break;
                }
        }
        rte_free(flow);
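
Annotation: the one-line break at the end matters. The enclosing loop (a
TAILQ_FOREACH over ixgbe_flow_list in the full source, per the surrounding
context) frees the matching node, so without stopping there the next
iteration would read entries from memory that was just released. Simplified
shape of the fixed loop, as a sketch of the pattern with the locals that
appear above:

    TAILQ_FOREACH(ixgbe_flow_mem_ptr, &ixgbe_flow_list, entries) {
            if (ixgbe_flow_mem_ptr->flow == pmd_flow) {
                    TAILQ_REMOVE(&ixgbe_flow_list,
                                 ixgbe_flow_mem_ptr, entries);
                    rte_free(ixgbe_flow_mem_ptr);
                    break;  /* iterator node is freed; stop walking */
            }
    }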