net/ixgbe: parse flow director filter
author Wei Zhao <wei.zhao1@intel.com>
Fri, 13 Jan 2017 08:13:09 +0000 (16:13 +0800)
committer Ferruh Yigit <ferruh.yigit@intel.com>
Tue, 17 Jan 2017 18:41:43 +0000 (19:41 +0100)
Check whether the rule is a flow director rule, and get the flow director filter info.

Signed-off-by: Wei Zhao <wei.zhao1@intel.com>
Signed-off-by: Wenzhuo Lu <wenzhuo.lu@intel.com>
Acked-by: Beilei Xing <beilei.xing@intel.com>
Acked-by: Wei Dai <wei.dai@intel.com>
drivers/net/ixgbe/ixgbe_ethdev.c
drivers/net/ixgbe/ixgbe_ethdev.h
drivers/net/ixgbe/ixgbe_fdir.c
drivers/net/ixgbe/ixgbe_flow.c

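For reference, the kind of rule the new parser accepts can be expressed through the generic flow API roughly as below. This is a minimal sketch, not part of the patch; port_id and the literal header values are placeholders, mirroring the UDP/TCP/SCTP pattern documented in ixgbe_flow.c:

    /* Perfect-mode IPv4/UDP rule: exact src/dst address and port match,
     * steered to queue 1; all other fields left masked out.
     * Needs rte_flow.h and rte_ip.h (for the IPv4() helper macro).
     */
    struct rte_flow_attr attr = { .ingress = 1 };
    struct rte_flow_item_ipv4 ip_spec = {
            .hdr = {
                    .src_addr = rte_cpu_to_be_32(IPv4(192, 168, 1, 20)),
                    .dst_addr = rte_cpu_to_be_32(IPv4(192, 167, 3, 50)),
            },
    };
    struct rte_flow_item_ipv4 ip_mask = {
            .hdr = { .src_addr = UINT32_MAX, .dst_addr = UINT32_MAX },
    };
    struct rte_flow_item_udp udp_spec = {
            .hdr = {
                    .src_port = rte_cpu_to_be_16(80),
                    .dst_port = rte_cpu_to_be_16(80),
            },
    };
    struct rte_flow_item_udp udp_mask = {
            .hdr = { .src_port = UINT16_MAX, .dst_port = UINT16_MAX },
    };
    struct rte_flow_item pattern[] = {
            { .type = RTE_FLOW_ITEM_TYPE_ETH },
            { .type = RTE_FLOW_ITEM_TYPE_IPV4,
              .spec = &ip_spec, .mask = &ip_mask },
            { .type = RTE_FLOW_ITEM_TYPE_UDP,
              .spec = &udp_spec, .mask = &udp_mask },
            { .type = RTE_FLOW_ITEM_TYPE_END },
    };
    struct rte_flow_action_queue queue = { .index = 1 };
    struct rte_flow_action actions[] = {
            { .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
            { .type = RTE_FLOW_ACTION_TYPE_END },
    };
    struct rte_flow_error flow_err;
    int ret = rte_flow_validate(port_id, &attr, pattern, actions, &flow_err);
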
index 55fda49..4375ac8 100644 (file)
@@ -1487,6 +1487,8 @@ static int ixgbe_fdir_filter_init(struct rte_eth_dev *eth_dev)
                             "Failed to allocate memory for fdir hash map!");
                return -ENOMEM;
        }
+       fdir_info->mask_added = FALSE;
+
        return 0;
 }
 
index 16f20d8..b3f3980 100644 (file)
@@ -167,6 +167,17 @@ struct ixgbe_fdir_filter {
 /* list of fdir filters */
 TAILQ_HEAD(ixgbe_fdir_filter_list, ixgbe_fdir_filter);
 
+struct ixgbe_fdir_rule {
+       struct ixgbe_hw_fdir_mask mask;
+       union ixgbe_atr_input ixgbe_fdir; /* key of fdir filter */
+       bool b_spec; /* If TRUE, ixgbe_fdir, fdirflags, queue have meaning. */
+       bool b_mask; /* If TRUE, mask has meaning. */
+       enum rte_fdir_mode mode; /* IP, MAC VLAN, Tunnel */
+       uint32_t fdirflags; /* drop or forward */
+       uint32_t soft_id; /* a unique value for this rule */
+       uint8_t queue; /* assigned rx queue */
+};
+
 struct ixgbe_hw_fdir_info {
        struct ixgbe_hw_fdir_mask mask;
        uint8_t     flex_bytes_offset;
@@ -182,6 +193,7 @@ struct ixgbe_hw_fdir_info {
        /* store the pointers of the filters, index is the hash value. */
        struct ixgbe_fdir_filter **hash_map;
        struct rte_hash *hash_handle; /* cuckoo hash handler */
+       bool mask_added; /* If the mask was already set by a consistent filter */
 };
 
 /* structure for interrupt relative data */
@@ -520,6 +532,10 @@ bool ixgbe_rss_update_sp(enum ixgbe_mac_type mac_type);
  * Flow director function prototypes
  */
 int ixgbe_fdir_configure(struct rte_eth_dev *dev);
+int ixgbe_fdir_set_input_mask(struct rte_eth_dev *dev);
+int ixgbe_fdir_filter_program(struct rte_eth_dev *dev,
+                             struct ixgbe_fdir_rule *rule,
+                             bool del, bool update);
 
 void ixgbe_configure_dcb(struct rte_eth_dev *dev);
 
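Illustrative only (not part of the patch): filling the new ixgbe_fdir_rule by hand for a perfect-mode IPv4 drop rule before handing it to the exported programming helper; src_be is a placeholder big-endian address:

    struct ixgbe_fdir_rule rule;

    memset(&rule, 0, sizeof(rule));
    rule.mode = RTE_FDIR_MODE_PERFECT;
    rule.ixgbe_fdir.formatted.flow_type = IXGBE_ATR_FLOW_TYPE_IPV4;
    rule.ixgbe_fdir.formatted.src_ip[0] = src_be; /* big-endian IPv4 */
    rule.mask.src_ipv4_mask = UINT32_MAX;         /* exact match on src */
    rule.b_spec = TRUE;  /* ixgbe_fdir, fdirflags and queue are valid */
    rule.b_mask = TRUE;  /* mask is valid */
    rule.fdirflags = IXGBE_FDIRCMD_DROP;          /* drop, don't forward */
    rule.soft_id = 1;

    ret = ixgbe_fdir_filter_program(dev, &rule, FALSE, FALSE); /* add */
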
index e928ad7..3b9d60c 100644 (file)
 static int fdir_erase_filter_82599(struct ixgbe_hw *hw, uint32_t fdirhash);
 static int fdir_set_input_mask(struct rte_eth_dev *dev,
                               const struct rte_eth_fdir_masks *input_mask);
-static int fdir_set_input_mask_82599(struct rte_eth_dev *dev,
-               const struct rte_eth_fdir_masks *input_mask);
-static int fdir_set_input_mask_x550(struct rte_eth_dev *dev,
-                                   const struct rte_eth_fdir_masks *input_mask);
+static int fdir_set_input_mask_82599(struct rte_eth_dev *dev);
+static int fdir_set_input_mask_x550(struct rte_eth_dev *dev);
 static int ixgbe_set_fdir_flex_conf(struct rte_eth_dev *dev,
                const struct rte_eth_fdir_flex_conf *conf, uint32_t *fdirctrl);
 static int fdir_enable_82599(struct ixgbe_hw *hw, uint32_t fdirctrl);
@@ -295,8 +293,7 @@ reverse_fdir_bitmasks(uint16_t hi_dword, uint16_t lo_dword)
  * but makes use of the rte_fdir_masks structure to see which bits to set.
  */
 static int
-fdir_set_input_mask_82599(struct rte_eth_dev *dev,
-               const struct rte_eth_fdir_masks *input_mask)
+fdir_set_input_mask_82599(struct rte_eth_dev *dev)
 {
        struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        struct ixgbe_hw_fdir_info *info =
@@ -308,8 +305,6 @@ fdir_set_input_mask_82599(struct rte_eth_dev *dev,
        uint32_t fdirm = IXGBE_FDIRM_POOL | IXGBE_FDIRM_DIPv6 | IXGBE_FDIRM_FLEX;
        uint32_t fdirtcpm;  /* TCP source and destination port masks. */
        uint32_t fdiripv6m; /* IPv6 source and destination masks. */
-       uint16_t dst_ipv6m = 0;
-       uint16_t src_ipv6m = 0;
        volatile uint32_t *reg;
 
        PMD_INIT_FUNC_TRACE();
@@ -320,31 +315,30 @@ fdir_set_input_mask_82599(struct rte_eth_dev *dev,
         * a VLAN of 0 is unspecified, so mask that out as well.  L4type
         * cannot be masked out in this implementation.
         */
-       if (input_mask->dst_port_mask == 0 && input_mask->src_port_mask == 0)
+       if (info->mask.dst_port_mask == 0 && info->mask.src_port_mask == 0)
                /* use the L4 protocol mask for raw IPv4/IPv6 traffic */
                fdirm |= IXGBE_FDIRM_L4P;
 
-       if (input_mask->vlan_tci_mask == rte_cpu_to_be_16(0x0FFF))
+       if (info->mask.vlan_tci_mask == rte_cpu_to_be_16(0x0FFF))
                /* mask VLAN Priority */
                fdirm |= IXGBE_FDIRM_VLANP;
-       else if (input_mask->vlan_tci_mask == rte_cpu_to_be_16(0xE000))
+       else if (info->mask.vlan_tci_mask == rte_cpu_to_be_16(0xE000))
                /* mask VLAN ID */
                fdirm |= IXGBE_FDIRM_VLANID;
-       else if (input_mask->vlan_tci_mask == 0)
+       else if (info->mask.vlan_tci_mask == 0)
                /* mask VLAN ID and Priority */
                fdirm |= IXGBE_FDIRM_VLANID | IXGBE_FDIRM_VLANP;
-       else if (input_mask->vlan_tci_mask != rte_cpu_to_be_16(0xEFFF)) {
+       else if (info->mask.vlan_tci_mask != rte_cpu_to_be_16(0xEFFF)) {
                PMD_INIT_LOG(ERR, "invalid vlan_tci_mask");
                return -EINVAL;
        }
-       info->mask.vlan_tci_mask = input_mask->vlan_tci_mask;
 
        IXGBE_WRITE_REG(hw, IXGBE_FDIRM, fdirm);
 
        /* store the TCP/UDP port masks, bit reversed from port layout */
        fdirtcpm = reverse_fdir_bitmasks(
-                       rte_be_to_cpu_16(input_mask->dst_port_mask),
-                       rte_be_to_cpu_16(input_mask->src_port_mask));
+                       rte_be_to_cpu_16(info->mask.dst_port_mask),
+                       rte_be_to_cpu_16(info->mask.src_port_mask));
 
        /* write all the same so that UDP, TCP and SCTP use the same mask
         * (little-endian)
@@ -352,30 +346,23 @@ fdir_set_input_mask_82599(struct rte_eth_dev *dev,
        IXGBE_WRITE_REG(hw, IXGBE_FDIRTCPM, ~fdirtcpm);
        IXGBE_WRITE_REG(hw, IXGBE_FDIRUDPM, ~fdirtcpm);
        IXGBE_WRITE_REG(hw, IXGBE_FDIRSCTPM, ~fdirtcpm);
-       info->mask.src_port_mask = input_mask->src_port_mask;
-       info->mask.dst_port_mask = input_mask->dst_port_mask;
 
        /* Store source and destination IPv4 masks (big-endian),
         * can not use IXGBE_WRITE_REG.
         */
        reg = IXGBE_PCI_REG_ADDR(hw, IXGBE_FDIRSIP4M);
-       *reg = ~(input_mask->ipv4_mask.src_ip);
+       *reg = ~(info->mask.src_ipv4_mask);
        reg = IXGBE_PCI_REG_ADDR(hw, IXGBE_FDIRDIP4M);
-       *reg = ~(input_mask->ipv4_mask.dst_ip);
-       info->mask.src_ipv4_mask = input_mask->ipv4_mask.src_ip;
-       info->mask.dst_ipv4_mask = input_mask->ipv4_mask.dst_ip;
+       *reg = ~(info->mask.dst_ipv4_mask);
 
        if (dev->data->dev_conf.fdir_conf.mode == RTE_FDIR_MODE_SIGNATURE) {
                /*
                 * Store source and destination IPv6 masks (bit reversed)
                 */
-               IPV6_ADDR_TO_MASK(input_mask->ipv6_mask.src_ip, src_ipv6m);
-               IPV6_ADDR_TO_MASK(input_mask->ipv6_mask.dst_ip, dst_ipv6m);
-               fdiripv6m = (dst_ipv6m << 16) | src_ipv6m;
+               fdiripv6m = (info->mask.dst_ipv6_mask << 16) |
+                           info->mask.src_ipv6_mask;
 
                IXGBE_WRITE_REG(hw, IXGBE_FDIRIP6M, ~fdiripv6m);
-               info->mask.src_ipv6_mask = src_ipv6m;
-               info->mask.dst_ipv6_mask = dst_ipv6m;
        }
 
        return IXGBE_SUCCESS;
@@ -386,8 +373,7 @@ fdir_set_input_mask_82599(struct rte_eth_dev *dev,
  * but makes use of the rte_fdir_masks structure to see which bits to set.
  */
 static int
-fdir_set_input_mask_x550(struct rte_eth_dev *dev,
-                        const struct rte_eth_fdir_masks *input_mask)
+fdir_set_input_mask_x550(struct rte_eth_dev *dev)
 {
        struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        struct ixgbe_hw_fdir_info *info =
@@ -410,20 +396,19 @@ fdir_set_input_mask_x550(struct rte_eth_dev *dev,
        /* some bits must be set for mac vlan or tunnel mode */
        fdirm |= IXGBE_FDIRM_L4P | IXGBE_FDIRM_L3P;
 
-       if (input_mask->vlan_tci_mask == rte_cpu_to_be_16(0x0FFF))
+       if (info->mask.vlan_tci_mask == rte_cpu_to_be_16(0x0FFF))
                /* mask VLAN Priority */
                fdirm |= IXGBE_FDIRM_VLANP;
-       else if (input_mask->vlan_tci_mask == rte_cpu_to_be_16(0xE000))
+       else if (info->mask.vlan_tci_mask == rte_cpu_to_be_16(0xE000))
                /* mask VLAN ID */
                fdirm |= IXGBE_FDIRM_VLANID;
-       else if (input_mask->vlan_tci_mask == 0)
+       else if (info->mask.vlan_tci_mask == 0)
                /* mask VLAN ID and Priority */
                fdirm |= IXGBE_FDIRM_VLANID | IXGBE_FDIRM_VLANP;
-       else if (input_mask->vlan_tci_mask != rte_cpu_to_be_16(0xEFFF)) {
+       else if (info->mask.vlan_tci_mask != rte_cpu_to_be_16(0xEFFF)) {
                PMD_INIT_LOG(ERR, "invalid vlan_tci_mask");
                return -EINVAL;
        }
-       info->mask.vlan_tci_mask = input_mask->vlan_tci_mask;
 
        IXGBE_WRITE_REG(hw, IXGBE_FDIRM, fdirm);
 
@@ -434,12 +419,11 @@ fdir_set_input_mask_x550(struct rte_eth_dev *dev,
                                IXGBE_FDIRIP6M_TNI_VNI;
 
        if (mode == RTE_FDIR_MODE_PERFECT_TUNNEL) {
-               mac_mask = input_mask->mac_addr_byte_mask;
+               mac_mask = info->mask.mac_addr_byte_mask;
                fdiripv6m |= (mac_mask << IXGBE_FDIRIP6M_INNER_MAC_SHIFT)
                                & IXGBE_FDIRIP6M_INNER_MAC;
-               info->mask.mac_addr_byte_mask = input_mask->mac_addr_byte_mask;
 
-               switch (input_mask->tunnel_type_mask) {
+               switch (info->mask.tunnel_type_mask) {
                case 0:
                        /* Mask tunnel type */
                        fdiripv6m |= IXGBE_FDIRIP6M_TUNNEL_TYPE;
@@ -450,10 +434,8 @@ fdir_set_input_mask_x550(struct rte_eth_dev *dev,
                        PMD_INIT_LOG(ERR, "invalid tunnel_type_mask");
                        return -EINVAL;
                }
-               info->mask.tunnel_type_mask =
-                       input_mask->tunnel_type_mask;
 
-               switch (rte_be_to_cpu_32(input_mask->tunnel_id_mask)) {
+               switch (rte_be_to_cpu_32(info->mask.tunnel_id_mask)) {
                case 0x0:
                        /* Mask vxlan id */
                        fdiripv6m |= IXGBE_FDIRIP6M_TNI_VNI;
@@ -467,8 +449,6 @@ fdir_set_input_mask_x550(struct rte_eth_dev *dev,
                        PMD_INIT_LOG(ERR, "invalid tunnel_id_mask");
                        return -EINVAL;
                }
-               info->mask.tunnel_id_mask =
-                       input_mask->tunnel_id_mask;
        }
 
        IXGBE_WRITE_REG(hw, IXGBE_FDIRIP6M, fdiripv6m);
@@ -482,22 +462,90 @@ fdir_set_input_mask_x550(struct rte_eth_dev *dev,
 }
 
 static int
-fdir_set_input_mask(struct rte_eth_dev *dev,
-                   const struct rte_eth_fdir_masks *input_mask)
+ixgbe_fdir_store_input_mask_82599(struct rte_eth_dev *dev,
+                                 const struct rte_eth_fdir_masks *input_mask)
+{
+       struct ixgbe_hw_fdir_info *info =
+               IXGBE_DEV_PRIVATE_TO_FDIR_INFO(dev->data->dev_private);
+       uint16_t dst_ipv6m = 0;
+       uint16_t src_ipv6m = 0;
+
+       memset(&info->mask, 0, sizeof(struct ixgbe_hw_fdir_mask));
+       info->mask.vlan_tci_mask = input_mask->vlan_tci_mask;
+       info->mask.src_port_mask = input_mask->src_port_mask;
+       info->mask.dst_port_mask = input_mask->dst_port_mask;
+       info->mask.src_ipv4_mask = input_mask->ipv4_mask.src_ip;
+       info->mask.dst_ipv4_mask = input_mask->ipv4_mask.dst_ip;
+       IPV6_ADDR_TO_MASK(input_mask->ipv6_mask.src_ip, src_ipv6m);
+       IPV6_ADDR_TO_MASK(input_mask->ipv6_mask.dst_ip, dst_ipv6m);
+       info->mask.src_ipv6_mask = src_ipv6m;
+       info->mask.dst_ipv6_mask = dst_ipv6m;
+
+       return IXGBE_SUCCESS;
+}
+
+static int
+ixgbe_fdir_store_input_mask_x550(struct rte_eth_dev *dev,
+                                const struct rte_eth_fdir_masks *input_mask)
+{
+       struct ixgbe_hw_fdir_info *info =
+               IXGBE_DEV_PRIVATE_TO_FDIR_INFO(dev->data->dev_private);
+
+       memset(&info->mask, 0, sizeof(struct ixgbe_hw_fdir_mask));
+       info->mask.vlan_tci_mask = input_mask->vlan_tci_mask;
+       info->mask.mac_addr_byte_mask = input_mask->mac_addr_byte_mask;
+       info->mask.tunnel_type_mask = input_mask->tunnel_type_mask;
+       info->mask.tunnel_id_mask = input_mask->tunnel_id_mask;
+
+       return IXGBE_SUCCESS;
+}
+
+static int
+ixgbe_fdir_store_input_mask(struct rte_eth_dev *dev,
+                           const struct rte_eth_fdir_masks *input_mask)
 {
        enum rte_fdir_mode mode = dev->data->dev_conf.fdir_conf.mode;
 
        if (mode >= RTE_FDIR_MODE_SIGNATURE &&
            mode <= RTE_FDIR_MODE_PERFECT)
-               return fdir_set_input_mask_82599(dev, input_mask);
+               return ixgbe_fdir_store_input_mask_82599(dev, input_mask);
        else if (mode >= RTE_FDIR_MODE_PERFECT_MAC_VLAN &&
                 mode <= RTE_FDIR_MODE_PERFECT_TUNNEL)
-               return fdir_set_input_mask_x550(dev, input_mask);
+               return ixgbe_fdir_store_input_mask_x550(dev, input_mask);
 
        PMD_DRV_LOG(ERR, "Not supported fdir mode - %d!", mode);
        return -ENOTSUP;
 }
 
+int
+ixgbe_fdir_set_input_mask(struct rte_eth_dev *dev)
+{
+       enum rte_fdir_mode mode = dev->data->dev_conf.fdir_conf.mode;
+
+       if (mode >= RTE_FDIR_MODE_SIGNATURE &&
+           mode <= RTE_FDIR_MODE_PERFECT)
+               return fdir_set_input_mask_82599(dev);
+       else if (mode >= RTE_FDIR_MODE_PERFECT_MAC_VLAN &&
+                mode <= RTE_FDIR_MODE_PERFECT_TUNNEL)
+               return fdir_set_input_mask_x550(dev);
+
+       PMD_DRV_LOG(ERR, "Not supported fdir mode - %d!", mode);
+       return -ENOTSUP;
+}
+
+static int
+fdir_set_input_mask(struct rte_eth_dev *dev,
+                   const struct rte_eth_fdir_masks *input_mask)
+{
+       int ret;
+
+       ret = ixgbe_fdir_store_input_mask(dev, input_mask);
+       if (ret)
+               return ret;
+
+       return ixgbe_fdir_set_input_mask(dev);
+}
+
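The mask handling is thus split into a store step (software state only) and an apply step (register writes), so a mask taken from a consistent (rte_flow) filter can be cached and programmed independently of the legacy path above. A plausible sketch of how the exported helper and the new mask_added flag fit together on the flow-API side; rule stands for a parsed ixgbe_fdir_rule, and the real consumer lands in the consistent-filter creation code:

    if (!info->mask_added) {
            /* First rule: take the mask from the parsed rule and
             * write it to the hardware once.
             */
            info->mask = rule->mask;
            ret = ixgbe_fdir_set_input_mask(dev);
            if (ret)
                    return ret;
            info->mask_added = TRUE;
    }
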
 /*
  * ixgbe_check_fdir_flex_conf -check if the flex payload and mask configuration
  * arguments are valid
@@ -1135,23 +1183,40 @@ ixgbe_remove_fdir_filter(struct ixgbe_hw_fdir_info *fdir_info,
        return 0;
 }
 
-/*
- * ixgbe_add_del_fdir_filter - add or remove a flow diretor filter.
- * @dev: pointer to the structure rte_eth_dev
- * @fdir_filter: fdir filter entry
- * @del: 1 - delete, 0 - add
- * @update: 1 - update
- */
 static int
-ixgbe_add_del_fdir_filter(struct rte_eth_dev *dev,
-                         const struct rte_eth_fdir_filter *fdir_filter,
+ixgbe_interpret_fdir_filter(struct rte_eth_dev *dev,
+                           const struct rte_eth_fdir_filter *fdir_filter,
+                           struct ixgbe_fdir_rule *rule)
+{
+       enum rte_fdir_mode fdir_mode = dev->data->dev_conf.fdir_conf.mode;
+       int err;
+
+       memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
+
+       err = ixgbe_fdir_filter_to_atr_input(fdir_filter,
+                                            &rule->ixgbe_fdir,
+                                            fdir_mode);
+       if (err)
+               return err;
+
+       rule->mode = fdir_mode;
+       if (fdir_filter->action.behavior == RTE_ETH_FDIR_REJECT)
+               rule->fdirflags = IXGBE_FDIRCMD_DROP;
+       rule->queue = fdir_filter->action.rx_queue;
+       rule->soft_id = fdir_filter->soft_id;
+
+       return 0;
+}
+
+int
+ixgbe_fdir_filter_program(struct rte_eth_dev *dev,
+                         struct ixgbe_fdir_rule *rule,
                          bool del,
                          bool update)
 {
        struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        uint32_t fdircmd_flags;
        uint32_t fdirhash;
-       union ixgbe_atr_input input;
        uint8_t queue;
        bool is_perfect = FALSE;
        int err;
@@ -1161,7 +1226,8 @@ ixgbe_add_del_fdir_filter(struct rte_eth_dev *dev,
        struct ixgbe_fdir_filter *node;
        bool add_node = FALSE;
 
-       if (fdir_mode == RTE_FDIR_MODE_NONE)
+       if (fdir_mode == RTE_FDIR_MODE_NONE ||
+           fdir_mode != rule->mode)
                return -ENOTSUP;
 
        /*
@@ -1174,7 +1240,7 @@ ixgbe_add_del_fdir_filter(struct rte_eth_dev *dev,
            (hw->mac.type == ixgbe_mac_X550 ||
             hw->mac.type == ixgbe_mac_X550EM_x ||
             hw->mac.type == ixgbe_mac_X550EM_a) &&
-           (fdir_filter->input.flow_type ==
+           (rule->ixgbe_fdir.formatted.flow_type ==
             RTE_ETH_FLOW_NONFRAG_IPV4_OTHER) &&
            (info->mask.src_port_mask != 0 ||
             info->mask.dst_port_mask != 0)) {
@@ -1188,29 +1254,23 @@ ixgbe_add_del_fdir_filter(struct rte_eth_dev *dev,
            fdir_mode <= RTE_FDIR_MODE_PERFECT_TUNNEL)
                is_perfect = TRUE;
 
-       memset(&input, 0, sizeof(input));
-
-       err = ixgbe_fdir_filter_to_atr_input(fdir_filter, &input,
-                                            fdir_mode);
-       if (err)
-               return err;
-
        if (is_perfect) {
-               if (input.formatted.flow_type & IXGBE_ATR_L4TYPE_IPV6_MASK) {
+               if (rule->ixgbe_fdir.formatted.flow_type &
+                   IXGBE_ATR_L4TYPE_IPV6_MASK) {
                        PMD_DRV_LOG(ERR, "IPv6 is not supported in"
                                    " perfect mode!");
                        return -ENOTSUP;
                }
-               fdirhash = atr_compute_perfect_hash_82599(&input,
+               fdirhash = atr_compute_perfect_hash_82599(&rule->ixgbe_fdir,
                                                          dev->data->dev_conf.fdir_conf.pballoc);
-               fdirhash |= fdir_filter->soft_id <<
+               fdirhash |= rule->soft_id <<
                        IXGBE_FDIRHASH_SIG_SW_INDEX_SHIFT;
        } else
-               fdirhash = atr_compute_sig_hash_82599(&input,
+               fdirhash = atr_compute_sig_hash_82599(&rule->ixgbe_fdir,
                                                      dev->data->dev_conf.fdir_conf.pballoc);
 
        if (del) {
-               err = ixgbe_remove_fdir_filter(info, &input);
+               err = ixgbe_remove_fdir_filter(info, &rule->ixgbe_fdir);
                if (err < 0)
                        return err;
 
@@ -1223,7 +1283,7 @@ ixgbe_add_del_fdir_filter(struct rte_eth_dev *dev,
        }
        /* add or update an fdir filter*/
        fdircmd_flags = (update) ? IXGBE_FDIRCMD_FILTER_UPDATE : 0;
-       if (fdir_filter->action.behavior == RTE_ETH_FDIR_REJECT) {
+       if (rule->fdirflags & IXGBE_FDIRCMD_DROP) {
                if (is_perfect) {
                        queue = dev->data->dev_conf.fdir_conf.drop_queue;
                        fdircmd_flags |= IXGBE_FDIRCMD_DROP;
@@ -1232,13 +1292,12 @@ ixgbe_add_del_fdir_filter(struct rte_eth_dev *dev,
                                    " signature mode.");
                        return -EINVAL;
                }
-       } else if (fdir_filter->action.behavior == RTE_ETH_FDIR_ACCEPT &&
-                  fdir_filter->action.rx_queue < IXGBE_MAX_RX_QUEUE_NUM)
-               queue = (uint8_t)fdir_filter->action.rx_queue;
+       } else if (rule->queue < IXGBE_MAX_RX_QUEUE_NUM)
+               queue = (uint8_t)rule->queue;
        else
                return -EINVAL;
 
-       node = ixgbe_fdir_filter_lookup(info, &input);
+       node = ixgbe_fdir_filter_lookup(info, &rule->ixgbe_fdir);
        if (node) {
                if (update) {
                        node->fdirflags = fdircmd_flags;
@@ -1256,7 +1315,7 @@ ixgbe_add_del_fdir_filter(struct rte_eth_dev *dev,
                if (!node)
                        return -ENOMEM;
                (void)rte_memcpy(&node->ixgbe_fdir,
-                                &input,
+                                &rule->ixgbe_fdir,
                                 sizeof(union ixgbe_atr_input));
                node->fdirflags = fdircmd_flags;
                node->fdirhash = fdirhash;
@@ -1270,18 +1329,19 @@ ixgbe_add_del_fdir_filter(struct rte_eth_dev *dev,
        }
 
        if (is_perfect) {
-               err = fdir_write_perfect_filter_82599(hw, &input, queue,
-                                                     fdircmd_flags, fdirhash,
-                                                     fdir_mode);
+               err = fdir_write_perfect_filter_82599(hw, &rule->ixgbe_fdir,
+                                                     queue, fdircmd_flags,
+                                                     fdirhash, fdir_mode);
        } else {
-               err = fdir_add_signature_filter_82599(hw, &input, queue,
-                                                     fdircmd_flags, fdirhash);
+               err = fdir_add_signature_filter_82599(hw, &rule->ixgbe_fdir,
+                                                     queue, fdircmd_flags,
+                                                     fdirhash);
        }
        if (err < 0) {
                PMD_DRV_LOG(ERR, "Fail to add FDIR filter!");
 
                if (add_node)
-                       (void)ixgbe_remove_fdir_filter(info, &input);
+                       (void)ixgbe_remove_fdir_filter(info, &rule->ixgbe_fdir);
        } else {
                PMD_DRV_LOG(DEBUG, "Success to add FDIR filter");
        }
@@ -1289,6 +1349,29 @@ ixgbe_add_del_fdir_filter(struct rte_eth_dev *dev,
        return err;
 }
 
+/* ixgbe_add_del_fdir_filter - add or remove a flow director filter.
+ * @dev: pointer to the structure rte_eth_dev
+ * @fdir_filter: fdir filter entry
+ * @del: 1 - delete, 0 - add
+ * @update: 1 - update
+ */
+static int
+ixgbe_add_del_fdir_filter(struct rte_eth_dev *dev,
+                         const struct rte_eth_fdir_filter *fdir_filter,
+                         bool del,
+                         bool update)
+{
+       struct ixgbe_fdir_rule rule;
+       int err;
+
+       err = ixgbe_interpret_fdir_filter(dev, fdir_filter, &rule);
+
+       if (err)
+               return err;
+
+       return ixgbe_fdir_filter_program(dev, &rule, del, update);
+}
+
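With the interpret/program split, the legacy filter_ctrl path above becomes a thin wrapper, and the flow API can reach the same programming routine directly. A sketch of the intended create path, using the parser declared in ixgbe_flow.c below; attr, pattern, actions and error are the caller's rte_flow arguments:

    struct ixgbe_fdir_rule fdir_rule;
    int ret;

    ret = ixgbe_parse_fdir_filter(attr, pattern, actions,
                                  &fdir_rule, error);
    if (!ret)
            ret = ixgbe_fdir_filter_program(dev, &fdir_rule,
                                            FALSE, FALSE);
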
 static int
 ixgbe_fdir_flush(struct rte_eth_dev *dev)
 {
@@ -1522,19 +1605,23 @@ ixgbe_clear_all_fdir_filter(struct rte_eth_dev *dev)
        struct ixgbe_hw_fdir_info *fdir_info =
                IXGBE_DEV_PRIVATE_TO_FDIR_INFO(dev->data->dev_private);
        struct ixgbe_fdir_filter *fdir_filter;
+       struct ixgbe_fdir_filter *filter_flag;
        int ret = 0;
 
        /* flush flow director */
        rte_hash_reset(fdir_info->hash_handle);
        memset(fdir_info->hash_map, 0,
               sizeof(struct ixgbe_fdir_filter *) * IXGBE_MAX_FDIR_FILTER_NUM);
+       filter_flag = TAILQ_FIRST(&fdir_info->fdir_list);
        while ((fdir_filter = TAILQ_FIRST(&fdir_info->fdir_list))) {
                TAILQ_REMOVE(&fdir_info->fdir_list,
                             fdir_filter,
                             entries);
                rte_free(fdir_filter);
        }
-       ret = ixgbe_fdir_flush(dev);
+
+       if (filter_flag != NULL)
+               ret = ixgbe_fdir_flush(dev);
 
        return ret;
 }
index 4006084..257cd6f 100644 (file)
@@ -127,6 +127,31 @@ ixgbe_validate_l2_tn_filter(struct rte_eth_dev *dev,
                        struct rte_eth_l2_tunnel_conf *rule,
                        struct rte_flow_error *error);
 static int
+ixgbe_validate_fdir_filter(struct rte_eth_dev *dev,
+                       const struct rte_flow_attr *attr,
+                       const struct rte_flow_item pattern[],
+                       const struct rte_flow_action actions[],
+                       struct ixgbe_fdir_rule *rule,
+                       struct rte_flow_error *error);
+static int
+ixgbe_parse_fdir_filter_normal(const struct rte_flow_attr *attr,
+               const struct rte_flow_item pattern[],
+               const struct rte_flow_action actions[],
+               struct ixgbe_fdir_rule *rule,
+               struct rte_flow_error *error);
+static int
+ixgbe_parse_fdir_filter_tunnel(const struct rte_flow_attr *attr,
+               const struct rte_flow_item pattern[],
+               const struct rte_flow_action actions[],
+               struct ixgbe_fdir_rule *rule,
+               struct rte_flow_error *error);
+static int
+ixgbe_parse_fdir_filter(const struct rte_flow_attr *attr,
+               const struct rte_flow_item pattern[],
+               const struct rte_flow_action actions[],
+               struct ixgbe_fdir_rule *rule,
+               struct rte_flow_error *error);
+static int
 ixgbe_flow_validate(__rte_unused struct rte_eth_dev *dev,
                const struct rte_flow_attr *attr,
                const struct rte_flow_item pattern[],
@@ -1243,39 +1268,1214 @@ ixgbe_validate_l2_tn_filter(struct rte_eth_dev *dev,
        return ret;
 }
 
+/* Parse to get the attr and action info of flow director rule. */
+static int
+ixgbe_parse_fdir_act_attr(const struct rte_flow_attr *attr,
+                         const struct rte_flow_action actions[],
+                         struct ixgbe_fdir_rule *rule,
+                         struct rte_flow_error *error)
+{
+       const struct rte_flow_action *act;
+       const struct rte_flow_action_queue *act_q;
+       const struct rte_flow_action_mark *mark;
+       uint32_t index;
+
+       /* parse attr */
+       /* must be input direction */
+       if (!attr->ingress) {
+               memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
+               rte_flow_error_set(error, EINVAL,
+                       RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
+                       attr, "Only support ingress.");
+               return -rte_errno;
+       }
+
+       /* not supported */
+       if (attr->egress) {
+               memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
+               rte_flow_error_set(error, EINVAL,
+                       RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
+                       attr, "Not support egress.");
+               return -rte_errno;
+       }
+
+       /* not supported */
+       if (attr->priority) {
+               memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
+               rte_flow_error_set(error, EINVAL,
+                       RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
+                       attr, "Not support priority.");
+               return -rte_errno;
+       }
+
+       /* parse action */
+       index = 0;
+
+       /* check if the first not void action is QUEUE or DROP. */
+       NEXT_ITEM_OF_ACTION(act, actions, index);
+       if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE &&
+           act->type != RTE_FLOW_ACTION_TYPE_DROP) {
+               memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
+               rte_flow_error_set(error, EINVAL,
+                       RTE_FLOW_ERROR_TYPE_ACTION,
+                       act, "Not supported action.");
+               return -rte_errno;
+       }
+
+       if (act->type == RTE_FLOW_ACTION_TYPE_QUEUE) {
+               act_q = (const struct rte_flow_action_queue *)act->conf;
+               rule->queue = act_q->index;
+       } else { /* drop */
+               rule->fdirflags = IXGBE_FDIRCMD_DROP;
+       }
+
+       /* check if the next not void item is MARK */
+       index++;
+       NEXT_ITEM_OF_ACTION(act, actions, index);
+       if ((act->type != RTE_FLOW_ACTION_TYPE_MARK) &&
+               (act->type != RTE_FLOW_ACTION_TYPE_END)) {
+               memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
+               rte_flow_error_set(error, EINVAL,
+                       RTE_FLOW_ERROR_TYPE_ACTION,
+                       act, "Not supported action.");
+               return -rte_errno;
+       }
+
+       rule->soft_id = 0;
+
+       if (act->type == RTE_FLOW_ACTION_TYPE_MARK) {
+               mark = (const struct rte_flow_action_mark *)act->conf;
+               rule->soft_id = mark->id;
+               index++;
+               NEXT_ITEM_OF_ACTION(act, actions, index);
+       }
+
+       /* check if the next not void item is END */
+       if (act->type != RTE_FLOW_ACTION_TYPE_END) {
+               memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
+               rte_flow_error_set(error, EINVAL,
+                       RTE_FLOW_ERROR_TYPE_ACTION,
+                       act, "Not supported action.");
+               return -rte_errno;
+       }
+
+       return 0;
+}
+
 /**
- * Check if the flow rule is supported by ixgbe.
- * It only checkes the format. Don't guarantee the rule can be programmed into
- * the HW. Because there can be no enough room for the rule.
+ * Parse the rule to see if it is an IP or MAC VLAN flow director rule,
+ * and fill in the flow director filter info as well.
+ * UDP/TCP/SCTP PATTERN:
+ * The first not void item can be ETH or IPV4 or TCP or UDP or SCTP.
+ * The second not void item must be IPV4 if the first one is ETH.
+ * The third not void item must be UDP or TCP or SCTP.
+ * The next not void item must be END.
+ * MAC VLAN PATTERN:
+ * The first not void item must be ETH.
+ * The second not void item must be MAC VLAN.
+ * The next not void item must be END.
+ * ACTION:
+ * The first not void action should be QUEUE or DROP.
+ * The second not void optional action should be MARK,
+ * mark_id is a uint32_t number.
+ * The next not void action should be END.
+ * UDP/TCP/SCTP pattern example:
+ * ITEM                Spec                    Mask
+ * ETH         NULL                    NULL
+ * IPV4                src_addr 192.168.1.20   0xFFFFFFFF
+ *             dst_addr 192.167.3.50   0xFFFFFFFF
+ * UDP/TCP/SCTP        src_port        80      0xFFFF
+ *             dst_port        80      0xFFFF
+ * END
+ * MAC VLAN pattern example:
+ * ITEM                Spec                    Mask
+ * ETH         dst_addr
+ *             {0xAC, 0x7B, 0xA1,      {0xFF, 0xFF, 0xFF,
+ *             0x2C, 0x6D, 0x36}       0xFF, 0xFF, 0xFF}
+ * MAC VLAN    tci     0x2016          0xFFFF
+ *             tpid    0x8100          0xFFFF
+ * END
+ * Other members in mask and spec should be set to 0x00.
+ * Item->last should be NULL.
  */
 static int
-ixgbe_flow_validate(__rte_unused struct rte_eth_dev *dev,
-               const struct rte_flow_attr *attr,
-               const struct rte_flow_item pattern[],
-               const struct rte_flow_action actions[],
-               struct rte_flow_error *error)
+ixgbe_parse_fdir_filter_normal(const struct rte_flow_attr *attr,
+                              const struct rte_flow_item pattern[],
+                              const struct rte_flow_action actions[],
+                              struct ixgbe_fdir_rule *rule,
+                              struct rte_flow_error *error)
 {
-       struct rte_eth_ntuple_filter ntuple_filter;
-       struct rte_eth_ethertype_filter ethertype_filter;
-       struct rte_eth_syn_filter syn_filter;
-       struct rte_eth_l2_tunnel_conf l2_tn_filter;
-       int ret;
+       const struct rte_flow_item *item;
+       const struct rte_flow_item_eth *eth_spec;
+       const struct rte_flow_item_eth *eth_mask;
+       const struct rte_flow_item_ipv4 *ipv4_spec;
+       const struct rte_flow_item_ipv4 *ipv4_mask;
+       const struct rte_flow_item_tcp *tcp_spec;
+       const struct rte_flow_item_tcp *tcp_mask;
+       const struct rte_flow_item_udp *udp_spec;
+       const struct rte_flow_item_udp *udp_mask;
+       const struct rte_flow_item_sctp *sctp_spec;
+       const struct rte_flow_item_sctp *sctp_mask;
+       const struct rte_flow_item_vlan *vlan_spec;
+       const struct rte_flow_item_vlan *vlan_mask;
 
-       memset(&ntuple_filter, 0, sizeof(struct rte_eth_ntuple_filter));
-       ret = ixgbe_parse_ntuple_filter(attr, pattern,
-                               actions, &ntuple_filter, error);
-       if (!ret)
-               return 0;
+       uint32_t index, j;
 
-       memset(&ethertype_filter, 0, sizeof(struct rte_eth_ethertype_filter));
-       ret = ixgbe_parse_ethertype_filter(attr, pattern,
-                               actions, &ethertype_filter, error);
-       if (!ret)
-               return 0;
+       if (!pattern) {
+               rte_flow_error_set(error, EINVAL,
+                       RTE_FLOW_ERROR_TYPE_ITEM_NUM,
+                       NULL, "NULL pattern.");
+               return -rte_errno;
+       }
 
-       memset(&syn_filter, 0, sizeof(struct rte_eth_syn_filter));
-       ret = ixgbe_parse_syn_filter(attr, pattern,
-                               actions, &syn_filter, error);
+       if (!actions) {
+               rte_flow_error_set(error, EINVAL,
+                                  RTE_FLOW_ERROR_TYPE_ACTION_NUM,
+                                  NULL, "NULL action.");
+               return -rte_errno;
+       }
+
+       if (!attr) {
+               rte_flow_error_set(error, EINVAL,
+                                  RTE_FLOW_ERROR_TYPE_ATTR,
+                                  NULL, "NULL attribute.");
+               return -rte_errno;
+       }
+
+       /**
+        * Some fields may not be provided. Set spec to 0 and mask to the
+        * default value, so nothing needs to be done later for the fields
+        * that are not provided.
+        */
+       memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
+       memset(&rule->mask, 0xFF, sizeof(struct ixgbe_hw_fdir_mask));
+       rule->mask.vlan_tci_mask = 0;
+
+       /* parse pattern */
+       index = 0;
+
+       /**
+        * The first not void item should be
+        * MAC or IPv4 or TCP or UDP or SCTP.
+        */
+       NEXT_ITEM_OF_PATTERN(item, pattern, index);
+       if (item->type != RTE_FLOW_ITEM_TYPE_ETH &&
+           item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
+           item->type != RTE_FLOW_ITEM_TYPE_TCP &&
+           item->type != RTE_FLOW_ITEM_TYPE_UDP &&
+           item->type != RTE_FLOW_ITEM_TYPE_SCTP) {
+               memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
+               rte_flow_error_set(error, EINVAL,
+                       RTE_FLOW_ERROR_TYPE_ITEM,
+                       item, "Not supported by fdir filter");
+               return -rte_errno;
+       }
+
+       rule->mode = RTE_FDIR_MODE_PERFECT;
+
+       /*Not supported last point for range*/
+       if (item->last) {
+               rte_flow_error_set(error, EINVAL,
+                       RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+                       item, "Not supported last point for range");
+               return -rte_errno;
+       }
+
+       /* Get the MAC info. */
+       if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
+               /**
+                * Only VLAN and dst MAC address are supported;
+                * other fields should be masked.
+                */
+               if (item->spec && !item->mask) {
+                       memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
+                       rte_flow_error_set(error, EINVAL,
+                               RTE_FLOW_ERROR_TYPE_ITEM,
+                               item, "Not supported by fdir filter");
+                       return -rte_errno;
+               }
+
+               if (item->spec) {
+                       rule->b_spec = TRUE;
+                       eth_spec = (const struct rte_flow_item_eth *)item->spec;
+
+                       /* Get the dst MAC. */
+                       for (j = 0; j < ETHER_ADDR_LEN; j++) {
+                               rule->ixgbe_fdir.formatted.inner_mac[j] =
+                                       eth_spec->dst.addr_bytes[j];
+                       }
+               }
+
+
+               if (item->mask) {
+                       /* If ethernet has meaning, it means MAC VLAN mode. */
+                       rule->mode = RTE_FDIR_MODE_PERFECT_MAC_VLAN;
+
+                       rule->b_mask = TRUE;
+                       eth_mask = (const struct rte_flow_item_eth *)item->mask;
+
+                       /* Ether type should be masked. */
+                       if (eth_mask->type) {
+                               memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
+                               rte_flow_error_set(error, EINVAL,
+                                       RTE_FLOW_ERROR_TYPE_ITEM,
+                                       item, "Not supported by fdir filter");
+                               return -rte_errno;
+                       }
+
+                       /**
+                        * src MAC address must be masked out,
+                        * and dst MAC address must be fully matched.
+                        */
+                       for (j = 0; j < ETHER_ADDR_LEN; j++) {
+                               if (eth_mask->src.addr_bytes[j] ||
+                                       eth_mask->dst.addr_bytes[j] != 0xFF) {
+                                       memset(rule, 0,
+                                       sizeof(struct ixgbe_fdir_rule));
+                                       rte_flow_error_set(error, EINVAL,
+                                       RTE_FLOW_ERROR_TYPE_ITEM,
+                                       item, "Not supported by fdir filter");
+                                       return -rte_errno;
+                               }
+                       }
+
+                       /* When no VLAN, considered as full mask. */
+                       rule->mask.vlan_tci_mask = rte_cpu_to_be_16(0xEFFF);
+               }
+               /* If both spec and mask are NULL,
+                * it means we don't care about ETH.
+                * Do nothing.
+                */
+
+               /**
+                * Check if the next not void item is vlan or ipv4.
+                * IPv6 is not supported.
+                */
+               index++;
+               NEXT_ITEM_OF_PATTERN(item, pattern, index);
+               if (rule->mode == RTE_FDIR_MODE_PERFECT_MAC_VLAN) {
+                       if (item->type != RTE_FLOW_ITEM_TYPE_VLAN) {
+                               memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
+                               rte_flow_error_set(error, EINVAL,
+                                       RTE_FLOW_ERROR_TYPE_ITEM,
+                                       item, "Not supported by fdir filter");
+                               return -rte_errno;
+                       }
+               } else {
+                       if (item->type != RTE_FLOW_ITEM_TYPE_IPV4) {
+                               memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
+                               rte_flow_error_set(error, EINVAL,
+                                       RTE_FLOW_ERROR_TYPE_ITEM,
+                                       item, "Not supported by fdir filter");
+                               return -rte_errno;
+                       }
+               }
+       }
+
+       if (item->type == RTE_FLOW_ITEM_TYPE_VLAN) {
+               if (!(item->spec && item->mask)) {
+                       memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
+                       rte_flow_error_set(error, EINVAL,
+                               RTE_FLOW_ERROR_TYPE_ITEM,
+                               item, "Not supported by fdir filter");
+                       return -rte_errno;
+               }
+
+               /*Not supported last point for range*/
+               if (item->last) {
+                       rte_flow_error_set(error, EINVAL,
+                               RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+                               item, "Not supported last point for range");
+                       return -rte_errno;
+               }
+
+               vlan_spec = (const struct rte_flow_item_vlan *)item->spec;
+               vlan_mask = (const struct rte_flow_item_vlan *)item->mask;
+
+               if (vlan_spec->tpid != rte_cpu_to_be_16(ETHER_TYPE_VLAN)) {
+                       memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
+                       rte_flow_error_set(error, EINVAL,
+                               RTE_FLOW_ERROR_TYPE_ITEM,
+                               item, "Not supported by fdir filter");
+                       return -rte_errno;
+               }
+
+               rule->ixgbe_fdir.formatted.vlan_id = vlan_spec->tci;
+
+               if (vlan_mask->tpid != (uint16_t)~0U) {
+                       memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
+                       rte_flow_error_set(error, EINVAL,
+                               RTE_FLOW_ERROR_TYPE_ITEM,
+                               item, "Not supported by fdir filter");
+                       return -rte_errno;
+               }
+               rule->mask.vlan_tci_mask = vlan_mask->tci;
+               /* More than one VLAN tag is not supported. */
+
+               /**
+                * Check if the next not void item is not vlan.
+                */
+               index++;
+               NEXT_ITEM_OF_PATTERN(item, pattern, index);
+               if (item->type == RTE_FLOW_ITEM_TYPE_VLAN) {
+                       memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
+                       rte_flow_error_set(error, EINVAL,
+                               RTE_FLOW_ERROR_TYPE_ITEM,
+                               item, "Not supported by fdir filter");
+                       return -rte_errno;
+               } else if (item->type != RTE_FLOW_ITEM_TYPE_END) {
+                       memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
+                       rte_flow_error_set(error, EINVAL,
+                               RTE_FLOW_ERROR_TYPE_ITEM,
+                               item, "Not supported by fdir filter");
+                       return -rte_errno;
+               }
+       }
+
+       /* Get the IP info. */
+       if (item->type == RTE_FLOW_ITEM_TYPE_IPV4) {
+               /**
+                * Set the flow type even if there's no content
+                * as we must have a flow type.
+                */
+               rule->ixgbe_fdir.formatted.flow_type =
+                       IXGBE_ATR_FLOW_TYPE_IPV4;
+               /*Not supported last point for range*/
+               if (item->last) {
+                       rte_flow_error_set(error, EINVAL,
+                               RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+                               item, "Not supported last point for range");
+                       return -rte_errno;
+               }
+               /**
+                * Only care about src & dst addresses,
+                * others should be masked.
+                */
+               if (!item->mask) {
+                       memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
+                       rte_flow_error_set(error, EINVAL,
+                               RTE_FLOW_ERROR_TYPE_ITEM,
+                               item, "Not supported by fdir filter");
+                       return -rte_errno;
+               }
+               rule->b_mask = TRUE;
+               ipv4_mask =
+                       (const struct rte_flow_item_ipv4 *)item->mask;
+               if (ipv4_mask->hdr.version_ihl ||
+                   ipv4_mask->hdr.type_of_service ||
+                   ipv4_mask->hdr.total_length ||
+                   ipv4_mask->hdr.packet_id ||
+                   ipv4_mask->hdr.fragment_offset ||
+                   ipv4_mask->hdr.time_to_live ||
+                   ipv4_mask->hdr.next_proto_id ||
+                   ipv4_mask->hdr.hdr_checksum) {
+                       memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
+                       rte_flow_error_set(error, EINVAL,
+                               RTE_FLOW_ERROR_TYPE_ITEM,
+                               item, "Not supported by fdir filter");
+                       return -rte_errno;
+               }
+               rule->mask.dst_ipv4_mask = ipv4_mask->hdr.dst_addr;
+               rule->mask.src_ipv4_mask = ipv4_mask->hdr.src_addr;
+
+               if (item->spec) {
+                       rule->b_spec = TRUE;
+                       ipv4_spec =
+                               (const struct rte_flow_item_ipv4 *)item->spec;
+                       rule->ixgbe_fdir.formatted.dst_ip[0] =
+                               ipv4_spec->hdr.dst_addr;
+                       rule->ixgbe_fdir.formatted.src_ip[0] =
+                               ipv4_spec->hdr.src_addr;
+               }
+
+               /**
+                * Check if the next not void item is
+                * TCP or UDP or SCTP or END.
+                */
+               index++;
+               NEXT_ITEM_OF_PATTERN(item, pattern, index);
+               if (item->type != RTE_FLOW_ITEM_TYPE_TCP &&
+                   item->type != RTE_FLOW_ITEM_TYPE_UDP &&
+                   item->type != RTE_FLOW_ITEM_TYPE_SCTP &&
+                   item->type != RTE_FLOW_ITEM_TYPE_END) {
+                       memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
+                       rte_flow_error_set(error, EINVAL,
+                               RTE_FLOW_ERROR_TYPE_ITEM,
+                               item, "Not supported by fdir filter");
+                       return -rte_errno;
+               }
+       }
+
+       /* Get the TCP info. */
+       if (item->type == RTE_FLOW_ITEM_TYPE_TCP) {
+               /**
+                * Set the flow type even if there's no content
+                * as we must have a flow type.
+                */
+               rule->ixgbe_fdir.formatted.flow_type =
+                       IXGBE_ATR_FLOW_TYPE_TCPV4;
+               /*Not supported last point for range*/
+               if (item->last) {
+                       rte_flow_error_set(error, EINVAL,
+                               RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+                               item, "Not supported last point for range");
+                       return -rte_errno;
+               }
+               /**
+                * Only care about src & dst ports,
+                * others should be masked.
+                */
+               if (!item->mask) {
+                       memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
+                       rte_flow_error_set(error, EINVAL,
+                               RTE_FLOW_ERROR_TYPE_ITEM,
+                               item, "Not supported by fdir filter");
+                       return -rte_errno;
+               }
+               rule->b_mask = TRUE;
+               tcp_mask = (const struct rte_flow_item_tcp *)item->mask;
+               if (tcp_mask->hdr.sent_seq ||
+                   tcp_mask->hdr.recv_ack ||
+                   tcp_mask->hdr.data_off ||
+                   tcp_mask->hdr.tcp_flags ||
+                   tcp_mask->hdr.rx_win ||
+                   tcp_mask->hdr.cksum ||
+                   tcp_mask->hdr.tcp_urp) {
+                       memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
+                       rte_flow_error_set(error, EINVAL,
+                               RTE_FLOW_ERROR_TYPE_ITEM,
+                               item, "Not supported by fdir filter");
+                       return -rte_errno;
+               }
+               rule->mask.src_port_mask = tcp_mask->hdr.src_port;
+               rule->mask.dst_port_mask = tcp_mask->hdr.dst_port;
+
+               if (item->spec) {
+                       rule->b_spec = TRUE;
+                       tcp_spec = (const struct rte_flow_item_tcp *)item->spec;
+                       rule->ixgbe_fdir.formatted.src_port =
+                               tcp_spec->hdr.src_port;
+                       rule->ixgbe_fdir.formatted.dst_port =
+                               tcp_spec->hdr.dst_port;
+               }
+       }
+
+       /* Get the UDP info */
+       if (item->type == RTE_FLOW_ITEM_TYPE_UDP) {
+               /**
+                * Set the flow type even if there's no content
+                * as we must have a flow type.
+                */
+               rule->ixgbe_fdir.formatted.flow_type =
+                       IXGBE_ATR_FLOW_TYPE_UDPV4;
+               /*Not supported last point for range*/
+               if (item->last) {
+                       rte_flow_error_set(error, EINVAL,
+                               RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+                               item, "Not supported last point for range");
+                       return -rte_errno;
+               }
+               /**
+                * Only care about src & dst ports,
+                * others should be masked.
+                */
+               if (!item->mask) {
+                       memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
+                       rte_flow_error_set(error, EINVAL,
+                               RTE_FLOW_ERROR_TYPE_ITEM,
+                               item, "Not supported by fdir filter");
+                       return -rte_errno;
+               }
+               rule->b_mask = TRUE;
+               udp_mask = (const struct rte_flow_item_udp *)item->mask;
+               if (udp_mask->hdr.dgram_len ||
+                   udp_mask->hdr.dgram_cksum) {
+                       memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
+                       rte_flow_error_set(error, EINVAL,
+                               RTE_FLOW_ERROR_TYPE_ITEM,
+                               item, "Not supported by fdir filter");
+                       return -rte_errno;
+               }
+               rule->mask.src_port_mask = udp_mask->hdr.src_port;
+               rule->mask.dst_port_mask = udp_mask->hdr.dst_port;
+
+               if (item->spec) {
+                       rule->b_spec = TRUE;
+                       udp_spec = (const struct rte_flow_item_udp *)item->spec;
+                       rule->ixgbe_fdir.formatted.src_port =
+                               udp_spec->hdr.src_port;
+                       rule->ixgbe_fdir.formatted.dst_port =
+                               udp_spec->hdr.dst_port;
+               }
+       }
+
+       /* Get the SCTP info */
+       if (item->type == RTE_FLOW_ITEM_TYPE_SCTP) {
+               /**
+                * Set the flow type even if there's no content
+                * as we must have a flow type.
+                */
+               rule->ixgbe_fdir.formatted.flow_type =
+                       IXGBE_ATR_FLOW_TYPE_SCTPV4;
+               /*Not supported last point for range*/
+               if (item->last) {
+                       rte_flow_error_set(error, EINVAL,
+                               RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+                               item, "Not supported last point for range");
+                       return -rte_errno;
+               }
+               /**
+                * Only care about src & dst ports,
+                * others should be masked.
+                */
+               if (!item->mask) {
+                       memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
+                       rte_flow_error_set(error, EINVAL,
+                               RTE_FLOW_ERROR_TYPE_ITEM,
+                               item, "Not supported by fdir filter");
+                       return -rte_errno;
+               }
+               rule->b_mask = TRUE;
+               sctp_mask =
+                       (const struct rte_flow_item_sctp *)item->mask;
+               if (sctp_mask->hdr.tag ||
+                   sctp_mask->hdr.cksum) {
+                       memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
+                       rte_flow_error_set(error, EINVAL,
+                               RTE_FLOW_ERROR_TYPE_ITEM,
+                               item, "Not supported by fdir filter");
+                       return -rte_errno;
+               }
+               rule->mask.src_port_mask = sctp_mask->hdr.src_port;
+               rule->mask.dst_port_mask = sctp_mask->hdr.dst_port;
+
+               if (item->spec) {
+                       rule->b_spec = TRUE;
+                       sctp_spec =
+                               (const struct rte_flow_item_sctp *)item->spec;
+                       rule->ixgbe_fdir.formatted.src_port =
+                               sctp_spec->hdr.src_port;
+                       rule->ixgbe_fdir.formatted.dst_port =
+                               sctp_spec->hdr.dst_port;
+               }
+       }
+
+       if (item->type != RTE_FLOW_ITEM_TYPE_END) {
+               /* check if the next not void item is END */
+               index++;
+               NEXT_ITEM_OF_PATTERN(item, pattern, index);
+               if (item->type != RTE_FLOW_ITEM_TYPE_END) {
+                       memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
+                       rte_flow_error_set(error, EINVAL,
+                               RTE_FLOW_ERROR_TYPE_ITEM,
+                               item, "Not supported by fdir filter");
+                       return -rte_errno;
+               }
+       }
+
+       return ixgbe_parse_fdir_act_attr(attr, actions, rule, error);
+}
+
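For the MAC VLAN branch above, a pattern matching the documented example could look like this sketch (endianness per the example; not part of the patch):

    struct rte_flow_item_eth eth_spec = {
            .dst = { .addr_bytes = { 0xAC, 0x7B, 0xA1, 0x2C, 0x6D, 0x36 } },
    };
    struct rte_flow_item_eth eth_mask = {
            /* dst fully matched; src and type stay zero (masked out) */
            .dst = { .addr_bytes = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF } },
    };
    struct rte_flow_item_vlan vlan_spec = {
            .tpid = rte_cpu_to_be_16(ETHER_TYPE_VLAN),
            .tci = rte_cpu_to_be_16(0x2016),
    };
    struct rte_flow_item_vlan vlan_mask = {
            .tpid = 0xFFFF, /* the parser requires a full tpid mask */
            .tci = 0xFFFF,
    };
    struct rte_flow_item pattern[] = {
            { .type = RTE_FLOW_ITEM_TYPE_ETH,
              .spec = &eth_spec, .mask = &eth_mask },
            { .type = RTE_FLOW_ITEM_TYPE_VLAN,
              .spec = &vlan_spec, .mask = &vlan_mask },
            { .type = RTE_FLOW_ITEM_TYPE_END },
    };
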
+#define NVGRE_PROTOCOL 0x6558
+
+/**
+ * Parse the rule to see if it is a VxLAN or NVGRE flow director rule,
+ * and fill in the flow director filter info as well.
+ * VxLAN PATTERN:
+ * The first not void item must be ETH.
+ * The second not void item must be IPV4/IPV6.
+ * The third not void item must be UDP.
+ * The fourth not void item must be VxLAN.
+ * The next not void item must be END.
+ * NVGRE PATTERN:
+ * The first not void item must be ETH.
+ * The second not void item must be IPV4/IPV6.
+ * The third not void item must be NVGRE.
+ * The next not void item must be END.
+ * ACTION:
+ * The first not void action should be QUEUE or DROP.
+ * The second not void optional action should be MARK,
+ * mark_id is a uint32_t number.
+ * The next not void action should be END.
+ * VxLAN pattern example:
+ * ITEM                Spec                    Mask
+ * ETH         NULL                    NULL
+ * IPV4/IPV6   NULL                    NULL
+ * UDP         NULL                    NULL
+ * VxLAN       vni{0x00, 0x32, 0x54}   {0xFF, 0xFF, 0xFF}
+ * END
+ * NVGRE pattern example:
+ * ITEM                Spec                    Mask
+ * ETH         NULL                    NULL
+ * IPV4/IPV6   NULL                    NULL
+ * NVGRE       protocol        0x6558  0xFFFF
+ *             tni{0x00, 0x32, 0x54}   {0xFF, 0xFF, 0xFF}
+ * END
+ * Other members in mask and spec should be set to 0x00.
+ * Item->last should be NULL.
+ */
+static int
+ixgbe_parse_fdir_filter_tunnel(const struct rte_flow_attr *attr,
+                              const struct rte_flow_item pattern[],
+                              const struct rte_flow_action actions[],
+                              struct ixgbe_fdir_rule *rule,
+                              struct rte_flow_error *error)
+{
+       const struct rte_flow_item *item;
+       const struct rte_flow_item_vxlan *vxlan_spec;
+       const struct rte_flow_item_vxlan *vxlan_mask;
+       const struct rte_flow_item_nvgre *nvgre_spec;
+       const struct rte_flow_item_nvgre *nvgre_mask;
+       const struct rte_flow_item_eth *eth_spec;
+       const struct rte_flow_item_eth *eth_mask;
+       const struct rte_flow_item_vlan *vlan_spec;
+       const struct rte_flow_item_vlan *vlan_mask;
+       uint32_t index, j;
+
+       if (!pattern) {
+               rte_flow_error_set(error, EINVAL,
+                       RTE_FLOW_ERROR_TYPE_ITEM_NUM,
+                                  NULL, "NULL pattern.");
+               return -rte_errno;
+       }
+
+       if (!actions) {
+               rte_flow_error_set(error, EINVAL,
+                                  RTE_FLOW_ERROR_TYPE_ACTION_NUM,
+                                  NULL, "NULL action.");
+               return -rte_errno;
+       }
+
+       if (!attr) {
+               rte_flow_error_set(error, EINVAL,
+                                  RTE_FLOW_ERROR_TYPE_ATTR,
+                                  NULL, "NULL attribute.");
+               return -rte_errno;
+       }
+
+       /**
+        * Some fields may not be provided. Set spec to 0 and mask to the
+        * default value, so nothing needs to be done later for the fields
+        * that are not provided.
+        */
+       memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
+       memset(&rule->mask, 0xFF, sizeof(struct ixgbe_hw_fdir_mask));
+       rule->mask.vlan_tci_mask = 0;
+
+       /* parse pattern */
+       index = 0;
+
+       /**
+        * The first not void item should be
+        * MAC or IPv4 or IPv6 or UDP or VxLAN or NVGRE.
+        */
+       NEXT_ITEM_OF_PATTERN(item, pattern, index);
+       if (item->type != RTE_FLOW_ITEM_TYPE_ETH &&
+           item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
+           item->type != RTE_FLOW_ITEM_TYPE_IPV6 &&
+           item->type != RTE_FLOW_ITEM_TYPE_UDP &&
+           item->type != RTE_FLOW_ITEM_TYPE_VXLAN &&
+           item->type != RTE_FLOW_ITEM_TYPE_NVGRE) {
+               memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
+               rte_flow_error_set(error, EINVAL,
+                       RTE_FLOW_ERROR_TYPE_ITEM,
+                       item, "Not supported by fdir filter");
+               return -rte_errno;
+       }
+
+       rule->mode = RTE_FDIR_MODE_PERFECT_TUNNEL;
+
+       /* Skip MAC. */
+       if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
+               /* Only used to describe the protocol stack. */
+               if (item->spec || item->mask) {
+                       memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
+                       rte_flow_error_set(error, EINVAL,
+                               RTE_FLOW_ERROR_TYPE_ITEM,
+                               item, "Not supported by fdir filter");
+                       return -rte_errno;
+               }
+               /* Not supported last point for range */
+               if (item->last) {
+                       rte_flow_error_set(error, EINVAL,
+                               RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+                               item, "Not supported last point for range");
+                       return -rte_errno;
+               }
+
+               /* Check if the next not void item is IPv4 or IPv6. */
+               index++;
+               NEXT_ITEM_OF_PATTERN(item, pattern, index);
+               if (item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
+                   item->type != RTE_FLOW_ITEM_TYPE_IPV6) {
+                       memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
+                       rte_flow_error_set(error, EINVAL,
+                               RTE_FLOW_ERROR_TYPE_ITEM,
+                               item, "Not supported by fdir filter");
+                       return -rte_errno;
+               }
+       }
+
+       /* Skip IP. */
+       if (item->type == RTE_FLOW_ITEM_TYPE_IPV4 ||
+           item->type == RTE_FLOW_ITEM_TYPE_IPV6) {
+               /* Only used to describe the protocol stack. */
+               if (item->spec || item->mask) {
+                       memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
+                       rte_flow_error_set(error, EINVAL,
+                               RTE_FLOW_ERROR_TYPE_ITEM,
+                               item, "Not supported by fdir filter");
+                       return -rte_errno;
+               }
+               /* Not supported last point for range */
+               if (item->last) {
+                       rte_flow_error_set(error, EINVAL,
+                               RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+                               item, "Not supported last point for range");
+                       return -rte_errno;
+               }
+
+               /* Check if the next not void item is UDP or NVGRE. */
+               index++;
+               NEXT_ITEM_OF_PATTERN(item, pattern, index);
+               if (item->type != RTE_FLOW_ITEM_TYPE_UDP &&
+                   item->type != RTE_FLOW_ITEM_TYPE_NVGRE) {
+                       memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
+                       rte_flow_error_set(error, EINVAL,
+                               RTE_FLOW_ERROR_TYPE_ITEM,
+                               item, "Not supported by fdir filter");
+                       return -rte_errno;
+               }
+       }
+
+       /* Skip UDP. */
+       if (item->type == RTE_FLOW_ITEM_TYPE_UDP) {
+               /* Only used to describe the protocol stack. */
+               if (item->spec || item->mask) {
+                       memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
+                       rte_flow_error_set(error, EINVAL,
+                               RTE_FLOW_ERROR_TYPE_ITEM,
+                               item, "Not supported by fdir filter");
+                       return -rte_errno;
+               }
+               /* Not supported last point for range */
+               if (item->last) {
+                       rte_flow_error_set(error, EINVAL,
+                               RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+                               item, "Not supported last point for range");
+                       return -rte_errno;
+               }
+
+               /* Check if the next not void item is VxLAN. */
+               index++;
+               NEXT_ITEM_OF_PATTERN(item, pattern, index);
+               if (item->type != RTE_FLOW_ITEM_TYPE_VXLAN) {
+                       memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
+                       rte_flow_error_set(error, EINVAL,
+                               RTE_FLOW_ERROR_TYPE_ITEM,
+                               item, "Not supported by fdir filter");
+                       return -rte_errno;
+               }
+       }
+
+       /* Get the VxLAN info */
+       if (item->type == RTE_FLOW_ITEM_TYPE_VXLAN) {
+               rule->ixgbe_fdir.formatted.tunnel_type =
+                       RTE_FDIR_TUNNEL_TYPE_VXLAN;
+
+               /* Only care about VNI, others should be masked. */
+               if (!item->mask) {
+                       memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
+                       rte_flow_error_set(error, EINVAL,
+                               RTE_FLOW_ERROR_TYPE_ITEM,
+                               item, "Not supported by fdir filter");
+                       return -rte_errno;
+               }
+               /* Not supported last point for range */
+               if (item->last) {
+                       rte_flow_error_set(error, EINVAL,
+                               RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+                               item, "Not supported last point for range");
+                       return -rte_errno;
+               }
+               rule->b_mask = TRUE;
+
+               /* Tunnel type is always meaningful. */
+               rule->mask.tunnel_type_mask = 1;
+
+               vxlan_mask =
+                       (const struct rte_flow_item_vxlan *)item->mask;
+               if (vxlan_mask->flags) {
+                       memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
+                       rte_flow_error_set(error, EINVAL,
+                               RTE_FLOW_ERROR_TYPE_ITEM,
+                               item, "Not supported by fdir filter");
+                       return -rte_errno;
+               }
+               /* VNI must be totally masked or not. */
+               if ((vxlan_mask->vni[0] || vxlan_mask->vni[1] ||
+                    vxlan_mask->vni[2]) &&
+                   ((vxlan_mask->vni[0] != 0xFF) ||
+                    (vxlan_mask->vni[1] != 0xFF) ||
+                    (vxlan_mask->vni[2] != 0xFF))) {
+                       memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
+                       rte_flow_error_set(error, EINVAL,
+                               RTE_FLOW_ERROR_TYPE_ITEM,
+                               item, "Not supported by fdir filter");
+                       return -rte_errno;
+               }
+
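+               /* vni is a 24-bit field */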
+               rte_memcpy(&rule->mask.tunnel_id_mask, vxlan_mask->vni,
+                       RTE_DIM(vxlan_mask->vni));
+               rule->mask.tunnel_id_mask <<= 8;
+
+               if (item->spec) {
+                       rule->b_spec = TRUE;
+                       vxlan_spec = (const struct rte_flow_item_vxlan *)
+                                       item->spec;
+                       rte_memcpy(&rule->ixgbe_fdir.formatted.tni_vni,
+                               vxlan_spec->vni, RTE_DIM(vxlan_spec->vni));
+                       rule->ixgbe_fdir.formatted.tni_vni <<= 8;
+               }
+       }
+
+       /* Get the NVGRE info */
+       if (item->type == RTE_FLOW_ITEM_TYPE_NVGRE) {
+               rule->ixgbe_fdir.formatted.tunnel_type =
+                       RTE_FDIR_TUNNEL_TYPE_NVGRE;
+
+               /**
+                * Only care about c_k_s_rsvd0_ver, protocol and TNI,
+                * others should be masked.
+                */
+               if (!item->mask) {
+                       memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
+                       rte_flow_error_set(error, EINVAL,
+                               RTE_FLOW_ERROR_TYPE_ITEM,
+                               item, "Not supported by fdir filter");
+                       return -rte_errno;
+               }
+               /* Not supported last point for range */
+               if (item->last) {
+                       rte_flow_error_set(error, EINVAL,
+                               RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+                               item, "Not supported last point for range");
+                       return -rte_errno;
+               }
+               rule->b_mask = TRUE;
+
+               /* Tunnel type is always meaningful. */
+               rule->mask.tunnel_type_mask = 1;
+
+               nvgre_mask =
+                       (const struct rte_flow_item_nvgre *)item->mask;
+               if (nvgre_mask->flow_id) {
+                       memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
+                       rte_flow_error_set(error, EINVAL,
+                               RTE_FLOW_ERROR_TYPE_ITEM,
+                               item, "Not supported by fdir filter");
+                       return -rte_errno;
+               }
+               if (nvgre_mask->c_k_s_rsvd0_ver !=
+                       rte_cpu_to_be_16(0x3000) ||
+                   nvgre_mask->protocol != 0xFFFF) {
+                       memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
+                       rte_flow_error_set(error, EINVAL,
+                               RTE_FLOW_ERROR_TYPE_ITEM,
+                               item, "Not supported by fdir filter");
+                       return -rte_errno;
+               }
+               /* TNI must be totally masked or not. */
+               if ((nvgre_mask->tni[0] || nvgre_mask->tni[1] ||
+                    nvgre_mask->tni[2]) &&
+                   ((nvgre_mask->tni[0] != 0xFF) ||
+                    (nvgre_mask->tni[1] != 0xFF) ||
+                    (nvgre_mask->tni[2] != 0xFF))) {
+                       memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
+                       rte_flow_error_set(error, EINVAL,
+                               RTE_FLOW_ERROR_TYPE_ITEM,
+                               item, "Not supported by fdir filter");
+                       return -rte_errno;
+               }
+               /* tni is a 24-bit field */
+               rte_memcpy(&rule->mask.tunnel_id_mask, nvgre_mask->tni,
+                       RTE_DIM(nvgre_mask->tni));
+               rule->mask.tunnel_id_mask <<= 8;
+
+               if (item->spec) {
+                       rule->b_spec = TRUE;
+                       nvgre_spec =
+                               (const struct rte_flow_item_nvgre *)item->spec;
+                       if (nvgre_spec->c_k_s_rsvd0_ver !=
+                           rte_cpu_to_be_16(0x2000) ||
+                           nvgre_spec->protocol !=
+                           rte_cpu_to_be_16(NVGRE_PROTOCOL)) {
+                               memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
+                               rte_flow_error_set(error, EINVAL,
+                                       RTE_FLOW_ERROR_TYPE_ITEM,
+                                       item, "Not supported by fdir filter");
+                               return -rte_errno;
+                       }
+                       /* tni is a 24-bit field */
+                       rte_memcpy(&rule->ixgbe_fdir.formatted.tni_vni,
+                                  nvgre_spec->tni, RTE_DIM(nvgre_spec->tni));
+                       rule->ixgbe_fdir.formatted.tni_vni <<= 8;
+               }
+       }
+
+       /* Check if the next not void item is the inner MAC. */
+       index++;
+       NEXT_ITEM_OF_PATTERN(item, pattern, index);
+       if (item->type != RTE_FLOW_ITEM_TYPE_ETH) {
+               memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
+               rte_flow_error_set(error, EINVAL,
+                       RTE_FLOW_ERROR_TYPE_ITEM,
+                       item, "Not supported by fdir filter");
+               return -rte_errno;
+       }
+
+       /**
+        * Only the VLAN and the inner dst MAC address are supported;
+        * other fields should be masked.
+        */
+
+       if (!item->mask) {
+               memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
+               rte_flow_error_set(error, EINVAL,
+                       RTE_FLOW_ERROR_TYPE_ITEM,
+                       item, "Not supported by fdir filter");
+               return -rte_errno;
+       }
+       /* Not supported last point for range */
+       if (item->last) {
+               rte_flow_error_set(error, EINVAL,
+                       RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+                       item, "Not supported last point for range");
+               return -rte_errno;
+       }
+       rule->b_mask = TRUE;
+       eth_mask = (const struct rte_flow_item_eth *)item->mask;
+
+       /* Ether type should be masked. */
+       if (eth_mask->type) {
+               memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
+               rte_flow_error_set(error, EINVAL,
+                       RTE_FLOW_ERROR_TYPE_ITEM,
+                       item, "Not supported by fdir filter");
+               return -rte_errno;
+       }
+
+       /* src MAC address should be masked. */
+       for (j = 0; j < ETHER_ADDR_LEN; j++) {
+               if (eth_mask->src.addr_bytes[j]) {
+                       memset(rule, 0,
+                              sizeof(struct ixgbe_fdir_rule));
+                       rte_flow_error_set(error, EINVAL,
+                               RTE_FLOW_ERROR_TYPE_ITEM,
+                               item, "Not supported by fdir filter");
+                       return -rte_errno;
+               }
+       }
+       rule->mask.mac_addr_byte_mask = 0;
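+       /* Bit j set in mac_addr_byte_mask means dst MAC byte j must match. */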
+       for (j = 0; j < ETHER_ADDR_LEN; j++) {
+               /* It's a per byte mask. */
+               if (eth_mask->dst.addr_bytes[j] == 0xFF) {
+                       rule->mask.mac_addr_byte_mask |= 0x1 << j;
+               } else if (eth_mask->dst.addr_bytes[j]) {
+                       memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
+                       rte_flow_error_set(error, EINVAL,
+                               RTE_FLOW_ERROR_TYPE_ITEM,
+                               item, "Not supported by fdir filter");
+                       return -rte_errno;
+               }
+       }
+
+       /* When no VLAN item is given, treat the TCI as fully masked. */
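+       /* 0xEFFF covers the PCP and VLAN ID bits of the TCI, not the DEI/CFI bit. */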
+       rule->mask.vlan_tci_mask = rte_cpu_to_be_16(0xEFFF);
+
+       if (item->spec) {
+               rule->b_spec = TRUE;
+               eth_spec = (const struct rte_flow_item_eth *)item->spec;
+
+               /* Get the dst MAC. */
+               for (j = 0; j < ETHER_ADDR_LEN; j++) {
+                       rule->ixgbe_fdir.formatted.inner_mac[j] =
+                               eth_spec->dst.addr_bytes[j];
+               }
+       }
+
+       /**
+        * Check if the next not void item is VLAN or IPv4.
+        * IPv6 is not supported.
+        */
+       index++;
+       NEXT_ITEM_OF_PATTERN(item, pattern, index);
+       if ((item->type != RTE_FLOW_ITEM_TYPE_VLAN) &&
+           (item->type != RTE_FLOW_ITEM_TYPE_IPV4)) {
+               memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
+               rte_flow_error_set(error, EINVAL,
+                       RTE_FLOW_ERROR_TYPE_ITEM,
+                       item, "Not supported by fdir filter");
+               return -rte_errno;
+       }
+       /* Not supported last point for range */
+       if (item->last) {
+               rte_flow_error_set(error, EINVAL,
+                       RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+                       item, "Not supported last point for range");
+               return -rte_errno;
+       }
+
+       if (item->type == RTE_FLOW_ITEM_TYPE_VLAN) {
+               if (!(item->spec && item->mask)) {
+                       memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
+                       rte_flow_error_set(error, EINVAL,
+                               RTE_FLOW_ERROR_TYPE_ITEM,
+                               item, "Not supported by fdir filter");
+                       return -rte_errno;
+               }
+
+               vlan_spec = (const struct rte_flow_item_vlan *)item->spec;
+               vlan_mask = (const struct rte_flow_item_vlan *)item->mask;
+
+               if (vlan_spec->tpid != rte_cpu_to_be_16(ETHER_TYPE_VLAN)) {
+                       memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
+                       rte_flow_error_set(error, EINVAL,
+                               RTE_FLOW_ERROR_TYPE_ITEM,
+                               item, "Not supported by fdir filter");
+                       return -rte_errno;
+               }
+
+               rule->ixgbe_fdir.formatted.vlan_id = vlan_spec->tci;
+
+               if (vlan_mask->tpid != (uint16_t)~0U) {
+                       memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
+                       rte_flow_error_set(error, EINVAL,
+                               RTE_FLOW_ERROR_TYPE_ITEM,
+                               item, "Not supported by fdir filter");
+                       return -rte_errno;
+               }
+               rule->mask.vlan_tci_mask = vlan_mask->tci;
+               /* More than one VLAN tag is not supported. */
+
+               /* Check if the next not void item is END. */
+               index++;
+               NEXT_ITEM_OF_PATTERN(item, pattern, index);
+               if (item->type != RTE_FLOW_ITEM_TYPE_END) {
+                       memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
+                       rte_flow_error_set(error, EINVAL,
+                               RTE_FLOW_ERROR_TYPE_ITEM,
+                               item, "Not supported by fdir filter");
+                       return -rte_errno;
+               }
+       }
+
+       /**
+        * If no VLAN item is provided, the VLAN is not cared about;
+        * do nothing.
+        */
+
+       return ixgbe_parse_fdir_act_attr(attr, actions, rule, error);
+}
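For reference, here is a minimal application-side sketch (not part of this patch) of a VxLAN rule that the tunnel parser above accepts. The helper name, port id, queue index, MAC address, VNI and VLAN TCI are all illustrative, and the port is assumed to have been configured with fdir_conf.mode = RTE_FDIR_MODE_PERFECT_TUNNEL:

    #include <rte_ether.h>
    #include <rte_byteorder.h>
    #include <rte_flow.h>

    /* Hypothetical helper: validate a VxLAN fdir rule on port_id. */
    static int
    validate_vxlan_fdir_rule(uint8_t port_id)
    {
            /* VNI 0x003254; the VNI mask must be all-ones or all-zeroes. */
            struct rte_flow_item_vxlan vxlan_spec = { .vni = { 0x00, 0x32, 0x54 } };
            struct rte_flow_item_vxlan vxlan_mask = { .vni = { 0xFF, 0xFF, 0xFF } };
            /* Inner dst MAC, fully masked per byte; src MAC and type stay 0. */
            struct rte_flow_item_eth inner_eth_spec = {
                    .dst.addr_bytes = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 },
            };
            struct rte_flow_item_eth inner_eth_mask = {
                    .dst.addr_bytes = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF },
            };
            /* Inner VLAN: the parser insists on TPID 0x8100 with a full mask. */
            struct rte_flow_item_vlan vlan_spec = {
                    .tpid = rte_cpu_to_be_16(ETHER_TYPE_VLAN),
                    .tci = rte_cpu_to_be_16(0x2016),
            };
            struct rte_flow_item_vlan vlan_mask = {
                    .tpid = 0xFFFF,
                    .tci = rte_cpu_to_be_16(0xEFFF),
            };
            struct rte_flow_item pattern[] = {
                    { .type = RTE_FLOW_ITEM_TYPE_ETH },  /* stack only */
                    { .type = RTE_FLOW_ITEM_TYPE_IPV4 }, /* stack only */
                    { .type = RTE_FLOW_ITEM_TYPE_UDP },  /* stack only */
                    { .type = RTE_FLOW_ITEM_TYPE_VXLAN,
                      .spec = &vxlan_spec, .mask = &vxlan_mask },
                    { .type = RTE_FLOW_ITEM_TYPE_ETH,
                      .spec = &inner_eth_spec, .mask = &inner_eth_mask },
                    { .type = RTE_FLOW_ITEM_TYPE_VLAN,
                      .spec = &vlan_spec, .mask = &vlan_mask },
                    { .type = RTE_FLOW_ITEM_TYPE_END },
            };
            struct rte_flow_action_queue queue = { .index = 3 };
            struct rte_flow_action actions[] = {
                    { .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
                    { .type = RTE_FLOW_ACTION_TYPE_END },
            };
            struct rte_flow_attr attr = { .ingress = 1 };
            struct rte_flow_error err;

            return rte_flow_validate(port_id, &attr, pattern, actions, &err);
    }

An NVGRE rule is built the same way: drop the UDP item and replace the VXLAN item with an NVGRE item whose protocol is 0x6558 under a 0xFFFF mask and whose tni is fully masked.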
+
+static int
+ixgbe_validate_fdir_filter(struct rte_eth_dev *dev,
+                       const struct rte_flow_attr *attr,
+                       const struct rte_flow_item pattern[],
+                       const struct rte_flow_action actions[],
+                       struct ixgbe_fdir_rule *rule,
+                       struct rte_flow_error *error)
+{
+       int ret;
+
+       enum rte_fdir_mode fdir_mode = dev->data->dev_conf.fdir_conf.mode;
+
+       ret = ixgbe_parse_fdir_filter(attr, pattern, actions,
+                                     rule, error);
+       if (ret)
+               return ret;
+
+       if (fdir_mode == RTE_FDIR_MODE_NONE ||
+           fdir_mode != rule->mode)
+               return -ENOTSUP;
+
+       return 0;
+}
+
+static int
+ixgbe_parse_fdir_filter(const struct rte_flow_attr *attr,
+                       const struct rte_flow_item pattern[],
+                       const struct rte_flow_action actions[],
+                       struct ixgbe_fdir_rule *rule,
+                       struct rte_flow_error *error)
+{
+       int ret;
+
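+       /*
+        * Try the non-tunnel (L3/L4) parser first; if it does not accept
+        * the pattern, fall back to the VxLAN/NVGRE tunnel parser.
+        */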
+       ret = ixgbe_parse_fdir_filter_normal(attr, pattern,
+                                       actions, rule, error);
+
+       if (!ret)
+               return 0;
+
+       ret = ixgbe_parse_fdir_filter_tunnel(attr, pattern,
+                                       actions, rule, error);
+
+       return ret;
+}
+
+/**
+ * Check if the flow rule is supported by ixgbe.
+ * It only checks the format. It doesn't guarantee that the rule can be
+ * programmed into the HW, because there may not be enough room for it.
+ */
+static int
+ixgbe_flow_validate(struct rte_eth_dev *dev,
+               const struct rte_flow_attr *attr,
+               const struct rte_flow_item pattern[],
+               const struct rte_flow_action actions[],
+               struct rte_flow_error *error)
+{
+       struct rte_eth_ntuple_filter ntuple_filter;
+       struct rte_eth_ethertype_filter ethertype_filter;
+       struct rte_eth_syn_filter syn_filter;
+       struct rte_eth_l2_tunnel_conf l2_tn_filter;
+       struct ixgbe_fdir_rule fdir_rule;
+       int ret;
+
+       memset(&ntuple_filter, 0, sizeof(struct rte_eth_ntuple_filter));
+       ret = ixgbe_parse_ntuple_filter(attr, pattern,
+                               actions, &ntuple_filter, error);
+       if (!ret)
+               return 0;
+
+       memset(&ethertype_filter, 0, sizeof(struct rte_eth_ethertype_filter));
+       ret = ixgbe_parse_ethertype_filter(attr, pattern,
+                               actions, &ethertype_filter, error);
+       if (!ret)
+               return 0;
+
+       memset(&syn_filter, 0, sizeof(struct rte_eth_syn_filter));
+       ret = ixgbe_parse_syn_filter(attr, pattern,
+                               actions, &syn_filter, error);
+       if (!ret)
+               return 0;
+
+       memset(&fdir_rule, 0, sizeof(struct ixgbe_fdir_rule));
+       ret = ixgbe_validate_fdir_filter(dev, attr, pattern,
+                               actions, &fdir_rule, error);
        if (!ret)
                return 0;