ixgbe: support new flow director modes for X550 and clean up code style
[dpdk.git] diff --git a/drivers/net/ixgbe/ixgbe_fdir.c b/drivers/net/ixgbe/ixgbe_fdir.c
index d294f85..861c7cb 100644
        rte_memcpy((ipaddr), ipv6_addr, sizeof(ipv6_addr));\
 } while (0)
 
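+/* 4789 is the IANA-assigned UDP destination port for VxLAN (RFC 7348) */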
+#define DEFAULT_VXLAN_PORT 4789
+#define IXGBE_FDIRIP6M_INNER_MAC_SHIFT 4
+
 static int fdir_erase_filter_82599(struct ixgbe_hw *hw, uint32_t fdirhash);
+static int fdir_set_input_mask(struct rte_eth_dev *dev,
+                              const struct rte_eth_fdir_masks *input_mask);
 static int fdir_set_input_mask_82599(struct rte_eth_dev *dev,
                const struct rte_eth_fdir_masks *input_mask);
+static int fdir_set_input_mask_x550(struct rte_eth_dev *dev,
+                                   const struct rte_eth_fdir_masks *input_mask);
 static int ixgbe_set_fdir_flex_conf(struct rte_eth_dev *dev,
                const struct rte_eth_fdir_flex_conf *conf, uint32_t *fdirctrl);
 static int fdir_enable_82599(struct ixgbe_hw *hw, uint32_t fdirctrl);
 static int ixgbe_fdir_filter_to_atr_input(
                const struct rte_eth_fdir_filter *fdir_filter,
-               union ixgbe_atr_input *input);
+               union ixgbe_atr_input *input,
+               enum rte_fdir_mode mode);
 static uint32_t ixgbe_atr_compute_hash_82599(union ixgbe_atr_input *atr_input,
                                 uint32_t key);
 static uint32_t atr_compute_sig_hash_82599(union ixgbe_atr_input *input,
@@ -122,7 +130,8 @@ static uint32_t atr_compute_perfect_hash_82599(union ixgbe_atr_input *input,
                enum rte_fdir_pballoc_type pballoc);
 static int fdir_write_perfect_filter_82599(struct ixgbe_hw *hw,
                        union ixgbe_atr_input *input, uint8_t queue,
-                       uint32_t fdircmd, uint32_t fdirhash);
+                       uint32_t fdircmd, uint32_t fdirhash,
+                       enum rte_fdir_mode mode);
 static int fdir_add_signature_filter_82599(struct ixgbe_hw *hw,
                union ixgbe_atr_input *input, u8 queue, uint32_t fdircmd,
                uint32_t fdirhash);
@@ -180,14 +189,13 @@ fdir_enable_82599(struct ixgbe_hw *hw, uint32_t fdirctrl)
        IXGBE_WRITE_FLUSH(hw);
        for (i = 0; i < IXGBE_FDIR_INIT_DONE_POLL; i++) {
                if (IXGBE_READ_REG(hw, IXGBE_FDIRCTRL) &
-                                  IXGBE_FDIRCTRL_INIT_DONE)
+                                  IXGBE_FDIRCTRL_INIT_DONE)
                        break;
                msec_delay(1);
        }
 
        if (i >= IXGBE_FDIR_INIT_DONE_POLL) {
-               PMD_INIT_LOG(ERR, "Flow Director poll time exceeded "
-                       "during enabling!");
+               PMD_INIT_LOG(ERR, "Flow Director poll time exceeded during enabling!");
                return -ETIMEDOUT;
        }
        return 0;
@@ -243,9 +251,16 @@ configure_fdir_flags(const struct rte_fdir_conf *conf, uint32_t *fdirctrl)
        *fdirctrl |= (IXGBE_DEFAULT_FLEXBYTES_OFFSET / sizeof(uint16_t)) <<
                     IXGBE_FDIRCTRL_FLEX_SHIFT;
 
-       if (conf->mode == RTE_FDIR_MODE_PERFECT) {
+       if (conf->mode >= RTE_FDIR_MODE_PERFECT &&
+           conf->mode <= RTE_FDIR_MODE_PERFECT_TUNNEL) {
                *fdirctrl |= IXGBE_FDIRCTRL_PERFECT_MATCH;
                *fdirctrl |= (conf->drop_queue << IXGBE_FDIRCTRL_DROP_Q_SHIFT);
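+               /* select MAC VLAN or cloud (tunnel) filtering in FDIRCTRL */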
+               if (conf->mode == RTE_FDIR_MODE_PERFECT_MAC_VLAN)
+                       *fdirctrl |= (IXGBE_FDIRCTRL_FILTERMODE_MACVLAN
+                                       << IXGBE_FDIRCTRL_FILTERMODE_SHIFT);
+               else if (conf->mode == RTE_FDIR_MODE_PERFECT_TUNNEL)
+                       *fdirctrl |= (IXGBE_FDIRCTRL_FILTERMODE_CLOUD
+                                       << IXGBE_FDIRCTRL_FILTERMODE_SHIFT);
        }
 
        return 0;
@@ -266,6 +281,7 @@ static inline uint32_t
 reverse_fdir_bitmasks(uint16_t hi_dword, uint16_t lo_dword)
 {
        uint32_t mask = hi_dword << 16;
+
        mask |= lo_dword;
        mask = ((mask & 0x55555555) << 1) | ((mask & 0xAAAAAAAA) >> 1);
        mask = ((mask & 0x33333333) << 2) | ((mask & 0xCCCCCCCC) >> 2);
@@ -274,7 +290,7 @@ reverse_fdir_bitmasks(uint16_t hi_dword, uint16_t lo_dword)
 }
 
 /*
- * This is based on ixgbe_fdir_set_input_mask_82599() in base/ixgbe_82599.c,
+ * This references ixgbe_fdir_set_input_mask_82599() in base/ixgbe_82599.c,
  * but makes use of the rte_fdir_masks structure to see which bits to set.
  */
 static int
@@ -293,6 +309,7 @@ fdir_set_input_mask_82599(struct rte_eth_dev *dev,
        uint32_t fdiripv6m; /* IPv6 source and destination masks. */
        uint16_t dst_ipv6m = 0;
        uint16_t src_ipv6m = 0;
+       volatile uint32_t *reg;
 
        PMD_INIT_FUNC_TRACE();
 
@@ -306,16 +323,16 @@ fdir_set_input_mask_82599(struct rte_eth_dev *dev,
                /* use the L4 protocol mask for raw IPv4/IPv6 traffic */
                fdirm |= IXGBE_FDIRM_L4P;
 
-       if (input_mask->vlan_tci_mask == 0x0FFF)
+       if (input_mask->vlan_tci_mask == rte_cpu_to_be_16(0x0FFF))
                /* mask VLAN Priority */
                fdirm |= IXGBE_FDIRM_VLANP;
-       else if (input_mask->vlan_tci_mask == 0xE000)
+       else if (input_mask->vlan_tci_mask == rte_cpu_to_be_16(0xE000))
                /* mask VLAN ID */
                fdirm |= IXGBE_FDIRM_VLANID;
        else if (input_mask->vlan_tci_mask == 0)
                /* mask VLAN ID and Priority */
                fdirm |= IXGBE_FDIRM_VLANID | IXGBE_FDIRM_VLANP;
-       else if (input_mask->vlan_tci_mask != 0xEFFF) {
+       else if (input_mask->vlan_tci_mask != rte_cpu_to_be_16(0xEFFF)) {
                PMD_INIT_LOG(ERR, "invalid vlan_tci_mask");
                return -EINVAL;
        }
@@ -324,25 +341,31 @@ fdir_set_input_mask_82599(struct rte_eth_dev *dev,
        IXGBE_WRITE_REG(hw, IXGBE_FDIRM, fdirm);
 
        /* store the TCP/UDP port masks, bit reversed from port layout */
-       fdirtcpm = reverse_fdir_bitmasks(input_mask->dst_port_mask,
-                                        input_mask->src_port_mask);
+       fdirtcpm = reverse_fdir_bitmasks(
+                       rte_be_to_cpu_16(input_mask->dst_port_mask),
+                       rte_be_to_cpu_16(input_mask->src_port_mask));
 
-       /* write all the same so that UDP, TCP and SCTP use the same mask */
+       /* write all the same so that UDP, TCP and SCTP use the same mask
+        * (little-endian)
+        */
        IXGBE_WRITE_REG(hw, IXGBE_FDIRTCPM, ~fdirtcpm);
        IXGBE_WRITE_REG(hw, IXGBE_FDIRUDPM, ~fdirtcpm);
        IXGBE_WRITE_REG(hw, IXGBE_FDIRSCTPM, ~fdirtcpm);
        info->mask.src_port_mask = input_mask->src_port_mask;
        info->mask.dst_port_mask = input_mask->dst_port_mask;
 
-       /* Store source and destination IPv4 masks (big-endian) */
-       IXGBE_WRITE_REG(hw, IXGBE_FDIRSIP4M, ~(input_mask->ipv4_mask.src_ip));
-       IXGBE_WRITE_REG(hw, IXGBE_FDIRDIP4M, ~(input_mask->ipv4_mask.dst_ip));
+       /* Store source and destination IPv4 masks (big-endian);
+        * IXGBE_WRITE_REG cannot be used here.
+        */
+       reg = IXGBE_PCI_REG_ADDR(hw, IXGBE_FDIRSIP4M);
+       *reg = ~(input_mask->ipv4_mask.src_ip);
+       reg = IXGBE_PCI_REG_ADDR(hw, IXGBE_FDIRDIP4M);
+       *reg = ~(input_mask->ipv4_mask.dst_ip);
        info->mask.src_ipv4_mask = input_mask->ipv4_mask.src_ip;
        info->mask.dst_ipv4_mask = input_mask->ipv4_mask.dst_ip;
 
        if (dev->data->dev_conf.fdir_conf.mode == RTE_FDIR_MODE_SIGNATURE) {
                /*
-                * IPv6 mask is only meaningful in signature mode
                 * Store source and destination IPv6 masks (bit reversed)
                 */
                IPV6_ADDR_TO_MASK(input_mask->ipv6_mask.src_ip, src_ipv6m);
@@ -357,6 +380,123 @@ fdir_set_input_mask_82599(struct rte_eth_dev *dev,
        return IXGBE_SUCCESS;
 }
 
+/*
+ * This references ixgbe_fdir_set_input_mask_82599() in base/ixgbe_82599.c,
+ * but makes use of the rte_fdir_masks structure to see which bits to set.
+ */
+static int
+fdir_set_input_mask_x550(struct rte_eth_dev *dev,
+                        const struct rte_eth_fdir_masks *input_mask)
+{
+       struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       struct ixgbe_hw_fdir_info *info =
+                       IXGBE_DEV_PRIVATE_TO_FDIR_INFO(dev->data->dev_private);
+       /* mask VM pool and DIPv6 since they are not currently supported.
+        * Also mask the FLEX byte; it will be set in flex_conf.
+        */
+       uint32_t fdirm = IXGBE_FDIRM_POOL | IXGBE_FDIRM_DIPv6 |
+                        IXGBE_FDIRM_FLEX;
+       uint32_t fdiripv6m;
+       enum rte_fdir_mode mode = dev->data->dev_conf.fdir_conf.mode;
+       uint16_t mac_mask;
+
+       PMD_INIT_FUNC_TRACE();
+
+       /* set the default UDP port for VxLAN */
+       if (mode == RTE_FDIR_MODE_PERFECT_TUNNEL)
+               IXGBE_WRITE_REG(hw, IXGBE_VXLANCTRL, DEFAULT_VXLAN_PORT);
+
+       /* some bits must be set for mac vlan or tunnel mode */
+       fdirm |= IXGBE_FDIRM_L4P | IXGBE_FDIRM_L3P;
+
+       if (input_mask->vlan_tci_mask == rte_cpu_to_be_16(0x0FFF))
+               /* mask VLAN Priority */
+               fdirm |= IXGBE_FDIRM_VLANP;
+       else if (input_mask->vlan_tci_mask == rte_cpu_to_be_16(0xE000))
+               /* mask VLAN ID */
+               fdirm |= IXGBE_FDIRM_VLANID;
+       else if (input_mask->vlan_tci_mask == 0)
+               /* mask VLAN ID and Priority */
+               fdirm |= IXGBE_FDIRM_VLANID | IXGBE_FDIRM_VLANP;
+       else if (input_mask->vlan_tci_mask != rte_cpu_to_be_16(0xEFFF)) {
+               PMD_INIT_LOG(ERR, "invalid vlan_tci_mask");
+               return -EINVAL;
+       }
+       info->mask.vlan_tci_mask = input_mask->vlan_tci_mask;
+
+       IXGBE_WRITE_REG(hw, IXGBE_FDIRM, fdirm);
+
+       fdiripv6m = ((u32)0xFFFFU << IXGBE_FDIRIP6M_DIPM_SHIFT);
+       fdiripv6m |= IXGBE_FDIRIP6M_ALWAYS_MASK;
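+       /* MAC VLAN mode has no tunnel, so also mask the tunnel type
+        * and TNI/VNI fields
+        */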
+       if (mode == RTE_FDIR_MODE_PERFECT_MAC_VLAN)
+               fdiripv6m |= IXGBE_FDIRIP6M_TUNNEL_TYPE |
+                               IXGBE_FDIRIP6M_TNI_VNI;
+
+       mac_mask = input_mask->mac_addr_byte_mask;
+       fdiripv6m |= (mac_mask << IXGBE_FDIRIP6M_INNER_MAC_SHIFT)
+                       & IXGBE_FDIRIP6M_INNER_MAC;
+       info->mask.mac_addr_byte_mask = input_mask->mac_addr_byte_mask;
+
+       if (mode == RTE_FDIR_MODE_PERFECT_TUNNEL) {
+               switch (input_mask->tunnel_type_mask) {
+               case 0:
+                       /* Mask tunnel type */
+                       fdiripv6m |= IXGBE_FDIRIP6M_TUNNEL_TYPE;
+                       break;
+               case 1:
+                       break;
+               default:
+                       PMD_INIT_LOG(ERR, "invalid tunnel_type_mask");
+                       return -EINVAL;
+               }
+               info->mask.tunnel_type_mask =
+                       input_mask->tunnel_type_mask;
+
+               switch (rte_be_to_cpu_32(input_mask->tunnel_id_mask)) {
+               case 0x0:
+                       /* Mask vxlan id */
+                       fdiripv6m |= IXGBE_FDIRIP6M_TNI_VNI;
+                       break;
+               case 0x00FFFFFF:
+                       fdiripv6m |= IXGBE_FDIRIP6M_TNI_VNI_24;
+                       break;
+               case 0xFFFFFFFF:
+                       break;
+               default:
+                       PMD_INIT_LOG(ERR, "invalid tunnel_id_mask");
+                       return -EINVAL;
+               }
+               info->mask.tunnel_id_mask =
+                       input_mask->tunnel_id_mask;
+       }
+
+       IXGBE_WRITE_REG(hw, IXGBE_FDIRIP6M, fdiripv6m);
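+       /* ports and IPv4 addresses are not matched in these modes */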
+       IXGBE_WRITE_REG(hw, IXGBE_FDIRTCPM, 0xFFFFFFFF);
+       IXGBE_WRITE_REG(hw, IXGBE_FDIRUDPM, 0xFFFFFFFF);
+       IXGBE_WRITE_REG(hw, IXGBE_FDIRSCTPM, 0xFFFFFFFF);
+       IXGBE_WRITE_REG(hw, IXGBE_FDIRDIP4M, 0xFFFFFFFF);
+       IXGBE_WRITE_REG(hw, IXGBE_FDIRSIP4M, 0xFFFFFFFF);
+
+       return IXGBE_SUCCESS;
+}
+
+static int
+fdir_set_input_mask(struct rte_eth_dev *dev,
+                   const struct rte_eth_fdir_masks *input_mask)
+{
+       enum rte_fdir_mode mode = dev->data->dev_conf.fdir_conf.mode;
+
+       if (mode >= RTE_FDIR_MODE_SIGNATURE &&
+           mode <= RTE_FDIR_MODE_PERFECT)
+               return fdir_set_input_mask_82599(dev, input_mask);
+       else if (mode >= RTE_FDIR_MODE_PERFECT_MAC_VLAN &&
+                mode <= RTE_FDIR_MODE_PERFECT_TUNNEL)
+               return fdir_set_input_mask_x550(dev, input_mask);
+
+       PMD_DRV_LOG(ERR, "Unsupported fdir mode - %d!", mode);
+       return -ENOTSUP;
+}
+
 /*
  * ixgbe_check_fdir_flex_conf -check if the flex payload and mask configuration
  * arguments are valid
@@ -377,7 +517,7 @@ ixgbe_set_fdir_flex_conf(struct rte_eth_dev *dev,
        fdirm = IXGBE_READ_REG(hw, IXGBE_FDIRM);
 
        if (conf == NULL) {
-               PMD_DRV_LOG(INFO, "NULL pointer.");
+               PMD_DRV_LOG(ERR, "NULL pointer.");
                return -EINVAL;
        }
 
@@ -431,13 +571,23 @@ ixgbe_fdir_configure(struct rte_eth_dev *dev)
        int err;
        uint32_t fdirctrl, pbsize;
        int i;
+       enum rte_fdir_mode mode = dev->data->dev_conf.fdir_conf.mode;
 
        PMD_INIT_FUNC_TRACE();
 
        if (hw->mac.type != ixgbe_mac_82599EB &&
                hw->mac.type != ixgbe_mac_X540 &&
                hw->mac.type != ixgbe_mac_X550 &&
-               hw->mac.type != ixgbe_mac_X550EM_x)
+               hw->mac.type != ixgbe_mac_X550EM_x &&
+               hw->mac.type != ixgbe_mac_X550EM_a)
+               return -ENOSYS;
+
+       /* x550 supports mac-vlan and tunnel modes but other NICs do not */
+       if (hw->mac.type != ixgbe_mac_X550 &&
+           hw->mac.type != ixgbe_mac_X550EM_x &&
+           hw->mac.type != ixgbe_mac_X550EM_a &&
+           mode != RTE_FDIR_MODE_SIGNATURE &&
+           mode != RTE_FDIR_MODE_PERFECT)
                return -ENOSYS;
 
        err = configure_fdir_flags(&dev->data->dev_conf.fdir_conf, &fdirctrl);
@@ -462,7 +612,7 @@ ixgbe_fdir_configure(struct rte_eth_dev *dev)
        for (i = 1; i < 8; i++)
                IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), 0);
 
-       err = fdir_set_input_mask_82599(dev, &dev->data->dev_conf.fdir_conf.mask);
+       err = fdir_set_input_mask(dev, &dev->data->dev_conf.fdir_conf.mask);
        if (err < 0) {
                PMD_INIT_LOG(ERR, " Error on setting FD mask");
                return err;
@@ -488,7 +638,7 @@ ixgbe_fdir_configure(struct rte_eth_dev *dev)
  */
 static int
 ixgbe_fdir_filter_to_atr_input(const struct rte_eth_fdir_filter *fdir_filter,
-               union ixgbe_atr_input *input)
+               union ixgbe_atr_input *input, enum rte_fdir_mode mode)
 {
        input->formatted.vlan_id = fdir_filter->input.flow_ext.vlan_tci;
        input->formatted.flex_bytes = (uint16_t)(
@@ -521,8 +671,7 @@ ixgbe_fdir_filter_to_atr_input(const struct rte_eth_fdir_filter *fdir_filter,
                input->formatted.flow_type = IXGBE_ATR_FLOW_TYPE_IPV6;
                break;
        default:
-               PMD_DRV_LOG(ERR, " Error on flow_type input");
-               return -EINVAL;
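+               /* MAC VLAN and tunnel filters carry no L3/L4 flow type */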
+               break;
        }
 
        switch (fdir_filter->input.flow_type) {
@@ -558,8 +707,23 @@ ixgbe_fdir_filter_to_atr_input(const struct rte_eth_fdir_filter *fdir_filter,
                           sizeof(input->formatted.dst_ip));
                break;
        default:
-               PMD_DRV_LOG(ERR, " Error on flow_type input");
-               return -EINVAL;
+               break;
+       }
+
+       if (mode == RTE_FDIR_MODE_PERFECT_MAC_VLAN) {
+               rte_memcpy(
+                       input->formatted.inner_mac,
+                       fdir_filter->input.flow.mac_vlan_flow.mac_addr.addr_bytes,
+                       sizeof(input->formatted.inner_mac));
+       } else if (mode == RTE_FDIR_MODE_PERFECT_TUNNEL) {
+               rte_memcpy(
+                       input->formatted.inner_mac,
+                       fdir_filter->input.flow.tunnel_flow.mac_addr.addr_bytes,
+                       sizeof(input->formatted.inner_mac));
+               input->formatted.tunnel_type =
+                       fdir_filter->input.flow.tunnel_flow.tunnel_type;
+               input->formatted.tni_vni =
+                       fdir_filter->input.flow.tunnel_flow.tunnel_id;
        }
 
        return 0;
@@ -614,7 +778,7 @@ ixgbe_atr_compute_hash_82599(union ixgbe_atr_input *atr_input,
         *
         *    hi_hash_dword[31:0]  ^= Stream[351:320];
         *
-        *    if(key[0])
+        *    if (key[0])
         *        hash[15:0] ^= Stream[15:0];
         *
         *    for (i = 0; i < 16; i++) {
@@ -646,8 +810,10 @@ ixgbe_atr_compute_hash_82599(union ixgbe_atr_input *atr_input,
        hi_hash_dword ^= flow_vm_vlan ^ (flow_vm_vlan >> 16);
 
        /* Process bits 0 and 16 */
-       if (key & 0x0001) hash_result ^= lo_hash_dword;
-       if (key & 0x00010000) hash_result ^= hi_hash_dword;
+       if (key & 0x0001)
+               hash_result ^= lo_hash_dword;
+       if (key & 0x00010000)
+               hash_result ^= hi_hash_dword;
 
        /*
         * apply flow ID/VM pool/VLAN ID bits to lo hash dword, we had to
@@ -658,9 +824,11 @@ ixgbe_atr_compute_hash_82599(union ixgbe_atr_input *atr_input,
 
 
        /* process the remaining 30 bits in the key 2 bits at a time */
-       for (i = 15; i; i-- ) {
-               if (key & (0x0001 << i)) hash_result ^= lo_hash_dword >> i;
-               if (key & (0x00010000 << i)) hash_result ^= hi_hash_dword >> i;
+       for (i = 15; i; i--) {
+               if (key & (0x0001 << i))
+                       hash_result ^= lo_hash_dword >> i;
+               if (key & (0x00010000 << i))
+                       hash_result ^= hi_hash_dword >> i;
        }
 
        return hash_result;
@@ -743,20 +911,55 @@ atr_compute_sig_hash_82599(union ixgbe_atr_input *input,
 static int
 fdir_write_perfect_filter_82599(struct ixgbe_hw *hw,
                        union ixgbe_atr_input *input, uint8_t queue,
-                       uint32_t fdircmd, uint32_t fdirhash)
+                       uint32_t fdircmd, uint32_t fdirhash,
+                       enum rte_fdir_mode mode)
 {
        uint32_t fdirport, fdirvlan;
+       u32 addr_low, addr_high;
+       u32 tunnel_type = 0;
        int err = 0;
+       volatile uint32_t *reg;
 
-       /* record the IPv4 address (big-endian) */
-       IXGBE_WRITE_REG(hw, IXGBE_FDIRIPSA, input->formatted.src_ip[0]);
-       IXGBE_WRITE_REG(hw, IXGBE_FDIRIPDA, input->formatted.dst_ip[0]);
-
-       /* record source and destination port (little-endian)*/
-       fdirport = IXGBE_NTOHS(input->formatted.dst_port);
-       fdirport <<= IXGBE_FDIRPORT_DESTINATION_SHIFT;
-       fdirport |= IXGBE_NTOHS(input->formatted.src_port);
-       IXGBE_WRITE_REG(hw, IXGBE_FDIRPORT, fdirport);
+       if (mode == RTE_FDIR_MODE_PERFECT) {
+               /* record the IPv4 address (big-endian);
+                * IXGBE_WRITE_REG cannot be used here.
+                */
+               reg = IXGBE_PCI_REG_ADDR(hw, IXGBE_FDIRIPSA);
+               *reg = input->formatted.src_ip[0];
+               reg = IXGBE_PCI_REG_ADDR(hw, IXGBE_FDIRIPDA);
+               *reg = input->formatted.dst_ip[0];
+
+               /* record source and destination port (little-endian)*/
+               fdirport = IXGBE_NTOHS(input->formatted.dst_port);
+               fdirport <<= IXGBE_FDIRPORT_DESTINATION_SHIFT;
+               fdirport |= IXGBE_NTOHS(input->formatted.src_port);
+               IXGBE_WRITE_REG(hw, IXGBE_FDIRPORT, fdirport);
+       } else if (mode >= RTE_FDIR_MODE_PERFECT_MAC_VLAN &&
+                  mode <= RTE_FDIR_MODE_PERFECT_TUNNEL) {
+               /* for mac vlan and tunnel modes */
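+               /* pack the 6-byte inner MAC address into two 32-bit words */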
+               addr_low = ((u32)input->formatted.inner_mac[0] |
+                           ((u32)input->formatted.inner_mac[1] << 8) |
+                           ((u32)input->formatted.inner_mac[2] << 16) |
+                           ((u32)input->formatted.inner_mac[3] << 24));
+               addr_high = ((u32)input->formatted.inner_mac[4] |
+                            ((u32)input->formatted.inner_mac[5] << 8));
+
+               if (mode == RTE_FDIR_MODE_PERFECT_MAC_VLAN) {
+                       IXGBE_WRITE_REG(hw, IXGBE_FDIRSIPv6(0), addr_low);
+                       IXGBE_WRITE_REG(hw, IXGBE_FDIRSIPv6(1), addr_high);
+                       IXGBE_WRITE_REG(hw, IXGBE_FDIRSIPv6(2), 0);
+               } else {
+                       /* tunnel mode */
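+                       /* bit 31 of FDIRSIPv6(1): set for VxLAN, clear for NVGRE */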
+                       if (input->formatted.tunnel_type !=
+                               RTE_FDIR_TUNNEL_TYPE_NVGRE)
+                               tunnel_type = 0x80000000;
+                       tunnel_type |= addr_high;
+                       IXGBE_WRITE_REG(hw, IXGBE_FDIRSIPv6(0), addr_low);
+                       IXGBE_WRITE_REG(hw, IXGBE_FDIRSIPv6(1), tunnel_type);
+                       IXGBE_WRITE_REG(hw, IXGBE_FDIRSIPv6(2),
+                                       input->formatted.tni_vni);
+               }
+       }
 
        /* record vlan (little-endian) and flex_bytes(big-endian) */
        fdirvlan = input->formatted.flex_bytes;
@@ -817,7 +1020,7 @@ fdir_add_signature_filter_82599(struct ixgbe_hw *hw,
 
        /* configure FDIRCMD register */
        fdircmd |= IXGBE_FDIRCMD_CMD_ADD_FLOW |
-                 IXGBE_FDIRCMD_LAST | IXGBE_FDIRCMD_QUEUE_EN;
+                 IXGBE_FDIRCMD_LAST | IXGBE_FDIRCMD_QUEUE_EN;
        fdircmd |= input->formatted.flow_type << IXGBE_FDIRCMD_FLOW_TYPE_SHIFT;
        fdircmd |= (uint32_t)queue << IXGBE_FDIRCMD_RX_QUEUE_SHIFT;
 
@@ -881,9 +1084,9 @@ fdir_erase_filter_82599(struct ixgbe_hw *hw, uint32_t fdirhash)
  */
 static int
 ixgbe_add_del_fdir_filter(struct rte_eth_dev *dev,
-                             const struct rte_eth_fdir_filter *fdir_filter,
-                             bool del,
-                             bool update)
+                         const struct rte_eth_fdir_filter *fdir_filter,
+                         bool del,
+                         bool update)
 {
        struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        uint32_t fdircmd_flags;
@@ -893,9 +1096,10 @@ ixgbe_add_del_fdir_filter(struct rte_eth_dev *dev,
        bool is_perfect = FALSE;
        int err;
        struct ixgbe_hw_fdir_info *info =
-                       IXGBE_DEV_PRIVATE_TO_FDIR_INFO(dev->data->dev_private);
+               IXGBE_DEV_PRIVATE_TO_FDIR_INFO(dev->data->dev_private);
+       enum rte_fdir_mode fdir_mode = dev->data->dev_conf.fdir_conf.mode;
 
-       if (dev->data->dev_conf.fdir_conf.mode == RTE_FDIR_MODE_NONE)
+       if (fdir_mode == RTE_FDIR_MODE_NONE)
                return -ENOTSUP;
 
        /*
@@ -906,39 +1110,42 @@ ixgbe_add_del_fdir_filter(struct rte_eth_dev *dev,
         */
        if ((!del) &&
            (hw->mac.type == ixgbe_mac_X550 ||
-            hw->mac.type == ixgbe_mac_X550EM_x) &&
+            hw->mac.type == ixgbe_mac_X550EM_x ||
+            hw->mac.type == ixgbe_mac_X550EM_a) &&
            (fdir_filter->input.flow_type ==
-              RTE_ETH_FLOW_NONFRAG_IPV4_OTHER) &&
+            RTE_ETH_FLOW_NONFRAG_IPV4_OTHER) &&
            (info->mask.src_port_mask != 0 ||
             info->mask.dst_port_mask != 0)) {
                PMD_DRV_LOG(ERR, "By this device,"
-                                " IPv4-other is not supported without"
-                                " L4 protocol and ports masked!");
+                           " IPv4-other is not supported without"
+                           " L4 protocol and ports masked!");
                return -ENOTSUP;
        }
 
-       if (dev->data->dev_conf.fdir_conf.mode == RTE_FDIR_MODE_PERFECT)
+       if (fdir_mode >= RTE_FDIR_MODE_PERFECT &&
+           fdir_mode <= RTE_FDIR_MODE_PERFECT_TUNNEL)
                is_perfect = TRUE;
 
        memset(&input, 0, sizeof(input));
 
-       err = ixgbe_fdir_filter_to_atr_input(fdir_filter, &input);
+       err = ixgbe_fdir_filter_to_atr_input(fdir_filter, &input,
+                                            fdir_mode);
        if (err)
                return err;
 
        if (is_perfect) {
                if (input.formatted.flow_type & IXGBE_ATR_L4TYPE_IPV6_MASK) {
                        PMD_DRV_LOG(ERR, "IPv6 is not supported in"
-                                        " perfect mode!");
+                                   " perfect mode!");
                        return -ENOTSUP;
                }
                fdirhash = atr_compute_perfect_hash_82599(&input,
-                               dev->data->dev_conf.fdir_conf.pballoc);
+                                                         dev->data->dev_conf.fdir_conf.pballoc);
                fdirhash |= fdir_filter->soft_id <<
-                               IXGBE_FDIRHASH_SIG_SW_INDEX_SHIFT;
+                       IXGBE_FDIRHASH_SIG_SW_INDEX_SHIFT;
        } else
                fdirhash = atr_compute_sig_hash_82599(&input,
-                               dev->data->dev_conf.fdir_conf.pballoc);
+                                                     dev->data->dev_conf.fdir_conf.pballoc);
 
        if (del) {
                err = fdir_erase_filter_82599(hw, fdirhash);
@@ -956,20 +1163,22 @@ ixgbe_add_del_fdir_filter(struct rte_eth_dev *dev,
                        fdircmd_flags |= IXGBE_FDIRCMD_DROP;
                } else {
                        PMD_DRV_LOG(ERR, "Drop option is not supported in"
-                               " signature mode.");
+                                   " signature mode.");
                        return -EINVAL;
                }
-       } else if (fdir_filter->action.rx_queue < IXGBE_MAX_RX_QUEUE_NUM)
+       } else if (fdir_filter->action.behavior == RTE_ETH_FDIR_ACCEPT &&
+                  fdir_filter->action.rx_queue < IXGBE_MAX_RX_QUEUE_NUM)
                queue = (uint8_t)fdir_filter->action.rx_queue;
        else
                return -EINVAL;
 
        if (is_perfect) {
                err = fdir_write_perfect_filter_82599(hw, &input, queue,
-                               fdircmd_flags, fdirhash);
+                                                     fdircmd_flags, fdirhash,
+                                                     fdir_mode);
        } else {
                err = fdir_add_signature_filter_82599(hw, &input, queue,
-                               fdircmd_flags, fdirhash);
+                                                     fdircmd_flags, fdirhash);
        }
        if (err < 0)
                PMD_DRV_LOG(ERR, "Fail to add FDIR filter!");
@@ -1018,7 +1227,8 @@ ixgbe_fdir_info_get(struct rte_eth_dev *dev, struct rte_eth_fdir_info *fdir_info
        fdir_info->mode = dev->data->dev_conf.fdir_conf.mode;
        max_num = (1 << (FDIRENTRIES_NUM_SHIFT +
                        (fdirctrl & FDIRCTRL_PBALLOC_MASK)));
-       if (fdir_info->mode == RTE_FDIR_MODE_PERFECT)
+       if (fdir_info->mode >= RTE_FDIR_MODE_PERFECT &&
+           fdir_info->mode <= RTE_FDIR_MODE_PERFECT_TUNNEL)
                fdir_info->guarant_spc = max_num;
        else if (fdir_info->mode == RTE_FDIR_MODE_SIGNATURE)
                fdir_info->guarant_spc = max_num * 4;
@@ -1032,11 +1242,20 @@ ixgbe_fdir_info_get(struct rte_eth_dev *dev, struct rte_eth_fdir_info *fdir_info
                        fdir_info->mask.ipv6_mask.dst_ip);
        fdir_info->mask.src_port_mask = info->mask.src_port_mask;
        fdir_info->mask.dst_port_mask = info->mask.dst_port_mask;
+       fdir_info->mask.mac_addr_byte_mask = info->mask.mac_addr_byte_mask;
+       fdir_info->mask.tunnel_id_mask = info->mask.tunnel_id_mask;
+       fdir_info->mask.tunnel_type_mask = info->mask.tunnel_type_mask;
        fdir_info->max_flexpayload = IXGBE_FDIR_MAX_FLEX_LEN;
-       fdir_info->flow_types_mask[0] = IXGBE_FDIR_FLOW_TYPES;
+
+       if (fdir_info->mode == RTE_FDIR_MODE_PERFECT_MAC_VLAN ||
+           fdir_info->mode == RTE_FDIR_MODE_PERFECT_TUNNEL)
+               fdir_info->flow_types_mask[0] = 0;
+       else
+               fdir_info->flow_types_mask[0] = IXGBE_FDIR_FLOW_TYPES;
+
        fdir_info->flex_payload_unit = sizeof(uint16_t);
        fdir_info->max_flex_payload_segment_num = 1;
-       fdir_info->flex_payload_limit = 62;
+       fdir_info->flex_payload_limit = IXGBE_MAX_FLX_SOURCE_OFF;
        fdir_info->flex_conf.nb_payloads = 1;
        fdir_info->flex_conf.flex_set[0].type = RTE_ETH_RAW_PAYLOAD;
        fdir_info->flex_conf.flex_set[0].src_offset[0] = offset;
@@ -1054,21 +1273,22 @@ ixgbe_fdir_stats_get(struct rte_eth_dev *dev, struct rte_eth_fdir_stats *fdir_st
 {
        struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        struct ixgbe_hw_fdir_info *info =
-                       IXGBE_DEV_PRIVATE_TO_FDIR_INFO(dev->data->dev_private);
+               IXGBE_DEV_PRIVATE_TO_FDIR_INFO(dev->data->dev_private);
        uint32_t reg, max_num;
+       enum rte_fdir_mode fdir_mode = dev->data->dev_conf.fdir_conf.mode;
 
        /* Get the information from registers */
        reg = IXGBE_READ_REG(hw, IXGBE_FDIRFREE);
        info->collision = (uint16_t)((reg & IXGBE_FDIRFREE_COLL_MASK) >>
-                                       IXGBE_FDIRFREE_COLL_SHIFT);
+                                    IXGBE_FDIRFREE_COLL_SHIFT);
        info->free = (uint16_t)((reg & IXGBE_FDIRFREE_FREE_MASK) >>
-                                  IXGBE_FDIRFREE_FREE_SHIFT);
+                               IXGBE_FDIRFREE_FREE_SHIFT);
 
        reg = IXGBE_READ_REG(hw, IXGBE_FDIRLEN);
        info->maxhash = (uint16_t)((reg & IXGBE_FDIRLEN_MAXHASH_MASK) >>
-                                     IXGBE_FDIRLEN_MAXHASH_SHIFT);
+                                  IXGBE_FDIRLEN_MAXHASH_SHIFT);
        info->maxlen  = (uint8_t)((reg & IXGBE_FDIRLEN_MAXLEN_MASK) >>
-                                    IXGBE_FDIRLEN_MAXLEN_SHIFT);
+                                 IXGBE_FDIRLEN_MAXLEN_SHIFT);
 
        reg = IXGBE_READ_REG(hw, IXGBE_FDIRUSTAT);
        info->remove += (reg & IXGBE_FDIRUSTAT_REMOVE_MASK) >>
@@ -1094,10 +1314,11 @@ ixgbe_fdir_stats_get(struct rte_eth_dev *dev, struct rte_eth_fdir_stats *fdir_st
 
        reg = IXGBE_READ_REG(hw, IXGBE_FDIRCTRL);
        max_num = (1 << (FDIRENTRIES_NUM_SHIFT +
-                       (reg & FDIRCTRL_PBALLOC_MASK)));
-       if (dev->data->dev_conf.fdir_conf.mode == RTE_FDIR_MODE_PERFECT)
-                       fdir_stats->guarant_cnt = max_num - fdir_stats->free;
-       else if (dev->data->dev_conf.fdir_conf.mode == RTE_FDIR_MODE_SIGNATURE)
+                        (reg & FDIRCTRL_PBALLOC_MASK)));
+       if (fdir_mode >= RTE_FDIR_MODE_PERFECT &&
+           fdir_mode <= RTE_FDIR_MODE_PERFECT_TUNNEL)
+               fdir_stats->guarant_cnt = max_num - fdir_stats->free;
+       else if (fdir_mode == RTE_FDIR_MODE_SIGNATURE)
                fdir_stats->guarant_cnt = max_num * 4 - fdir_stats->free;
 
 }
@@ -1118,7 +1339,8 @@ ixgbe_fdir_ctrl_func(struct rte_eth_dev *dev,
        if (hw->mac.type != ixgbe_mac_82599EB &&
                hw->mac.type != ixgbe_mac_X540 &&
                hw->mac.type != ixgbe_mac_X550 &&
-               hw->mac.type != ixgbe_mac_X550EM_x)
+               hw->mac.type != ixgbe_mac_X550EM_x &&
+               hw->mac.type != ixgbe_mac_X550EM_a)
                return -ENOTSUP;
 
        if (filter_op == RTE_ETH_FILTER_NOP)