update Intel copyright years to 2014
[dpdk.git] / lib / librte_pmd_ixgbe / ixgbe_ethdev.c
index 1434ac9..01c69e9 100644
@@ -1,35 +1,34 @@
 /*-
  *   BSD LICENSE
  * 
- *   Copyright(c) 2010-2013 Intel Corporation. All rights reserved.
+ *   Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
  *   All rights reserved.
  * 
- *   Redistribution and use in source and binary forms, with or without 
- *   modification, are permitted provided that the following conditions 
+ *   Redistribution and use in source and binary forms, with or without
+ *   modification, are permitted provided that the following conditions
  *   are met:
  * 
- *     * Redistributions of source code must retain the above copyright 
+ *     * Redistributions of source code must retain the above copyright
  *       notice, this list of conditions and the following disclaimer.
- *     * Redistributions in binary form must reproduce the above copyright 
- *       notice, this list of conditions and the following disclaimer in 
- *       the documentation and/or other materials provided with the 
+ *     * Redistributions in binary form must reproduce the above copyright
+ *       notice, this list of conditions and the following disclaimer in
+ *       the documentation and/or other materials provided with the
  *       distribution.
- *     * Neither the name of Intel Corporation nor the names of its 
- *       contributors may be used to endorse or promote products derived 
+ *     * Neither the name of Intel Corporation nor the names of its
+ *       contributors may be used to endorse or promote products derived
  *       from this software without specific prior written permission.
  * 
- *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 
- *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 
- *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 
- *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 
- *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 
- *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 
- *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 
- *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 
- *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 
- *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
  *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- * 
  */
 
 #include <sys/queue.h>
@@ -65,6 +64,7 @@
 #include "ixgbe/ixgbe_vf.h"
 #include "ixgbe/ixgbe_common.h"
 #include "ixgbe_ethdev.h"
+#include "ixgbe_bypass.h"
 
 /*
  * High threshold controlling when to start sending XOFF frames. Must be at
@@ -84,6 +84,8 @@
 
 #define IXGBE_LINK_DOWN_CHECK_TIMEOUT 4000 /* ms */
 #define IXGBE_LINK_UP_CHECK_TIMEOUT   1000 /* ms */
+#define IXGBE_VMDQ_NUM_UC_MAC         4096 /* Maximum nb. of UC MAC addr. */
+
 
 #define IXGBEVF_PMD_NAME "rte_ixgbevf_pmd" /* PMD name */
 
@@ -163,6 +165,22 @@ static void ixgbevf_vlan_strip_queue_set(struct rte_eth_dev *dev,
 static void ixgbevf_vlan_offload_set(struct rte_eth_dev *dev, int mask);
 static void ixgbevf_set_vfta_all(struct rte_eth_dev *dev, bool on);
 
+/* For Eth VMDQ APIs support */
+static int ixgbe_uc_hash_table_set(struct rte_eth_dev *dev,
+               struct ether_addr *mac_addr, uint8_t on);
+static int ixgbe_uc_all_hash_table_set(struct rte_eth_dev *dev, uint8_t on);
+static int ixgbe_set_pool_rx_mode(struct rte_eth_dev *dev, uint16_t pool,
+               uint16_t rx_mask, uint8_t on);
+static int ixgbe_set_pool_rx(struct rte_eth_dev *dev, uint16_t pool, uint8_t on);
+static int ixgbe_set_pool_tx(struct rte_eth_dev *dev, uint16_t pool, uint8_t on);
+static int ixgbe_set_pool_vlan_filter(struct rte_eth_dev *dev, uint16_t vlan,
+               uint64_t pool_mask, uint8_t vlan_on);
+static int ixgbe_mirror_rule_set(struct rte_eth_dev *dev,
+               struct rte_eth_vmdq_mirror_conf *mirror_conf,
+               uint8_t rule_id, uint8_t on);
+static int ixgbe_mirror_rule_reset(struct rte_eth_dev *dev,
+               uint8_t rule_id);
+
 /*
  * Define VF Stats MACRO for Non "cleared on read" register
  */
@@ -244,6 +262,7 @@ static struct eth_dev_ops ixgbe_eth_dev_ops = {
        .rx_queue_setup       = ixgbe_dev_rx_queue_setup,
        .rx_queue_release     = ixgbe_dev_rx_queue_release,
        .rx_queue_count       = ixgbe_dev_rx_queue_count,
+       .rx_descriptor_done   = ixgbe_dev_rx_descriptor_done,
        .tx_queue_setup       = ixgbe_dev_tx_queue_setup,
        .tx_queue_release     = ixgbe_dev_tx_queue_release,
        .dev_led_on           = ixgbe_dev_led_on,
@@ -252,6 +271,14 @@ static struct eth_dev_ops ixgbe_eth_dev_ops = {
        .priority_flow_ctrl_set = ixgbe_priority_flow_ctrl_set,
        .mac_addr_add         = ixgbe_add_rar,
        .mac_addr_remove      = ixgbe_remove_rar,
+       .uc_hash_table_set    = ixgbe_uc_hash_table_set,
+       .uc_all_hash_table_set  = ixgbe_uc_all_hash_table_set,
+       .mirror_rule_set        = ixgbe_mirror_rule_set,
+       .mirror_rule_reset      = ixgbe_mirror_rule_reset,
+       .set_vf_rx_mode       = ixgbe_set_pool_rx_mode,
+       .set_vf_rx            = ixgbe_set_pool_rx,
+       .set_vf_tx            = ixgbe_set_pool_tx,
+       .set_vf_vlan_filter   = ixgbe_set_pool_vlan_filter,
        .fdir_add_signature_filter    = ixgbe_fdir_add_signature_filter,
        .fdir_update_signature_filter = ixgbe_fdir_update_signature_filter,
        .fdir_remove_signature_filter = ixgbe_fdir_remove_signature_filter,
@@ -262,6 +289,17 @@ static struct eth_dev_ops ixgbe_eth_dev_ops = {
        .fdir_set_masks               = ixgbe_fdir_set_masks,
        .reta_update          = ixgbe_dev_rss_reta_update,
        .reta_query           = ixgbe_dev_rss_reta_query,
+#ifdef RTE_NIC_BYPASS
+       .bypass_init          = ixgbe_bypass_init,
+       .bypass_state_set     = ixgbe_bypass_state_store,
+       .bypass_state_show    = ixgbe_bypass_state_show,
+       .bypass_event_set     = ixgbe_bypass_event_store,
+       .bypass_event_show    = ixgbe_bypass_event_show,
+       .bypass_wd_timeout_set  = ixgbe_bypass_wd_timeout_store,
+       .bypass_wd_timeout_show = ixgbe_bypass_wd_timeout_show,
+       .bypass_ver_show      = ixgbe_bypass_ver_show,
+       .bypass_wd_reset      = ixgbe_bypass_wd_reset,
+#endif /* RTE_NIC_BYPASS */
 };
 
 /*
@@ -594,7 +632,12 @@ eth_ixgbe_dev_init(__attribute__((unused)) struct eth_driver *eth_drv,
 #endif
 
        /* Initialize the shared code */
+#ifdef RTE_NIC_BYPASS
+       diag = ixgbe_bypass_init_shared_code(hw);
+#else
        diag = ixgbe_init_shared_code(hw);
+#endif /* RTE_NIC_BYPASS */
+
        if (diag != IXGBE_SUCCESS) {
                PMD_INIT_LOG(ERR, "Shared code init failed: %d", diag);
                return -EIO;
@@ -620,7 +663,11 @@ eth_ixgbe_dev_init(__attribute__((unused)) struct eth_driver *eth_drv,
                return -EIO;
        }
 
+#ifdef RTE_NIC_BYPASS
+       diag = ixgbe_bypass_init_hw(hw);
+#else
        diag = ixgbe_init_hw(hw);
+#endif /* RTE_NIC_BYPASS */
 
        /*
         * Devices with copper phys will fail to initialise if ixgbe_init_hw()
@@ -671,6 +718,16 @@ eth_ixgbe_dev_init(__attribute__((unused)) struct eth_driver *eth_drv,
        /* Copy the permanent MAC address */
        ether_addr_copy((struct ether_addr *) hw->mac.perm_addr,
                        &eth_dev->data->mac_addrs[0]);
+       
+       /* Allocate memory for storing hash filter MAC addresses */
+       eth_dev->data->hash_mac_addrs = rte_zmalloc("ixgbe", ETHER_ADDR_LEN *
+                       IXGBE_VMDQ_NUM_UC_MAC, 0);
+       if (eth_dev->data->hash_mac_addrs == NULL) {
+               PMD_INIT_LOG(ERR,
+                       "Failed to allocate %d bytes needed to store MAC addresses",
+                       ETHER_ADDR_LEN * IXGBE_VMDQ_NUM_UC_MAC);
+               return -ENOMEM;
+       }
 
        /* initialize the vfta */
        memset(shadow_vfta, 0, sizeof(*shadow_vfta));
@@ -732,6 +789,18 @@ eth_ixgbevf_dev_init(__attribute__((unused)) struct eth_driver *eth_drv,
        PMD_INIT_LOG(DEBUG, "eth_ixgbevf_dev_init");
 
        eth_dev->dev_ops = &ixgbevf_eth_dev_ops;
+       eth_dev->rx_pkt_burst = &ixgbe_recv_pkts;
+       eth_dev->tx_pkt_burst = &ixgbe_xmit_pkts;
+
+       /* For secondary processes, we don't initialise any further as the
+        * primary has already done this work. Only check whether we need a
+        * different RX function. */
+       if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
+               if (eth_dev->data->scattered_rx)
+                       eth_dev->rx_pkt_burst = ixgbe_recv_scattered_pkts;
+               return 0;
+       }
+
        pci_dev = eth_dev->pci_dev;
 
        hw->device_id = pci_dev->id.device_id;
@@ -1132,6 +1201,17 @@ ixgbe_vlan_offload_set(struct rte_eth_dev *dev, int mask)
        }
 }
 
+static void
+ixgbe_vmdq_vlan_hw_filter_enable(struct rte_eth_dev *dev)
+{
+       struct ixgbe_hw *hw =
+               IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       /* VLNCTRL: enable vlan filtering and allow all vlan tags through */
+       uint32_t vlanctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
+       vlanctrl |= IXGBE_VLNCTRL_VFE; /* enable vlan filters */
+       IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlanctrl);
+}
+
 static int
 ixgbe_dev_configure(struct rte_eth_dev *dev)
 {
@@ -1252,7 +1332,12 @@ ixgbe_dev_start(struct rte_eth_dev *dev)
        mask = ETH_VLAN_STRIP_MASK | ETH_VLAN_FILTER_MASK | \
                ETH_VLAN_EXTEND_MASK;
        ixgbe_vlan_offload_set(dev, mask);
-       
+
+       if (dev->data->dev_conf.rxmode.mq_mode == ETH_MQ_RX_VMDQ_ONLY) {
+               /* Enable vlan filtering for VMDq */
+               ixgbe_vmdq_vlan_hw_filter_enable(dev);
+       }       
+
        /* Configure DCB hw */
        ixgbe_configure_dcb(dev); 
 
@@ -1577,6 +1662,12 @@ ixgbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
        dev_info->min_rx_bufsize = 1024; /* cf BSIZEPACKET in SRRCTL register */
        dev_info->max_rx_pktlen = 15872; /* includes CRC, cf MAXFRS register */
        dev_info->max_mac_addrs = hw->mac.num_rar_entries;
+       dev_info->max_hash_mac_addrs = IXGBE_VMDQ_NUM_UC_MAC;
+       dev_info->max_vfs = dev->pci_dev->max_vfs;
+       if (hw->mac.type == ixgbe_mac_82598EB)
+               dev_info->max_vmdq_pools = ETH_16_POOLS;
+       else
+               dev_info->max_vmdq_pools = ETH_64_POOLS;
 }
 
 /* return 0 means link status changed, -1 means not changed */
@@ -1937,6 +2028,7 @@ ixgbe_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
        int err;
        uint32_t rx_buf_size;
        uint32_t max_high_water;
+       uint32_t mflcn;
        enum ixgbe_fc_mode rte_fcmode_2_ixgbe_fcmode[] = {
                ixgbe_fc_none,
                ixgbe_fc_rx_pause,
@@ -1969,8 +2061,24 @@ ixgbe_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
        hw->fc.send_xon       = fc_conf->send_xon;
 
        err = ixgbe_fc_enable(hw);
+
        /* Not negotiated is not an error case */
        if ((err == IXGBE_SUCCESS) || (err == IXGBE_ERR_FC_NOT_NEGOTIATED)) {
+
+               /* Check if we want to forward MAC control frames; the driver
+                * has no native capability to do that, so write the registers
+                * directly here. */
+
+               mflcn = IXGBE_READ_REG(hw, IXGBE_MFLCN);
+
+               /* set or clear MFLCN.PMCF bit depending on configuration */
+               if (fc_conf->mac_ctrl_frame_fwd != 0)
+                       mflcn |= IXGBE_MFLCN_PMCF;
+               else
+                       mflcn &= ~IXGBE_MFLCN_PMCF;
+
+               IXGBE_WRITE_REG(hw, IXGBE_MFLCN, mflcn);
+               IXGBE_WRITE_FLUSH(hw);
+
                return 0;
        }
 
@@ -2473,3 +2581,398 @@ ixgbevf_vlan_offload_set(struct rte_eth_dev *dev, int mask)
                        ixgbevf_vlan_strip_queue_set(dev,i,on);
        }
 }
+
+static int
+ixgbe_vmdq_mode_check(struct ixgbe_hw *hw)
+{
+       uint32_t reg_val;
+       
+       /* we only need to do this if VMDq is enabled */
+       reg_val = IXGBE_READ_REG(hw, IXGBE_VT_CTL);
+       if (!(reg_val & IXGBE_VT_CTL_VT_ENABLE)) {
+               PMD_INIT_LOG(ERR, "VMDq must be enabled for this setting\n");
+               return (-1);
+       }
+       
+       return 0;
+}
+
+static uint32_t 
+ixgbe_uta_vector(struct ixgbe_hw *hw, struct ether_addr* uc_addr)
+{
+       uint32_t vector = 0;
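+
+       /*
+        * Build a 12-bit hash from bytes 4 and 5 of the MAC address;
+        * mc_filter_type selects which window of the upper address bits
+        * is used (see the case comments below).
+        */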
+       switch (hw->mac.mc_filter_type) {
+       case 0:   /* use bits [47:36] of the address */
+               vector = ((uc_addr->addr_bytes[4] >> 4) | 
+                       (((uint16_t)uc_addr->addr_bytes[5]) << 4));
+               break;
+       case 1:   /* use bits [46:35] of the address */
+               vector = ((uc_addr->addr_bytes[4] >> 3) | 
+                       (((uint16_t)uc_addr->addr_bytes[5]) << 5));
+               break;
+       case 2:   /* use bits [45:34] of the address */
+               vector = ((uc_addr->addr_bytes[4] >> 2) | 
+                       (((uint16_t)uc_addr->addr_bytes[5]) << 6));
+               break;
+       case 3:   /* use bits [43:32] of the address */
+               vector = ((uc_addr->addr_bytes[4]) | 
+                       (((uint16_t)uc_addr->addr_bytes[5]) << 8));
+               break;
+       default:  /* Invalid mc_filter_type */
+               break;
+       }
+
+       /* vector can only be 12-bits or boundary will be exceeded */
+       vector &= 0xFFF;
+       return vector;
+}
+
+static int
+ixgbe_uc_hash_table_set(struct rte_eth_dev *dev, struct ether_addr *mac_addr,
+                              uint8_t on)
+{
+       uint32_t vector;
+       uint32_t uta_idx;
+       uint32_t reg_val;
+       uint32_t uta_shift;
+       uint32_t rc;
+       const uint32_t ixgbe_uta_idx_mask = 0x7F;
+       const uint32_t ixgbe_uta_bit_shift = 5;
+       const uint32_t ixgbe_uta_bit_mask = (0x1 << ixgbe_uta_bit_shift) - 1;
+       const uint32_t bit1 = 0x1;
+       
+       struct ixgbe_hw *hw =
+               IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       struct ixgbe_uta_info *uta_info =
+               IXGBE_DEV_PRIVATE_TO_UTA(dev->data->dev_private);
+       
+       /* The UTA table only exists on 82599 hardware and newer */
+       if (hw->mac.type < ixgbe_mac_82599EB)
+               return (-ENOTSUP);
+       
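+       /*
+        * The UTA is a 4096-bit table spread across 128 32-bit registers:
+        * the upper bits of the hash select the register (uta_idx) and the
+        * low 5 bits select the bit within it (uta_shift). A shadow copy is
+        * kept so unchanged entries are not rewritten.
+        */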
+       vector = ixgbe_uta_vector(hw,mac_addr);
+       uta_idx = (vector >> ixgbe_uta_bit_shift) & ixgbe_uta_idx_mask;
+       uta_shift = vector & ixgbe_uta_bit_mask;
+       
+       rc = ((uta_info->uta_shadow[uta_idx] >> uta_shift & bit1) != 0);
+       if(rc == on)
+               return 0;
+       
+       reg_val = IXGBE_READ_REG(hw, IXGBE_UTA(uta_idx));
+       if (on) {
+               uta_info->uta_in_use++;
+               reg_val |= (bit1 << uta_shift);
+               uta_info->uta_shadow[uta_idx] |= (bit1 << uta_shift);
+       } else {
+               uta_info->uta_in_use--;
+               reg_val &= ~(bit1 << uta_shift);
+               uta_info->uta_shadow[uta_idx] &= ~(bit1 << uta_shift);
+       }
+       
+       IXGBE_WRITE_REG(hw, IXGBE_UTA(uta_idx), reg_val);
+       
+       if (uta_info->uta_in_use > 0)
+               IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL,
+                               IXGBE_MCSTCTRL_MFE | hw->mac.mc_filter_type);
+       else
+               IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL,hw->mac.mc_filter_type);
+       
+       return 0;
+}
+
+static int
+ixgbe_uc_all_hash_table_set(struct rte_eth_dev *dev, uint8_t on)
+{
+       int i;
+       struct ixgbe_hw *hw =
+               IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       struct ixgbe_uta_info *uta_info =
+               IXGBE_DEV_PRIVATE_TO_UTA(dev->data->dev_private);
+
+       /* The UTA table only exists on 82599 hardware and newer */
+       if (hw->mac.type < ixgbe_mac_82599EB)
+               return (-ENOTSUP);
+       
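+       /* Set or clear every bit of the Unicast Table Array, mirroring the
+        * change in uta_shadow so later per-address updates stay consistent. */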
+       if (on) {
+               for (i = 0; i < ETH_VMDQ_NUM_UC_HASH_ARRAY; i++) {
+                       uta_info->uta_shadow[i] = ~0;
+                       IXGBE_WRITE_REG(hw, IXGBE_UTA(i), ~0);
+               }
+       } else {
+               for (i = 0; i < ETH_VMDQ_NUM_UC_HASH_ARRAY; i++) {
+                       uta_info->uta_shadow[i] = 0;
+                       IXGBE_WRITE_REG(hw, IXGBE_UTA(i), 0);
+               }
+       }
+       return 0;
+}
+
+static int
+ixgbe_set_pool_rx_mode(struct rte_eth_dev *dev, uint16_t pool,
+                              uint16_t rx_mask, uint8_t on)
+{
+       int val = 0;
+       
+       struct ixgbe_hw *hw =
+               IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       uint32_t vmolr = IXGBE_READ_REG(hw, IXGBE_VMOLR(pool));
+       
+       if (hw->mac.type == ixgbe_mac_82598EB) {
+               PMD_INIT_LOG(ERR, "setting VF receive mode set should be done"
+                       " on 82599 hardware and newer\n");
+               return (-ENOTSUP);
+       }
+       if (ixgbe_vmdq_mode_check(hw) < 0)
+               return (-ENOTSUP);
+
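+       /* Translate the ETH_VMDQ_ACCEPT_* flags into their VMOLR bit
+        * equivalents, then set or clear those bits in the pool's VMOLR
+        * register depending on 'on'. */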
+       if (rx_mask & ETH_VMDQ_ACCEPT_UNTAG )
+               val |= IXGBE_VMOLR_AUPE;
+       if (rx_mask & ETH_VMDQ_ACCEPT_HASH_MC )
+               val |= IXGBE_VMOLR_ROMPE;
+       if (rx_mask & ETH_VMDQ_ACCEPT_HASH_UC)
+               val |= IXGBE_VMOLR_ROPE;
+       if (rx_mask & ETH_VMDQ_ACCEPT_BROADCAST)
+               val |= IXGBE_VMOLR_BAM;
+       if (rx_mask & ETH_VMDQ_ACCEPT_MULTICAST)
+               val |= IXGBE_VMOLR_MPE;
+
+       if (on)
+               vmolr |= val;
+       else 
+               vmolr &= ~val;
+
+       IXGBE_WRITE_REG(hw, IXGBE_VMOLR(pool), vmolr);
+       
+       return 0;
+}
+
+static int
+ixgbe_set_pool_rx(struct rte_eth_dev *dev, uint16_t pool, uint8_t on)
+{
+       uint32_t reg,addr;
+       uint32_t val;
+       const uint8_t bit1 = 0x1;
+       
+       struct ixgbe_hw *hw =
+               IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+       if (ixgbe_vmdq_mode_check(hw) < 0)
+               return (-ENOTSUP);
+       
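+       /* RX enable bits for pools 0-31 live in VFRE(0), for pools 32-63 in
+        * VFRE(1); pick the register and the bit for this pool. */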
+       addr = IXGBE_VFRE(pool >= ETH_64_POOLS/2);
+       reg = IXGBE_READ_REG(hw, addr);
+       val = bit1 << pool;
+
+       if (on)
+               reg |= val;
+       else
+               reg &= ~val;
+       
+       IXGBE_WRITE_REG(hw, addr,reg);
+       
+       return 0;
+}
+
+static int
+ixgbe_set_pool_tx(struct rte_eth_dev *dev, uint16_t pool, uint8_t on)
+{
+       uint32_t reg,addr;
+       uint32_t val;
+       const uint8_t bit1 = 0x1;
+       
+       struct ixgbe_hw *hw =
+               IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+       if (ixgbe_vmdq_mode_check(hw) < 0)
+               return (-ENOTSUP);
+       
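+       /* TX enable bits for pools 0-31 live in VFTE(0), for pools 32-63 in
+        * VFTE(1); pick the register and the bit for this pool. */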
+       addr = IXGBE_VFTE(pool >= ETH_64_POOLS/2);
+       reg = IXGBE_READ_REG(hw, addr);
+       val = bit1 << pool;
+
+       if (on)
+               reg |= val;
+       else
+               reg &= ~val;
+       
+       IXGBE_WRITE_REG(hw, addr,reg);
+       
+       return 0;
+}
+
+static int 
+ixgbe_set_pool_vlan_filter(struct rte_eth_dev *dev, uint16_t vlan,
+                       uint64_t pool_mask, uint8_t vlan_on)
+{
+       int ret = 0;
+       uint16_t pool_idx;
+       struct ixgbe_hw *hw =
+               IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       
+       if (ixgbe_vmdq_mode_check(hw) < 0)
+               return (-ENOTSUP);
+       for (pool_idx = 0; pool_idx < ETH_64_POOLS; pool_idx++) {
+               if (pool_mask & (1ULL << pool_idx)) {
+                       ret = hw->mac.ops.set_vfta(hw, vlan, pool_idx, vlan_on);
+                       if (ret < 0)
+                               return ret;
+               }
+       }
+
+       return ret;
+}
+
+static int
+ixgbe_mirror_rule_set(struct rte_eth_dev *dev,
+                       struct rte_eth_vmdq_mirror_conf *mirror_conf, 
+                       uint8_t rule_id, uint8_t on)
+{
+       uint32_t mr_ctl,vlvf;
+       uint32_t mp_lsb = 0;
+       uint32_t mv_msb = 0;
+       uint32_t mv_lsb = 0;
+       uint32_t mp_msb = 0;
+       uint8_t i = 0;
+       int reg_index = 0;
+       uint64_t vlan_mask = 0;
+       
+       const uint8_t pool_mask_offset = 32;
+       const uint8_t vlan_mask_offset = 32;
+       const uint8_t dst_pool_offset = 8;
+       const uint8_t rule_mr_offset  = 4;
+       const uint8_t mirror_rule_mask = 0x0F;
+
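+       /*
+        * Each mirror rule is controlled through one MRCTL register plus
+        * pairs of 32-bit mask registers: the low word is written at index
+        * rule_id and the high word at rule_id + rule_mr_offset, for both
+        * the pool mask (VMRVM) and the VLAN mask (VMRVLAN).
+        */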
+       struct ixgbe_mirror_info *mr_info =
+                       (IXGBE_DEV_PRIVATE_TO_PFDATA(dev->data->dev_private));
+       struct ixgbe_hw *hw =
+               IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+       if (ixgbe_vmdq_mode_check(hw) < 0)
+               return (-ENOTSUP);
+
+       /* Check if vlan mask is valid */
+       if ((mirror_conf->rule_type_mask & ETH_VMDQ_VLAN_MIRROR) && (on)) {
+               if (mirror_conf->vlan.vlan_mask == 0)
+                       return (-EINVAL);
+       }
+
+       /* Check if vlan id is valid and find the corresponding VLAN ID index in VLVF */
+       if (mirror_conf->rule_type_mask & ETH_VMDQ_VLAN_MIRROR) {
+               for (i = 0;i < IXGBE_VLVF_ENTRIES; i++) {
+                       if (mirror_conf->vlan.vlan_mask & (1ULL << i)) {
+                               /* find the pool vlan filter (VLVF) index for this vlan id */
+                               reg_index = ixgbe_find_vlvf_slot(hw,
+                                               mirror_conf->vlan.vlan_id[i]);
+                               if(reg_index < 0)
+                                       return (-EINVAL);
+                               vlvf = IXGBE_READ_REG(hw, IXGBE_VLVF(reg_index));
+                               if ((vlvf & IXGBE_VLVF_VIEN) &&
+                                       ((vlvf & IXGBE_VLVF_VLANID_MASK)
+                                               == mirror_conf->vlan.vlan_id[i]))
+                                       vlan_mask |= (1ULL << reg_index);
+                               else
+                                       return (-EINVAL);
+                       }
+               }
+
+               if (on) {
+                       mv_lsb = vlan_mask & 0xFFFFFFFF;
+                       mv_msb = vlan_mask >> vlan_mask_offset;
+                       
+                       mr_info->mr_conf[rule_id].vlan.vlan_mask =
+                                               mirror_conf->vlan.vlan_mask;
+                       for(i = 0 ;i < ETH_VMDQ_MAX_VLAN_FILTERS; i++) {
+                               if(mirror_conf->vlan.vlan_mask & (1ULL << i))
+                                       mr_info->mr_conf[rule_id].vlan.vlan_id[i] =
+                                               mirror_conf->vlan.vlan_id[i];
+                       }
+               } else {
+                       mv_lsb = 0;
+                       mv_msb = 0;
+                       mr_info->mr_conf[rule_id].vlan.vlan_mask = 0;
+                       for(i = 0 ;i < ETH_VMDQ_MAX_VLAN_FILTERS; i++)
+                               mr_info->mr_conf[rule_id].vlan.vlan_id[i] = 0;
+               }
+       }
+
+       /*
+        * If pool mirroring is enabled, write the related pool mask
+        * registers; if it is disabled, clear the PFMRVM registers.
+        */
+       if (mirror_conf->rule_type_mask & ETH_VMDQ_POOL_MIRROR) {
+               if (on) { 
+                       mp_lsb = mirror_conf->pool_mask & 0xFFFFFFFF;
+                       mp_msb = mirror_conf->pool_mask >> pool_mask_offset;
+                       mr_info->mr_conf[rule_id].pool_mask = 
+                                       mirror_conf->pool_mask;
+                       
+               } else {
+                       mp_lsb = 0;
+                       mp_msb = 0;
+                       mr_info->mr_conf[rule_id].pool_mask = 0;
+               }
+       }
+       
+       /* read mirror control register and recalculate it */
+       mr_ctl = IXGBE_READ_REG(hw, IXGBE_MRCTL(rule_id));
+
+       if (on) {
+               mr_ctl |= mirror_conf->rule_type_mask;
+               mr_ctl &= mirror_rule_mask;
+               mr_ctl |= mirror_conf->dst_pool << dst_pool_offset;
+       } else
+               mr_ctl &= ~(mirror_conf->rule_type_mask & mirror_rule_mask);
+
+       mr_info->mr_conf[rule_id].rule_type_mask = (uint8_t)(mr_ctl & mirror_rule_mask);
+       mr_info->mr_conf[rule_id].dst_pool = mirror_conf->dst_pool;
+
+       /* write mirror control register */
+       IXGBE_WRITE_REG(hw, IXGBE_MRCTL(rule_id), mr_ctl);
+       
+       /* write pool mirror control register */
+       if (mirror_conf->rule_type_mask & ETH_VMDQ_POOL_MIRROR) {
+               IXGBE_WRITE_REG(hw, IXGBE_VMRVM(rule_id), mp_lsb);
+               IXGBE_WRITE_REG(hw, IXGBE_VMRVM(rule_id + rule_mr_offset),
+                               mp_msb);
+       }
+       /* write VLAN mirror control register */
+       if (mirror_conf->rule_type_mask & ETH_VMDQ_VLAN_MIRROR) {
+               IXGBE_WRITE_REG(hw, IXGBE_VMRVLAN(rule_id), mv_lsb);
+               IXGBE_WRITE_REG(hw, IXGBE_VMRVLAN(rule_id + rule_mr_offset),
+                               mv_msb);
+       }
+
+       return 0;
+}
+
+static int 
+ixgbe_mirror_rule_reset(struct rte_eth_dev *dev, uint8_t rule_id)
+{
+       int mr_ctl = 0;
+       uint32_t lsb_val = 0;
+       uint32_t msb_val = 0;
+       const uint8_t rule_mr_offset = 4;
+       
+       struct ixgbe_hw *hw =
+               IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       struct ixgbe_mirror_info *mr_info = 
+               (IXGBE_DEV_PRIVATE_TO_PFDATA(dev->data->dev_private));
+       
+       if (ixgbe_vmdq_mode_check(hw) < 0)
+               return (-ENOTSUP);
+
+       memset(&mr_info->mr_conf[rule_id], 0,
+               sizeof(struct rte_eth_vmdq_mirror_conf));
+
+       /* clear mirror rule control register (MRCTL) */
+       IXGBE_WRITE_REG(hw, IXGBE_MRCTL(rule_id), mr_ctl);
+
+       /* clear pool mask register */
+       IXGBE_WRITE_REG(hw, IXGBE_VMRVM(rule_id), lsb_val);
+       IXGBE_WRITE_REG(hw, IXGBE_VMRVM(rule_id + rule_mr_offset), msb_val);
+
+       /* clear vlan mask register */
+       IXGBE_WRITE_REG(hw, IXGBE_VMRVLAN(rule_id), lsb_val);
+       IXGBE_WRITE_REG(hw, IXGBE_VMRVLAN(rule_id + rule_mr_offset), msb_val);
+
+       return 0;
+}