ethdev: MTU accessors
[dpdk.git] / lib / librte_pmd_ixgbe / ixgbe_ethdev.c
index eab4ba8..fca8fd7 100644
@@ -39,6 +39,7 @@
 #include <unistd.h>
 #include <stdarg.h>
 #include <inttypes.h>
+#include <netinet/in.h>
 #include <rte_byteorder.h>
 #include <rte_common.h>
 #include <rte_cycles.h>
@@ -88,6 +89,8 @@
 #define IXGBE_LINK_UP_CHECK_TIMEOUT   1000 /* ms */
 #define IXGBE_VMDQ_NUM_UC_MAC         4096 /* Maximum nb. of UC MAC addr. */
 
+#define IXGBE_MMW_SIZE_DEFAULT        0x4
+#define IXGBE_MMW_SIZE_JUMBO_FRAME    0x14
 
 #define IXGBEVF_PMD_NAME "rte_ixgbevf_pmd" /* PMD name */
 
@@ -116,6 +119,9 @@ static int ixgbe_dev_queue_stats_mapping_set(struct rte_eth_dev *eth_dev,
                                             uint8_t is_rx);
 static void ixgbe_dev_info_get(struct rte_eth_dev *dev,
                                struct rte_eth_dev_info *dev_info);
+
+static int ixgbe_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu);
+
 static int ixgbe_vlan_filter_set(struct rte_eth_dev *dev,
                uint16_t vlan_id, int on);
 static void ixgbe_vlan_tpid_set(struct rte_eth_dev *dev, uint16_t tpid_id);
@@ -131,8 +137,10 @@ static void ixgbe_vlan_hw_extend_disable(struct rte_eth_dev *dev);
 
 static int ixgbe_dev_led_on(struct rte_eth_dev *dev);
 static int ixgbe_dev_led_off(struct rte_eth_dev *dev);
-static int  ixgbe_flow_ctrl_set(struct rte_eth_dev *dev,
-               struct rte_eth_fc_conf *fc_conf);
+static int ixgbe_flow_ctrl_get(struct rte_eth_dev *dev,
+                              struct rte_eth_fc_conf *fc_conf);
+static int ixgbe_flow_ctrl_set(struct rte_eth_dev *dev,
+                              struct rte_eth_fc_conf *fc_conf);
 static int ixgbe_priority_flow_ctrl_set(struct rte_eth_dev *dev,
                struct rte_eth_pfc_conf *pfc_conf);
 static int ixgbe_dev_rss_reta_update(struct rte_eth_dev *dev,
@@ -185,10 +193,32 @@ static int ixgbe_mirror_rule_set(struct rte_eth_dev *dev,
 static int ixgbe_mirror_rule_reset(struct rte_eth_dev *dev,
                uint8_t rule_id);
 
+static int ixgbe_set_queue_rate_limit(struct rte_eth_dev *dev,
+               uint16_t queue_idx, uint16_t tx_rate);
+static int ixgbe_set_vf_rate_limit(struct rte_eth_dev *dev, uint16_t vf,
+               uint16_t tx_rate, uint64_t q_msk);
+
 static void ixgbevf_add_mac_addr(struct rte_eth_dev *dev,
                                 struct ether_addr *mac_addr,
                                 uint32_t index, uint32_t pool);
 static void ixgbevf_remove_mac_addr(struct rte_eth_dev *dev, uint32_t index);
+static int ixgbe_add_syn_filter(struct rte_eth_dev *dev,
+                       struct rte_syn_filter *filter, uint16_t rx_queue);
+static int ixgbe_remove_syn_filter(struct rte_eth_dev *dev);
+static int ixgbe_get_syn_filter(struct rte_eth_dev *dev,
+                       struct rte_syn_filter *filter, uint16_t *rx_queue);
+static int ixgbe_add_ethertype_filter(struct rte_eth_dev *dev, uint16_t index,
+                       struct rte_ethertype_filter *filter, uint16_t rx_queue);
+static int ixgbe_remove_ethertype_filter(struct rte_eth_dev *dev,
+                       uint16_t index);
+static int ixgbe_get_ethertype_filter(struct rte_eth_dev *dev, uint16_t index,
+                       struct rte_ethertype_filter *filter, uint16_t *rx_queue);
+static int ixgbe_add_5tuple_filter(struct rte_eth_dev *dev, uint16_t index,
+                       struct rte_5tuple_filter *filter, uint16_t rx_queue);
+static int ixgbe_remove_5tuple_filter(struct rte_eth_dev *dev,
+                       uint16_t index);
+static int ixgbe_get_5tuple_filter(struct rte_eth_dev *dev, uint16_t index,
+                       struct rte_5tuple_filter *filter, uint16_t *rx_queue);
 
 /*
  * Define VF Stats MACRO for Non "cleared on read" register
@@ -266,6 +296,7 @@ static struct eth_dev_ops ixgbe_eth_dev_ops = {
        .stats_reset          = ixgbe_dev_stats_reset,
        .queue_stats_mapping_set = ixgbe_dev_queue_stats_mapping_set,
        .dev_infos_get        = ixgbe_dev_info_get,
+       .mtu_set              = ixgbe_dev_mtu_set,
        .vlan_filter_set      = ixgbe_vlan_filter_set,
        .vlan_tpid_set        = ixgbe_vlan_tpid_set,
        .vlan_offload_set     = ixgbe_vlan_offload_set,
@@ -282,6 +313,7 @@ static struct eth_dev_ops ixgbe_eth_dev_ops = {
        .tx_queue_release     = ixgbe_dev_tx_queue_release,
        .dev_led_on           = ixgbe_dev_led_on,
        .dev_led_off          = ixgbe_dev_led_off,
+       .flow_ctrl_get        = ixgbe_flow_ctrl_get,
        .flow_ctrl_set        = ixgbe_flow_ctrl_set,
        .priority_flow_ctrl_set = ixgbe_priority_flow_ctrl_set,
        .mac_addr_add         = ixgbe_add_rar,
@@ -294,6 +326,8 @@ static struct eth_dev_ops ixgbe_eth_dev_ops = {
        .set_vf_rx            = ixgbe_set_pool_rx,
        .set_vf_tx            = ixgbe_set_pool_tx,
        .set_vf_vlan_filter   = ixgbe_set_pool_vlan_filter,
+       .set_queue_rate_limit = ixgbe_set_queue_rate_limit,
+       .set_vf_rate_limit    = ixgbe_set_vf_rate_limit,
        .fdir_add_signature_filter    = ixgbe_fdir_add_signature_filter,
        .fdir_update_signature_filter = ixgbe_fdir_update_signature_filter,
        .fdir_remove_signature_filter = ixgbe_fdir_remove_signature_filter,
@@ -317,6 +351,15 @@ static struct eth_dev_ops ixgbe_eth_dev_ops = {
 #endif /* RTE_NIC_BYPASS */
        .rss_hash_update      = ixgbe_dev_rss_hash_update,
        .rss_hash_conf_get    = ixgbe_dev_rss_hash_conf_get,
+       .add_syn_filter          = ixgbe_add_syn_filter,
+       .remove_syn_filter       = ixgbe_remove_syn_filter,
+       .get_syn_filter          = ixgbe_get_syn_filter,
+       .add_ethertype_filter    = ixgbe_add_ethertype_filter,
+       .remove_ethertype_filter = ixgbe_remove_ethertype_filter,
+       .get_ethertype_filter    = ixgbe_get_ethertype_filter,
+       .add_5tuple_filter       = ixgbe_add_5tuple_filter,
+       .remove_5tuple_filter    = ixgbe_remove_5tuple_filter,
+       .get_5tuple_filter       = ixgbe_get_5tuple_filter,
 };
 
 /*
@@ -1003,7 +1046,7 @@ static struct eth_driver rte_ixgbe_pmd = {
        {
                .name = "rte_ixgbe_pmd",
                .id_table = pci_id_ixgbe_map,
-               .drv_flags = RTE_PCI_DRV_NEED_IGB_UIO,
+               .drv_flags = RTE_PCI_DRV_NEED_MAPPING,
        },
        .eth_dev_init = eth_ixgbe_dev_init,
        .dev_private_size = sizeof(struct ixgbe_adapter),
@@ -1016,7 +1059,7 @@ static struct eth_driver rte_ixgbevf_pmd = {
        {
                .name = "rte_ixgbevf_pmd",
                .id_table = pci_id_ixgbevf_map,
-               .drv_flags = RTE_PCI_DRV_NEED_IGB_UIO,
+               .drv_flags = RTE_PCI_DRV_NEED_MAPPING,
        },
        .eth_dev_init = eth_ixgbevf_dev_init,
        .dev_private_size = sizeof(struct ixgbe_adapter),
@@ -1359,10 +1402,13 @@ ixgbe_dev_start(struct rte_eth_dev *dev)
 {
        struct ixgbe_hw *hw =
                IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       struct ixgbe_vf_info *vfinfo =
+               *IXGBE_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private);
        int err, link_up = 0, negotiate = 0;
        uint32_t speed = 0;
        int mask = 0;
        int status;
+       uint16_t vf, idx;
 
        PMD_INIT_FUNC_TRACE();
 
@@ -1479,6 +1525,16 @@ skip_link_setup:
                        goto error;
        }
 
+       /* Restore vf rate limit */
+       if (vfinfo != NULL) {
+               for (vf = 0; vf < dev->pci_dev->max_vfs; vf++)
+                       for (idx = 0; idx < IXGBE_MAX_QUEUE_NUM_PER_VF; idx++)
+                               if (vfinfo[vf].tx_rate[idx] != 0)
+                                       ixgbe_set_vf_rate_limit(dev, vf,
+                                               vfinfo[vf].tx_rate[idx],
+                                               1 << idx);
+       }
+
        ixgbe_restore_statistics_mapping(dev);
 
        return (0);
@@ -1769,9 +1825,15 @@ ixgbe_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
        }
 
        /* Rx Errors */
-       stats->ierrors = total_missed_rx + hw_stats->crcerrs +
-               hw_stats->rlec;
-
+       stats->ibadcrc  = hw_stats->crcerrs;
+       stats->ibadlen  = hw_stats->rlec + hw_stats->ruc + hw_stats->roc;
+       stats->imissed  = total_missed_rx;
+       stats->ierrors  = stats->ibadcrc +
+                         stats->ibadlen +
+                         stats->imissed +
+                         hw_stats->illerrc + hw_stats->errbc;
+
+       /* Tx Errors */
        stats->oerrors  = 0;
 
        /* XON/XOFF pause frames */
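
Illustration (not part of this patch): with ierrors split into its
components above, an application can report the breakdown directly. A
minimal sketch, assuming a started port:

#include <stdio.h>
#include <inttypes.h>
#include <rte_ethdev.h>

/* Print the Rx error breakdown now filled in by ixgbe_dev_stats_get(). */
static void
print_rx_error_breakdown(uint8_t port_id)
{
        struct rte_eth_stats stats;

        rte_eth_stats_get(port_id, &stats);
        printf("bad CRC: %" PRIu64 "\n", stats.ibadcrc);
        printf("bad len: %" PRIu64 "\n", stats.ibadlen);
        printf("missed:  %" PRIu64 "\n", stats.imissed);
        printf("ierrors: %" PRIu64 "\n", stats.ierrors);
}
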
@@ -2236,6 +2298,55 @@ ixgbe_dev_led_off(struct rte_eth_dev *dev)
        return (ixgbe_led_off(hw, 0) == IXGBE_SUCCESS ? 0 : -ENOTSUP);
 }
 
+static int
+ixgbe_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
+{
+       struct ixgbe_hw *hw;
+       uint32_t mflcn_reg;
+       uint32_t fccfg_reg;
+       int rx_pause;
+       int tx_pause;
+
+       hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+       fc_conf->pause_time = hw->fc.pause_time;
+       fc_conf->high_water = hw->fc.high_water[0];
+       fc_conf->low_water = hw->fc.low_water[0];
+       fc_conf->send_xon = hw->fc.send_xon;
+       fc_conf->autoneg = !hw->fc.disable_fc_autoneg;
+
+       /*
+        * Return rx_pause status according to actual setting of
+        * MFLCN register.
+        */
+       mflcn_reg = IXGBE_READ_REG(hw, IXGBE_MFLCN);
+       if (mflcn_reg & (IXGBE_MFLCN_RPFCE | IXGBE_MFLCN_RFCE))
+               rx_pause = 1;
+       else
+               rx_pause = 0;
+
+       /*
+        * Return tx_pause status according to actual setting of
+        * FCCFG register.
+        */
+       fccfg_reg = IXGBE_READ_REG(hw, IXGBE_FCCFG);
+       if (fccfg_reg & (IXGBE_FCCFG_TFCE_802_3X | IXGBE_FCCFG_TFCE_PRIORITY))
+               tx_pause = 1;
+       else
+               tx_pause = 0;
+
+       if (rx_pause && tx_pause)
+               fc_conf->mode = RTE_FC_FULL;
+       else if (rx_pause)
+               fc_conf->mode = RTE_FC_RX_PAUSE;
+       else if (tx_pause)
+               fc_conf->mode = RTE_FC_TX_PAUSE;
+       else
+               fc_conf->mode = RTE_FC_NONE;
+
+       return 0;
+}
+
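
Illustration (not part of this patch): the new get side lets an
application read-modify-write the flow control settings instead of
guessing watermarks. A sketch, assuming the rte_eth_dev_flow_ctrl_get()
ethdev wrapper added alongside this dev op:

#include <rte_ethdev.h>

/* Enable full (Rx + Tx) 802.3x pause, keeping the watermarks and
 * pause time currently programmed by the driver. */
static int
enable_full_flow_ctrl(uint8_t port_id)
{
        struct rte_eth_fc_conf fc_conf;
        int ret;

        ret = rte_eth_dev_flow_ctrl_get(port_id, &fc_conf);
        if (ret != 0)
                return ret;
        fc_conf.mode = RTE_FC_FULL;
        return rte_eth_dev_flow_ctrl_set(port_id, &fc_conf);
}
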
 static int
 ixgbe_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
 {
@@ -2254,6 +2365,8 @@ ixgbe_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
        PMD_INIT_FUNC_TRACE();
 
        hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       if (fc_conf->autoneg != !hw->fc.disable_fc_autoneg)
+               return -ENOTSUP;
        rx_buf_size = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(0));
        PMD_INIT_LOG(DEBUG, "Rx packet buffer size = 0x%x \n", rx_buf_size);
 
@@ -2597,6 +2710,52 @@ ixgbe_remove_rar(struct rte_eth_dev *dev, uint32_t index)
        ixgbe_clear_rar(hw, index);
 }
 
+static int
+ixgbe_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
+{
+       uint32_t hlreg0;
+       uint32_t maxfrs;
+       struct ixgbe_hw *hw;
+       struct rte_eth_dev_info dev_info;
+       uint32_t frame_size = mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
+
+       ixgbe_dev_info_get(dev, &dev_info);
+
+       /* check that mtu is within the allowed range */
+       if ((mtu < ETHER_MIN_MTU) || (frame_size > dev_info.max_rx_pktlen))
+               return -EINVAL;
+
+       /* Refuse an MTU that requires scattered packet support when that
+        * feature has not already been enabled. */
+       if (!dev->data->scattered_rx &&
+           (frame_size + 2 * IXGBE_VLAN_TAG_SIZE >
+            dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM))
+               return -EINVAL;
+
+       hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       hlreg0 = IXGBE_READ_REG(hw, IXGBE_HLREG0);
+
+       /* switch to jumbo mode if needed */
+       if (frame_size > ETHER_MAX_LEN) {
+               dev->data->dev_conf.rxmode.jumbo_frame = 1;
+               hlreg0 |= IXGBE_HLREG0_JUMBOEN;
+       } else {
+               dev->data->dev_conf.rxmode.jumbo_frame = 0;
+               hlreg0 &= ~IXGBE_HLREG0_JUMBOEN;
+       }
+       IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0);
+
+       /* update max frame size */
+       dev->data->dev_conf.rxmode.max_rx_pkt_len = frame_size;
+
+       maxfrs = IXGBE_READ_REG(hw, IXGBE_MAXFRS);
+       maxfrs &= 0x0000FFFF;
+       maxfrs |= (dev->data->dev_conf.rxmode.max_rx_pkt_len << 16);
+       IXGBE_WRITE_REG(hw, IXGBE_MAXFRS, maxfrs);
+
+       return 0;
+}
+
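
Illustration (not part of this patch): for a 9000-byte MTU the check
above computes frame_size = 9000 + ETHER_HDR_LEN (14) + ETHER_CRC_LEN
(4) = 9018, which exceeds ETHER_MAX_LEN and therefore enables jumbo
mode. A usage sketch through the rte_eth_dev_set_mtu() accessor this
series introduces:

#include <rte_ethdev.h>

/* Switch a started port to jumbo frames. Returns -EINVAL if the
 * resulting frame exceeds max_rx_pktlen or would need scattered Rx
 * that was not enabled at queue setup time. */
static int
use_jumbo_mtu(uint8_t port_id)
{
        return rte_eth_dev_set_mtu(port_id, 9000);
}
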
 /*
  * Virtual Function operations
  */
@@ -3195,6 +3354,108 @@ ixgbe_mirror_rule_reset(struct rte_eth_dev *dev, uint8_t rule_id)
        return 0;
 }
 
+static int ixgbe_set_queue_rate_limit(struct rte_eth_dev *dev,
+       uint16_t queue_idx, uint16_t tx_rate)
+{
+       struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       uint32_t rf_dec, rf_int;
+       uint32_t bcnrc_val;
+       uint16_t link_speed = dev->data->dev_link.link_speed;
+
+       if (queue_idx >= hw->mac.max_tx_queues)
+               return -EINVAL;
+
+       if (tx_rate != 0) {
+               /* Calculate the rate factor values to set */
+               rf_int = (uint32_t)link_speed / (uint32_t)tx_rate;
+               rf_dec = (uint32_t)link_speed % (uint32_t)tx_rate;
+               rf_dec = (rf_dec << IXGBE_RTTBCNRC_RF_INT_SHIFT) / tx_rate;
+
+               bcnrc_val = IXGBE_RTTBCNRC_RS_ENA;
+               bcnrc_val |= ((rf_int << IXGBE_RTTBCNRC_RF_INT_SHIFT) &
+                               IXGBE_RTTBCNRC_RF_INT_MASK_M);
+               bcnrc_val |= (rf_dec & IXGBE_RTTBCNRC_RF_DEC_MASK);
+       } else {
+               bcnrc_val = 0;
+       }
+
+       /*
+        * Set the global transmit compensation time to the MMW_SIZE in the
+        * RTTBCNRM register: MMW_SIZE=0x014 if 9728-byte jumbo frames are
+        * supported, otherwise set it to 0x4.
+        */
+       if ((dev->data->dev_conf.rxmode.jumbo_frame == 1) &&
+               (dev->data->dev_conf.rxmode.max_rx_pkt_len >=
+                               IXGBE_MAX_JUMBO_FRAME_SIZE))
+               IXGBE_WRITE_REG(hw, IXGBE_RTTBCNRM,
+                       IXGBE_MMW_SIZE_JUMBO_FRAME);
+       else
+               IXGBE_WRITE_REG(hw, IXGBE_RTTBCNRM,
+                       IXGBE_MMW_SIZE_DEFAULT);
+
+       /* Set RTTBCNRC of queue X */
+       IXGBE_WRITE_REG(hw, IXGBE_RTTDQSEL, queue_idx);
+       IXGBE_WRITE_REG(hw, IXGBE_RTTBCNRC, bcnrc_val);
+       IXGBE_WRITE_FLUSH(hw);
+
+       return 0;
+}
+
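
Illustration (not part of this patch): a worked example of the rate
factor math above, assuming the 10.14 fixed-point layout of RTTBCNRC
(i.e. IXGBE_RTTBCNRC_RF_INT_SHIFT == 14) and a 10G link:

/* link_speed = 10000 Mbps, tx_rate = 300 Mbps:
 *   rf_int = 10000 / 300                 = 33
 *   rf_dec = ((10000 % 300) << 14) / 300 = (100 << 14) / 300 = 5461
 * The hardware divides the link rate by rf_int + rf_dec / 2^14
 * = 33.333, shaping the queue to 10000 / 33.333 ~= 300 Mbps.
 */
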
+static int ixgbe_set_vf_rate_limit(struct rte_eth_dev *dev, uint16_t vf,
+       uint16_t tx_rate, uint64_t q_msk)
+{
+       struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       struct ixgbe_vf_info *vfinfo =
+               *(IXGBE_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private));
+       uint8_t  nb_q_per_pool = RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool;
+       uint32_t queue_stride =
+               IXGBE_MAX_RX_QUEUE_NUM / RTE_ETH_DEV_SRIOV(dev).active;
+       uint32_t queue_idx = vf * queue_stride, idx = 0, vf_idx;
+       uint32_t queue_end = queue_idx + nb_q_per_pool - 1;
+       uint16_t total_rate = 0;
+
+       if (queue_end >= hw->mac.max_tx_queues)
+               return -EINVAL;
+
+       if (vfinfo != NULL) {
+               for (vf_idx = 0; vf_idx < dev->pci_dev->max_vfs; vf_idx++) {
+                       if (vf_idx == vf)
+                               continue;
+                       for (idx = 0; idx < RTE_DIM(vfinfo[vf_idx].tx_rate);
+                               idx++)
+                               total_rate += vfinfo[vf_idx].tx_rate[idx];
+               }
+       } else
+               return -EINVAL;
+
+       /* Store tx_rate for this vf. */
+       for (idx = 0; idx < nb_q_per_pool; idx++) {
+               if (((uint64_t)0x1 << idx) & q_msk) {
+                       if (vfinfo[vf].tx_rate[idx] != tx_rate)
+                               vfinfo[vf].tx_rate[idx] = tx_rate;
+                       total_rate += tx_rate;
+               }
+       }
+
+       if (total_rate > dev->data->dev_link.link_speed) {
+               /*
+                * Reset the stored TX rates of the VF if the total
+                * would exceed the link speed.
+                */
+               memset(vfinfo[vf].tx_rate, 0, sizeof(vfinfo[vf].tx_rate));
+               return -EINVAL;
+       }
+
+       /* Set RTTBCNRC of each queue/pool for vf X  */
+       for (; queue_idx <= queue_end; queue_idx++) {
+               if (0x1 & q_msk)
+                       ixgbe_set_queue_rate_limit(dev, queue_idx, tx_rate);
+               q_msk = q_msk >> 1;
+       }
+
+       return 0;
+}
+
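
Illustration (not part of this patch): capping two queues of one VF. A
sketch, assuming the rte_eth_set_vf_rate_limit() wrapper exported with
this dev op:

#include <rte_ethdev.h>

/* Limit queues 0 and 1 of VF 0 to 100 Mbps each; q_msk selects queues
 * within the VF's pool. The driver rejects the request (and clears the
 * stored rates) if the totals would exceed the link speed. */
static int
cap_vf0_tx_rate(uint8_t port_id)
{
        return rte_eth_set_vf_rate_limit(port_id, 0 /* vf */,
                                         100 /* Mbps */, 0x3 /* q_msk */);
}
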
 static void
 ixgbevf_add_mac_addr(struct rte_eth_dev *dev, struct ether_addr *mac_addr,
                     __attribute__((unused)) uint32_t index,
@@ -3264,6 +3525,418 @@ ixgbevf_remove_mac_addr(struct rte_eth_dev *dev, uint32_t index)
        }
 }
 
+/*
+ * add syn filter
+ *
+ * @param
+ * dev: Pointer to struct rte_eth_dev.
+ * filter: pointer to the filter that will be added.
+ * rx_queue: the queue id the filter is assigned to.
+ *
+ * @return
+ *    - On success, zero.
+ *    - On failure, a negative value.
+ */
+static int
+ixgbe_add_syn_filter(struct rte_eth_dev *dev,
+                       struct rte_syn_filter *filter, uint16_t rx_queue)
+{
+       struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       uint32_t synqf;
+
+       if (hw->mac.type != ixgbe_mac_82599EB)
+               return -ENOSYS;
+
+       if (rx_queue >= IXGBE_MAX_RX_QUEUE_NUM)
+               return -EINVAL;
+
+       synqf = IXGBE_READ_REG(hw, IXGBE_SYNQF);
+
+       if (synqf & IXGBE_SYN_FILTER_ENABLE)
+               return -EINVAL;
+
+       synqf = (uint32_t)(((rx_queue << IXGBE_SYN_FILTER_QUEUE_SHIFT) &
+               IXGBE_SYN_FILTER_QUEUE) | IXGBE_SYN_FILTER_ENABLE);
+
+       if (filter->hig_pri)
+               synqf |= IXGBE_SYN_FILTER_SYNQFP;
+       else
+               synqf &= ~IXGBE_SYN_FILTER_SYNQFP;
+
+       IXGBE_WRITE_REG(hw, IXGBE_SYNQF, synqf);
+       return 0;
+}
+
+/*
+ * remove syn filter
+ *
+ * @param
+ * dev: Pointer to struct rte_eth_dev.
+ *
+ * @return
+ *    - On success, zero.
+ *    - On failure, a negative value.
+ */
+static int
+ixgbe_remove_syn_filter(struct rte_eth_dev *dev)
+{
+       struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       uint32_t synqf;
+
+       if (hw->mac.type != ixgbe_mac_82599EB)
+               return -ENOSYS;
+
+       synqf = IXGBE_READ_REG(hw, IXGBE_SYNQF);
+
+       synqf &= ~(IXGBE_SYN_FILTER_QUEUE | IXGBE_SYN_FILTER_ENABLE);
+
+       IXGBE_WRITE_REG(hw, IXGBE_SYNQF, synqf);
+       return 0;
+}
+
+/*
+ * get the syn filter's info
+ *
+ * @param
+ * dev: Pointer to struct rte_eth_dev.
+ * filter: pointer to the filter that is returned.
+ * *rx_queue: pointer to the queue id the filter is assigned to.
+ *
+ * @return
+ *    - On success, zero.
+ *    - On failure, a negative value.
+ */
+static int
+ixgbe_get_syn_filter(struct rte_eth_dev *dev,
+                       struct rte_syn_filter *filter, uint16_t *rx_queue)
+{
+       struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       uint32_t synqf;
+
+       if (hw->mac.type != ixgbe_mac_82599EB)
+               return -ENOSYS;
+
+       synqf = IXGBE_READ_REG(hw, IXGBE_SYNQF);
+       if (synqf & IXGBE_SYN_FILTER_ENABLE) {
+               filter->hig_pri = (synqf & IXGBE_SYN_FILTER_SYNQFP) ? 1 : 0;
+               *rx_queue = (uint16_t)((synqf & IXGBE_SYN_FILTER_QUEUE) >>
+                                       IXGBE_SYN_FILTER_QUEUE_SHIFT);
+               return 0;
+       }
+       return -ENOENT;
+}
+
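
Illustration (not part of this patch): steering TCP SYN packets to a
dedicated queue, e.g. to isolate connection setup from data traffic. A
sketch, assuming the rte_eth_dev_add_syn_filter() wrapper from the same
series:

#include <rte_ethdev.h>

/* Direct TCP SYN packets to Rx queue 3, with the SYN filter taking
 * priority over other filter types. */
static int
steer_syn_packets(uint8_t port_id)
{
        struct rte_syn_filter filter = {
                .hig_pri = 1,
        };

        return rte_eth_dev_add_syn_filter(port_id, &filter, 3);
}
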
+/*
+ * add an ethertype filter
+ *
+ * @param
+ * dev: Pointer to struct rte_eth_dev.
+ * index: the index at which the filter is allocated.
+ * filter: pointer to the filter that will be added.
+ * rx_queue: the queue id the filter is assigned to.
+ *
+ * @return
+ *    - On success, zero.
+ *    - On failure, a negative value.
+ */
+static int
+ixgbe_add_ethertype_filter(struct rte_eth_dev *dev,
+                       uint16_t index, struct rte_ethertype_filter *filter,
+                       uint16_t rx_queue)
+{
+       struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       uint32_t etqf, etqs = 0;
+
+       if (hw->mac.type != ixgbe_mac_82599EB)
+               return -ENOSYS;
+
+       if (index >= IXGBE_MAX_ETQF_FILTERS ||
+               rx_queue >= IXGBE_MAX_RX_QUEUE_NUM)
+               return -EINVAL;
+
+       etqf = IXGBE_READ_REG(hw, IXGBE_ETQF(index));
+       if (etqf & IXGBE_ETQF_FILTER_EN)
+               return -EINVAL;  /* filter index is in use. */
+
+       etqf = 0;
+       etqf |= IXGBE_ETQF_FILTER_EN;
+       etqf |= (uint32_t)filter->ethertype;
+
+       if (filter->priority_en) {
+               if (filter->priority > IXGBE_ETQF_MAX_PRI)
+                       return -EINVAL;
+               etqf |= (uint32_t)((filter->priority << IXGBE_ETQF_SHIFT) & IXGBE_ETQF_UP);
+               etqf |= IXGBE_ETQF_UP_EN;
+       }
+       etqs |= (uint32_t)((rx_queue << IXGBE_ETQS_RX_QUEUE_SHIFT) & IXGBE_ETQS_RX_QUEUE);
+       etqs |= IXGBE_ETQS_QUEUE_EN;
+
+       IXGBE_WRITE_REG(hw, IXGBE_ETQF(index), etqf);
+       IXGBE_WRITE_REG(hw, IXGBE_ETQS(index), etqs);
+       return 0;
+}
+
+/*
+ * remove an ethertype filter
+ *
+ * @param
+ * dev: Pointer to struct rte_eth_dev.
+ * index: the index at which the filter is allocated.
+ *
+ * @return
+ *    - On success, zero.
+ *    - On failure, a negative value.
+ */
+static int
+ixgbe_remove_ethertype_filter(struct rte_eth_dev *dev,
+                       uint16_t index)
+{
+       struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+       if (hw->mac.type != ixgbe_mac_82599EB)
+               return -ENOSYS;
+
+       if (index >= IXGBE_MAX_ETQF_FILTERS)
+               return -EINVAL;
+
+       IXGBE_WRITE_REG(hw, IXGBE_ETQF(index), 0);
+       IXGBE_WRITE_REG(hw, IXGBE_ETQS(index), 0);
+
+       return 0;
+}
+
+/*
+ * get an ethertype filter
+ *
+ * @param
+ * dev: Pointer to struct rte_eth_dev.
+ * index: the index at which the filter is allocated.
+ * filter: pointer to the filter that is returned.
+ * *rx_queue: pointer to the queue id the filter is assigned to.
+ *
+ * @return
+ *    - On success, zero.
+ *    - On failure, a negative value.
+ */
+static int
+ixgbe_get_ethertype_filter(struct rte_eth_dev *dev,
+                       uint16_t index, struct rte_ethertype_filter *filter,
+                       uint16_t *rx_queue)
+{
+       struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       uint32_t etqf, etqs;
+
+       if (hw->mac.type != ixgbe_mac_82599EB)
+               return -ENOSYS;
+
+       if (index >= IXGBE_MAX_ETQF_FILTERS)
+               return -EINVAL;
+
+       etqf = IXGBE_READ_REG(hw, IXGBE_ETQF(index));
+       etqs = IXGBE_READ_REG(hw, IXGBE_ETQS(index));
+       if (etqf & IXGBE_ETQF_FILTER_EN) {
+               filter->ethertype = etqf & IXGBE_ETQF_ETHERTYPE;
+               filter->priority_en = (etqf & IXGBE_ETQF_UP_EN) ? 1 : 0;
+               if (filter->priority_en)
+                       filter->priority = (etqf & IXGBE_ETQF_UP) >>
+                                               IXGBE_ETQF_SHIFT;
+               *rx_queue = (etqs & IXGBE_ETQS_RX_QUEUE) >> IXGBE_ETQS_RX_QUEUE_SHIFT;
+               return 0;
+       }
+       return -ENOENT;
+}
+
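
Illustration (not part of this patch): matching a specific EtherType,
here IEEE 1588/PTP over Ethernet (0x88F7). A sketch, assuming the
rte_eth_dev_add_ethertype_filter() wrapper from the same series:

#include <rte_ethdev.h>

/* Route PTP frames (EtherType 0x88F7) to Rx queue 1, using filter
 * index 0 and no 802.1p priority match. */
static int
steer_ptp_frames(uint8_t port_id)
{
        struct rte_ethertype_filter filter = {
                .ethertype   = 0x88F7,
                .priority_en = 0,
        };

        return rte_eth_dev_add_ethertype_filter(port_id, 0 /* index */,
                                                &filter, 1 /* rx_queue */);
}
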
+static inline enum ixgbe_5tuple_protocol
+convert_protocol_type(uint8_t protocol_value)
+{
+       if (protocol_value == IPPROTO_TCP)
+               return IXGBE_FILTER_PROTOCOL_TCP;
+       else if (protocol_value == IPPROTO_UDP)
+               return IXGBE_FILTER_PROTOCOL_UDP;
+       else if (protocol_value == IPPROTO_SCTP)
+               return IXGBE_FILTER_PROTOCOL_SCTP;
+       else
+               return IXGBE_FILTER_PROTOCOL_NONE;
+}
+
+static inline uint8_t
+revert_protocol_type(enum ixgbe_5tuple_protocol protocol)
+{
+       if (protocol == IXGBE_FILTER_PROTOCOL_TCP)
+               return IPPROTO_TCP;
+       else if (protocol == IXGBE_FILTER_PROTOCOL_UDP)
+               return IPPROTO_UDP;
+       else if (protocol == IXGBE_FILTER_PROTOCOL_SCTP)
+               return IPPROTO_SCTP;
+       else
+               return 0;
+}
+
+/*
+ * add a 5tuple filter
+ *
+ * @param
+ * dev: Pointer to struct rte_eth_dev.
+ * index: the index at which the filter is allocated.
+ * filter: pointer to the filter that will be added.
+ * rx_queue: the queue id the filter is assigned to.
+ *
+ * @return
+ *    - On success, zero.
+ *    - On failure, a negative value.
+ */
+static int
+ixgbe_add_5tuple_filter(struct rte_eth_dev *dev, uint16_t index,
+                       struct rte_5tuple_filter *filter, uint16_t rx_queue)
+{
+       struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       uint32_t ftqf, sdpqf = 0;
+       uint32_t l34timir = 0;
+       uint8_t mask = 0xff;
+
+       if (hw->mac.type != ixgbe_mac_82599EB)
+               return -ENOSYS;
+
+       if (index >= IXGBE_MAX_FTQF_FILTERS ||
+               rx_queue >= IXGBE_MAX_RX_QUEUE_NUM ||
+               filter->priority > IXGBE_5TUPLE_MAX_PRI ||
+               filter->priority < IXGBE_5TUPLE_MIN_PRI)
+               return -EINVAL;  /* index, queue or priority out of range. */
+
+       if (filter->tcp_flags) {
+               PMD_INIT_LOG(INFO, "82599EB does not support tcp flags in 5tuple");
+               return -EINVAL;
+       }
+
+       ftqf = IXGBE_READ_REG(hw, IXGBE_FTQF(index));
+       if (ftqf & IXGBE_FTQF_QUEUE_ENABLE)
+               return -EINVAL;  /* filter index is in use. */
+
+       ftqf = 0;
+       sdpqf = (uint32_t)(filter->dst_port << IXGBE_SDPQF_DSTPORT_SHIFT);
+       sdpqf = sdpqf | (filter->src_port & IXGBE_SDPQF_SRCPORT);
+
+       ftqf |= (uint32_t)(convert_protocol_type(filter->protocol) &
+               IXGBE_FTQF_PROTOCOL_MASK);
+       ftqf |= (uint32_t)((filter->priority & IXGBE_FTQF_PRIORITY_MASK) <<
+               IXGBE_FTQF_PRIORITY_SHIFT);
+       if (filter->src_ip_mask == 0) /* 0 means compare. */
+               mask &= IXGBE_FTQF_SOURCE_ADDR_MASK;
+       if (filter->dst_ip_mask == 0)
+               mask &= IXGBE_FTQF_DEST_ADDR_MASK;
+       if (filter->src_port_mask == 0)
+               mask &= IXGBE_FTQF_SOURCE_PORT_MASK;
+       if (filter->dst_port_mask == 0)
+               mask &= IXGBE_FTQF_DEST_PORT_MASK;
+       if (filter->protocol_mask == 0)
+               mask &= IXGBE_FTQF_PROTOCOL_COMP_MASK;
+       ftqf |= mask << IXGBE_FTQF_5TUPLE_MASK_SHIFT;
+       ftqf |= IXGBE_FTQF_POOL_MASK_EN;
+       ftqf |= IXGBE_FTQF_QUEUE_ENABLE;
+
+       IXGBE_WRITE_REG(hw, IXGBE_DAQF(index), filter->dst_ip);
+       IXGBE_WRITE_REG(hw, IXGBE_SAQF(index), filter->src_ip);
+       IXGBE_WRITE_REG(hw, IXGBE_SDPQF(index), sdpqf);
+       IXGBE_WRITE_REG(hw, IXGBE_FTQF(index), ftqf);
+
+       l34timir |= IXGBE_L34T_IMIR_RESERVE;
+       l34timir |= (uint32_t)(rx_queue << IXGBE_L34T_IMIR_QUEUE_SHIFT);
+       IXGBE_WRITE_REG(hw, IXGBE_L34T_IMIR(index), l34timir);
+       return 0;
+}
+
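
Illustration (not part of this patch): matching TCP traffic to
destination port 80. The field names come from the driver code above
(which is also why this patch adds <netinet/in.h> for IPPROTO_*); the
rte_eth_dev_add_5tuple_filter() wrapper name is assumed from the same
series. Note the mask convention used above: 0 means the field is
compared, non-zero means it is ignored.

#include <netinet/in.h>
#include <rte_ethdev.h>

/* Steer TCP packets with destination port 80 to Rx queue 2, at filter
 * index 0, ignoring addresses and the source port. */
static int
steer_http_flows(uint8_t port_id)
{
        struct rte_5tuple_filter filter = {
                .dst_port      = 80,
                .protocol      = IPPROTO_TCP,
                .dst_port_mask = 0,     /* compare */
                .protocol_mask = 0,     /* compare */
                .src_ip_mask   = 1,     /* ignore */
                .dst_ip_mask   = 1,
                .src_port_mask = 1,
                .priority      = 1,     /* within the 5tuple min/max range */
        };

        return rte_eth_dev_add_5tuple_filter(port_id, 0 /* index */,
                                             &filter, 2 /* rx_queue */);
}
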
+/*
+ * remove a 5tuple filter
+ *
+ * @param
+ * dev: Pointer to struct rte_eth_dev.
+ * index: the index at which the filter is allocated.
+ *
+ * @return
+ *    - On success, zero.
+ *    - On failure, a negative value.
+ */
+static int
+ixgbe_remove_5tuple_filter(struct rte_eth_dev *dev,
+                       uint16_t index)
+{
+       struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+       if (hw->mac.type != ixgbe_mac_82599EB)
+               return -ENOSYS;
+
+       if (index >= IXGBE_MAX_FTQF_FILTERS)
+               return -EINVAL;  /* filter index is out of range. */
+
+       IXGBE_WRITE_REG(hw, IXGBE_DAQF(index), 0);
+       IXGBE_WRITE_REG(hw, IXGBE_SAQF(index), 0);
+       IXGBE_WRITE_REG(hw, IXGBE_SDPQF(index), 0);
+       IXGBE_WRITE_REG(hw, IXGBE_FTQF(index), 0);
+       IXGBE_WRITE_REG(hw, IXGBE_L34T_IMIR(index), 0);
+       return 0;
+}
+
+/*
+ * get a 5tuple filter
+ *
+ * @param
+ * dev: Pointer to struct rte_eth_dev.
+ * index: the index at which the filter is allocated.
+ * filter: pointer to the filter that is returned.
+ * *rx_queue: pointer to the queue id the filter is assigned to.
+ *
+ * @return
+ *    - On success, zero.
+ *    - On failure, a negative value.
+ */
+static int
+ixgbe_get_5tuple_filter(struct rte_eth_dev *dev, uint16_t index,
+                       struct rte_5tuple_filter *filter, uint16_t *rx_queue)
+{
+       struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       uint32_t sdpqf, ftqf, l34timir;
+       uint8_t mask;
+       enum ixgbe_5tuple_protocol proto;
+
+       if (hw->mac.type != ixgbe_mac_82599EB)
+               return -ENOSYS;
+
+       if (index >= IXGBE_MAX_FTQF_FILTERS)
+               return -EINVAL;  /* filter index is out of range. */
+
+       ftqf = IXGBE_READ_REG(hw, IXGBE_FTQF(index));
+       if (ftqf & IXGBE_FTQF_QUEUE_ENABLE) {
+               proto = (enum ixgbe_5tuple_protocol)(ftqf & IXGBE_FTQF_PROTOCOL_MASK);
+               filter->protocol = revert_protocol_type(proto);
+               filter->priority = (ftqf >> IXGBE_FTQF_PRIORITY_SHIFT) &
+                                       IXGBE_FTQF_PRIORITY_MASK;
+               mask = (uint8_t)((ftqf >> IXGBE_FTQF_5TUPLE_MASK_SHIFT) &
+                                       IXGBE_FTQF_5TUPLE_MASK_MASK);
+               filter->src_ip_mask =
+                       (mask & IXGBE_FTQF_SOURCE_ADDR_MASK) ? 1 : 0;
+               filter->dst_ip_mask =
+                       (mask & IXGBE_FTQF_DEST_ADDR_MASK) ? 1 : 0;
+               filter->src_port_mask =
+                       (mask & IXGBE_FTQF_SOURCE_PORT_MASK) ? 1 : 0;
+               filter->dst_port_mask =
+                       (mask & IXGBE_FTQF_DEST_PORT_MASK) ? 1 : 0;
+               filter->protocol_mask =
+                       (mask & IXGBE_FTQF_PROTOCOL_COMP_MASK) ? 1 : 0;
+
+               sdpqf = IXGBE_READ_REG(hw, IXGBE_SDPQF(index));
+               filter->dst_port = (sdpqf & IXGBE_SDPQF_DSTPORT) >>
+                                       IXGBE_SDPQF_DSTPORT_SHIFT;
+               filter->src_port = sdpqf & IXGBE_SDPQF_SRCPORT;
+               filter->dst_ip = IXGBE_READ_REG(hw, IXGBE_DAQF(index));
+               filter->src_ip = IXGBE_READ_REG(hw, IXGBE_SAQF(index));
+
+               l34timir = IXGBE_READ_REG(hw, IXGBE_L34T_IMIR(index));
+               *rx_queue = (l34timir & IXGBE_L34T_IMIR_QUEUE) >>
+                                       IXGBE_L34T_IMIR_QUEUE_SHIFT;
+               return 0;
+       }
+       return -ENOENT;
+}
+
 static struct rte_driver rte_ixgbe_driver = {
        .type = PMD_PDEV,
        .init = rte_ixgbe_pmd_init,