lib: fix various compilation warnings
diff --git a/lib/librte_pmd_ixgbe/ixgbe_ethdev.c b/lib/librte_pmd_ixgbe/ixgbe_ethdev.c
index bbe92e3..6770c22 100644
--- a/lib/librte_pmd_ixgbe/ixgbe_ethdev.c
+++ b/lib/librte_pmd_ixgbe/ixgbe_ethdev.c
@@ -1,7 +1,7 @@
 /*-
  *   BSD LICENSE
  * 
- *   Copyright(c) 2010-2012 Intel Corporation. All rights reserved.
+ *   Copyright(c) 2010-2013 Intel Corporation. All rights reserved.
  *   All rights reserved.
  * 
  *   Redistribution and use in source and binary forms, with or without 
@@ -114,7 +114,7 @@ static void ixgbe_vlan_tpid_set(struct rte_eth_dev *dev, uint16_t tpid_id);
 static void ixgbe_vlan_hw_strip_bitmap_set(struct rte_eth_dev *dev, 
                uint16_t queue, bool on);
 static void ixgbe_vlan_strip_queue_set(struct rte_eth_dev *dev, uint16_t queue,
-                                 int on);
+               int on);
 static void ixgbe_vlan_offload_set(struct rte_eth_dev *dev, int mask);
 static void ixgbe_vlan_hw_filter_enable(struct rte_eth_dev *dev);
 static void ixgbe_vlan_hw_filter_disable(struct rte_eth_dev *dev);
@@ -128,17 +128,20 @@ static void ixgbe_vlan_hw_extend_disable(struct rte_eth_dev *dev);
 static int ixgbe_dev_led_on(struct rte_eth_dev *dev);
 static int ixgbe_dev_led_off(struct rte_eth_dev *dev);
 static int  ixgbe_flow_ctrl_set(struct rte_eth_dev *dev,
-                               struct rte_eth_fc_conf *fc_conf);
+               struct rte_eth_fc_conf *fc_conf);
+static int ixgbe_priority_flow_ctrl_set(struct rte_eth_dev *dev,
+               struct rte_eth_pfc_conf *pfc_conf);
 static void ixgbe_dev_link_status_print(struct rte_eth_dev *dev);
 static int ixgbe_dev_interrupt_setup(struct rte_eth_dev *dev);
 static int ixgbe_dev_interrupt_get_status(struct rte_eth_dev *dev);
 static int ixgbe_dev_interrupt_action(struct rte_eth_dev *dev);
 static void ixgbe_dev_interrupt_handler(struct rte_intr_handle *handle,
-                                                       void *param);
+               void *param);
 static void ixgbe_dev_interrupt_delayed_handler(void *param);
 static void ixgbe_add_rar(struct rte_eth_dev *dev, struct ether_addr *mac_addr,
-                               uint32_t index, uint32_t pool);
+               uint32_t index, uint32_t pool);
 static void ixgbe_remove_rar(struct rte_eth_dev *dev, uint32_t index);
+static void ixgbe_dcb_init(struct ixgbe_hw *hw,
+               struct ixgbe_dcb_config *dcb_config);
 
 /* For Virtual Function support */
 static int eth_ixgbevf_dev_init(struct eth_driver *eth_drv,
@@ -146,8 +149,10 @@ static int eth_ixgbevf_dev_init(struct eth_driver *eth_drv,
 static int  ixgbevf_dev_configure(struct rte_eth_dev *dev);
 static int  ixgbevf_dev_start(struct rte_eth_dev *dev);
 static void ixgbevf_dev_stop(struct rte_eth_dev *dev);
+static void ixgbevf_dev_close(struct rte_eth_dev *dev);
 static void ixgbevf_intr_disable(struct ixgbe_hw *hw);
-static void ixgbevf_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats);
+static void ixgbevf_dev_stats_get(struct rte_eth_dev *dev,
+               struct rte_eth_stats *stats);
 static void ixgbevf_dev_stats_reset(struct rte_eth_dev *dev);
 static int ixgbevf_vlan_filter_set(struct rte_eth_dev *dev, 
                uint16_t vlan_id, int on);
@@ -158,8 +163,8 @@ static void ixgbevf_set_vfta_all(struct rte_eth_dev *dev, bool on);
 
 
 /*
- *  * Define VF Stats MACRO for Non "cleared on read" register
- *   */
+ * Define VF Stats MACRO for Non "cleared on read" register
+ */
 #define UPDATE_VF_STAT(reg, last, cur)                         \
 {                                                               \
        u32 latest = IXGBE_READ_REG(hw, reg);                   \
@@ -199,8 +204,7 @@ static void ixgbevf_set_vfta_all(struct rte_eth_dev *dev, bool on);
  */
 static struct rte_pci_id pci_id_ixgbe_map[] = {
 
-#undef RTE_LIBRTE_IGB_PMD
-#define RTE_PCI_DEV_ID_DECL(vend, dev) {RTE_PCI_DEVICE(vend, dev)},
+#define RTE_PCI_DEV_ID_DECL_IXGBE(vend, dev) {RTE_PCI_DEVICE(vend, dev)},
 #include "rte_pci_dev_ids.h"
 
 { .vendor_id = 0, /* sentinel */ },
@@ -211,13 +215,11 @@ static struct rte_pci_id pci_id_ixgbe_map[] = {
  * The set of PCI devices this driver supports (for 82599 VF)
  */
 static struct rte_pci_id pci_id_ixgbevf_map[] = {
-{
-       .vendor_id = PCI_VENDOR_ID_INTEL,
-       .device_id = IXGBE_DEV_ID_82599_VF,
-       .subsystem_vendor_id = PCI_ANY_ID,
-       .subsystem_device_id = PCI_ANY_ID,
-},
+
+#define RTE_PCI_DEV_ID_DECL_IXGBEVF(vend, dev) {RTE_PCI_DEVICE(vend, dev)},
+#include "rte_pci_dev_ids.h"
 { .vendor_id = 0, /* sentinel */ },
 };
 
 static struct eth_dev_ops ixgbe_eth_dev_ops = {
@@ -245,6 +247,7 @@ static struct eth_dev_ops ixgbe_eth_dev_ops = {
        .dev_led_on           = ixgbe_dev_led_on,
        .dev_led_off          = ixgbe_dev_led_off,
        .flow_ctrl_set        = ixgbe_flow_ctrl_set,
+       .priority_flow_ctrl_set = ixgbe_priority_flow_ctrl_set,
        .mac_addr_add         = ixgbe_add_rar,
        .mac_addr_remove      = ixgbe_remove_rar,
        .fdir_add_signature_filter    = ixgbe_fdir_add_signature_filter,
@@ -269,8 +272,7 @@ static struct eth_dev_ops ixgbevf_eth_dev_ops = {
        .link_update          = ixgbe_dev_link_update,
        .stats_get            = ixgbevf_dev_stats_get,
        .stats_reset          = ixgbevf_dev_stats_reset,
-       .dev_close            = ixgbevf_dev_stop,
-
+       .dev_close            = ixgbevf_dev_close,
        .dev_infos_get        = ixgbe_dev_info_get,
        .vlan_filter_set      = ixgbevf_vlan_filter_set,
        .vlan_strip_queue_set = ixgbevf_vlan_strip_queue_set,
@@ -412,12 +414,12 @@ ixgbe_dev_queue_stats_mapping_set(struct rte_eth_dev *eth_dev,
        PMD_INIT_LOG(INFO, "Setting port %d, %s queue_id %d to stat index %d\n",
                     (int)(eth_dev->data->port_id), is_rx ? "RX" : "TX", queue_id, stat_idx);
 
-       n = queue_id / NB_QMAP_FIELDS_PER_QSM_REG;
+       n = (uint8_t)(queue_id / NB_QMAP_FIELDS_PER_QSM_REG);
        if (n >= IXGBE_NB_STAT_MAPPING_REGS) {
                PMD_INIT_LOG(ERR, "Nb of stat mapping registers exceeded\n");
                return -EIO;
        }
-       offset = queue_id % NB_QMAP_FIELDS_PER_QSM_REG;
+       offset = (uint8_t)(queue_id % NB_QMAP_FIELDS_PER_QSM_REG);
 
        /* Now clear any previous stat_idx set */
        clearing_mask <<= (QSM_REG_NB_BITS_PER_QMAP_FIELD * offset);
@@ -471,6 +473,48 @@ ixgbe_restore_statistics_mapping(struct rte_eth_dev * dev)
        }
 }
 
+static void
+ixgbe_dcb_init(struct ixgbe_hw *hw, struct ixgbe_dcb_config *dcb_config)
+{
+       uint8_t i;
+       struct ixgbe_dcb_tc_config *tc;
+       uint8_t dcb_max_tc = IXGBE_DCB_MAX_TRAFFIC_CLASS;
+
+       dcb_config->num_tcs.pg_tcs = dcb_max_tc;
+       dcb_config->num_tcs.pfc_tcs = dcb_max_tc;
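+       /*
+        * Split the bandwidth evenly: 100/8 = 12, and the (i & 1) term
+        * gives each odd TC one extra percent so the shares sum to 100.
+        */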
+       for (i = 0; i < dcb_max_tc; i++) {
+               tc = &dcb_config->tc_config[i];
+               tc->path[IXGBE_DCB_TX_CONFIG].bwg_id = i;
+               tc->path[IXGBE_DCB_TX_CONFIG].bwg_percent =
+                                (uint8_t)(100 / dcb_max_tc + (i & 1));
+               tc->path[IXGBE_DCB_RX_CONFIG].bwg_id = i;
+               tc->path[IXGBE_DCB_RX_CONFIG].bwg_percent =
+                                (uint8_t)(100 / dcb_max_tc + (i & 1));
+               tc->pfc = ixgbe_dcb_pfc_disabled;
+       }
+
+       /* Initialize default user to priority mapping, UPx->TC0 */
+       tc = &dcb_config->tc_config[0];
+       tc->path[IXGBE_DCB_TX_CONFIG].up_to_tc_bitmap = 0xFF;
+       tc->path[IXGBE_DCB_RX_CONFIG].up_to_tc_bitmap = 0xFF;
+       for (i = 0; i < IXGBE_DCB_MAX_BW_GROUP; i++) {
+               dcb_config->bw_percentage[IXGBE_DCB_TX_CONFIG][i] = 100;
+               dcb_config->bw_percentage[IXGBE_DCB_RX_CONFIG][i] = 100;
+       }
+       dcb_config->rx_pba_cfg = ixgbe_dcb_pba_equal;
+       dcb_config->pfc_mode_enable = false;
+       dcb_config->vt_mode = true;
+       dcb_config->round_robin_enable = false;
+       /* support all DCB capabilities in 82599 */
+       dcb_config->support.capabilities = 0xFF;
+
+       /* we only support 4 TCs for X540 */
+       if (hw->mac.type == ixgbe_mac_X540) {
+               dcb_config->num_tcs.pg_tcs = 4;
+               dcb_config->num_tcs.pfc_tcs = 4;
+       }
+}
+
 /*
  * This function is based on code in ixgbe_attach() in ixgbe/ixgbe.c.
  * It returns 0 on success.
@@ -520,13 +564,17 @@ eth_ixgbe_dev_init(__attribute__((unused)) struct eth_driver *eth_drv,
                return -EIO;
        }
 
+       /* Initialize DCB configuration */
+       memset(dcb_config, 0, sizeof(struct ixgbe_dcb_config));
+       ixgbe_dcb_init(hw, dcb_config);
+
        /* Get Hardware Flow Control setting */
        hw->fc.requested_mode = ixgbe_fc_full;
        hw->fc.current_mode = ixgbe_fc_full;
        hw->fc.pause_time = IXGBE_FC_PAUSE;
-       hw->fc.low_water = IXGBE_FC_LO;
-       for (i = 0; i < MAX_TRAFFIC_CLASS; i++)
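+       /* low_water and high_water are per traffic class now; set defaults for each */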
+       for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
+               hw->fc.low_water[i] = IXGBE_FC_LO;
                hw->fc.high_water[i] = IXGBE_FC_HI;
+       }
        hw->fc.send_xon = 1;
 
        ixgbe_disable_intr(hw);
@@ -662,6 +710,10 @@ eth_ixgbevf_dev_init(__attribute__((unused)) struct eth_driver *eth_drv,
 
        hw->mac.num_rar_entries = hw->mac.max_rx_queues;
        diag = hw->mac.ops.reset_hw(hw);
+       if (diag != IXGBE_SUCCESS) {
+               PMD_INIT_LOG(ERR, "VF Initialization Failure: %d", diag);
+               return (diag);
+       }
 
        /* Allocate memory for storing MAC addresses */
        eth_dev->data->mac_addrs = rte_zmalloc("ixgbevf", ETHER_ADDR_LEN *
@@ -684,7 +736,7 @@ eth_ixgbevf_dev_init(__attribute__((unused)) struct eth_driver *eth_drv,
 
                default:
                        PMD_INIT_LOG(ERR, "VF Initialization Failure: %d", diag);
-                       return (diag);
+                       return (-EIO);
        }
 
        PMD_INIT_LOG(DEBUG, "\nport %d vendorID=0x%x deviceID=0x%x mac.type=%s\n",
@@ -1045,7 +1097,8 @@ ixgbe_dev_start(struct rte_eth_dev *dev)
                IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        int err, link_up = 0, negotiate = 0;
        uint32_t speed = 0;
-
+       int mask = 0;
+
        PMD_INIT_FUNC_TRACE();
 
        /* IXGBE devices don't support half duplex */
@@ -1072,7 +1125,7 @@ ixgbe_dev_start(struct rte_eth_dev *dev)
        err = ixgbe_dev_rx_init(dev);
        if (err) {
                PMD_INIT_LOG(ERR, "Unable to initialize RX hardware\n");
-               return err;
+               goto error;
        }
 
        ixgbe_dev_rxtx_start(dev);
@@ -1116,7 +1169,7 @@ ixgbe_dev_start(struct rte_eth_dev *dev)
        default:
                PMD_INIT_LOG(ERR, "Invalid link_speed (%u) for port %u\n",
                                dev->data->dev_conf.link_speed, dev->data->port_id);
-               return -EINVAL;
+               goto error;
        }
 
        err = ixgbe_setup_link(hw, speed, negotiate, link_up);
@@ -1133,6 +1186,9 @@ ixgbe_dev_start(struct rte_eth_dev *dev)
        mask = ETH_VLAN_STRIP_MASK | ETH_VLAN_FILTER_MASK | \
                ETH_VLAN_EXTEND_MASK;
        ixgbe_vlan_offload_set(dev, mask);
+
+       /* Configure DCB hw */
+       ixgbe_configure_dcb(dev);
 
        if (dev->data->dev_conf.fdir_conf.mode != RTE_FDIR_MODE_NONE) {
                err = ixgbe_fdir_configure(dev);
@@ -1272,12 +1328,12 @@ ixgbe_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
        hw_stats->gprc += IXGBE_READ_REG(hw, IXGBE_GPRC);
 
        if (hw->mac.type != ixgbe_mac_82598EB) {
-               hw_stats->gorc += IXGBE_READ_REG(hw, IXGBE_GORCL) +
-                   ((u64)IXGBE_READ_REG(hw, IXGBE_GORCH) << 32);
-               hw_stats->gotc += IXGBE_READ_REG(hw, IXGBE_GOTCL) +
-                   ((u64)IXGBE_READ_REG(hw, IXGBE_GOTCH) << 32);
-               hw_stats->tor += IXGBE_READ_REG(hw, IXGBE_TORL) +
-                   ((u64)IXGBE_READ_REG(hw, IXGBE_TORH) << 32);
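+               /* accumulate low and high words of the 64-bit byte counters separately */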
+               hw_stats->gorc += IXGBE_READ_REG(hw, IXGBE_GORCL);
+               hw_stats->gorc += ((u64)IXGBE_READ_REG(hw, IXGBE_GORCH) << 32);
+               hw_stats->gotc += IXGBE_READ_REG(hw, IXGBE_GOTCL);
+               hw_stats->gotc += ((u64)IXGBE_READ_REG(hw, IXGBE_GOTCH) << 32);
+               hw_stats->tor += IXGBE_READ_REG(hw, IXGBE_TORL);
+               hw_stats->tor += ((u64)IXGBE_READ_REG(hw, IXGBE_TORH) << 32);
                hw_stats->lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXCNT);
                hw_stats->lxoffrxc += IXGBE_READ_REG(hw, IXGBE_LXOFFRXCNT);
        } else {
@@ -1452,8 +1508,8 @@ ixgbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 {
        struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 
-       dev_info->max_rx_queues = hw->mac.max_rx_queues;
-       dev_info->max_tx_queues = hw->mac.max_tx_queues;
+       dev_info->max_rx_queues = (uint16_t)hw->mac.max_rx_queues;
+       dev_info->max_tx_queues = (uint16_t)hw->mac.max_tx_queues;
        dev_info->min_rx_bufsize = 1024; /* cf BSIZEPACKET in SRRCTL register */
        dev_info->max_rx_pktlen = 15872; /* includes CRC, cf MAXFRS register */
        dev_info->max_mac_addrs = hw->mac.num_rar_entries;
@@ -1738,7 +1794,8 @@ ixgbe_dev_interrupt_delayed_handler(void *param)
  *  void
  */
 static void
-ixgbe_dev_interrupt_handler(struct rte_intr_handle *handle, void *param)
+ixgbe_dev_interrupt_handler(__rte_unused struct rte_intr_handle *handle,
+                                                       void *param)
 {
        int64_t timeout;
        struct rte_eth_link link;
@@ -1823,10 +1880,10 @@ ixgbe_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
        hw->fc.requested_mode = rte_fcmode_2_ixgbe_fcmode[fc_conf->mode];
        hw->fc.pause_time     = fc_conf->pause_time;
        hw->fc.high_water[0]  = fc_conf->high_water;
-       hw->fc.low_water      = fc_conf->low_water;
+       hw->fc.low_water[0]   = fc_conf->low_water;
        hw->fc.send_xon       = fc_conf->send_xon;
 
-       err = ixgbe_fc_enable(hw, 0);
+       err = ixgbe_fc_enable(hw);
        /* Not negotiated is not an error case */
        if ((err == IXGBE_SUCCESS) || (err == IXGBE_ERR_FC_NOT_NEGOTIATED)) {
                return 0;
@@ -1836,6 +1893,211 @@ ixgbe_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
        return -EIO;
 }
 
+/**
+ *  ixgbe_dcb_pfc_enable_generic - Enable priority flow control
+ *  @hw: pointer to hardware structure
+ *  @tc_num: traffic class number
+ *  Enable priority flow control according to the current settings.
+ */
+static int
+ixgbe_dcb_pfc_enable_generic(struct ixgbe_hw *hw, uint8_t tc_num)
+{
+       int ret_val = 0;
+       uint32_t mflcn_reg, fccfg_reg;
+       uint32_t reg;
+       uint32_t fcrtl, fcrth;
+       uint8_t i;
+       uint8_t nb_rx_en;
+
+       /* Validate the water mark configuration */
+       if (!hw->fc.pause_time) {
+               ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
+               goto out;
+       }
+
+       /* Low water mark of zero causes XOFF floods */
+       if (hw->fc.current_mode & ixgbe_fc_tx_pause) {
+               /* High/low water marks cannot be 0 */
+               if ((!hw->fc.high_water[tc_num]) || (!hw->fc.low_water[tc_num])) {
+                       PMD_INIT_LOG(ERR, "Invalid water mark configuration\n");
+                       ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
+                       goto out;
+               }
+               if (hw->fc.low_water[tc_num] >= hw->fc.high_water[tc_num]) {
+                       PMD_INIT_LOG(ERR, "Invalid water mark configuration\n");
+                       ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
+                       goto out;
+               }
+       }
+       /* Negotiate the fc mode to use */
+       ixgbe_fc_autoneg(hw);
+
+       /* Disable any previous flow control settings */
+       mflcn_reg = IXGBE_READ_REG(hw, IXGBE_MFLCN);
+       mflcn_reg &= ~(IXGBE_MFLCN_RPFCE_SHIFT | IXGBE_MFLCN_RFCE | IXGBE_MFLCN_RPFCE);
+
+       fccfg_reg = IXGBE_READ_REG(hw, IXGBE_FCCFG);
+       fccfg_reg &= ~(IXGBE_FCCFG_TFCE_802_3X | IXGBE_FCCFG_TFCE_PRIORITY);
+
+       switch (hw->fc.current_mode) {
+       case ixgbe_fc_none:
+               /*
+                * If more than one RX priority flow control is enabled,
+                * TX pause cannot be disabled.
+                */
+               nb_rx_en = 0;
+               for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
+                       reg = IXGBE_READ_REG(hw, IXGBE_FCRTH_82599(i));
+                       if (reg & IXGBE_FCRTH_FCEN)
+                               nb_rx_en++;
+               }
+               if (nb_rx_en > 1)
+                       fccfg_reg |= IXGBE_FCCFG_TFCE_PRIORITY;
+               break;
+       case ixgbe_fc_rx_pause:
+               /*
+                * Rx Flow control is enabled and Tx Flow control is
+                * disabled by software override. Since there really
+                * isn't a way to advertise that we are capable of RX
+                * Pause ONLY, we will advertise that we support both
+                * symmetric and asymmetric Rx PAUSE.  Later, we will
+                * disable the adapter's ability to send PAUSE frames.
+                */
+               mflcn_reg |= IXGBE_MFLCN_RPFCE;
+               /*
+                * If more than one RX priority flow control is enabled,
+                * TX pause cannot be disabled.
+                */
+               nb_rx_en = 0;
+               for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
+                       reg = IXGBE_READ_REG(hw, IXGBE_FCRTH_82599(i));
+                       if (reg & IXGBE_FCRTH_FCEN)
+                               nb_rx_en++;
+               }
+               if (nb_rx_en > 1)
+                       fccfg_reg |= IXGBE_FCCFG_TFCE_PRIORITY;
+               break;
+       case ixgbe_fc_tx_pause:
+               /*
+                * Tx Flow control is enabled, and Rx Flow control is
+                * disabled by software override.
+                */
+               fccfg_reg |= IXGBE_FCCFG_TFCE_PRIORITY;
+               break;
+       case ixgbe_fc_full:
+               /* Flow control (both Rx and Tx) is enabled by SW override. */
+               mflcn_reg |= IXGBE_MFLCN_RPFCE;
+               fccfg_reg |= IXGBE_FCCFG_TFCE_PRIORITY;
+               break;
+       default:
+               DEBUGOUT("Flow control param set incorrectly\n");
+               ret_val = IXGBE_ERR_CONFIG;
+               goto out;
+               break;
+       }
+
+       /* Set 802.3x based flow control settings. */
+       mflcn_reg |= IXGBE_MFLCN_DPF;
+       IXGBE_WRITE_REG(hw, IXGBE_MFLCN, mflcn_reg);
+       IXGBE_WRITE_REG(hw, IXGBE_FCCFG, fccfg_reg);
+
+       /* Set up and enable Rx high/low water mark thresholds, enable XON. */
+       if ((hw->fc.current_mode & ixgbe_fc_tx_pause) &&
+               hw->fc.high_water[tc_num]) {
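+               /* water marks are in KB; << 10 converts them to bytes for the registers */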
+               fcrtl = (hw->fc.low_water[tc_num] << 10) | IXGBE_FCRTL_XONE;
+               IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(tc_num), fcrtl);
+               fcrth = (hw->fc.high_water[tc_num] << 10) | IXGBE_FCRTH_FCEN;
+       } else {
+               IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(tc_num), 0);
+               /*
+                * In order to prevent Tx hangs when the internal Tx
+                * switch is enabled we must set the high water mark
+                * to the maximum FCRTH value.  This allows the Tx
+                * switch to function even under heavy Rx workloads.
+                */
+               fcrth = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(tc_num)) - 32;
+       }
+       IXGBE_WRITE_REG(hw, IXGBE_FCRTH_82599(tc_num), fcrth);
+
+       /* Configure pause time (2 TCs per register) */
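+       /* 0x00010001 replicates the 16-bit pause time into both halves of FCTTV */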
+       reg = hw->fc.pause_time * 0x00010001;
+       for (i = 0; i < (IXGBE_DCB_MAX_TRAFFIC_CLASS / 2); i++)
+               IXGBE_WRITE_REG(hw, IXGBE_FCTTV(i), reg);
+
+       /* Configure flow control refresh threshold value */
+       IXGBE_WRITE_REG(hw, IXGBE_FCRTV, hw->fc.pause_time / 2);
+
+out:
+       return ret_val;
+}
+
+static int
+ixgbe_dcb_pfc_enable(struct rte_eth_dev *dev, uint8_t tc_num)
+{
+       struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       int32_t ret_val = IXGBE_NOT_IMPLEMENTED;
+
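+       /* 82598EB has a different flow control register layout; leave it not-implemented */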
+       if (hw->mac.type != ixgbe_mac_82598EB) {
+               ret_val = ixgbe_dcb_pfc_enable_generic(hw, tc_num);
+       }
+       return ret_val;
+}
+
+static int
+ixgbe_priority_flow_ctrl_set(struct rte_eth_dev *dev,
+               struct rte_eth_pfc_conf *pfc_conf)
+{
+       int err;
+       uint32_t rx_buf_size;
+       uint32_t max_high_water;
+       uint8_t tc_num;
+       uint8_t  map[IXGBE_DCB_MAX_USER_PRIORITY] = { 0 };
+       struct ixgbe_hw *hw =
+                IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       struct ixgbe_dcb_config *dcb_config =
+                IXGBE_DEV_PRIVATE_TO_DCB_CFG(dev->data->dev_private);
+
+       enum ixgbe_fc_mode rte_fcmode_2_ixgbe_fcmode[] = {
+               ixgbe_fc_none,
+               ixgbe_fc_rx_pause,
+               ixgbe_fc_tx_pause,
+               ixgbe_fc_full
+       };
+
+       PMD_INIT_FUNC_TRACE();
+
+       ixgbe_dcb_unpack_map_cee(dcb_config, IXGBE_DCB_RX_CONFIG, map);
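+       /* map[] gives, for each user priority, the traffic class it belongs to */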
+       tc_num = map[pfc_conf->priority];
+       rx_buf_size = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(tc_num));
+       PMD_INIT_LOG(DEBUG, "Rx packet buffer size = 0x%x\n", rx_buf_size);
+       /*
+        * At least reserve one Ethernet frame for watermark
+        * high_water/low_water in kilo bytes for ixgbe
+        */
+       max_high_water = (rx_buf_size - ETHER_MAX_LEN) >> IXGBE_RXPBSIZE_SHIFT;
+       if ((pfc_conf->fc.high_water > max_high_water) ||
+               (pfc_conf->fc.high_water <= pfc_conf->fc.low_water)) {
+               PMD_INIT_LOG(ERR, "Invalid high/low water setup value in KB\n");
+               PMD_INIT_LOG(ERR, "High_water must <=  0x%x\n", max_high_water);
+               return (-EINVAL);
+       }
+
+       hw->fc.requested_mode = rte_fcmode_2_ixgbe_fcmode[pfc_conf->fc.mode];
+       hw->fc.pause_time = pfc_conf->fc.pause_time;
+       hw->fc.send_xon = pfc_conf->fc.send_xon;
+       hw->fc.low_water[tc_num] = pfc_conf->fc.low_water;
+       hw->fc.high_water[tc_num] = pfc_conf->fc.high_water;
+
+       err = ixgbe_dcb_pfc_enable(dev, tc_num);
+
+       /* Not negotiated is not an error case */
+       if ((err == IXGBE_SUCCESS) || (err == IXGBE_ERR_FC_NOT_NEGOTIATED))
+               return 0;
+
+       PMD_INIT_LOG(ERR, "ixgbe_dcb_pfc_enable = 0x%x\n", err);
+       return -EIO;
+}
+
 static void
 ixgbe_add_rar(struct rte_eth_dev *dev, struct ether_addr *mac_addr,
                                uint32_t index, uint32_t pool)
@@ -1873,15 +2135,24 @@ ixgbevf_dev_configure(struct rte_eth_dev *dev)
 {
        struct rte_eth_conf* conf = &dev->data->dev_conf;
 
+       PMD_INIT_LOG(DEBUG, "\nConfigured Virtual Function port id: %d\n",
+               dev->data->port_id);
 
+       /*
+        * VF has no ability to enable/disable HW CRC
+        * Keep the persistent behavior the same as Host PF
+        */
+#ifndef RTE_LIBRTE_IXGBE_PF_DISABLE_STRIP_CRC
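+       /* default build keeps CRC stripping on for the VF, matching the PF */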
        if (!conf->rxmode.hw_strip_crc) {
-               /*
-                * VF has no ability to enable/disable HW CRC
-                * Keep the persistent behavior the same as Host PF
-                */
                PMD_INIT_LOG(INFO, "VF can't disable HW CRC Strip\n");
                conf->rxmode.hw_strip_crc = 1;
        }
+#else
+       if (conf->rxmode.hw_strip_crc) {
+               PMD_INIT_LOG(INFO, "VF can't enable HW CRC Strip\n");
+               conf->rxmode.hw_strip_crc = 0;
+       }
+#endif
 
        return 0;
 }
@@ -1889,14 +2160,17 @@ ixgbevf_dev_configure(struct rte_eth_dev *dev)
 static int
 ixgbevf_dev_start(struct rte_eth_dev *dev)
 {
-       int err = 0;
+       int err, mask = 0;
+
        PMD_INIT_LOG(DEBUG, "ixgbevf_dev_start");
 
        ixgbevf_dev_tx_init(dev);
+
+       /* This can fail when allocating mbufs for descriptor rings */
        err = ixgbevf_dev_rx_init(dev);
-       if(err){
+       if (err) {
+               PMD_INIT_LOG(ERR, "Unable to initialize RX hardware (%d)\n", err);
                ixgbe_dev_clear_queues(dev);
-               PMD_INIT_LOG(ERR,"Unable to initialize RX hardware\n");
                return err;
        }
        
@@ -1919,10 +2193,30 @@ ixgbevf_dev_stop(struct rte_eth_dev *dev)
        struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 
        PMD_INIT_LOG(DEBUG, "ixgbevf_dev_stop");
+
+       hw->adapter_stopped = TRUE;
+       ixgbe_stop_adapter(hw);
+
+       /*
+        * Clear what we set, but we still keep shadow_vfta to
+        * restore after device starts
+        */
+       ixgbevf_set_vfta_all(dev, 0);
+
+       ixgbe_dev_clear_queues(dev);
+}
+
+static void
+ixgbevf_dev_close(struct rte_eth_dev *dev)
+{
+       struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+       PMD_INIT_LOG(DEBUG, "ixgbevf_dev_close");
 
        ixgbe_reset_hw(hw);
-       hw->adapter_stopped = 0;
-       ixgbe_stop_adapter(hw);
+
+       ixgbevf_dev_stop(dev);
+
        /* reprogram the RAR[0] in case user changed it. */
        ixgbe_set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);
 }
@@ -2006,7 +2300,8 @@ ixgbevf_vlan_offload_set(struct rte_eth_dev *dev, int mask)
 {
        struct ixgbe_hw *hw =
                IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
-       uint32_t i, on = 0;
+       uint16_t i;
+       int on = 0;
 
        /* VF function only support hw strip feature, others are not support */
        if(mask & ETH_VLAN_STRIP_MASK){
@@ -2016,4 +2311,3 @@ ixgbevf_vlan_offload_set(struct rte_eth_dev *dev, int mask)
                        ixgbevf_vlan_strip_queue_set(dev,i,on);
        }
 }
-