lib: fix various compilation warnings

diff --git a/lib/librte_pmd_ixgbe/ixgbe_ethdev.c b/lib/librte_pmd_ixgbe/ixgbe_ethdev.c
index b0fc67b..6770c22 100644
--- a/lib/librte_pmd_ixgbe/ixgbe_ethdev.c
+++ b/lib/librte_pmd_ixgbe/ixgbe_ethdev.c
@@ -1,7 +1,7 @@
 /*-
  *   BSD LICENSE
  * 
- *   Copyright(c) 2010-2012 Intel Corporation. All rights reserved.
+ *   Copyright(c) 2010-2013 Intel Corporation. All rights reserved.
  *   All rights reserved.
  * 
  *   Redistribution and use in source and binary forms, with or without 
@@ -114,7 +114,7 @@ static void ixgbe_vlan_tpid_set(struct rte_eth_dev *dev, uint16_t tpid_id);
 static void ixgbe_vlan_hw_strip_bitmap_set(struct rte_eth_dev *dev, 
                uint16_t queue, bool on);
 static void ixgbe_vlan_strip_queue_set(struct rte_eth_dev *dev, uint16_t queue,
-                                 int on);
+               int on);
 static void ixgbe_vlan_offload_set(struct rte_eth_dev *dev, int mask);
 static void ixgbe_vlan_hw_filter_enable(struct rte_eth_dev *dev);
 static void ixgbe_vlan_hw_filter_disable(struct rte_eth_dev *dev);
@@ -128,7 +128,7 @@ static void ixgbe_vlan_hw_extend_disable(struct rte_eth_dev *dev);
 static int ixgbe_dev_led_on(struct rte_eth_dev *dev);
 static int ixgbe_dev_led_off(struct rte_eth_dev *dev);
 static int  ixgbe_flow_ctrl_set(struct rte_eth_dev *dev,
-                               struct rte_eth_fc_conf *fc_conf);
+               struct rte_eth_fc_conf *fc_conf);
 static int ixgbe_priority_flow_ctrl_set(struct rte_eth_dev *dev,
                struct rte_eth_pfc_conf *pfc_conf);
 static void ixgbe_dev_link_status_print(struct rte_eth_dev *dev);
@@ -136,10 +136,10 @@ static int ixgbe_dev_interrupt_setup(struct rte_eth_dev *dev);
 static int ixgbe_dev_interrupt_get_status(struct rte_eth_dev *dev);
 static int ixgbe_dev_interrupt_action(struct rte_eth_dev *dev);
 static void ixgbe_dev_interrupt_handler(struct rte_intr_handle *handle,
-                                                       void *param);
+               void *param);
 static void ixgbe_dev_interrupt_delayed_handler(void *param);
 static void ixgbe_add_rar(struct rte_eth_dev *dev, struct ether_addr *mac_addr,
-                               uint32_t index, uint32_t pool);
+               uint32_t index, uint32_t pool);
 static void ixgbe_remove_rar(struct rte_eth_dev *dev, uint32_t index);
 static void ixgbe_dcb_init(struct ixgbe_hw *hw,struct ixgbe_dcb_config *dcb_config);
 
@@ -149,8 +149,10 @@ static int eth_ixgbevf_dev_init(struct eth_driver *eth_drv,
 static int  ixgbevf_dev_configure(struct rte_eth_dev *dev);
 static int  ixgbevf_dev_start(struct rte_eth_dev *dev);
 static void ixgbevf_dev_stop(struct rte_eth_dev *dev);
+static void ixgbevf_dev_close(struct rte_eth_dev *dev);
 static void ixgbevf_intr_disable(struct ixgbe_hw *hw);
-static void ixgbevf_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats);
+static void ixgbevf_dev_stats_get(struct rte_eth_dev *dev,
+               struct rte_eth_stats *stats);
 static void ixgbevf_dev_stats_reset(struct rte_eth_dev *dev);
 static int ixgbevf_vlan_filter_set(struct rte_eth_dev *dev, 
                uint16_t vlan_id, int on);
@@ -161,8 +163,8 @@ static void ixgbevf_set_vfta_all(struct rte_eth_dev *dev, bool on);
 
 
 /*
- *  * Define VF Stats MACRO for Non "cleared on read" register
- *   */
+ * Define VF Stats MACRO for Non "cleared on read" register
+ */
 #define UPDATE_VF_STAT(reg, last, cur)                         \
 {                                                               \
        u32 latest = IXGBE_READ_REG(hw, reg);                   \
@@ -202,8 +204,7 @@ static void ixgbevf_set_vfta_all(struct rte_eth_dev *dev, bool on);
  */
 static struct rte_pci_id pci_id_ixgbe_map[] = {
 
-#undef RTE_LIBRTE_IGB_PMD
-#define RTE_PCI_DEV_ID_DECL(vend, dev) {RTE_PCI_DEVICE(vend, dev)},
+#define RTE_PCI_DEV_ID_DECL_IXGBE(vend, dev) {RTE_PCI_DEVICE(vend, dev)},
 #include "rte_pci_dev_ids.h"
 
 { .vendor_id = 0, /* sentinel */ },
@@ -214,13 +215,11 @@ static struct rte_pci_id pci_id_ixgbe_map[] = {
  * The set of PCI devices this driver supports (for 82599 VF)
  */
 static struct rte_pci_id pci_id_ixgbevf_map[] = {
-{
-       .vendor_id = PCI_VENDOR_ID_INTEL,
-       .device_id = IXGBE_DEV_ID_82599_VF,
-       .subsystem_vendor_id = PCI_ANY_ID,
-       .subsystem_device_id = PCI_ANY_ID,
-},
+
+#define RTE_PCI_DEV_ID_DECL_IXGBEVF(vend, dev) {RTE_PCI_DEVICE(vend, dev)},
+#include "rte_pci_dev_ids.h"
 { .vendor_id = 0, /* sentinel */ },
+
 };
 
 static struct eth_dev_ops ixgbe_eth_dev_ops = {
@@ -273,8 +272,7 @@ static struct eth_dev_ops ixgbevf_eth_dev_ops = {
        .link_update          = ixgbe_dev_link_update,
        .stats_get            = ixgbevf_dev_stats_get,
        .stats_reset          = ixgbevf_dev_stats_reset,
-       .dev_close            = ixgbevf_dev_stop,
-
+       .dev_close            = ixgbevf_dev_close,
        .dev_infos_get        = ixgbe_dev_info_get,
        .vlan_filter_set      = ixgbevf_vlan_filter_set,
        .vlan_strip_queue_set = ixgbevf_vlan_strip_queue_set,
@@ -416,12 +414,12 @@ ixgbe_dev_queue_stats_mapping_set(struct rte_eth_dev *eth_dev,
        PMD_INIT_LOG(INFO, "Setting port %d, %s queue_id %d to stat index %d\n",
                     (int)(eth_dev->data->port_id), is_rx ? "RX" : "TX", queue_id, stat_idx);
 
-       n = queue_id / NB_QMAP_FIELDS_PER_QSM_REG;
+       n = (uint8_t)(queue_id / NB_QMAP_FIELDS_PER_QSM_REG);
        if (n >= IXGBE_NB_STAT_MAPPING_REGS) {
                PMD_INIT_LOG(ERR, "Nb of stat mapping registers exceeded\n");
                return -EIO;
        }
-       offset = queue_id % NB_QMAP_FIELDS_PER_QSM_REG;
+       offset = (uint8_t)(queue_id % NB_QMAP_FIELDS_PER_QSM_REG);
 
        /* Now clear any previous stat_idx set */
        clearing_mask <<= (QSM_REG_NB_BITS_PER_QMAP_FIELD * offset);
@@ -480,16 +478,18 @@ ixgbe_dcb_init(struct ixgbe_hw *hw,struct ixgbe_dcb_config *dcb_config)
 {
        uint8_t i;
        struct ixgbe_dcb_tc_config *tc;
-       int dcb_max_tc = IXGBE_DCB_MAX_TRAFFIC_CLASS;
+       uint8_t dcb_max_tc = IXGBE_DCB_MAX_TRAFFIC_CLASS;
 
        dcb_config->num_tcs.pg_tcs = dcb_max_tc;
        dcb_config->num_tcs.pfc_tcs = dcb_max_tc;
        for (i = 0; i < dcb_max_tc; i++) {
                tc = &dcb_config->tc_config[i];
                tc->path[IXGBE_DCB_TX_CONFIG].bwg_id = i;
-               tc->path[IXGBE_DCB_TX_CONFIG].bwg_percent = 100/dcb_max_tc + (i & 1);
+               tc->path[IXGBE_DCB_TX_CONFIG].bwg_percent =
+                                (uint8_t)(100/dcb_max_tc + (i & 1));
                tc->path[IXGBE_DCB_RX_CONFIG].bwg_id = i;
-               tc->path[IXGBE_DCB_RX_CONFIG].bwg_percent = 100/dcb_max_tc + (i & 1);
+               tc->path[IXGBE_DCB_RX_CONFIG].bwg_percent =
+                                (uint8_t)(100/dcb_max_tc + (i & 1));
                tc->pfc = ixgbe_dcb_pfc_disabled;
        }
 
@@ -710,6 +710,10 @@ eth_ixgbevf_dev_init(__attribute__((unused)) struct eth_driver *eth_drv,
 
        hw->mac.num_rar_entries = hw->mac.max_rx_queues;
        diag = hw->mac.ops.reset_hw(hw);
+       if (diag != IXGBE_SUCCESS) {
+               PMD_INIT_LOG(ERR, "VF Initialization Failure: %d", diag);
+               return (diag);
+       }
 
        /* Allocate memory for storing MAC addresses */
        eth_dev->data->mac_addrs = rte_zmalloc("ixgbevf", ETHER_ADDR_LEN *
@@ -732,7 +736,7 @@ eth_ixgbevf_dev_init(__attribute__((unused)) struct eth_driver *eth_drv,
 
                default:
                        PMD_INIT_LOG(ERR, "VF Initialization Failure: %d", diag);
-                       return (diag);
+                       return (-EIO);
        }
 
        PMD_INIT_LOG(DEBUG, "\nport %d vendorID=0x%x deviceID=0x%x mac.type=%s\n",
@@ -1093,7 +1097,8 @@ ixgbe_dev_start(struct rte_eth_dev *dev)
                IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        int err, link_up = 0, negotiate = 0;
        uint32_t speed = 0;
-
+       int mask = 0;
+
        PMD_INIT_FUNC_TRACE();
 
        /* IXGBE devices don't support half duplex */
@@ -1120,7 +1125,7 @@ ixgbe_dev_start(struct rte_eth_dev *dev)
        err = ixgbe_dev_rx_init(dev);
        if (err) {
                PMD_INIT_LOG(ERR, "Unable to initialize RX hardware\n");
-               return err;
+               goto error;
        }
 
        ixgbe_dev_rxtx_start(dev);
@@ -1164,7 +1169,7 @@ ixgbe_dev_start(struct rte_eth_dev *dev)
        default:
                PMD_INIT_LOG(ERR, "Invalid link_speed (%u) for port %u\n",
                                dev->data->dev_conf.link_speed, dev->data->port_id);
-               return -EINVAL;
+               goto error;
        }
 
        err = ixgbe_setup_link(hw, speed, negotiate, link_up);
@@ -1323,12 +1328,12 @@ ixgbe_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
        hw_stats->gprc += IXGBE_READ_REG(hw, IXGBE_GPRC);
 
        if (hw->mac.type != ixgbe_mac_82598EB) {
-               hw_stats->gorc += IXGBE_READ_REG(hw, IXGBE_GORCL) +
-                   ((u64)IXGBE_READ_REG(hw, IXGBE_GORCH) << 32);
-               hw_stats->gotc += IXGBE_READ_REG(hw, IXGBE_GOTCL) +
-                   ((u64)IXGBE_READ_REG(hw, IXGBE_GOTCH) << 32);
-               hw_stats->tor += IXGBE_READ_REG(hw, IXGBE_TORL) +
-                   ((u64)IXGBE_READ_REG(hw, IXGBE_TORH) << 32);
+               hw_stats->gorc += IXGBE_READ_REG(hw, IXGBE_GORCL);
+               hw_stats->gorc += ((u64)IXGBE_READ_REG(hw, IXGBE_GORCH) << 32);
+               hw_stats->gotc += IXGBE_READ_REG(hw, IXGBE_GOTCL);
+               hw_stats->gotc += ((u64)IXGBE_READ_REG(hw, IXGBE_GOTCH) << 32);
+               hw_stats->tor += IXGBE_READ_REG(hw, IXGBE_TORL);
+               hw_stats->tor += ((u64)IXGBE_READ_REG(hw, IXGBE_TORH) << 32);
                hw_stats->lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXCNT);
                hw_stats->lxoffrxc += IXGBE_READ_REG(hw, IXGBE_LXOFFRXCNT);
        } else {
@@ -1503,8 +1508,8 @@ ixgbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 {
        struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 
-       dev_info->max_rx_queues = hw->mac.max_rx_queues;
-       dev_info->max_tx_queues = hw->mac.max_tx_queues;
+       dev_info->max_rx_queues = (uint16_t)hw->mac.max_rx_queues;
+       dev_info->max_tx_queues = (uint16_t)hw->mac.max_tx_queues;
        dev_info->min_rx_bufsize = 1024; /* cf BSIZEPACKET in SRRCTL register */
        dev_info->max_rx_pktlen = 15872; /* includes CRC, cf MAXFRS register */
        dev_info->max_mac_addrs = hw->mac.num_rar_entries;
@@ -1789,7 +1794,8 @@ ixgbe_dev_interrupt_delayed_handler(void *param)
  *  void
  */
 static void
-ixgbe_dev_interrupt_handler(struct rte_intr_handle *handle, void *param)
+ixgbe_dev_interrupt_handler(__rte_unused struct rte_intr_handle *handle,
+                                                       void *param)
 {
        int64_t timeout;
        struct rte_eth_link link;
@@ -2129,11 +2135,13 @@ ixgbevf_dev_configure(struct rte_eth_dev *dev)
 {
        struct rte_eth_conf* conf = &dev->data->dev_conf;
 
+       PMD_INIT_LOG(DEBUG, "\nConfigured Virtual Function port id: %d\n",
+               dev->data->port_id);
 
-               /*
-                * VF has no ability to enable/disable HW CRC
-                * Keep the persistent behavior the same as Host PF
-                */
+       /*
+        * VF has no ability to enable/disable HW CRC
+        * Keep the persistent behavior the same as Host PF
+        */
 #ifndef RTE_LIBRTE_IXGBE_PF_DISABLE_STRIP_CRC
        if (!conf->rxmode.hw_strip_crc) {
                PMD_INIT_LOG(INFO, "VF can't disable HW CRC Strip\n");
@@ -2152,14 +2160,17 @@ ixgbevf_dev_configure(struct rte_eth_dev *dev)
 static int
 ixgbevf_dev_start(struct rte_eth_dev *dev)
 {
-       int err = 0;
+       int err, mask = 0;
+
        PMD_INIT_LOG(DEBUG, "ixgbevf_dev_start");
 
        ixgbevf_dev_tx_init(dev);
+
+       /* This can fail when allocating mbufs for descriptor rings */
        err = ixgbevf_dev_rx_init(dev);
-       if(err){
+       if (err) {
+               PMD_INIT_LOG(ERR, "Unable to initialize RX hardware (%d)\n", err);
                ixgbe_dev_clear_queues(dev);
-               PMD_INIT_LOG(ERR,"Unable to initialize RX hardware\n");
                return err;
        }
        
@@ -2182,10 +2193,30 @@ ixgbevf_dev_stop(struct rte_eth_dev *dev)
        struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 
        PMD_INIT_LOG(DEBUG, "ixgbevf_dev_stop");
+
+       hw->adapter_stopped = TRUE;
+       ixgbe_stop_adapter(hw);
+
+       /*
+        * Clear what we set, but we still keep shadow_vfta to
+        * restore after device starts
+        */
+       ixgbevf_set_vfta_all(dev, 0);
+
+       ixgbe_dev_clear_queues(dev);
+}
+
+static void
+ixgbevf_dev_close(struct rte_eth_dev *dev)
+{
+       struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+       PMD_INIT_LOG(DEBUG, "ixgbevf_dev_close");
 
        ixgbe_reset_hw(hw);
-       hw->adapter_stopped = 0;
-       ixgbe_stop_adapter(hw);
+
+       ixgbevf_dev_stop(dev);
+
        /* reprogram the RAR[0] in case user changed it. */
        ixgbe_set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);
 }
@@ -2269,7 +2300,8 @@ ixgbevf_vlan_offload_set(struct rte_eth_dev *dev, int mask)
 {
        struct ixgbe_hw *hw =
                IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
-       uint32_t i, on = 0;
+       uint16_t i;
+       int on = 0;
 
        /* VF function only support hw strip feature, others are not support */
        if(mask & ETH_VLAN_STRIP_MASK){
@@ -2279,4 +2311,3 @@ ixgbevf_vlan_offload_set(struct rte_eth_dev *dev, int mask)
                        ixgbevf_vlan_strip_queue_set(dev,i,on);
        }
 }
-
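
For reference, a minimal standalone sketch of the two patterns the hunks above lean on to quiet the compiler: explicit narrowing casts (as in the ixgbe_dev_queue_stats_mapping_set and ixgbe_dcb_init hunks) and marking deliberately unused parameters (as in ixgbe_dev_interrupt_handler). The constant NB_FIELDS_PER_REG, the handler() function, and the build flags below are hypothetical stand-ins, not DPDK code; __rte_unused in DPDK expands to the same GCC "unused" attribute used here.

/*
 * Standalone illustration, not part of the patch.
 * Build with a compiler that warns on implicit narrowing,
 * e.g. "gcc -Wall -Wextra -Wconversion -c example.c".
 */
#include <stdint.h>
#include <stdio.h>

#define NB_FIELDS_PER_REG 4   /* hypothetical, stands in for NB_QMAP_FIELDS_PER_QSM_REG */

static void
handler(__attribute__((unused)) void *handle, void *param)
{
	/* 'handle' is deliberately ignored; the attribute silences -Wunused-parameter. */
	printf("param=%p\n", param);
}

int
main(void)
{
	uint16_t queue_id = 13;
	uint8_t n, offset;

	/*
	 * queue_id / NB_FIELDS_PER_REG is computed as int; assigning it to a
	 * uint8_t is an implicit narrowing conversion that -Wconversion flags.
	 * The explicit cast keeps the same value and documents that the
	 * truncation is intentional, which is the pattern the patch applies.
	 */
	n = (uint8_t)(queue_id / NB_FIELDS_PER_REG);
	offset = (uint8_t)(queue_id % NB_FIELDS_PER_REG);

	handler(NULL, &queue_id);
	printf("n=%d offset=%d\n", n, offset);
	return 0;
}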