* Copyright(c) 2010-2013 Intel Corporation. All rights reserved.
* All rights reserved.
*
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
* are met:
*
- * * Redistributions of source code must retain the above copyright
+ * * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
* distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
*/
#include <sys/queue.h>
#define IXGBE_LINK_DOWN_CHECK_TIMEOUT 4000 /* ms */
#define IXGBE_LINK_UP_CHECK_TIMEOUT 1000 /* ms */
+#define IXGBEVF_PMD_NAME "rte_ixgbevf_pmd" /* PMD name */
+
#define IXGBE_QUEUE_STAT_COUNTERS (sizeof(hw_stats->qprc) / sizeof(hw_stats->qprc[0]))
static int eth_ixgbe_dev_init(struct eth_driver *eth_drv,
static void ixgbe_vlan_strip_queue_set(struct rte_eth_dev *dev, uint16_t queue,
int on);
static void ixgbe_vlan_offload_set(struct rte_eth_dev *dev, int mask);
-static void ixgbe_vlan_hw_filter_enable(struct rte_eth_dev *dev);
-static void ixgbe_vlan_hw_filter_disable(struct rte_eth_dev *dev);
-static void ixgbe_vlan_hw_strip_enable_all(struct rte_eth_dev *dev);
-static void ixgbe_vlan_hw_strip_disable_all(struct rte_eth_dev *dev);
static void ixgbe_vlan_hw_strip_enable(struct rte_eth_dev *dev, uint16_t queue);
static void ixgbe_vlan_hw_strip_disable(struct rte_eth_dev *dev, uint16_t queue);
static void ixgbe_vlan_hw_extend_enable(struct rte_eth_dev *dev);
static void ixgbevf_vlan_offload_set(struct rte_eth_dev *dev, int mask);
static void ixgbevf_set_vfta_all(struct rte_eth_dev *dev, bool on);
-
/*
* Define VF Stats MACRO for Non "cleared on read" register
*/
.rx_queue_setup = ixgbe_dev_rx_queue_setup,
.rx_queue_release = ixgbe_dev_rx_queue_release,
.rx_queue_count = ixgbe_dev_rx_queue_count,
+ .rx_descriptor_done = ixgbe_dev_rx_descriptor_done,
.tx_queue_setup = ixgbe_dev_tx_queue_setup,
.tx_queue_release = ixgbe_dev_tx_queue_release,
.dev_led_on = ixgbe_dev_led_on,
.vlan_offload_set = ixgbevf_vlan_offload_set,
.rx_queue_setup = ixgbe_dev_rx_queue_setup,
.rx_queue_release = ixgbe_dev_rx_queue_release,
- .rx_queue_count = ixgbe_dev_rx_queue_count,
.tx_queue_setup = ixgbe_dev_tx_queue_setup,
.tx_queue_release = ixgbe_dev_tx_queue_release,
};
}
}
+/*
+ * Reset the PF hardware and then set the "PF Reset Done" (PFRSTD) bit in
+ * CTRL_EXT so that PF/VF mailbox operations can resume after the reset.
+ * Without PFRSTD, VFs polling for reset completion would hang.
+ *
+ * Returns the status code from ixgbe_reset_hw() (IXGBE_SUCCESS == 0 on
+ * success); note the PFRSTD write is performed regardless of that status.
+ */
+static inline int32_t
+ixgbe_pf_reset_hw(struct ixgbe_hw *hw)
+{
+	uint32_t ctrl_ext;
+	int32_t status;
+
+	status = ixgbe_reset_hw(hw);
+
+	ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
+	/* Set PF Reset Done bit so PF/VF Mail Ops can work */
+	ctrl_ext |= IXGBE_CTRL_EXT_PFRSTD;
+	IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
+	/* flush posted register writes before returning */
+	IXGBE_WRITE_FLUSH(hw);
+
+	return status;
+}
+
static inline void
ixgbe_enable_intr(struct rte_eth_dev *dev)
{
/* initialize the hw strip bitmap*/
memset(hwstrip, 0, sizeof(*hwstrip));
- /* let hardware know driver is loaded */
+ /* initialize PF if max_vfs not zero */
+ ixgbe_pf_host_init(eth_dev);
+
ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
+ /* let hardware know driver is loaded */
ctrl_ext |= IXGBE_CTRL_EXT_DRV_LOAD;
+ /* Set PF Reset Done bit so PF/VF Mail Ops can work */
+ ctrl_ext |= IXGBE_CTRL_EXT_PFRSTD;
IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
+ IXGBE_WRITE_FLUSH(hw);
if (ixgbe_is_sfp(hw) && hw->phy.sfp_type != ixgbe_sfp_type_not_present)
PMD_INIT_LOG(DEBUG,
hw->mac.num_rar_entries = hw->mac.max_rx_queues;
diag = hw->mac.ops.reset_hw(hw);
+
if (diag != IXGBE_SUCCESS) {
PMD_INIT_LOG(ERR, "VF Initialization Failure: %d", diag);
+ RTE_LOG(ERR, PMD, "\tThe MAC address is not valid.\n"
+ "\tThe most likely cause of this error is that the VM host\n"
+ "\thas not assigned a valid MAC address to this VF device.\n"
+ "\tPlease consult the DPDK Release Notes (FAQ section) for\n"
+ "\ta possible solution to this problem.\n");
return (diag);
}
ETHER_ADDR_LEN * hw->mac.num_rar_entries);
return -ENOMEM;
}
+
/* Copy the permanent MAC address */
ether_addr_copy((struct ether_addr *) hw->mac.perm_addr,
ð_dev->data->mac_addrs[0]);
IXGBE_WRITE_REG(hw, IXGBE_EXVET, tpid << 16);
}
-static void
+void
ixgbe_vlan_hw_filter_disable(struct rte_eth_dev *dev)
{
struct ixgbe_hw *hw =
IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
}
-static void
+void
ixgbe_vlan_hw_filter_enable(struct rte_eth_dev *dev)
{
struct ixgbe_hw *hw =
ixgbe_vlan_hw_strip_bitmap_set(dev, queue, 1);
}
-static void
+void
ixgbe_vlan_hw_strip_disable_all(struct rte_eth_dev *dev)
{
struct ixgbe_hw *hw =
}
}
-static void
+void
ixgbe_vlan_hw_strip_enable_all(struct rte_eth_dev *dev)
{
struct ixgbe_hw *hw =
int err, link_up = 0, negotiate = 0;
uint32_t speed = 0;
int mask = 0;
+ int status;
PMD_INIT_FUNC_TRACE();
/* reinitialize adapter
* this calls reset and start */
- ixgbe_init_hw(hw);
+ status = ixgbe_pf_reset_hw(hw);
+ if (status != 0)
+ return -1;
hw->mac.ops.start_hw(hw);
+ /* configure PF module if SRIOV enabled */
+ ixgbe_pf_host_configure(dev);
+
/* initialize transmission unit */
ixgbe_dev_tx_init(dev);
}
/* Turn on the laser */
- if (hw->phy.multispeed_fiber)
- ixgbe_enable_tx_laser(hw);
+ ixgbe_enable_tx_laser(hw);
err = ixgbe_check_link(hw, &speed, &link_up, 0);
if (err)
ixgbe_disable_intr(hw);
/* reset the NIC */
- ixgbe_reset_hw(hw);
+ ixgbe_pf_reset_hw(hw);
hw->adapter_stopped = FALSE;
/* stop adapter */
ixgbe_stop_adapter(hw);
/* Turn off the laser */
- if (hw->phy.multispeed_fiber)
- ixgbe_disable_tx_laser(hw);
+ ixgbe_disable_tx_laser(hw);
ixgbe_dev_clear_queues(dev);
PMD_INIT_FUNC_TRACE();
- ixgbe_reset_hw(hw);
-
+ ixgbe_pf_reset_hw(hw);
ixgbe_dev_stop(dev);
hw->adapter_stopped = 1;
intr->flags |= IXGBE_FLAG_NEED_LINK_UPDATE;
}
+ if (eicr & IXGBE_EICR_MAILBOX)
+ intr->flags |= IXGBE_FLAG_MAILBOX;
+
return 0;
}
PMD_DRV_LOG(DEBUG, "intr action type %d\n", intr->flags);
+ if (intr->flags & IXGBE_FLAG_MAILBOX) {
+ ixgbe_pf_mbx_process(dev);
+ intr->flags &= ~IXGBE_FLAG_MAILBOX;
+ }
+
if (intr->flags & IXGBE_FLAG_NEED_LINK_UPDATE) {
/* get the link status before link update, for predicting later */
memset(&link, 0, sizeof(link));
struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
struct ixgbe_interrupt *intr =
IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
+ struct ixgbe_hw *hw =
+ IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ uint32_t eicr;
+
+ eicr = IXGBE_READ_REG(hw, IXGBE_EICR);
+ if (eicr & IXGBE_EICR_MAILBOX)
+ ixgbe_pf_mbx_process(dev);
if (intr->flags & IXGBE_FLAG_NEED_LINK_UPDATE) {
ixgbe_dev_link_update(dev, 0);
_rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC);
}
- PMD_DRV_LOG(DEBUG, "enable intr in delayed handler\n");
+ PMD_DRV_LOG(DEBUG, "enable intr in delayed handler S[%08x]\n", eicr);
ixgbe_enable_intr(dev);
rte_intr_enable(&(dev->pci_dev->intr_handle));
}