/*-
* BSD LICENSE
*
- * Copyright(c) 2010-2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
#include <rte_ethdev.h>
#include <rte_atomic.h>
#include <rte_malloc.h>
+#include <rte_dev.h>
#include "ixgbe_logs.h"
#include "ixgbe/ixgbe_api.h"
static int ixgbe_mirror_rule_reset(struct rte_eth_dev *dev,
uint8_t rule_id);
+static void ixgbevf_add_mac_addr(struct rte_eth_dev *dev,
+ struct ether_addr *mac_addr,
+ uint32_t index, uint32_t pool);
+static void ixgbevf_remove_mac_addr(struct rte_eth_dev *dev, uint32_t index);
+
/*
* Define VF Stats MACRO for Non "cleared on read" register
*/
.rx_queue_release = ixgbe_dev_rx_queue_release,
.tx_queue_setup = ixgbe_dev_tx_queue_setup,
.tx_queue_release = ixgbe_dev_tx_queue_release,
+ .mac_addr_add = ixgbevf_add_mac_addr,
+ .mac_addr_remove = ixgbevf_remove_mac_addr,
};
/**
}
}
+/*
+ * Ensure that all SW/FW semaphores are released before the first NVM or
+ * PHY access.  Called once during device init: if a previous process
+ * exited without releasing its hardware locks, force-release them here.
+ */
+static void
+ixgbe_swfw_lock_reset(struct ixgbe_hw *hw)
+{
+	uint16_t mask;
+
+	/*
+	 * Acquiring the PHY lock should not fail at this early stage; if it
+	 * does, it is because a previous run of the application exited
+	 * without releasing it.  So force the release of the faulty lock.
+	 * (Release of the common lock is done automatically by the
+	 * swfw_sync function.)
+	 */
+	mask = IXGBE_GSSR_PHY0_SM << hw->bus.func;
+	if (ixgbe_acquire_swfw_semaphore(hw, mask) < 0) {
+		DEBUGOUT1("SWFW phy%d lock released", hw->bus.func);
+	}
+	ixgbe_release_swfw_semaphore(hw, mask);
+
+	/*
+	 * These locks are trickier since they are common to all ports; but
+	 * swfw_sync retries long enough (1s) to be almost certain that if
+	 * the lock cannot be taken, it is because the semaphore was left
+	 * improperly locked rather than legitimately held by another port.
+	 */
+	mask = IXGBE_GSSR_EEP_SM | IXGBE_GSSR_MAC_CSR_SM | IXGBE_GSSR_SW_MNG_SM;
+	if (ixgbe_acquire_swfw_semaphore(hw, mask) < 0) {
+		DEBUGOUT("SWFW common locks released");
+	}
+	ixgbe_release_swfw_semaphore(hw, mask);
+}
+
/*
* This function is based on code in ixgbe_attach() in ixgbe/ixgbe.c.
* It returns 0 on success.
return -EIO;
}
+ /* pick up the PCI bus settings for reporting later */
+ ixgbe_get_bus_info(hw);
+
+ /* Unlock any pending hardware semaphore */
+ ixgbe_swfw_lock_reset(hw);
+
/* Initialize DCB configuration*/
memset(dcb_config, 0, sizeof(struct ixgbe_dcb_config));
ixgbe_dcb_init(hw,dcb_config);
/* disable interrupt */
ixgbe_disable_intr(hw);
- /* pick up the PCI bus settings for reporting later */
- ixgbe_get_bus_info(hw);
-
/* reset mappings for queue statistics hw counters*/
ixgbe_reset_qstat_mappings(hw);
hw->mac.num_rar_entries, 0);
if (eth_dev->data->mac_addrs == NULL) {
PMD_INIT_LOG(ERR,
- "Failed to allocate %d bytes needed to store MAC addresses",
+ "Failed to allocate %u bytes needed to store "
+ "MAC addresses",
ETHER_ADDR_LEN * hw->mac.num_rar_entries);
return -ENOMEM;
}
return 0;
}
+
+/*
+ * Negotiate mailbox API version with the PF.
+ * After reset, the API version is always set to the basic one
+ * (ixgbe_mbox_api_10).  We then try to negotiate, starting with the most
+ * recent version.  If all negotiation attempts fail, we proceed with the
+ * default one (ixgbe_mbox_api_10), so this function cannot itself fail.
+ */
+static void
+ixgbevf_negotiate_api(struct ixgbe_hw *hw)
+{
+	int32_t i;
+
+	/* start with highest supported, proceed down */
+	static const enum ixgbe_pfvf_api_rev sup_ver[] = {
+		ixgbe_mbox_api_11,
+		ixgbe_mbox_api_10,
+	};
+
+	/*
+	 * Empty-bodied loop: stops at the first version the PF accepts
+	 * (negotiate returns 0) or after exhausting the sup_ver table.
+	 */
+	for (i = 0;
+			i != RTE_DIM(sup_ver) &&
+			ixgbevf_negotiate_api_version(hw, sup_ver[i]) != 0;
+			i++)
+		;
+}
+
/*
* Virtual Function device init
*/
eth_ixgbevf_dev_init(__attribute__((unused)) struct eth_driver *eth_drv,
struct rte_eth_dev *eth_dev)
{
- struct rte_pci_device *pci_dev;
- struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
int diag;
+ uint32_t tc, tcs;
+ struct rte_pci_device *pci_dev;
+ struct ixgbe_hw *hw =
+ IXGBE_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
struct ixgbe_vfta * shadow_vfta =
IXGBE_DEV_PRIVATE_TO_VFTA(eth_dev->data->dev_private);
struct ixgbe_hwstrip *hwstrip =
/* Disable the interrupts for VF */
ixgbevf_intr_disable(hw);
- hw->mac.num_rar_entries = hw->mac.max_rx_queues;
+ hw->mac.num_rar_entries = 128; /* The MAX of the underlying PF */
diag = hw->mac.ops.reset_hw(hw);
if (diag != IXGBE_SUCCESS) {
return (diag);
}
+ /* negotiate mailbox API version to use with the PF. */
+ ixgbevf_negotiate_api(hw);
+
+ /* Get Rx/Tx queue count via mailbox, which is ready after reset_hw */
+ ixgbevf_get_queues(hw, &tcs, &tc);
+
/* Allocate memory for storing MAC addresses */
eth_dev->data->mac_addrs = rte_zmalloc("ixgbevf", ETHER_ADDR_LEN *
hw->mac.num_rar_entries, 0);
if (eth_dev->data->mac_addrs == NULL) {
PMD_INIT_LOG(ERR,
- "Failed to allocate %d bytes needed to store MAC addresses",
+ "Failed to allocate %u bytes needed to store "
+ "MAC addresses",
ETHER_ADDR_LEN * hw->mac.num_rar_entries);
return -ENOMEM;
}
{
.name = "rte_ixgbe_pmd",
.id_table = pci_id_ixgbe_map,
-#ifdef RTE_EAL_UNBIND_PORTS
.drv_flags = RTE_PCI_DRV_NEED_IGB_UIO,
-#endif
},
.eth_dev_init = eth_ixgbe_dev_init,
.dev_private_size = sizeof(struct ixgbe_adapter),
{
.name = "rte_ixgbevf_pmd",
.id_table = pci_id_ixgbevf_map,
-#ifdef RTE_EAL_UNBIND_PORTS
.drv_flags = RTE_PCI_DRV_NEED_IGB_UIO,
-#endif
},
.eth_dev_init = eth_ixgbevf_dev_init,
.dev_private_size = sizeof(struct ixgbe_adapter),
* Invoked once at EAL init time.
* Register itself as the [Poll Mode] Driver of PCI IXGBE devices.
*/
-int
-rte_ixgbe_pmd_init(void)
+static int
+rte_ixgbe_pmd_init(const char *name __rte_unused, const char *params __rte_unused)
{
PMD_INIT_FUNC_TRACE();
* Invoked one at EAL init time.
* Register itself as the [Virtual Poll Mode] Driver of PCI niantic devices.
*/
-int
-rte_ixgbevf_pmd_init(void)
+static int
+rte_ixgbevf_pmd_init(const char *name __rte_unused, const char *param __rte_unused)
{
DEBUGFUNC("rte_ixgbevf_pmd_init");
/* IXGBE devices don't support half duplex */
if ((dev->data->dev_conf.link_duplex != ETH_LINK_AUTONEG_DUPLEX) &&
(dev->data->dev_conf.link_duplex != ETH_LINK_FULL_DUPLEX)) {
- PMD_INIT_LOG(ERR, "Invalid link_duplex (%u) for port %u\n",
+ PMD_INIT_LOG(ERR, "Invalid link_duplex (%hu) for port %hhu\n",
dev->data->dev_conf.link_duplex,
dev->data->port_id);
return -EINVAL;
/* Turn on the laser */
ixgbe_enable_tx_laser(hw);
+ /* Skip link setup if loopback mode is enabled for 82599. */
+ if (hw->mac.type == ixgbe_mac_82599EB &&
+ dev->data->dev_conf.lpbk_mode == IXGBE_LPBK_82599_TX_RX)
+ goto skip_link_setup;
+
err = ixgbe_check_link(hw, &speed, &link_up, 0);
if (err)
goto error;
speed = IXGBE_LINK_SPEED_10GB_FULL;
break;
default:
- PMD_INIT_LOG(ERR, "Invalid link_speed (%u) for port %u\n",
- dev->data->dev_conf.link_speed, dev->data->port_id);
+ PMD_INIT_LOG(ERR, "Invalid link_speed (%hu) for port %hhu\n",
+ dev->data->dev_conf.link_speed,
+ dev->data->port_id);
goto error;
}
if (err)
goto error;
+skip_link_setup:
+
/* check if lsc interrupt is enabled */
if (dev->data->dev_conf.intr_conf.lsc != 0)
ixgbe_dev_lsc_interrupt_setup(dev);
struct rte_eth_link link;
struct ixgbe_hw *hw =
IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct ixgbe_vf_info *vfinfo =
+ *IXGBE_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private);
+ int vf;
PMD_INIT_FUNC_TRACE();
/* stop adapter */
ixgbe_stop_adapter(hw);
+ for (vf = 0; vfinfo != NULL &&
+ vf < dev->pci_dev->max_vfs; vf++)
+ vfinfo[vf].clear_to_send = false;
+
/* Turn off the laser */
ixgbe_disable_tx_laser(hw);
stats->oerrors = 0;
+ /* XON/XOFF pause frames */
+ stats->tx_pause_xon = hw_stats->lxontxc;
+ stats->rx_pause_xon = hw_stats->lxonrxc;
+ stats->tx_pause_xoff = hw_stats->lxofftxc;
+ stats->rx_pause_xoff = hw_stats->lxoffrxc;
+
/* Flow Director Stats registers */
hw_stats->fdirmatch += IXGBE_READ_REG(hw, IXGBE_FDIRMATCH);
hw_stats->fdirmiss += IXGBE_READ_REG(hw, IXGBE_FDIRMISS);
dev_info->max_vmdq_pools = ETH_16_POOLS;
else
dev_info->max_vmdq_pools = ETH_64_POOLS;
+ dev_info->rx_offload_capa =
+ DEV_RX_OFFLOAD_VLAN_STRIP |
+ DEV_RX_OFFLOAD_IPV4_CKSUM |
+ DEV_RX_OFFLOAD_UDP_CKSUM |
+ DEV_RX_OFFLOAD_TCP_CKSUM;
+ dev_info->tx_offload_capa =
+ DEV_TX_OFFLOAD_VLAN_INSERT |
+ DEV_TX_OFFLOAD_IPV4_CKSUM |
+ DEV_TX_OFFLOAD_UDP_CKSUM |
+ DEV_TX_OFFLOAD_TCP_CKSUM |
+ DEV_TX_OFFLOAD_SCTP_CKSUM;
}
/* return 0 means link status changed, -1 means not changed */
}
/*
- * It executes link_update after knowing an interrupt occured.
+ * It executes link_update after knowing an interrupt occurred.
*
* @param dev
* Pointer to struct rte_eth_dev.
int err;
uint32_t rx_buf_size;
uint32_t max_high_water;
+ uint32_t mflcn;
enum ixgbe_fc_mode rte_fcmode_2_ixgbe_fcmode[] = {
ixgbe_fc_none,
ixgbe_fc_rx_pause,
hw->fc.send_xon = fc_conf->send_xon;
err = ixgbe_fc_enable(hw);
+
/* Not negotiated is not an error case */
if ((err == IXGBE_SUCCESS) || (err == IXGBE_ERR_FC_NOT_NEGOTIATED)) {
+
+ /* check if we want to forward MAC frames - driver doesn't have native
+ * capability to do that, so we'll write the registers ourselves */
+
+ mflcn = IXGBE_READ_REG(hw, IXGBE_MFLCN);
+
+ /* set or clear MFLCN.PMCF bit depending on configuration */
+ if (fc_conf->mac_ctrl_frame_fwd != 0)
+ mflcn |= IXGBE_MFLCN_PMCF;
+ else
+ mflcn &= ~IXGBE_MFLCN_PMCF;
+
+ IXGBE_WRITE_REG(hw, IXGBE_MFLCN, mflcn);
+ IXGBE_WRITE_FLUSH(hw);
+
return 0;
}
hw->mac.ops.reset_hw(hw);
+ /* negotiate mailbox API version to use with the PF. */
+ ixgbevf_negotiate_api(hw);
+
ixgbevf_dev_tx_init(dev);
/* This can fail when allocating mbufs for descriptor rings */
return 0;
}
+
+/*
+ * VF callback for the .mac_addr_add ethdev op: program an additional
+ * unicast MAC address via the PF/VF mailbox.  The 'index' and 'pool'
+ * arguments of the generic API are not used by the VF path.
+ *
+ * NOTE(review): the literal '2' passed to ixgbevf_set_uc_addr_vf() is a
+ * mailbox-protocol index, not a RAR index — confirm its meaning against
+ * the IXGBE_VF_SET_MACVLAN handling in the base/PF driver.
+ */
+static void
+ixgbevf_add_mac_addr(struct rte_eth_dev *dev, struct ether_addr *mac_addr,
+		     __attribute__((unused)) uint32_t index,
+		     __attribute__((unused)) uint32_t pool)
+{
+	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+	int diag;
+
+	/* On success, return silently; mailbox failures are only logged
+	 * because this ethdev callback has a void return type. */
+	diag = ixgbevf_set_uc_addr_vf(hw, 2, mac_addr->addr_bytes);
+	if (diag == 0)
+		return;
+	PMD_DRV_LOG(ERR, "Unable to add MAC address - diag=%d", diag);
+}
+
+/*
+ * VF callback for the .mac_addr_remove ethdev op: delete the MAC address
+ * stored at 'index' in dev->data->mac_addrs via the PF/VF mailbox.
+ */
+static void
+ixgbevf_remove_mac_addr(struct rte_eth_dev *dev, uint32_t index)
+{
+	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+	struct ether_addr *dev_mac_addrs;
+	uint32_t i;
+	int diag;
+
+	/*
+	 * The IXGBE_VF_SET_MACVLAN command of the ixgbe-pf driver does
+	 * not support the deletion of a given MAC address.
+	 * Instead, it imposes to delete all MAC addresses, then to add again
+	 * all MAC address with the exception of the one to be deleted.
+	 */
+	(void) ixgbevf_set_uc_addr_vf(hw, 0, NULL);
+
+	/* Add again all MAC addresses, excepted the deleted one.
+	 * NOTE(review): this loop re-sends every RAR slot, including unused
+	 * (all-zero) entries — presumably the PF ignores zero addresses;
+	 * confirm against the PF-side mailbox handler. */
+	dev_mac_addrs = dev->data->mac_addrs;
+	for (i = 0; i < hw->mac.num_rar_entries; i++) {
+		/* Skip the deleted MAC address */
+		if (i == index)
+			continue;
+		diag = ixgbevf_set_uc_addr_vf(hw, 2,
+					      dev_mac_addrs[i].addr_bytes);
+		if (diag != 0)
+			PMD_DRV_LOG(ERR, "Adding MAC address failed diag=%d",
+				    diag);
+	}
+}
+
+/*
+ * PMD registration descriptors: with PMD_REGISTER_DRIVER the drivers are
+ * registered automatically at load time, replacing the previously
+ * exported rte_ixgbe_pmd_init()/rte_ixgbevf_pmd_init() entry points
+ * (now static, see above).
+ */
+static struct rte_driver rte_ixgbe_driver = {
+	.type = PMD_PDEV,
+	.init = rte_ixgbe_pmd_init,
+};
+
+/* VF (SR-IOV virtual function) counterpart of the PF driver above. */
+static struct rte_driver rte_ixgbevf_driver = {
+	.type = PMD_PDEV,
+	.init = rte_ixgbevf_pmd_init,
+};
+
+PMD_REGISTER_DRIVER(rte_ixgbe_driver);
+PMD_REGISTER_DRIVER(rte_ixgbevf_driver);