#include <stdarg.h>
#include <inttypes.h>
#include <netinet/in.h>
+#include <rte_string_fns.h>
#include <rte_byteorder.h>
#include <rte_common.h>
#include <rte_cycles.h>
uint16_t queue, bool on);
static void ixgbe_vlan_strip_queue_set(struct rte_eth_dev *dev, uint16_t queue,
int on);
+static void ixgbe_config_vlan_strip_on_all_queues(struct rte_eth_dev *dev,
+ int mask);
+static int ixgbe_vlan_offload_config(struct rte_eth_dev *dev, int mask);
static int ixgbe_vlan_offload_set(struct rte_eth_dev *dev, int mask);
static void ixgbe_vlan_hw_strip_enable(struct rte_eth_dev *dev, uint16_t queue);
static void ixgbe_vlan_hw_strip_disable(struct rte_eth_dev *dev, uint16_t queue);
static int ixgbe_dev_macsec_interrupt_setup(struct rte_eth_dev *dev);
static int ixgbe_dev_rxq_interrupt_setup(struct rte_eth_dev *dev);
static int ixgbe_dev_interrupt_get_status(struct rte_eth_dev *dev);
-static int ixgbe_dev_interrupt_action(struct rte_eth_dev *dev,
- struct rte_intr_handle *handle);
+static int ixgbe_dev_interrupt_action(struct rte_eth_dev *dev);
static void ixgbe_dev_interrupt_handler(void *param);
static void ixgbe_dev_interrupt_delayed_handler(void *param);
+static void ixgbe_dev_setup_link_alarm_handler(void *param);
+
static int ixgbe_add_rar(struct rte_eth_dev *dev, struct ether_addr *mac_addr,
uint32_t index, uint32_t pool);
static void ixgbe_remove_rar(struct rte_eth_dev *dev, uint32_t index);
uint16_t vlan_id, int on);
static void ixgbevf_vlan_strip_queue_set(struct rte_eth_dev *dev,
uint16_t queue, int on);
+static int ixgbevf_vlan_offload_config(struct rte_eth_dev *dev, int mask);
static int ixgbevf_vlan_offload_set(struct rte_eth_dev *dev, int mask);
static void ixgbevf_set_vfta_all(struct rte_eth_dev *dev, bool on);
static int ixgbevf_dev_rx_queue_intr_enable(struct rte_eth_dev *dev,
static void ixgbevf_set_ivar_map(struct ixgbe_hw *hw, int8_t direction,
uint8_t queue, uint8_t msix_vector);
static void ixgbevf_configure_msix(struct rte_eth_dev *dev);
+static void ixgbevf_dev_promiscuous_enable(struct rte_eth_dev *dev);
+static void ixgbevf_dev_promiscuous_disable(struct rte_eth_dev *dev);
static void ixgbevf_dev_allmulticast_enable(struct rte_eth_dev *dev);
static void ixgbevf_dev_allmulticast_disable(struct rte_eth_dev *dev);
{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599EN_SFP) },
{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_XAUI_LOM) },
{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_T3_LOM) },
- { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_LS) },
{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540T) },
{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540T1) },
{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_SFP) },
.xstats_get_names = ixgbevf_dev_xstats_get_names,
.dev_close = ixgbevf_dev_close,
.dev_reset = ixgbevf_dev_reset,
+ .promiscuous_enable = ixgbevf_dev_promiscuous_enable,
+ .promiscuous_disable = ixgbevf_dev_promiscuous_disable,
.allmulticast_enable = ixgbevf_dev_allmulticast_enable,
.allmulticast_disable = ixgbevf_dev_allmulticast_disable,
.dev_infos_get = ixgbevf_dev_info_get,
return -EIO;
}
+ if (hw->mac.ops.fw_recovery_mode && hw->mac.ops.fw_recovery_mode(hw)) {
+ PMD_INIT_LOG(ERR, "\nERROR: "
+ "Firmware recovery mode detected. Limiting functionality.\n"
+ "Refer to the Intel(R) Ethernet Adapters and Devices "
+ "User Guide for details on firmware recovery mode.");
+ return -EIO;
+ }
+
/* pick up the PCI bus settings for reporting later */
ixgbe_get_bus_info(hw);
PMD_INIT_FUNC_TRACE();
if (rte_eal_process_type() != RTE_PROC_PRIMARY)
- return -EPERM;
+ return 0;
hw = IXGBE_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
rte_delay_ms(100);
} while (retries++ < (10 + IXGBE_LINK_UP_TIME));
+	/* cancel the delayed interrupt handler before removing the device */
+ rte_eal_alarm_cancel(ixgbe_dev_interrupt_delayed_handler, eth_dev);
+
/* uninitialize PF if max_vfs not zero */
ixgbe_pf_host_uninit(eth_dev);
- rte_free(eth_dev->data->mac_addrs);
- eth_dev->data->mac_addrs = NULL;
-
- rte_free(eth_dev->data->hash_mac_addrs);
- eth_dev->data->hash_mac_addrs = NULL;
-
/* remove all the fdir filters & hash */
ixgbe_fdir_filter_uninit(eth_dev);
/* start with highest supported, proceed down */
static const enum ixgbe_pfvf_api_rev sup_ver[] = {
+ ixgbe_mbox_api_13,
ixgbe_mbox_api_12,
ixgbe_mbox_api_11,
ixgbe_mbox_api_10,
*/
if ((diag != IXGBE_SUCCESS) && (diag != IXGBE_ERR_INVALID_MAC_ADDR)) {
PMD_INIT_LOG(ERR, "VF Initialization Failure: %d", diag);
- return diag;
+ /*
+ * This error code will be propagated to the app by
+ * rte_eth_dev_reset, so use a public error code rather than
+ * the internal-only IXGBE_ERR_RESET_FAILED
+ */
+ return -EAGAIN;
}
/* negotiate mailbox API version to use with the PF. */
PMD_INIT_FUNC_TRACE();
if (rte_eal_process_type() != RTE_PROC_PRIMARY)
- return -EPERM;
+ return 0;
hw = IXGBE_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
/* Disable the interrupts for VF */
ixgbevf_intr_disable(eth_dev);
- rte_free(eth_dev->data->mac_addrs);
- eth_dev->data->mac_addrs = NULL;
-
rte_intr_disable(intr_handle);
rte_intr_callback_unregister(intr_handle,
ixgbevf_dev_interrupt_handler, eth_dev);
rxq = dev->data->rx_queues[queue];
- if (on)
+ if (on) {
rxq->vlan_flags = PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED;
- else
+ rxq->offloads |= DEV_RX_OFFLOAD_VLAN_STRIP;
+ } else {
rxq->vlan_flags = PKT_RX_VLAN;
+ rxq->offloads &= ~DEV_RX_OFFLOAD_VLAN_STRIP;
+ }
}
static void
}
}
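+/*
+ * Keep the per-queue DEV_RX_OFFLOAD_VLAN_STRIP flag in sync with the
+ * port-level Rx offload configuration for every Rx queue.
+ */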
+static void
+ixgbe_config_vlan_strip_on_all_queues(struct rte_eth_dev *dev, int mask)
+{
+ uint16_t i;
+ struct rte_eth_rxmode *rxmode;
+ struct ixgbe_rx_queue *rxq;
+
+ if (mask & ETH_VLAN_STRIP_MASK) {
+ rxmode = &dev->data->dev_conf.rxmode;
+ if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
+ for (i = 0; i < dev->data->nb_rx_queues; i++) {
+ rxq = dev->data->rx_queues[i];
+ rxq->offloads |= DEV_RX_OFFLOAD_VLAN_STRIP;
+ }
+ else
+ for (i = 0; i < dev->data->nb_rx_queues; i++) {
+ rxq = dev->data->rx_queues[i];
+ rxq->offloads &= ~DEV_RX_OFFLOAD_VLAN_STRIP;
+ }
+ }
+}
+
static int
-ixgbe_vlan_offload_set(struct rte_eth_dev *dev, int mask)
+ixgbe_vlan_offload_config(struct rte_eth_dev *dev, int mask)
{
struct rte_eth_rxmode *rxmode;
rxmode = &dev->data->dev_conf.rxmode;
return 0;
}
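+/*
+ * dev_ops VLAN offload callback: update the per-queue strip flags
+ * first, then program the hardware via ixgbe_vlan_offload_config().
+ */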
+static int
+ixgbe_vlan_offload_set(struct rte_eth_dev *dev, int mask)
+{
+ ixgbe_config_vlan_strip_on_all_queues(dev, mask);
+
+ ixgbe_vlan_offload_config(dev, mask);
+
+ return 0;
+}
+
static void
ixgbe_vmdq_vlan_hw_filter_enable(struct rte_eth_dev *dev)
{
if (dev_conf->rxmode.mq_mode == ETH_MQ_RX_DCB) {
const struct rte_eth_dcb_rx_conf *conf;
- if (nb_rx_q != IXGBE_DCB_NB_QUEUES) {
- PMD_INIT_LOG(ERR, "DCB selected, nb_rx_q != %d.",
- IXGBE_DCB_NB_QUEUES);
- return -EINVAL;
- }
conf = &dev_conf->rx_adv_conf.dcb_rx_conf;
if (!(conf->nb_tcs == ETH_4_TCS ||
conf->nb_tcs == ETH_8_TCS)) {
if (dev_conf->txmode.mq_mode == ETH_MQ_TX_DCB) {
const struct rte_eth_dcb_tx_conf *conf;
- if (nb_tx_q != IXGBE_DCB_NB_QUEUES) {
- PMD_INIT_LOG(ERR, "DCB, nb_tx_q != %d.",
- IXGBE_DCB_NB_QUEUES);
- return -EINVAL;
- }
conf = &dev_conf->tx_adv_conf.dcb_tx_conf;
if (!(conf->nb_tcs == ETH_4_TCS ||
conf->nb_tcs == ETH_8_TCS)) {
return -EINVAL;
}
+ /* Stop the link setup handler before resetting the HW. */
+ rte_eal_alarm_cancel(ixgbe_dev_setup_link_alarm_handler, dev);
+
/* disable uio/vfio intr/eventfd mapping */
rte_intr_disable(intr_handle);
mask = ETH_VLAN_STRIP_MASK | ETH_VLAN_FILTER_MASK |
ETH_VLAN_EXTEND_MASK;
- err = ixgbe_vlan_offload_set(dev, mask);
+ err = ixgbe_vlan_offload_config(dev, mask);
if (err) {
PMD_INIT_LOG(ERR, "Unable to set VLAN offload");
goto error;
goto error;
}
- /* Skip link setup if loopback mode is enabled for 82599. */
- if (hw->mac.type == ixgbe_mac_82599EB &&
- dev->data->dev_conf.lpbk_mode == IXGBE_LPBK_82599_TX_RX)
- goto skip_link_setup;
+ /* Skip link setup if loopback mode is enabled. */
+ if (dev->data->dev_conf.lpbk_mode != 0) {
+ err = ixgbe_check_supported_loopback_mode(dev);
+ if (err < 0) {
+ PMD_INIT_LOG(ERR, "Unsupported loopback mode");
+ goto error;
+ } else {
+ goto skip_link_setup;
+ }
+ }
if (ixgbe_is_sfp(hw) && hw->phy.multispeed_fiber) {
err = hw->mac.ops.setup_sfp(hw);
if (err)
goto error;
- ixgbe_dev_link_update(dev, 0);
-
skip_link_setup:
if (rte_intr_allow_others(intr_handle)) {
"please call hierarchy_commit() "
"before starting the port");
+ /*
+	 * Update the link status right before returning, because it may
+	 * start the link configuration process in a separate thread.
+ */
+ ixgbe_dev_link_update(dev, 0);
+
return 0;
error:
ixgbe_dev_stop(struct rte_eth_dev *dev)
{
struct rte_eth_link link;
+ struct ixgbe_adapter *adapter =
+ (struct ixgbe_adapter *)dev->data->dev_private;
struct ixgbe_hw *hw =
IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
struct ixgbe_vf_info *vfinfo =
PMD_INIT_FUNC_TRACE();
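+	/* Make sure a deferred link setup does not fire after stop. */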
+ rte_eal_alarm_cancel(ixgbe_dev_setup_link_alarm_handler, dev);
+
/* disable interrupts */
ixgbe_disable_intr(hw);
/* reset hierarchy commit */
tm_conf->committed = false;
+
+ adapter->rss_reta_updated = 0;
}
/*
}
/* Flow Director Stats registers */
- hw_stats->fdirmatch += IXGBE_READ_REG(hw, IXGBE_FDIRMATCH);
- hw_stats->fdirmiss += IXGBE_READ_REG(hw, IXGBE_FDIRMISS);
-
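+	/* Flow Director is not present on 82598, so skip its counters there. */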
+ if (hw->mac.type != ixgbe_mac_82598EB) {
+ hw_stats->fdirmatch += IXGBE_READ_REG(hw, IXGBE_FDIRMATCH);
+ hw_stats->fdirmiss += IXGBE_READ_REG(hw, IXGBE_FDIRMISS);
+ hw_stats->fdirustat_add += IXGBE_READ_REG(hw,
+ IXGBE_FDIRUSTAT) & 0xFFFF;
+ hw_stats->fdirustat_remove += (IXGBE_READ_REG(hw,
+ IXGBE_FDIRUSTAT) >> 16) & 0xFFFF;
+ hw_stats->fdirfstat_fadd += IXGBE_READ_REG(hw,
+ IXGBE_FDIRFSTAT) & 0xFFFF;
+ hw_stats->fdirfstat_fremove += (IXGBE_READ_REG(hw,
+ IXGBE_FDIRFSTAT) >> 16) & 0xFFFF;
+ }
/* MACsec Stats registers */
macsec_stats->out_pkts_untagged += IXGBE_READ_REG(hw, IXGBE_LSECTXUT);
macsec_stats->out_pkts_encrypted +=
/* Extended stats from ixgbe_hw_stats */
for (i = 0; i < IXGBE_NB_HW_STATS; i++) {
- snprintf(xstats_names[count].name,
- sizeof(xstats_names[count].name),
- "%s",
- rte_ixgbe_stats_strings[i].name);
+ strlcpy(xstats_names[count].name,
+ rte_ixgbe_stats_strings[i].name,
+ sizeof(xstats_names[count].name));
count++;
}
/* MACsec Stats */
for (i = 0; i < IXGBE_NB_MACSEC_STATS; i++) {
- snprintf(xstats_names[count].name,
- sizeof(xstats_names[count].name),
- "%s",
- rte_ixgbe_macsec_strings[i].name);
+ strlcpy(xstats_names[count].name,
+ rte_ixgbe_macsec_strings[i].name,
+ sizeof(xstats_names[count].name));
count++;
}
/* Extended stats from ixgbe_hw_stats */
for (i = 0; i < IXGBE_NB_HW_STATS; i++) {
- snprintf(xstats_names[count].name,
- sizeof(xstats_names[count].name),
- "%s",
- rte_ixgbe_stats_strings[i].name);
+ strlcpy(xstats_names[count].name,
+ rte_ixgbe_stats_strings[i].name,
+ sizeof(xstats_names[count].name));
count++;
}
/* MACsec Stats */
for (i = 0; i < IXGBE_NB_MACSEC_STATS; i++) {
- snprintf(xstats_names[count].name,
- sizeof(xstats_names[count].name),
- "%s",
- rte_ixgbe_macsec_strings[i].name);
+ strlcpy(xstats_names[count].name,
+ rte_ixgbe_macsec_strings[i].name,
+ sizeof(xstats_names[count].name));
count++;
}
if (xstats_names != NULL)
for (i = 0; i < IXGBEVF_NB_XSTATS; i++)
- snprintf(xstats_names[i].name,
- sizeof(xstats_names[i].name),
- "%s", rte_ixgbevf_stats_strings[i].name);
+ strlcpy(xstats_names[i].name,
+ rte_ixgbevf_stats_strings[i].name,
+ sizeof(xstats_names[i].name));
return IXGBEVF_NB_XSTATS;
}
dev_info->max_vmdq_pools = ETH_16_POOLS;
else
dev_info->max_vmdq_pools = ETH_64_POOLS;
+ dev_info->max_mtu = dev_info->max_rx_pktlen - IXGBE_ETH_OVERHEAD;
+ dev_info->min_mtu = ETHER_MIN_MTU;
dev_info->vmdq_queue_num = dev_info->max_rx_queues;
dev_info->rx_queue_offload_capa = ixgbe_get_rx_queue_offloads(dev);
dev_info->rx_offload_capa = (ixgbe_get_rx_port_offloads(dev) |
dev_info->speed_capa |= ETH_LINK_SPEED_2_5G;
dev_info->speed_capa |= ETH_LINK_SPEED_5G;
}
+
+ /* Driver-preferred Rx/Tx parameters */
+ dev_info->default_rxportconf.burst_size = 32;
+ dev_info->default_txportconf.burst_size = 32;
+ dev_info->default_rxportconf.nb_queues = 1;
+ dev_info->default_txportconf.nb_queues = 1;
+ dev_info->default_rxportconf.ring_size = 256;
+ dev_info->default_txportconf.ring_size = 256;
}
static const uint32_t *
dev_info->max_tx_queues = (uint16_t)hw->mac.max_tx_queues;
dev_info->min_rx_bufsize = 1024; /* cf BSIZEPACKET in SRRCTL reg */
dev_info->max_rx_pktlen = 9728; /* includes CRC, cf MAXFRS reg */
+ dev_info->max_mtu = dev_info->max_rx_pktlen - IXGBE_ETH_OVERHEAD;
dev_info->max_mac_addrs = hw->mac.num_rar_entries;
dev_info->max_hash_mac_addrs = IXGBE_VMDQ_NUM_UC_MAC;
dev_info->max_vfs = pci_dev->max_vfs;
ixgbevf_check_link(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
int *link_up, int wait_to_complete)
{
- /**
- * for a quick link status checking, wait_to_compelet == 0,
- * skip PF link status checking
- */
- bool no_pflink_check = wait_to_complete == 0;
struct ixgbe_mbx_info *mbx = &hw->mbx;
struct ixgbe_mac_info *mac = &hw->mac;
uint32_t links_reg, in_msg;
*speed = IXGBE_LINK_SPEED_UNKNOWN;
}
- if (no_pflink_check) {
- if (*speed == IXGBE_LINK_SPEED_UNKNOWN)
- mac->get_link_status = true;
- else
- mac->get_link_status = false;
-
- goto out;
- }
/* if the read failed it could just be a mailbox collision, best wait
* until we are called again and don't report an error
*/
if (!(in_msg & IXGBE_VT_MSGTYPE_CTS)) {
/* msg is not CTS and is NACK we must have lost CTS status */
if (in_msg & IXGBE_VT_MSGTYPE_NACK)
- ret_val = -1;
+ mac->get_link_status = false;
goto out;
}
return ret_val;
}
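+/*
+ * EAL alarm callback that performs the deferred link setup so the
+ * link-update path does not block, then clears the pending flag.
+ */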
+static void
+ixgbe_dev_setup_link_alarm_handler(void *param)
+{
+ struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
+ struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct ixgbe_interrupt *intr =
+ IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
+ u32 speed;
+ bool autoneg = false;
+
+ speed = hw->phy.autoneg_advertised;
+ if (!speed)
+ ixgbe_get_link_capabilities(hw, &speed, &autoneg);
+
+ ixgbe_setup_link(hw, speed, true);
+
+ intr->flags &= ~IXGBE_FLAG_NEED_LINK_CONFIG;
+}
+
/* return 0 means link status changed, -1 means not changed */
int
ixgbe_dev_link_update_share(struct rte_eth_dev *dev,
IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
int link_up;
int diag;
- u32 speed = 0;
int wait = 1;
- bool autoneg = false;
memset(&link, 0, sizeof(link));
link.link_status = ETH_LINK_DOWN;
hw->mac.get_link_status = true;
- if ((intr->flags & IXGBE_FLAG_NEED_LINK_CONFIG) &&
- ixgbe_get_media_type(hw) == ixgbe_media_type_fiber) {
- speed = hw->phy.autoneg_advertised;
- if (!speed)
- ixgbe_get_link_capabilities(hw, &speed, &autoneg);
- ixgbe_setup_link(hw, speed, true);
- }
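+	/* A deferred link setup is still pending; report the link as down. */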
+ if (intr->flags & IXGBE_FLAG_NEED_LINK_CONFIG)
+ return rte_eth_linkstatus_set(dev, &link);
/* check if it needs to wait to complete, if lsc interrupt is enabled */
if (wait_to_complete == 0 || dev->data->dev_conf.intr_conf.lsc != 0)
}
if (link_up == 0) {
- intr->flags |= IXGBE_FLAG_NEED_LINK_CONFIG;
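+		/* Defer fiber link setup to an alarm rather than blocking here. */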
+ if (ixgbe_get_media_type(hw) == ixgbe_media_type_fiber) {
+ intr->flags |= IXGBE_FLAG_NEED_LINK_CONFIG;
+ rte_eal_alarm_set(10,
+ ixgbe_dev_setup_link_alarm_handler, dev);
+ }
return rte_eth_linkstatus_set(dev, &link);
}
- intr->flags &= ~IXGBE_FLAG_NEED_LINK_CONFIG;
link.link_status = ETH_LINK_UP;
link.link_duplex = ETH_LINK_FULL_DUPLEX;
* - On failure, a negative value.
*/
static int
-ixgbe_dev_interrupt_action(struct rte_eth_dev *dev,
- struct rte_intr_handle *intr_handle)
+ixgbe_dev_interrupt_action(struct rte_eth_dev *dev)
{
struct ixgbe_interrupt *intr =
IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
PMD_DRV_LOG(DEBUG, "enable intr immediately");
ixgbe_enable_intr(dev);
- rte_intr_enable(intr_handle);
return 0;
}
struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
ixgbe_dev_interrupt_get_status(dev);
- ixgbe_dev_interrupt_action(dev, dev->intr_handle);
+ ixgbe_dev_interrupt_action(dev);
}
static int
uint8_t j, mask;
uint32_t reta, r;
uint16_t idx, shift;
+ struct ixgbe_adapter *adapter =
+ (struct ixgbe_adapter *)dev->data->dev_private;
struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
uint32_t reta_reg;
}
IXGBE_WRITE_REG(hw, reta_reg, reta);
}
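+	/* Note that the application has updated the RETA. */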
+ adapter->rss_reta_updated = 1;
return 0;
}
uint32_t maxfrs;
struct ixgbe_hw *hw;
struct rte_eth_dev_info dev_info;
- uint32_t frame_size = mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
+ uint32_t frame_size = mtu + IXGBE_ETH_OVERHEAD;
struct rte_eth_dev_data *dev_data = dev->data;
ixgbe_dev_info_get(dev, &dev_info);
* Keep the persistent behavior the same as Host PF
*/
#ifndef RTE_LIBRTE_IXGBE_PF_DISABLE_STRIP_CRC
- if (!(conf->rxmode.offloads & DEV_RX_OFFLOAD_CRC_STRIP)) {
+ if (conf->rxmode.offloads & DEV_RX_OFFLOAD_KEEP_CRC) {
PMD_INIT_LOG(NOTICE, "VF can't disable HW CRC Strip");
- conf->rxmode.offloads |= DEV_RX_OFFLOAD_CRC_STRIP;
+ conf->rxmode.offloads &= ~DEV_RX_OFFLOAD_KEEP_CRC;
}
#else
- if (conf->rxmode.offloads & DEV_RX_OFFLOAD_CRC_STRIP) {
+ if (!(conf->rxmode.offloads & DEV_RX_OFFLOAD_KEEP_CRC)) {
PMD_INIT_LOG(NOTICE, "VF can't enable HW CRC Strip");
- conf->rxmode.offloads &= ~DEV_RX_OFFLOAD_CRC_STRIP;
+ conf->rxmode.offloads |= DEV_RX_OFFLOAD_KEEP_CRC;
}
#endif
PMD_INIT_FUNC_TRACE();
+ /* Stop the link setup handler before resetting the HW. */
+ rte_eal_alarm_cancel(ixgbe_dev_setup_link_alarm_handler, dev);
+
err = hw->mac.ops.reset_hw(hw);
if (err) {
PMD_INIT_LOG(ERR, "Unable to reset vf hardware (%d)", err);
/* Set HW strip */
mask = ETH_VLAN_STRIP_MASK | ETH_VLAN_FILTER_MASK |
ETH_VLAN_EXTEND_MASK;
- err = ixgbevf_vlan_offload_set(dev, mask);
+ err = ixgbevf_vlan_offload_config(dev, mask);
if (err) {
PMD_INIT_LOG(ERR, "Unable to set VLAN offload (%d)", err);
ixgbe_dev_clear_queues(dev);
ixgbevf_dev_rxtx_start(dev);
- ixgbevf_dev_link_update(dev, 0);
-
/* check and configure queue intr-vector mapping */
if (rte_intr_cap_multiple(intr_handle) &&
dev->data->dev_conf.intr_conf.rxq) {
/* Re-enable interrupt for VF */
ixgbevf_intr_enable(dev);
+ /*
+	 * Update the link status right before returning, because it may
+	 * start the link configuration process in a separate thread.
+ */
+ ixgbevf_dev_link_update(dev, 0);
+
return 0;
}
ixgbevf_dev_stop(struct rte_eth_dev *dev)
{
struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct ixgbe_adapter *adapter =
+ (struct ixgbe_adapter *)dev->data->dev_private;
struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
PMD_INIT_FUNC_TRACE();
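+	/* Cancel any deferred link setup before stopping the VF. */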
+ rte_eal_alarm_cancel(ixgbe_dev_setup_link_alarm_handler, dev);
+
ixgbevf_intr_disable(dev);
hw->adapter_stopped = 1;
rte_free(intr_handle->intr_vec);
intr_handle->intr_vec = NULL;
}
+
+ adapter->rss_reta_updated = 0;
}
static void
}
static int
-ixgbevf_vlan_offload_set(struct rte_eth_dev *dev, int mask)
+ixgbevf_vlan_offload_config(struct rte_eth_dev *dev, int mask)
{
struct ixgbe_rx_queue *rxq;
uint16_t i;
return 0;
}
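+/*
+ * VF dev_ops VLAN offload callback: sync the per-queue strip flags,
+ * then apply the configuration through ixgbevf_vlan_offload_config().
+ */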
+static int
+ixgbevf_vlan_offload_set(struct rte_eth_dev *dev, int mask)
+{
+ ixgbe_config_vlan_strip_on_all_queues(dev, mask);
+
+ ixgbevf_vlan_offload_config(dev, mask);
+
+ return 0;
+}
+
int
ixgbe_vt_check(struct ixgbe_hw *hw)
{
/* won't configure msix register if no mapping is done
* between intr vector and event fd
+	 * but if MSI-X has already been enabled, we still need to configure
+	 * auto clean, auto mask and throttling.
*/
- if (!rte_intr_dp_is_en(intr_handle))
+ gpie = IXGBE_READ_REG(hw, IXGBE_GPIE);
+ if (!rte_intr_dp_is_en(intr_handle) &&
+ !(gpie & (IXGBE_GPIE_MSIX_MODE | IXGBE_GPIE_PBA_SUPPORT)))
return;
if (rte_intr_allow_others(intr_handle))
/* Populate the IVAR table and set the ITR values to the
* corresponding register.
*/
- for (queue_id = 0; queue_id < dev->data->nb_rx_queues;
- queue_id++) {
- /* by default, 1:1 mapping */
- ixgbe_set_ivar_map(hw, 0, queue_id, vec);
- intr_handle->intr_vec[queue_id] = vec;
- if (vec < base + intr_handle->nb_efd - 1)
- vec++;
- }
+ if (rte_intr_dp_is_en(intr_handle)) {
+ for (queue_id = 0; queue_id < dev->data->nb_rx_queues;
+ queue_id++) {
+ /* by default, 1:1 mapping */
+ ixgbe_set_ivar_map(hw, 0, queue_id, vec);
+ intr_handle->intr_vec[queue_id] = vec;
+ if (vec < base + intr_handle->nb_efd - 1)
+ vec++;
+ }
- switch (hw->mac.type) {
- case ixgbe_mac_82598EB:
- ixgbe_set_ivar_map(hw, -1, IXGBE_IVAR_OTHER_CAUSES_INDEX,
- IXGBE_MISC_VEC_ID);
- break;
- case ixgbe_mac_82599EB:
- case ixgbe_mac_X540:
- case ixgbe_mac_X550:
- ixgbe_set_ivar_map(hw, -1, 1, IXGBE_MISC_VEC_ID);
- break;
- default:
- break;
+ switch (hw->mac.type) {
+ case ixgbe_mac_82598EB:
+ ixgbe_set_ivar_map(hw, -1,
+ IXGBE_IVAR_OTHER_CAUSES_INDEX,
+ IXGBE_MISC_VEC_ID);
+ break;
+ case ixgbe_mac_82599EB:
+ case ixgbe_mac_X540:
+ case ixgbe_mac_X550:
+ ixgbe_set_ivar_map(hw, -1, 1, IXGBE_MISC_VEC_ID);
+ break;
+ default:
+ break;
+ }
}
IXGBE_WRITE_REG(hw, IXGBE_EITR(IXGBE_MISC_VEC_ID),
IXGBE_EITR_INTERVAL_US(IXGBE_QUEUE_ITR_INTERVAL_DEFAULT)
ixgbevf_dev_set_mtu(struct rte_eth_dev *dev, uint16_t mtu)
{
struct ixgbe_hw *hw;
- uint32_t max_frame = mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
+ uint32_t max_frame = mtu + IXGBE_ETH_OVERHEAD;
struct rte_eth_rxmode *rx_conf = &dev->data->dev_conf.rxmode;
hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
return ret;
}
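+/*
+ * VF promiscuous mode is requested from the PF through the mailbox
+ * xcast interface; older PF mailbox APIs may reject the request.
+ */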
+static void
+ixgbevf_dev_promiscuous_enable(struct rte_eth_dev *dev)
+{
+ struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+ hw->mac.ops.update_xcast_mode(hw, IXGBEVF_XCAST_MODE_PROMISC);
+}
+
+static void
+ixgbevf_dev_promiscuous_disable(struct rte_eth_dev *dev)
+{
+ struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+ hw->mac.ops.update_xcast_mode(hw, IXGBEVF_XCAST_MODE_NONE);
+}
+
static void
ixgbevf_dev_allmulticast_enable(struct rte_eth_dev *dev)
{
RTE_PMD_REGISTER_PCI_TABLE(net_ixgbe_vf, pci_id_ixgbevf_map);
RTE_PMD_REGISTER_KMOD_DEP(net_ixgbe_vf, "* igb_uio | vfio-pci");
-RTE_INIT(ixgbe_init_log);
-static void
-ixgbe_init_log(void)
+RTE_INIT(ixgbe_init_log)
{
ixgbe_logtype_init = rte_log_register("pmd.net.ixgbe.init");
if (ixgbe_logtype_init >= 0)