X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=lib%2Flibrte_pmd_ixgbe%2Fixgbe_ethdev.c;h=d3242fdcafc39f546b1f6431caf084009ac6b407;hb=9236f4328448e5553586450924d648803f5ded13;hp=d53b2b285ef5f2d57220c69302eac0164c948a48;hpb=0807f80d35d07a7a785771b41b424854e7aaf69f;p=dpdk.git

diff --git a/lib/librte_pmd_ixgbe/ixgbe_ethdev.c b/lib/librte_pmd_ixgbe/ixgbe_ethdev.c
index d53b2b285e..d3242fdcaf 100644
--- a/lib/librte_pmd_ixgbe/ixgbe_ethdev.c
+++ b/lib/librte_pmd_ixgbe/ixgbe_ethdev.c
@@ -1,7 +1,7 @@
 /*-
  * BSD LICENSE
  *
- * Copyright(c) 2010-2012 Intel Corporation. All rights reserved.
+ * Copyright(c) 2010-2013 Intel Corporation. All rights reserved.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -114,7 +114,7 @@ static void ixgbe_vlan_tpid_set(struct rte_eth_dev *dev, uint16_t tpid_id);
 static void ixgbe_vlan_hw_strip_bitmap_set(struct rte_eth_dev *dev,
 		uint16_t queue, bool on);
 static void ixgbe_vlan_strip_queue_set(struct rte_eth_dev *dev, uint16_t queue,
-                int on);
+		int on);
 static void ixgbe_vlan_offload_set(struct rte_eth_dev *dev, int mask);
 static void ixgbe_vlan_hw_filter_enable(struct rte_eth_dev *dev);
 static void ixgbe_vlan_hw_filter_disable(struct rte_eth_dev *dev);
@@ -128,18 +128,18 @@ static void ixgbe_vlan_hw_extend_disable(struct rte_eth_dev *dev);
 static int ixgbe_dev_led_on(struct rte_eth_dev *dev);
 static int ixgbe_dev_led_off(struct rte_eth_dev *dev);
 static int ixgbe_flow_ctrl_set(struct rte_eth_dev *dev,
-                struct rte_eth_fc_conf *fc_conf);
+		struct rte_eth_fc_conf *fc_conf);
 static int ixgbe_priority_flow_ctrl_set(struct rte_eth_dev *dev,
 		struct rte_eth_pfc_conf *pfc_conf);
 static void ixgbe_dev_link_status_print(struct rte_eth_dev *dev);
-static int ixgbe_dev_interrupt_setup(struct rte_eth_dev *dev);
+static int ixgbe_dev_lsc_interrupt_setup(struct rte_eth_dev *dev);
 static int ixgbe_dev_interrupt_get_status(struct rte_eth_dev *dev);
 static int ixgbe_dev_interrupt_action(struct rte_eth_dev *dev);
 static void ixgbe_dev_interrupt_handler(struct rte_intr_handle *handle,
-                void *param);
+		void *param);
 static void ixgbe_dev_interrupt_delayed_handler(void *param);
 static void ixgbe_add_rar(struct rte_eth_dev *dev, struct ether_addr *mac_addr,
-                uint32_t index, uint32_t pool);
+		uint32_t index, uint32_t pool);
 static void ixgbe_remove_rar(struct rte_eth_dev *dev, uint32_t index);
 static void ixgbe_dcb_init(struct ixgbe_hw *hw,struct ixgbe_dcb_config *dcb_config);
@@ -149,8 +149,10 @@ static int eth_ixgbevf_dev_init(struct eth_driver *eth_drv,
 static int ixgbevf_dev_configure(struct rte_eth_dev *dev);
 static int ixgbevf_dev_start(struct rte_eth_dev *dev);
 static void ixgbevf_dev_stop(struct rte_eth_dev *dev);
+static void ixgbevf_dev_close(struct rte_eth_dev *dev);
 static void ixgbevf_intr_disable(struct ixgbe_hw *hw);
-static void ixgbevf_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats);
+static void ixgbevf_dev_stats_get(struct rte_eth_dev *dev,
+		struct rte_eth_stats *stats);
 static void ixgbevf_dev_stats_reset(struct rte_eth_dev *dev);
 static int ixgbevf_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id,
 		int on);
@@ -161,8 +163,8 @@ static void ixgbevf_set_vfta_all(struct rte_eth_dev *dev,
 		bool on);
 
 /*
- * * Define VF Stats MACRO for Non "cleared on read" register
- * */
+ * Define VF Stats MACRO for Non "cleared on read" register
+ */
 #define UPDATE_VF_STAT(reg, last, cur)	\
 {	\
 	u32 latest = IXGBE_READ_REG(hw, reg);	\
@@ -202,8 +204,7 @@ static void ixgbevf_set_vfta_all(struct rte_eth_dev *dev, bool on);
  */
 static struct rte_pci_id pci_id_ixgbe_map[] = {
-#undef RTE_LIBRTE_IGB_PMD
-#define RTE_PCI_DEV_ID_DECL(vend, dev) {RTE_PCI_DEVICE(vend, dev)},
+#define RTE_PCI_DEV_ID_DECL_IXGBE(vend, dev) {RTE_PCI_DEVICE(vend, dev)},
 #include "rte_pci_dev_ids.h"
 
 { .vendor_id = 0, /* sentinel */ },
@@ -214,13 +215,11 @@ static struct rte_pci_id pci_id_ixgbe_map[] = {
  * The set of PCI devices this driver supports (for 82599 VF)
  */
 static struct rte_pci_id pci_id_ixgbevf_map[] = {
-{
-	.vendor_id = PCI_VENDOR_ID_INTEL,
-	.device_id = IXGBE_DEV_ID_82599_VF,
-	.subsystem_vendor_id = PCI_ANY_ID,
-	.subsystem_device_id = PCI_ANY_ID,
-},
+
+#define RTE_PCI_DEV_ID_DECL_IXGBEVF(vend, dev) {RTE_PCI_DEVICE(vend, dev)},
+#include "rte_pci_dev_ids.h"
 { .vendor_id = 0, /* sentinel */ },
+
 };
 
 static struct eth_dev_ops ixgbe_eth_dev_ops = {
@@ -273,8 +272,7 @@ static struct eth_dev_ops ixgbevf_eth_dev_ops = {
 	.link_update          = ixgbe_dev_link_update,
 	.stats_get            = ixgbevf_dev_stats_get,
 	.stats_reset          = ixgbevf_dev_stats_reset,
-	.dev_close            = ixgbevf_dev_stop,
-
+	.dev_close            = ixgbevf_dev_close,
 	.dev_infos_get        = ixgbe_dev_info_get,
 	.vlan_filter_set      = ixgbevf_vlan_filter_set,
 	.vlan_strip_queue_set = ixgbevf_vlan_strip_queue_set,
@@ -356,6 +354,18 @@ ixgbe_is_sfp(struct ixgbe_hw *hw)
 	}
 }
 
+static inline void
+ixgbe_enable_intr(struct rte_eth_dev *dev)
+{
+	struct ixgbe_interrupt *intr =
+		IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
+	struct ixgbe_hw *hw =
+		IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+	IXGBE_WRITE_REG(hw, IXGBE_EIMS, intr->mask);
+	IXGBE_WRITE_FLUSH(hw);
+}
+
 /*
  * This function is based on ixgbe_disable_intr() in ixgbe/ixgbe.h.
  */
@@ -416,12 +426,12 @@ ixgbe_dev_queue_stats_mapping_set(struct rte_eth_dev *eth_dev,
 	PMD_INIT_LOG(INFO, "Setting port %d, %s queue_id %d to stat index %d\n",
		     (int)(eth_dev->data->port_id), is_rx ? "RX" : "TX", queue_id, stat_idx);
-	n = queue_id / NB_QMAP_FIELDS_PER_QSM_REG;
+	n = (uint8_t)(queue_id / NB_QMAP_FIELDS_PER_QSM_REG);
 	if (n >= IXGBE_NB_STAT_MAPPING_REGS) {
 		PMD_INIT_LOG(ERR, "Nb of stat mapping registers exceeded\n");
 		return -EIO;
 	}
-	offset = queue_id % NB_QMAP_FIELDS_PER_QSM_REG;
+	offset = (uint8_t)(queue_id % NB_QMAP_FIELDS_PER_QSM_REG);
 
 	/* Now clear any previous stat_idx set */
 	clearing_mask <<= (QSM_REG_NB_BITS_PER_QMAP_FIELD * offset);
@@ -480,16 +490,18 @@ ixgbe_dcb_init(struct ixgbe_hw *hw,struct ixgbe_dcb_config *dcb_config)
 {
 	uint8_t i;
 	struct ixgbe_dcb_tc_config *tc;
-	int dcb_max_tc = IXGBE_DCB_MAX_TRAFFIC_CLASS;
+	uint8_t dcb_max_tc = IXGBE_DCB_MAX_TRAFFIC_CLASS;
 
 	dcb_config->num_tcs.pg_tcs = dcb_max_tc;
 	dcb_config->num_tcs.pfc_tcs = dcb_max_tc;
 	for (i = 0; i < dcb_max_tc; i++) {
 		tc = &dcb_config->tc_config[i];
 		tc->path[IXGBE_DCB_TX_CONFIG].bwg_id = i;
-		tc->path[IXGBE_DCB_TX_CONFIG].bwg_percent = 100/dcb_max_tc + (i & 1);
+		tc->path[IXGBE_DCB_TX_CONFIG].bwg_percent =
+			(uint8_t)(100/dcb_max_tc + (i & 1));
 		tc->path[IXGBE_DCB_RX_CONFIG].bwg_id = i;
-		tc->path[IXGBE_DCB_RX_CONFIG].bwg_percent = 100/dcb_max_tc + (i & 1);
+		tc->path[IXGBE_DCB_RX_CONFIG].bwg_percent =
+			(uint8_t)(100/dcb_max_tc + (i & 1));
 		tc->pfc = ixgbe_dcb_pfc_disabled;
 	}
 
@@ -555,7 +567,10 @@ eth_ixgbe_dev_init(__attribute__((unused)) struct eth_driver *eth_drv,
 	/* Vendor and Device ID need to be set before init of shared code */
 	hw->device_id = pci_dev->id.device_id;
 	hw->vendor_id = pci_dev->id.vendor_id;
-	hw->hw_addr = (void *)pci_dev->mem_resource.addr;
+	hw->hw_addr = (void *)pci_dev->mem_resource[0].addr;
+#ifdef RTE_LIBRTE_IXGBE_ALLOW_UNSUPPORTED_SFP
+	hw->allow_unsupported_sfp = 1;
+#endif
 
 	/* Initialize the shared code */
 	diag = ixgbe_init_shared_code(hw);
@@ -577,8 +592,6 @@ eth_ixgbe_dev_init(__attribute__((unused)) struct eth_driver *eth_drv,
 	}
 	hw->fc.send_xon = 1;
 
-	ixgbe_disable_intr(hw);
-
 	/* Make sure we have a good EEPROM before we read from it */
 	diag = ixgbe_validate_eeprom_checksum(hw, &csum);
 	if (diag != IXGBE_SUCCESS) {
@@ -616,6 +629,9 @@ eth_ixgbe_dev_init(__attribute__((unused)) struct eth_driver *eth_drv,
 		return -EIO;
 	}
 
+	/* disable interrupt */
+	ixgbe_disable_intr(hw);
+
 	/* pick up the PCI bus settings for reporting later */
 	ixgbe_get_bus_info(hw);
 
@@ -662,6 +678,12 @@ eth_ixgbe_dev_init(__attribute__((unused)) struct eth_driver *eth_drv,
 	rte_intr_callback_register(&(pci_dev->intr_handle),
		ixgbe_dev_interrupt_handler, (void *)eth_dev);
 
+	/* enable uio intr after callback register */
+	rte_intr_enable(&(pci_dev->intr_handle));
+
+	/* enable support intr */
+	ixgbe_enable_intr(eth_dev);
+
 	return 0;
 }
 
@@ -687,7 +709,7 @@ eth_ixgbevf_dev_init(__attribute__((unused)) struct eth_driver *eth_drv,
 
 	hw->device_id = pci_dev->id.device_id;
 	hw->vendor_id = pci_dev->id.vendor_id;
-	hw->hw_addr = (void *)pci_dev->mem_resource.addr;
+	hw->hw_addr = (void *)pci_dev->mem_resource[0].addr;
 
 	/* initialize the vfta */
 	memset(shadow_vfta, 0, sizeof(*shadow_vfta));
@@ -710,6 +732,10 @@ eth_ixgbevf_dev_init(__attribute__((unused)) struct eth_driver *eth_drv,
 	hw->mac.num_rar_entries = hw->mac.max_rx_queues;
 
 	diag = hw->mac.ops.reset_hw(hw);
+	if (diag != IXGBE_SUCCESS) {
+		PMD_INIT_LOG(ERR, "VF Initialization Failure: %d", diag);
+		return (diag);
+	}
 
 	/* Allocate memory for storing MAC addresses */
 	eth_dev->data->mac_addrs = rte_zmalloc("ixgbevf", ETHER_ADDR_LEN *
@@ -732,7 +758,7 @@ eth_ixgbevf_dev_init(__attribute__((unused)) struct eth_driver *eth_drv,
 		default:
 			PMD_INIT_LOG(ERR, "VF Initialization Failure: %d", diag);
-			return (diag);
+			return (-EIO);
 	}
 
 	PMD_INIT_LOG(DEBUG, "\nport %d vendorID=0x%x deviceID=0x%x mac.type=%s\n",
@@ -746,7 +772,9 @@ static struct eth_driver rte_ixgbe_pmd = {
 	{
 		.name = "rte_ixgbe_pmd",
 		.id_table = pci_id_ixgbe_map,
+#ifdef RTE_EAL_UNBIND_PORTS
 		.drv_flags = RTE_PCI_DRV_NEED_IGB_UIO,
+#endif
 	},
 	.eth_dev_init = eth_ixgbe_dev_init,
 	.dev_private_size = sizeof(struct ixgbe_adapter),
@@ -759,7 +787,9 @@ static struct eth_driver rte_ixgbevf_pmd = {
 	{
 		.name = "rte_ixgbevf_pmd",
 		.id_table = pci_id_ixgbevf_map,
+#ifdef RTE_EAL_UNBIND_PORTS
 		.drv_flags = RTE_PCI_DRV_NEED_IGB_UIO,
+#endif
 	},
 	.eth_dev_init = eth_ixgbevf_dev_init,
 	.dev_private_size = sizeof(struct ixgbe_adapter),
@@ -1093,7 +1123,8 @@ ixgbe_dev_start(struct rte_eth_dev *dev)
 		IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 	int err, link_up = 0, negotiate = 0;
 	uint32_t speed = 0;
-
+	int mask = 0;
+
 	PMD_INIT_FUNC_TRACE();
 
 	/* IXGBE devices don't support half duplex */
@@ -1112,15 +1143,16 @@ ixgbe_dev_start(struct rte_eth_dev *dev)
 
 	/* reinitialize adapter
	 * this calls reset and start */
 	ixgbe_init_hw(hw);
+	hw->mac.ops.start_hw(hw);
 
 	/* initialize transmission unit */
 	ixgbe_dev_tx_init(dev);
-
+	/* This can fail when allocating mbufs for descriptor rings */
 	err = ixgbe_dev_rx_init(dev);
 	if (err) {
 		PMD_INIT_LOG(ERR, "Unable to initialize RX hardware\n");
-		return err;
+		goto error;
 	}
 
 	ixgbe_dev_rxtx_start(dev);
@@ -1164,7 +1196,7 @@ ixgbe_dev_start(struct rte_eth_dev *dev)
 	default:
 		PMD_INIT_LOG(ERR, "Invalid link_speed (%u) for port %u\n",
				dev->data->dev_conf.link_speed, dev->data->port_id);
-		return -EINVAL;
+		goto error;
 	}
 
 	err = ixgbe_setup_link(hw, speed, negotiate, link_up);
@@ -1172,11 +1204,11 @@ ixgbe_dev_start(struct rte_eth_dev *dev)
 		goto error;
 
 	/* check if lsc interrupt is enabled */
-	if (dev->data->dev_conf.intr_conf.lsc != 0) {
-		err = ixgbe_dev_interrupt_setup(dev);
-		if (err)
-			goto error;
-	}
+	if (dev->data->dev_conf.intr_conf.lsc != 0)
+		ixgbe_dev_lsc_interrupt_setup(dev);
+
+	/* resume enabled intr since hw reset */
+	ixgbe_enable_intr(dev);
 
 	mask = ETH_VLAN_STRIP_MASK | ETH_VLAN_FILTER_MASK | \
		ETH_VLAN_EXTEND_MASK;
@@ -1323,12 +1355,12 @@ ixgbe_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
 	hw_stats->gprc += IXGBE_READ_REG(hw, IXGBE_GPRC);
 
 	if (hw->mac.type != ixgbe_mac_82598EB) {
-		hw_stats->gorc += IXGBE_READ_REG(hw, IXGBE_GORCL) +
-			((u64)IXGBE_READ_REG(hw, IXGBE_GORCH) << 32);
-		hw_stats->gotc += IXGBE_READ_REG(hw, IXGBE_GOTCL) +
-			((u64)IXGBE_READ_REG(hw, IXGBE_GOTCH) << 32);
-		hw_stats->tor += IXGBE_READ_REG(hw, IXGBE_TORL) +
-			((u64)IXGBE_READ_REG(hw, IXGBE_TORH) << 32);
+		hw_stats->gorc += IXGBE_READ_REG(hw, IXGBE_GORCL);
+		hw_stats->gorc += ((u64)IXGBE_READ_REG(hw, IXGBE_GORCH) << 32);
+		hw_stats->gotc += IXGBE_READ_REG(hw, IXGBE_GOTCL);
+		hw_stats->gotc += ((u64)IXGBE_READ_REG(hw, IXGBE_GOTCH) << 32);
+		hw_stats->tor += IXGBE_READ_REG(hw, IXGBE_TORL);
+		hw_stats->tor += ((u64)IXGBE_READ_REG(hw, IXGBE_TORH) << 32);
 		hw_stats->lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXCNT);
 		hw_stats->lxoffrxc += IXGBE_READ_REG(hw, IXGBE_LXOFFRXCNT);
 	} else {
@@ -1503,8 +1535,8 @@ ixgbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 {
 	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 
-	dev_info->max_rx_queues = hw->mac.max_rx_queues;
-	dev_info->max_tx_queues = hw->mac.max_tx_queues;
+	dev_info->max_rx_queues = (uint16_t)hw->mac.max_rx_queues;
+	dev_info->max_tx_queues = (uint16_t)hw->mac.max_tx_queues;
 	dev_info->min_rx_bufsize = 1024; /* cf BSIZEPACKET in SRRCTL register */
 	dev_info->max_rx_pktlen = 15872; /* includes CRC, cf MAXFRS register */
 	dev_info->max_mac_addrs = hw->mac.num_rar_entries;
@@ -1639,14 +1671,13 @@ ixgbe_dev_allmulticast_disable(struct rte_eth_dev *dev)
  *  - On failure, a negative value.
  */
 static int
-ixgbe_dev_interrupt_setup(struct rte_eth_dev *dev)
+ixgbe_dev_lsc_interrupt_setup(struct rte_eth_dev *dev)
 {
-	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+	struct ixgbe_interrupt *intr =
+		IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
 
 	ixgbe_dev_link_status_print(dev);
-	IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EICR_LSC);
-	IXGBE_WRITE_FLUSH(hw);
-	rte_intr_enable(&(dev->pci_dev->intr_handle));
+	intr->mask |= IXGBE_EICR_LSC;
 
 	return 0;
 }
@@ -1669,12 +1700,14 @@ ixgbe_dev_interrupt_get_status(struct rte_eth_dev *dev)
 	struct ixgbe_interrupt *intr =
 		IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
 
-	IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EICR_LSC);
-	IXGBE_WRITE_FLUSH(hw);
+	/* clear all cause mask */
+	ixgbe_disable_intr(hw);
 
 	/* read-on-clear nic registers here */
 	eicr = IXGBE_READ_REG(hw, IXGBE_EICR);
-	PMD_INIT_LOG(INFO, "eicr %x", eicr);
+	PMD_DRV_LOG(INFO, "eicr %x", eicr);
+
+	intr->flags = 0;
 	if (eicr & IXGBE_EICR_LSC) {
 		/* set flag for async link update */
 		intr->flags |= IXGBE_FLAG_NEED_LINK_UPDATE;
@@ -1732,11 +1765,43 @@ ixgbe_dev_interrupt_action(struct rte_eth_dev *dev)
 {
 	struct ixgbe_interrupt *intr =
 		IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
+	int64_t timeout;
+	struct rte_eth_link link;
+	int intr_enable_delay = false;
 
-	if (!(intr->flags & IXGBE_FLAG_NEED_LINK_UPDATE)) {
-		return -1;
+	PMD_DRV_LOG(DEBUG, "intr action type %d\n", intr->flags);
+
+	if (intr->flags & IXGBE_FLAG_NEED_LINK_UPDATE) {
+		/* get the link status before link update, for predicting later */
+		memset(&link, 0, sizeof(link));
+		rte_ixgbe_dev_atomic_read_link_status(dev, &link);
+
+		ixgbe_dev_link_update(dev, 0);
+
+		/* likely to up */
+		if (!link.link_status)
+			/* handle it 1 sec later, wait it being stable */
+			timeout = IXGBE_LINK_UP_CHECK_TIMEOUT;
+		/* likely to down */
+		else
+			/* handle it 4 sec later, wait it being stable */
+			timeout = IXGBE_LINK_DOWN_CHECK_TIMEOUT;
+
+		ixgbe_dev_link_status_print(dev);
+
+		intr_enable_delay = true;
+	}
+
+	if (intr_enable_delay) {
+		if (rte_eal_alarm_set(timeout * 1000,
+				ixgbe_dev_interrupt_delayed_handler, (void*)dev) < 0)
+			PMD_DRV_LOG(ERR, "Error setting alarm");
+	} else {
+		PMD_DRV_LOG(DEBUG, "enable intr immediately");
+		ixgbe_enable_intr(dev);
+		rte_intr_enable(&(dev->pci_dev->intr_handle));
 	}
-	ixgbe_dev_link_update(dev, 0);
+
 	return 0;
 }
 
@@ -1761,19 +1826,17 @@ ixgbe_dev_interrupt_delayed_handler(void *param)
 	struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
 	struct ixgbe_interrupt *intr =
 		IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
-	struct ixgbe_hw *hw =
-		IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 
-	IXGBE_READ_REG(hw, IXGBE_EICR);
-	ixgbe_dev_interrupt_action(dev);
 	if (intr->flags & IXGBE_FLAG_NEED_LINK_UPDATE) {
+		ixgbe_dev_link_update(dev, 0);
 		intr->flags &= ~IXGBE_FLAG_NEED_LINK_UPDATE;
-		rte_intr_enable(&(dev->pci_dev->intr_handle));
-		IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EICR_LSC);
-		IXGBE_WRITE_FLUSH(hw);
 		ixgbe_dev_link_status_print(dev);
 		_rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC);
 	}
+
+	PMD_DRV_LOG(DEBUG, "enable intr in delayed handler\n");
+	ixgbe_enable_intr(dev);
+	rte_intr_enable(&(dev->pci_dev->intr_handle));
 }
 
 /**
@@ -1789,36 +1852,12 @@ ixgbe_dev_interrupt_delayed_handler(void *param)
  * void
  */
 static void
-ixgbe_dev_interrupt_handler(struct rte_intr_handle *handle, void *param)
+ixgbe_dev_interrupt_handler(__rte_unused struct rte_intr_handle *handle,
+							void *param)
 {
-	int64_t timeout;
-	struct rte_eth_link link;
 	struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
-	struct ixgbe_interrupt *intr =
-		IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
-
-	/* get the link status before link update, for predicting later */
-	memset(&link, 0, sizeof(link));
-	rte_ixgbe_dev_atomic_read_link_status(dev, &link);
 
 	ixgbe_dev_interrupt_get_status(dev);
 	ixgbe_dev_interrupt_action(dev);
-
-	if (!(intr->flags & IXGBE_FLAG_NEED_LINK_UPDATE))
-		return;
-
-	/* likely to up */
-	if (!link.link_status)
-		/* handle it 1 sec later, wait it being stable */
-		timeout = IXGBE_LINK_UP_CHECK_TIMEOUT;
-	/* likely to down */
-	else
-		/* handle it 4 sec later, wait it being stable */
-		timeout = IXGBE_LINK_DOWN_CHECK_TIMEOUT;
-
-	ixgbe_dev_link_status_print(dev);
-	if (rte_eal_alarm_set(timeout * 1000,
-		ixgbe_dev_interrupt_delayed_handler, param) < 0)
-		PMD_INIT_LOG(ERR, "Error setting alarm");
 }
 
 static int
@@ -2129,15 +2168,24 @@ ixgbevf_dev_configure(struct rte_eth_dev *dev)
 {
 	struct rte_eth_conf* conf = &dev->data->dev_conf;
 
+	PMD_INIT_LOG(DEBUG, "\nConfigured Virtual Function port id: %d\n",
+		dev->data->port_id);
+
+	/*
+	 * VF has no ability to enable/disable HW CRC
+	 * Keep the persistent behavior the same as Host PF
+	 */
+#ifndef RTE_LIBRTE_IXGBE_PF_DISABLE_STRIP_CRC
 	if (!conf->rxmode.hw_strip_crc) {
-		/*
-		 * VF has no ability to enable/disable HW CRC
-		 * Keep the persistent behavior the same as Host PF
-		 */
 		PMD_INIT_LOG(INFO, "VF can't disable HW CRC Strip\n");
 		conf->rxmode.hw_strip_crc = 1;
 	}
+#else
+	if (conf->rxmode.hw_strip_crc) {
+		PMD_INIT_LOG(INFO, "VF can't enable HW CRC Strip\n");
+		conf->rxmode.hw_strip_crc = 0;
+	}
+#endif
 
 	return 0;
 }
@@ -2145,14 +2193,21 @@ ixgbevf_dev_configure(struct rte_eth_dev *dev)
 static int
 ixgbevf_dev_start(struct rte_eth_dev *dev)
 {
-	int err = 0;
+	struct ixgbe_hw *hw =
+		IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+	int err, mask = 0;
+
 	PMD_INIT_LOG(DEBUG, "ixgbevf_dev_start");
 
+	hw->mac.ops.reset_hw(hw);
+
 	ixgbevf_dev_tx_init(dev);
+
+	/* This can fail when allocating mbufs for descriptor rings */
 	err = ixgbevf_dev_rx_init(dev);
-	if(err){
+	if (err) {
+		PMD_INIT_LOG(ERR, "Unable to initialize RX hardware (%d)\n", err);
 		ixgbe_dev_clear_queues(dev);
-		PMD_INIT_LOG(ERR,"Unable to initialize RX hardware\n");
 		return err;
 	}
 
@@ -2175,10 +2230,30 @@ ixgbevf_dev_stop(struct rte_eth_dev *dev)
 	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 
 	PMD_INIT_LOG(DEBUG, "ixgbevf_dev_stop");
+
+	hw->adapter_stopped = TRUE;
+	ixgbe_stop_adapter(hw);
+
+	/*
+	 * Clear what we set, but we still keep shadow_vfta to
+	 * restore after device starts
+	 */
+	ixgbevf_set_vfta_all(dev,0);
+
+	ixgbe_dev_clear_queues(dev);
+}
+
+static void
+ixgbevf_dev_close(struct rte_eth_dev *dev)
+{
+	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+	PMD_INIT_LOG(DEBUG, "ixgbevf_dev_close");
 
 	ixgbe_reset_hw(hw);
-	hw->adapter_stopped = 0;
-	ixgbe_stop_adapter(hw);
+
+	ixgbevf_dev_stop(dev);
+
 	/* reprogram the RAR[0] in case user changed it. */
 	ixgbe_set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);
 }
@@ -2262,7 +2337,8 @@ ixgbevf_vlan_offload_set(struct rte_eth_dev *dev, int mask)
 {
 	struct ixgbe_hw *hw =
 		IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
-	uint32_t i, on = 0;
+	uint16_t i;
+	int on = 0;
 
 	/* VF function only support hw strip feature, others are not support */
 	if(mask & ETH_VLAN_STRIP_MASK){
@@ -2272,4 +2348,3 @@ ixgbevf_vlan_offload_set(struct rte_eth_dev *dev, int mask)
 			ixgbevf_vlan_strip_queue_set(dev,i,on);
 	}
 }
-
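
For context, a note on the interrupt rework above (illustrative, not part of the patch): the driver no longer programs EICR.LSC directly in ixgbe_dev_interrupt_setup(); instead ixgbe_dev_lsc_interrupt_setup() accumulates causes in intr->mask, ixgbe_enable_intr() writes that mask to EIMS, and link-state-change (LSC) events are debounced via rte_eal_alarm_set() before being surfaced to the application through _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC). A minimal application-side sketch of consuming that event, assuming the DPDK 1.x ethdev API of this period (the callback and helper names and the queue counts are hypothetical):

    #include <stdio.h>
    #include <rte_ethdev.h>

    /* hypothetical callback, invoked from the interrupt thread after the
     * PMD's delayed handler has let the link settle */
    static void
    lsc_event_cb(uint8_t port_id, enum rte_eth_event_type type,
                 __rte_unused void *param)
    {
            struct rte_eth_link link;

            if (type != RTE_ETH_EVENT_INTR_LSC)
                    return;
            /* non-blocking read of the link status the PMD just updated */
            rte_eth_link_get_nowait(port_id, &link);
            printf("Port %u link %s\n", (unsigned)port_id,
                   link.link_status ? "up" : "down");
    }

    /* hypothetical init helper: intr_conf.lsc = 1 is what makes
     * ixgbe_dev_start() call ixgbe_dev_lsc_interrupt_setup() and OR
     * IXGBE_EICR_LSC into intr->mask before ixgbe_enable_intr() runs */
    static int
    setup_lsc(uint8_t port_id, const struct rte_eth_conf *base_conf)
    {
            struct rte_eth_conf conf = *base_conf;

            conf.intr_conf.lsc = 1;
            if (rte_eth_dev_configure(port_id, 1, 1, &conf) < 0)
                    return -1;
            return rte_eth_dev_callback_register(port_id,
                            RTE_ETH_EVENT_INTR_LSC, lsc_event_cb, NULL);
    }

The one-second (link likely coming up) versus four-second (link likely going down) alarms in ixgbe_dev_interrupt_action() are what keep such a callback from firing repeatedly on a still-flapping link.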