diff --git a/drivers/net/txgbe/txgbe_ethdev.c b/drivers/net/txgbe/txgbe_ethdev.c
index 844ed7d636..58d72bbfa3 100644
--- a/drivers/net/txgbe/txgbe_ethdev.c
+++ b/drivers/net/txgbe/txgbe_ethdev.c
@@ -8,15 +8,34 @@
 #include <errno.h>
 #include <rte_common.h>
 #include <rte_ethdev_pci.h>
+
+#include <rte_interrupts.h>
 #include <rte_log.h>
 #include <rte_debug.h>
+#include <rte_pci.h>
+#include <rte_alarm.h>
 
 #include "txgbe_logs.h"
 #include "base/txgbe.h"
 #include "txgbe_ethdev.h"
 #include "txgbe_rxtx.h"
 
+static int txgbe_dev_set_link_up(struct rte_eth_dev *dev);
+static int txgbe_dev_set_link_down(struct rte_eth_dev *dev);
 static int txgbe_dev_close(struct rte_eth_dev *dev);
+static int txgbe_dev_link_update(struct rte_eth_dev *dev,
+				int wait_to_complete);
+
+static void txgbe_dev_link_status_print(struct rte_eth_dev *dev);
+static int txgbe_dev_lsc_interrupt_setup(struct rte_eth_dev *dev, uint8_t on);
+static int txgbe_dev_macsec_interrupt_setup(struct rte_eth_dev *dev);
+static int txgbe_dev_rxq_interrupt_setup(struct rte_eth_dev *dev);
+static int txgbe_dev_interrupt_get_status(struct rte_eth_dev *dev);
+static int txgbe_dev_interrupt_action(struct rte_eth_dev *dev,
+				      struct rte_intr_handle *handle);
+static void txgbe_dev_interrupt_handler(void *param);
+static void txgbe_dev_interrupt_delayed_handler(void *param);
+static void txgbe_configure_msix(struct rte_eth_dev *dev);
 
 /*
  * The set of PCI devices this driver supports
@@ -59,6 +78,29 @@ txgbe_is_sfp(struct txgbe_hw *hw)
 	}
 }
 
+static inline void
+txgbe_enable_intr(struct rte_eth_dev *dev)
+{
+	struct txgbe_interrupt *intr = TXGBE_DEV_INTR(dev);
+	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+
+	wr32(hw, TXGBE_IENMISC, intr->mask_misc);
+	wr32(hw, TXGBE_IMC(0), TXGBE_IMC_MASK);
+	wr32(hw, TXGBE_IMC(1), TXGBE_IMC_MASK);
+	txgbe_flush(hw);
+}
+
+static void
+txgbe_disable_intr(struct txgbe_hw *hw)
+{
+	PMD_INIT_FUNC_TRACE();
+
+	wr32(hw, TXGBE_IENMISC, ~BIT_MASK32);
+	wr32(hw, TXGBE_IMS(0), TXGBE_IMC_MASK);
+	wr32(hw, TXGBE_IMS(1), TXGBE_IMC_MASK);
+	txgbe_flush(hw);
+}
+
 static int
 eth_txgbe_dev_init(struct rte_eth_dev *eth_dev, void *init_params __rte_unused)
 {
@@ -145,6 +187,9 @@ eth_txgbe_dev_init(struct rte_eth_dev *eth_dev, void *init_params __rte_unused)
 		return -EIO;
 	}
 
+	/* disable interrupt */
+	txgbe_disable_intr(hw);
+
 	/* Allocate memory for storing MAC addresses */
 	eth_dev->data->mac_addrs = rte_zmalloc("txgbe", RTE_ETHER_ADDR_LEN *
 					       hw->mac.num_rar_entries, 0);
@@ -182,9 +227,15 @@ eth_txgbe_dev_init(struct rte_eth_dev *eth_dev, void *init_params __rte_unused)
 		     eth_dev->data->port_id, pci_dev->id.vendor_id,
 		     pci_dev->id.device_id);
 
+	rte_intr_callback_register(intr_handle,
+				   txgbe_dev_interrupt_handler, eth_dev);
+
 	/* enable uio/vfio intr/eventfd mapping */
 	rte_intr_enable(intr_handle);
 
+	/* enable supported interrupts */
+	txgbe_enable_intr(eth_dev);
+
 	return 0;
 }
 
@@ -252,6 +303,254 @@ static struct rte_pci_driver rte_txgbe_pmd = {
 	.remove = eth_txgbe_pci_remove,
 };
 
+static int
+txgbe_check_vf_rss_rxq_num(struct rte_eth_dev *dev, uint16_t nb_rx_q)
+{
+	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
+
+	switch (nb_rx_q) {
+	case 1:
+	case 2:
+		RTE_ETH_DEV_SRIOV(dev).active = ETH_64_POOLS;
+		break;
+	case 4:
+		RTE_ETH_DEV_SRIOV(dev).active = ETH_32_POOLS;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool =
+		TXGBE_MAX_RX_QUEUE_NUM / RTE_ETH_DEV_SRIOV(dev).active;
+	RTE_ETH_DEV_SRIOV(dev).def_pool_q_idx =
+		pci_dev->max_vfs * RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool;
+	return 0;
+}
+
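For reference, the pool arithmetic established by txgbe_check_vf_rss_rxq_num() above works out as follows. A minimal standalone sketch, not driver code; it assumes TXGBE_MAX_RX_QUEUE_NUM is 128 (the constant itself lives in the driver headers, not in this diff):

#include <stdio.h>

/* Sketch of the VF RSS pool layout: 1 or 2 Rx queues per VF select
 * 64 pools, 4 queues per VF select 32 pools; MAX_RX_QUEUES stands in
 * for TXGBE_MAX_RX_QUEUE_NUM (assumed to be 128 here). */
#define MAX_RX_QUEUES 128

int main(void)
{
	const int choices[] = { 1, 2, 4 };

	for (int i = 0; i < 3; i++) {
		int nb_rx_q = choices[i];
		int pools = (nb_rx_q == 4) ? 32 : 64;

		printf("nb_rx_q=%d -> %d pools, %d queues per pool\n",
		       nb_rx_q, pools, MAX_RX_QUEUES / pools);
	}
	return 0;
}
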
+static int
+txgbe_check_mq_mode(struct rte_eth_dev *dev)
+{
+	struct rte_eth_conf *dev_conf = &dev->data->dev_conf;
+	uint16_t nb_rx_q = dev->data->nb_rx_queues;
+	uint16_t nb_tx_q = dev->data->nb_tx_queues;
+
+	if (RTE_ETH_DEV_SRIOV(dev).active != 0) {
+		/* check multi-queue mode */
+		switch (dev_conf->rxmode.mq_mode) {
+		case ETH_MQ_RX_VMDQ_DCB:
+			PMD_INIT_LOG(INFO, "ETH_MQ_RX_VMDQ_DCB mode supported in SRIOV");
+			break;
+		case ETH_MQ_RX_VMDQ_DCB_RSS:
+			/* DCB/RSS VMDQ in SRIOV mode, not implemented yet */
+			PMD_INIT_LOG(ERR, "SRIOV active,"
+					" unsupported mq_mode rx %d.",
+					dev_conf->rxmode.mq_mode);
+			return -EINVAL;
+		case ETH_MQ_RX_RSS:
+		case ETH_MQ_RX_VMDQ_RSS:
+			dev->data->dev_conf.rxmode.mq_mode = ETH_MQ_RX_VMDQ_RSS;
+			if (nb_rx_q <= RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool)
+				if (txgbe_check_vf_rss_rxq_num(dev, nb_rx_q)) {
+					PMD_INIT_LOG(ERR, "SRIOV is active,"
+						" invalid queue number"
+						" for VMDQ RSS, allowed"
+						" values are 1, 2 or 4.");
+					return -EINVAL;
+				}
+			break;
+		case ETH_MQ_RX_VMDQ_ONLY:
+		case ETH_MQ_RX_NONE:
+			/* if no mq mode is configured, use the default scheme */
+			dev->data->dev_conf.rxmode.mq_mode =
+				ETH_MQ_RX_VMDQ_ONLY;
+			break;
+		default: /* ETH_MQ_RX_DCB, ETH_MQ_RX_DCB_RSS or ETH_MQ_TX_DCB */
+			/* SRIOV only works in VMDq enable mode */
+			PMD_INIT_LOG(ERR, "SRIOV is active,"
+					" wrong mq_mode rx %d.",
+					dev_conf->rxmode.mq_mode);
+			return -EINVAL;
+		}
+
+		switch (dev_conf->txmode.mq_mode) {
+		case ETH_MQ_TX_VMDQ_DCB:
+			PMD_INIT_LOG(INFO, "ETH_MQ_TX_VMDQ_DCB mode supported in SRIOV");
+			dev->data->dev_conf.txmode.mq_mode = ETH_MQ_TX_VMDQ_DCB;
+			break;
+		default: /* ETH_MQ_TX_VMDQ_ONLY or ETH_MQ_TX_NONE */
+			dev->data->dev_conf.txmode.mq_mode =
+				ETH_MQ_TX_VMDQ_ONLY;
+			break;
+		}
+
+		/* check valid queue number */
+		if ((nb_rx_q > RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool) ||
+		    (nb_tx_q > RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool)) {
+			PMD_INIT_LOG(ERR, "SRIOV is active,"
+					" nb_rx_q=%d nb_tx_q=%d queue number"
+					" must be less than or equal to %d.",
+					nb_rx_q, nb_tx_q,
+					RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool);
+			return -EINVAL;
+		}
+	} else {
+		if (dev_conf->rxmode.mq_mode == ETH_MQ_RX_VMDQ_DCB_RSS) {
+			PMD_INIT_LOG(ERR, "VMDQ+DCB+RSS mq_mode is"
+					" not supported.");
+			return -EINVAL;
+		}
+		/* check configuration for VMDq+DCB mode */
+		if (dev_conf->rxmode.mq_mode == ETH_MQ_RX_VMDQ_DCB) {
+			const struct rte_eth_vmdq_dcb_conf *conf;
+
+			if (nb_rx_q != TXGBE_VMDQ_DCB_NB_QUEUES) {
+				PMD_INIT_LOG(ERR, "VMDQ+DCB, nb_rx_q != %d.",
+						TXGBE_VMDQ_DCB_NB_QUEUES);
+				return -EINVAL;
+			}
+			conf = &dev_conf->rx_adv_conf.vmdq_dcb_conf;
+			if (!(conf->nb_queue_pools == ETH_16_POOLS ||
+			      conf->nb_queue_pools == ETH_32_POOLS)) {
+				PMD_INIT_LOG(ERR, "VMDQ+DCB selected,"
+						" nb_queue_pools must be %d or %d.",
+						ETH_16_POOLS, ETH_32_POOLS);
+				return -EINVAL;
+			}
+		}
+		if (dev_conf->txmode.mq_mode == ETH_MQ_TX_VMDQ_DCB) {
+			const struct rte_eth_vmdq_dcb_tx_conf *conf;
+
+			if (nb_tx_q != TXGBE_VMDQ_DCB_NB_QUEUES) {
+				PMD_INIT_LOG(ERR, "VMDQ+DCB, nb_tx_q != %d",
+						TXGBE_VMDQ_DCB_NB_QUEUES);
+				return -EINVAL;
+			}
+			conf = &dev_conf->tx_adv_conf.vmdq_dcb_tx_conf;
+			if (!(conf->nb_queue_pools == ETH_16_POOLS ||
+			      conf->nb_queue_pools == ETH_32_POOLS)) {
+				PMD_INIT_LOG(ERR, "VMDQ+DCB selected,"
+						" nb_queue_pools != %d and"
+						" nb_queue_pools != %d.",
+						ETH_16_POOLS, ETH_32_POOLS);
+				return -EINVAL;
+			}
+		}
+
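A configuration that passes the SR-IOV branch of txgbe_check_mq_mode() above could be set up from the application side as in this sketch (the function name and queue counts are illustrative, not part of the driver):

#include <rte_ethdev.h>

/* Sketch: request VMDq+RSS with 2 queues per pool, one of the values
 * txgbe_check_vf_rss_rxq_num() accepts (1, 2 or 4). */
static int
configure_sriov_rss(uint16_t port_id)
{
	struct rte_eth_conf port_conf = {
		.rxmode = { .mq_mode = ETH_MQ_RX_VMDQ_RSS },
		.txmode = { .mq_mode = ETH_MQ_TX_VMDQ_ONLY },
	};

	/* 2 Rx and 2 Tx queues per pool */
	return rte_eth_dev_configure(port_id, 2, 2, &port_conf);
}
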
+		/* For DCB mode check our configuration before we go further */
+		if (dev_conf->rxmode.mq_mode == ETH_MQ_RX_DCB) {
+			const struct rte_eth_dcb_rx_conf *conf;
+
+			conf = &dev_conf->rx_adv_conf.dcb_rx_conf;
+			if (!(conf->nb_tcs == ETH_4_TCS ||
+			      conf->nb_tcs == ETH_8_TCS)) {
+				PMD_INIT_LOG(ERR, "DCB selected, nb_tcs != %d"
+						" and nb_tcs != %d.",
+						ETH_4_TCS, ETH_8_TCS);
+				return -EINVAL;
+			}
+		}
+
+		if (dev_conf->txmode.mq_mode == ETH_MQ_TX_DCB) {
+			const struct rte_eth_dcb_tx_conf *conf;
+
+			conf = &dev_conf->tx_adv_conf.dcb_tx_conf;
+			if (!(conf->nb_tcs == ETH_4_TCS ||
+			      conf->nb_tcs == ETH_8_TCS)) {
+				PMD_INIT_LOG(ERR, "DCB selected, nb_tcs != %d"
+						" and nb_tcs != %d.",
+						ETH_4_TCS, ETH_8_TCS);
+				return -EINVAL;
+			}
+		}
+	}
+	return 0;
+}
+
+static int
+txgbe_dev_configure(struct rte_eth_dev *dev)
+{
+	struct txgbe_interrupt *intr = TXGBE_DEV_INTR(dev);
+	struct txgbe_adapter *adapter = TXGBE_DEV_ADAPTER(dev);
+	int ret;
+
+	PMD_INIT_FUNC_TRACE();
+
+	if (dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG)
+		dev->data->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_RSS_HASH;
+
+	/* multiple queue mode checking */
+	ret = txgbe_check_mq_mode(dev);
+	if (ret != 0) {
+		PMD_DRV_LOG(ERR, "txgbe_check_mq_mode fails with %d.",
+			    ret);
+		return ret;
+	}
+
+	/* set flag to update link status after init */
+	intr->flags |= TXGBE_FLAG_NEED_LINK_UPDATE;
+
+	/*
+	 * Initialize to TRUE. If any of the Rx queues doesn't meet the
+	 * bulk allocation preconditions, it will be reset.
+	 */
+	adapter->rx_bulk_alloc_allowed = true;
+
+	return 0;
+}
+
+static void
+txgbe_dev_phy_intr_setup(struct rte_eth_dev *dev)
+{
+	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+	struct txgbe_interrupt *intr = TXGBE_DEV_INTR(dev);
+	uint32_t gpie;
+
+	gpie = rd32(hw, TXGBE_GPIOINTEN);
+	gpie |= TXGBE_GPIOBIT_6;
+	wr32(hw, TXGBE_GPIOINTEN, gpie);
+	intr->mask_misc |= TXGBE_ICRMISC_GPIO;
+}
+
+/*
+ * Set device link up: enable Tx.
+ */
+static int
+txgbe_dev_set_link_up(struct rte_eth_dev *dev)
+{
+	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+
+	if (hw->phy.media_type == txgbe_media_type_copper) {
+		/* Turn on the copper */
+		hw->phy.set_phy_power(hw, true);
+	} else {
+		/* Turn on the laser */
+		hw->mac.enable_tx_laser(hw);
+		txgbe_dev_link_update(dev, 0);
+	}
+
+	return 0;
+}
+
+/*
+ * Set device link down: disable Tx.
+ */
+static int
+txgbe_dev_set_link_down(struct rte_eth_dev *dev)
+{
+	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+
+	if (hw->phy.media_type == txgbe_media_type_copper) {
+		/* Turn off the copper */
+		hw->phy.set_phy_power(hw, false);
+	} else {
+		/* Turn off the laser */
+		hw->mac.disable_tx_laser(hw);
+		txgbe_dev_link_update(dev, 0);
+	}
+
+	return 0;
+}
+
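The two helpers above are wired into eth_dev_ops at the end of this patch, so an application reaches them through the generic ethdev calls, along these lines (a sketch; bounce_link() is hypothetical):

#include <rte_ethdev.h>

/* Sketch: toggling the link dispatches to txgbe_dev_set_link_down()
 * and txgbe_dev_set_link_up() above. */
static int
bounce_link(uint16_t port_id)
{
	int ret = rte_eth_dev_set_link_down(port_id);

	if (ret != 0)
		return ret;
	return rte_eth_dev_set_link_up(port_id);
}
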
 /*
  * Reset and stop device.
  */
@@ -260,12 +559,30 @@ txgbe_dev_close(struct rte_eth_dev *dev)
 {
 	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
 	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
+	int retries = 0;
+	int ret;
 
 	PMD_INIT_FUNC_TRACE();
 
 	/* disable uio intr before callback unregister */
 	rte_intr_disable(intr_handle);
 
+	do {
+		ret = rte_intr_callback_unregister(intr_handle,
+				txgbe_dev_interrupt_handler, dev);
+		if (ret >= 0 || ret == -ENOENT) {
+			break;
+		} else if (ret != -EAGAIN) {
+			PMD_INIT_LOG(ERR,
+				"intr callback unregister failed: %d",
+				ret);
+		}
+		rte_delay_ms(100);
+	} while (retries++ < (10 + TXGBE_LINK_UP_TIME));
+
+	/* cancel the delayed handler before removing the device */
+	rte_eal_alarm_cancel(txgbe_dev_interrupt_delayed_handler, dev);
+
 	rte_free(dev->data->mac_addrs);
 	dev->data->mac_addrs = NULL;
 
@@ -338,8 +655,495 @@ txgbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 	return 0;
 }
 
+void
+txgbe_dev_setup_link_alarm_handler(void *param)
+{
+	struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
+	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+	struct txgbe_interrupt *intr = TXGBE_DEV_INTR(dev);
+	u32 speed;
+	bool autoneg = false;
+
+	speed = hw->phy.autoneg_advertised;
+	if (!speed)
+		hw->mac.get_link_capabilities(hw, &speed, &autoneg);
+
+	hw->mac.setup_link(hw, speed, true);
+
+	intr->flags &= ~TXGBE_FLAG_NEED_LINK_CONFIG;
+}
+
+/* return 0 means link status changed, -1 means not changed */
+int
+txgbe_dev_link_update_share(struct rte_eth_dev *dev,
+			    int wait_to_complete)
+{
+	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+	struct rte_eth_link link;
+	u32 link_speed = TXGBE_LINK_SPEED_UNKNOWN;
+	struct txgbe_interrupt *intr = TXGBE_DEV_INTR(dev);
+	bool link_up;
+	int err;
+	int wait = 1;
+
+	memset(&link, 0, sizeof(link));
+	link.link_status = ETH_LINK_DOWN;
+	link.link_speed = ETH_SPEED_NUM_NONE;
+	link.link_duplex = ETH_LINK_HALF_DUPLEX;
+	link.link_autoneg = ETH_LINK_AUTONEG;
+
+	hw->mac.get_link_status = true;
+
+	if (intr->flags & TXGBE_FLAG_NEED_LINK_CONFIG)
+		return rte_eth_linkstatus_set(dev, &link);
+
+	/* check if it needs to wait to complete, if lsc interrupt is enabled */
+	if (wait_to_complete == 0 || dev->data->dev_conf.intr_conf.lsc != 0)
+		wait = 0;
+
+	err = hw->mac.check_link(hw, &link_speed, &link_up, wait);
+
+	if (err != 0) {
+		link.link_speed = ETH_SPEED_NUM_100M;
+		link.link_duplex = ETH_LINK_FULL_DUPLEX;
+		return rte_eth_linkstatus_set(dev, &link);
+	}
+
+	if (link_up == 0) {
+		if (hw->phy.media_type == txgbe_media_type_fiber) {
+			intr->flags |= TXGBE_FLAG_NEED_LINK_CONFIG;
+			rte_eal_alarm_set(10,
+				txgbe_dev_setup_link_alarm_handler, dev);
+		}
+		return rte_eth_linkstatus_set(dev, &link);
+	}
+
+	intr->flags &= ~TXGBE_FLAG_NEED_LINK_CONFIG;
+	link.link_status = ETH_LINK_UP;
+	link.link_duplex = ETH_LINK_FULL_DUPLEX;
+
+	switch (link_speed) {
+	default:
+	case TXGBE_LINK_SPEED_UNKNOWN:
+		link.link_duplex = ETH_LINK_FULL_DUPLEX;
+		link.link_speed = ETH_SPEED_NUM_100M;
+		break;
+
+	case TXGBE_LINK_SPEED_100M_FULL:
+		link.link_speed = ETH_SPEED_NUM_100M;
+		break;
+
+	case TXGBE_LINK_SPEED_1GB_FULL:
+		link.link_speed = ETH_SPEED_NUM_1G;
+		break;
+
+	case TXGBE_LINK_SPEED_2_5GB_FULL:
+		link.link_speed = ETH_SPEED_NUM_2_5G;
+		break;
+
+	case TXGBE_LINK_SPEED_5GB_FULL:
+		link.link_speed = ETH_SPEED_NUM_5G;
+		break;
+
+	case TXGBE_LINK_SPEED_10GB_FULL:
+		link.link_speed = ETH_SPEED_NUM_10G;
+		break;
+	}
+
+	return rte_eth_linkstatus_set(dev, &link);
+}
+
+static int
+txgbe_dev_link_update(struct rte_eth_dev *dev, int wait_to_complete)
+{
+	return txgbe_dev_link_update_share(dev, wait_to_complete);
+}
+
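The wait_to_complete argument that txgbe_dev_link_update() forwards to txgbe_dev_link_update_share() corresponds to the two ethdev link queries, roughly as sketched here (query_link() is hypothetical):

#include <rte_ethdev.h>

/* Sketch: the blocking query maps to wait_to_complete == 1, the
 * non-blocking one to wait_to_complete == 0. */
static void
query_link(uint16_t port_id)
{
	struct rte_eth_link link;

	rte_eth_link_get(port_id, &link);        /* may wait for completion */
	rte_eth_link_get_nowait(port_id, &link); /* returns current status */
}
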
+/**
+ * It clears the interrupt causes and enables the interrupt.
+ * It will be called once only during NIC initialization.
+ *
+ * @param dev
+ *  Pointer to struct rte_eth_dev.
+ * @param on
+ *  Enable or Disable.
+ *
+ * @return
+ *  - On success, zero.
+ *  - On failure, a negative value.
+ */
+static int
+txgbe_dev_lsc_interrupt_setup(struct rte_eth_dev *dev, uint8_t on)
+{
+	struct txgbe_interrupt *intr = TXGBE_DEV_INTR(dev);
+
+	txgbe_dev_link_status_print(dev);
+	if (on)
+		intr->mask_misc |= TXGBE_ICRMISC_LSC;
+	else
+		intr->mask_misc &= ~TXGBE_ICRMISC_LSC;
+
+	return 0;
+}
+
+/**
+ * It clears the interrupt causes and enables the interrupt.
+ * It will be called once only during NIC initialization.
+ *
+ * @param dev
+ *  Pointer to struct rte_eth_dev.
+ *
+ * @return
+ *  - On success, zero.
+ *  - On failure, a negative value.
+ */
+static int
+txgbe_dev_rxq_interrupt_setup(struct rte_eth_dev *dev)
+{
+	struct txgbe_interrupt *intr = TXGBE_DEV_INTR(dev);
+
+	intr->mask[0] |= TXGBE_ICR_MASK;
+	intr->mask[1] |= TXGBE_ICR_MASK;
+
+	return 0;
+}
+
+/**
+ * It clears the interrupt causes and enables the interrupt.
+ * It will be called once only during NIC initialization.
+ *
+ * @param dev
+ *  Pointer to struct rte_eth_dev.
+ *
+ * @return
+ *  - On success, zero.
+ *  - On failure, a negative value.
+ */
+static int
+txgbe_dev_macsec_interrupt_setup(struct rte_eth_dev *dev)
+{
+	struct txgbe_interrupt *intr = TXGBE_DEV_INTR(dev);
+
+	intr->mask_misc |= TXGBE_ICRMISC_LNKSEC;
+
+	return 0;
+}
+
+/*
+ * It reads ICR and sets flag (TXGBE_ICRMISC_LSC) for the link_update.
+ *
+ * @param dev
+ *  Pointer to struct rte_eth_dev.
+ *
+ * @return
+ *  - On success, zero.
+ *  - On failure, a negative value.
+ */
+static int
+txgbe_dev_interrupt_get_status(struct rte_eth_dev *dev)
+{
+	uint32_t eicr;
+	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+	struct txgbe_interrupt *intr = TXGBE_DEV_INTR(dev);
+
+	/* clear all cause mask */
+	txgbe_disable_intr(hw);
+
+	/* read-on-clear nic registers here */
+	eicr = ((u32 *)hw->isb_mem)[TXGBE_ISB_MISC];
+	PMD_DRV_LOG(DEBUG, "eicr %x", eicr);
+
+	intr->flags = 0;
+
+	/* set flag for async link update */
+	if (eicr & TXGBE_ICRMISC_LSC)
+		intr->flags |= TXGBE_FLAG_NEED_LINK_UPDATE;
+
+	if (eicr & TXGBE_ICRMISC_VFMBX)
+		intr->flags |= TXGBE_FLAG_MAILBOX;
+
+	if (eicr & TXGBE_ICRMISC_LNKSEC)
+		intr->flags |= TXGBE_FLAG_MACSEC;
+
+	if (eicr & TXGBE_ICRMISC_GPIO)
+		intr->flags |= TXGBE_FLAG_PHY_INTERRUPT;
+
+	return 0;
+}
+
+/**
+ * It gets and then prints the link status.
+ *
+ * @param dev
+ *  Pointer to struct rte_eth_dev.
+ */
+static void
+txgbe_dev_link_status_print(struct rte_eth_dev *dev)
+{
+	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
+	struct rte_eth_link link;
+
+	rte_eth_linkstatus_get(dev, &link);
+
+	if (link.link_status) {
+		PMD_INIT_LOG(INFO, "Port %d: Link Up - speed %u Mbps - %s",
+			     (int)(dev->data->port_id),
+			     (unsigned int)link.link_speed,
+			     link.link_duplex == ETH_LINK_FULL_DUPLEX ?
+			     "full-duplex" : "half-duplex");
+	} else {
+		PMD_INIT_LOG(INFO, " Port %d: Link Down",
+			     (int)(dev->data->port_id));
+	}
+	PMD_INIT_LOG(DEBUG, "PCI Address: " PCI_PRI_FMT,
+		     pci_dev->addr.domain,
+		     pci_dev->addr.bus,
+		     pci_dev->addr.devid,
+		     pci_dev->addr.function);
+}
+
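The LSC mask managed by txgbe_dev_lsc_interrupt_setup() above only matters when the application opts into link-state-change notification; a hedged sketch of that application-side knob (configure_with_lsc() and the queue counts are hypothetical):

#include <rte_ethdev.h>

/* Sketch: enable link-state-change interrupts at configure time so
 * that the LSC path above (TXGBE_ICRMISC_LSC) can be armed. */
static int
configure_with_lsc(uint16_t port_id, struct rte_eth_conf conf)
{
	conf.intr_conf.lsc = 1;
	return rte_eth_dev_configure(port_id, 1, 1, &conf);
}
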
+/*
+ * It executes link_update after knowing an interrupt occurred.
+ *
+ * @param dev
+ *  Pointer to struct rte_eth_dev.
+ *
+ * @return
+ *  - On success, zero.
+ *  - On failure, a negative value.
+ */
+static int
+txgbe_dev_interrupt_action(struct rte_eth_dev *dev,
+			   struct rte_intr_handle *intr_handle)
+{
+	struct txgbe_interrupt *intr = TXGBE_DEV_INTR(dev);
+	int64_t timeout;
+	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+
+	PMD_DRV_LOG(DEBUG, "intr action type %d", intr->flags);
+
+	if (intr->flags & TXGBE_FLAG_MAILBOX)
+		intr->flags &= ~TXGBE_FLAG_MAILBOX;
+
+	if (intr->flags & TXGBE_FLAG_PHY_INTERRUPT) {
+		hw->phy.handle_lasi(hw);
+		intr->flags &= ~TXGBE_FLAG_PHY_INTERRUPT;
+	}
+
+	if (intr->flags & TXGBE_FLAG_NEED_LINK_UPDATE) {
+		struct rte_eth_link link;
+
+		/* get the link status before link update, for predicting later */
+		rte_eth_linkstatus_get(dev, &link);
+
+		txgbe_dev_link_update(dev, 0);
+
+		/* likely to come up */
+		if (!link.link_status)
+			/* handle it 1 sec later, wait for it to stabilize */
+			timeout = TXGBE_LINK_UP_CHECK_TIMEOUT;
+		/* likely to go down */
+		else
+			/* handle it 4 sec later, wait for it to stabilize */
+			timeout = TXGBE_LINK_DOWN_CHECK_TIMEOUT;
+
+		txgbe_dev_link_status_print(dev);
+		if (rte_eal_alarm_set(timeout * 1000,
+				      txgbe_dev_interrupt_delayed_handler,
+				      (void *)dev) < 0) {
+			PMD_DRV_LOG(ERR, "Error setting alarm");
+		} else {
+			/* remember original mask */
+			intr->mask_misc_orig = intr->mask_misc;
+			/* only disable lsc interrupt */
+			intr->mask_misc &= ~TXGBE_ICRMISC_LSC;
+		}
+	}
+
+	PMD_DRV_LOG(DEBUG, "enable intr immediately");
+	txgbe_enable_intr(dev);
+	rte_intr_enable(intr_handle);
+
+	return 0;
+}
+
+/**
+ * Interrupt handler which shall be registered as an alarm callback for
+ * delayed handling of a specific interrupt, to wait for the NIC state
+ * to become stable. As the interrupt state of txgbe is not stable right
+ * after the link goes down, it needs to wait 4 seconds to get a stable
+ * status.
+ *
+ * @param param
+ *  The address of parameter (struct rte_eth_dev *) registered before.
+ */
+static void
+txgbe_dev_interrupt_delayed_handler(void *param)
+{
+	struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
+	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
+	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
+	struct txgbe_interrupt *intr = TXGBE_DEV_INTR(dev);
+	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+	uint32_t eicr;
+
+	txgbe_disable_intr(hw);
+
+	eicr = ((u32 *)hw->isb_mem)[TXGBE_ISB_MISC];
+
+	if (intr->flags & TXGBE_FLAG_PHY_INTERRUPT) {
+		hw->phy.handle_lasi(hw);
+		intr->flags &= ~TXGBE_FLAG_PHY_INTERRUPT;
+	}
+
+	if (intr->flags & TXGBE_FLAG_NEED_LINK_UPDATE) {
+		txgbe_dev_link_update(dev, 0);
+		intr->flags &= ~TXGBE_FLAG_NEED_LINK_UPDATE;
+		txgbe_dev_link_status_print(dev);
+		rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC,
+					     NULL);
+	}
+
+	if (intr->flags & TXGBE_FLAG_MACSEC) {
+		rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_MACSEC,
+					     NULL);
+		intr->flags &= ~TXGBE_FLAG_MACSEC;
+	}
+
+	/* restore original mask */
+	intr->mask_misc = intr->mask_misc_orig;
+	intr->mask_misc_orig = 0;
+
+	PMD_DRV_LOG(DEBUG, "enable intr in delayed handler S[%08x]", eicr);
+	txgbe_enable_intr(dev);
+	rte_intr_enable(intr_handle);
+}
+
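The delayed handler above finishes by posting RTE_ETH_EVENT_INTR_LSC (and possibly RTE_ETH_EVENT_MACSEC) through rte_eth_dev_callback_process(); an application consumes the event with a callback along these lines (a sketch; lsc_event_cb() and watch_link() are illustrative names):

#include <stdio.h>
#include <rte_ethdev.h>

/* Sketch: receive the LSC event posted by
 * txgbe_dev_interrupt_delayed_handler() above. */
static int
lsc_event_cb(uint16_t port_id, enum rte_eth_event_type event,
	     void *cb_arg, void *ret_param)
{
	RTE_SET_USED(event);
	RTE_SET_USED(cb_arg);
	RTE_SET_USED(ret_param);

	printf("port %u: link state changed\n", port_id);
	return 0;
}

static int
watch_link(uint16_t port_id)
{
	return rte_eth_dev_callback_register(port_id, RTE_ETH_EVENT_INTR_LSC,
					     lsc_event_cb, NULL);
}
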
+/**
+ * Interrupt handler triggered by NIC for handling specific interrupt.
+ *
+ * @param param
+ *  The address of parameter (struct rte_eth_dev *) registered before.
+ */
+static void
+txgbe_dev_interrupt_handler(void *param)
+{
+	struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
+
+	txgbe_dev_interrupt_get_status(dev);
+	txgbe_dev_interrupt_action(dev, dev->intr_handle);
+}
+
+/**
+ * set the IVAR registers, mapping interrupt causes to vectors
+ * @param hw
+ *  pointer to txgbe_hw struct
+ * @param direction
+ *  0 for Rx, 1 for Tx, -1 for other causes
+ * @param queue
+ *  queue to map the corresponding interrupt to
+ * @param msix_vector
+ *  the vector to map to the corresponding queue
+ */
+void
+txgbe_set_ivar_map(struct txgbe_hw *hw, int8_t direction,
+		   uint8_t queue, uint8_t msix_vector)
+{
+	uint32_t tmp, idx;
+
+	if (direction == -1) {
+		/* other causes */
+		msix_vector |= TXGBE_IVARMISC_VLD;
+		idx = 0;
+		tmp = rd32(hw, TXGBE_IVARMISC);
+		tmp &= ~(0xFF << idx);
+		tmp |= (msix_vector << idx);
+		wr32(hw, TXGBE_IVARMISC, tmp);
+	} else {
+		/* rx or tx causes */
+		/* Workaround for lost ICR */
+		idx = ((16 * (queue & 1)) + (8 * direction));
+		tmp = rd32(hw, TXGBE_IVAR(queue >> 1));
+		tmp &= ~(0xFF << idx);
+		tmp |= (msix_vector << idx);
+		wr32(hw, TXGBE_IVAR(queue >> 1), tmp);
+	}
+}
+
+/**
+ * Sets up the hardware to properly generate MSI-X interrupts
+ * @param dev
+ *  pointer to struct rte_eth_dev
+ */
+static void
+txgbe_configure_msix(struct rte_eth_dev *dev)
+{
+	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
+	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
+	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+	uint32_t queue_id, base = TXGBE_MISC_VEC_ID;
+	uint32_t vec = TXGBE_MISC_VEC_ID;
+	uint32_t gpie;
+
+	/* Won't configure the MSI-X register if no mapping is done
+	 * between intr vector and event fd; but if MSI-X has been
+	 * enabled already, we need to configure auto clean, auto mask
+	 * and throttling.
+	 */
+	gpie = rd32(hw, TXGBE_GPIE);
+	if (!rte_intr_dp_is_en(intr_handle) &&
+	    !(gpie & TXGBE_GPIE_MSIX))
+		return;
+
+	if (rte_intr_allow_others(intr_handle)) {
+		base = TXGBE_RX_VEC_START;
+		vec = base;
+	}
+
+	/* setup GPIE for MSI-X mode */
+	gpie = rd32(hw, TXGBE_GPIE);
+	gpie |= TXGBE_GPIE_MSIX;
+	wr32(hw, TXGBE_GPIE, gpie);
+
+	/* Populate the IVAR table and set the ITR values to the
+	 * corresponding register.
+	 */
+	if (rte_intr_dp_is_en(intr_handle)) {
+		for (queue_id = 0; queue_id < dev->data->nb_rx_queues;
+		     queue_id++) {
+			/* by default, 1:1 mapping */
+			txgbe_set_ivar_map(hw, 0, queue_id, vec);
+			intr_handle->intr_vec[queue_id] = vec;
+			if (vec < base + intr_handle->nb_efd - 1)
+				vec++;
+		}
+
+		txgbe_set_ivar_map(hw, -1, 1, TXGBE_MISC_VEC_ID);
+	}
+	wr32(hw, TXGBE_ITR(TXGBE_MISC_VEC_ID),
+	     TXGBE_ITR_IVAL_10G(TXGBE_QUEUE_ITR_INTERVAL_DEFAULT)
+	     | TXGBE_ITR_WRDSA);
+}
+
 static const struct eth_dev_ops txgbe_eth_dev_ops = {
+	.dev_configure = txgbe_dev_configure,
 	.dev_infos_get = txgbe_dev_info_get,
+	.dev_set_link_up = txgbe_dev_set_link_up,
+	.dev_set_link_down = txgbe_dev_set_link_down,
 };
 
 RTE_PMD_REGISTER_PCI(net_txgbe, rte_txgbe_pmd);
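The idx arithmetic in txgbe_set_ivar_map() packs four 8-bit vector entries into each 32-bit IVAR register: two queues per register, with Rx entries at bit offsets 0 and 16 and Tx entries at offsets 8 and 24. A standalone sketch of the same math (not driver code):

#include <stdio.h>
#include <stdint.h>

/* Sketch of the IVAR slot computation used by txgbe_set_ivar_map(). */
static void
show_ivar_slot(uint8_t queue, int8_t direction)
{
	uint32_t reg = queue >> 1;	/* two queues per IVAR register */
	uint32_t idx = (16 * (queue & 1)) + (8 * direction);

	printf("queue %u %s -> IVAR(%u), bits [%u:%u]\n",
	       queue, direction ? "tx" : "rx", reg, idx + 7, idx);
}

int main(void)
{
	for (uint8_t q = 0; q < 4; q++) {
		show_ivar_slot(q, 0);	/* Rx */
		show_ivar_slot(q, 1);	/* Tx */
	}
	return 0;
}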