	case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CHANGE:
	case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE:
		/* FALLTHROUGH */
-		bnxt_link_update_op(bp->eth_dev, 1);
+		bnxt_link_update_op(bp->eth_dev, 0);
		break;
	case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_UNLOAD:
		PMD_DRV_LOG(INFO, "Async event: PF driver unloaded\n");
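
Note: this hunk appears to be in the bnxt async event handler (bnxt_handle_async_event() in bnxt_cpr.c). The second argument of bnxt_link_update_op() follows the rte_eth_link_update() wait_to_complete convention, so the change makes the handler report the current link state rather than poll until the link settles, presumably to avoid blocking in the event-handling path. A minimal sketch of that convention follows; the retry count, delay and hardware query are illustrative only, not part of the patch.

#include <rte_cycles.h>		/* rte_delay_ms() */
#include <rte_ethdev.h>		/* struct rte_eth_dev, struct rte_eth_link */

/* Sketch only, not part of the patch. */
static void
example_wait_for_link(struct rte_eth_dev *dev, int wait_to_complete,
		      struct rte_eth_link *link)
{
	int retries = 10;	/* illustrative values */

	do {
		/* ... read the PHY/firmware link state into *link (elided) ... */

		/* wait_to_complete == 0: report whatever we see right now;
		 * wait_to_complete == 1: keep polling until the link comes up
		 * or the retries run out.
		 */
		if (!wait_to_complete || link->link_status)
			break;

		rte_delay_ms(100);
	} while (--retries);
}
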
			bp->rx_cp_nr_rings, RTE_ETHDEV_QUEUE_STAT_CNTRS);
	}
+	bnxt_enable_int(bp);
	rc = bnxt_hwrm_if_change(bp, 1);
	if (!rc) {
		if (bp->flags & BNXT_FLAG_IF_CHANGE_HOT_FW_RESET_DONE) {
	eth_dev->rx_pkt_burst = bnxt_receive_function(eth_dev);
	eth_dev->tx_pkt_burst = bnxt_transmit_function(eth_dev);
-	bnxt_enable_int(bp);
	bp->flags |= BNXT_FLAG_INIT_DONE;
	eth_dev->data->dev_started = 1;
	bp->dev_stopped = 0;
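
Note: the two fragments above are from what appears to be bnxt_dev_start_op() (the lines between the BNXT_FLAG_IF_CHANGE_HOT_FW_RESET_DONE check and the burst-function assignments are not shown here). They move bnxt_enable_int() from the end of the start sequence to just before bnxt_hwrm_if_change(bp, 1), presumably so that async completions raised while the port is coming up are serviced immediately instead of only once start completes. A condensed sketch of the resulting order, assuming the driver's internal bnxt.h declarations, with error handling and unrelated setup omitted:

/* Sketch only; condensed, not the verbatim function. */
static int
example_dev_start(struct rte_eth_dev *eth_dev)
{
	struct bnxt *bp = eth_dev->data->dev_private;
	int rc;

	bnxt_enable_int(bp);			/* interrupts on before the FW calls */

	rc = bnxt_hwrm_if_change(bp, 1);
	if (rc)
		return rc;

	/* ... ring, filter and link setup elided ... */

	eth_dev->rx_pkt_burst = bnxt_receive_function(eth_dev);
	eth_dev->tx_pkt_burst = bnxt_transmit_function(eth_dev);
	/* bnxt_enable_int() is no longer called here */

	bp->flags |= BNXT_FLAG_INIT_DONE;
	eth_dev->data->dev_started = 1;
	bp->dev_stopped = 0;
	return 0;
}
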
		/* TBD: STOP HW queues DMA */
		eth_dev->data->dev_link.link_status = 0;
	}
-	bnxt_set_hwrm_link_config(bp, false);
+	bnxt_dev_set_link_down_op(eth_dev);
+	/* Wait for link to be reset and the async notification to process. */
+	rte_delay_ms(BNXT_LINK_WAIT_INTERVAL * 2);
	/* Clean queue intr-vector mapping */
	rte_intr_efd_disable(intr_handle);
	bnxt_hwrm_port_clr_stats(bp);
	bnxt_free_tx_mbufs(bp);
	bnxt_free_rx_mbufs(bp);
+	/* Process any remaining notifications in default completion queue */
+	bnxt_int_handler(eth_dev);
	bnxt_shutdown_nic(bp);
	bnxt_hwrm_if_change(bp, 0);
	bp->dev_stopped = 1;
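
Note: in what appears to be bnxt_dev_stop_op(), the link is now brought down through the driver's own bnxt_dev_set_link_down_op(), the code waits two BNXT_LINK_WAIT_INTERVAL periods for the firmware's link-change notification, and the default completion ring is polled one last time via bnxt_int_handler() before the NIC is shut down, as the added comments state. A condensed sketch of that teardown order (other cleanup omitted, bnxt.h internals assumed):

/* Sketch only; condensed, not the verbatim function. */
static void
example_dev_stop(struct rte_eth_dev *eth_dev)
{
	struct bnxt *bp = eth_dev->data->dev_private;

	bnxt_dev_set_link_down_op(eth_dev);
	/* let FW reset the link and emit the async notification */
	rte_delay_ms(BNXT_LINK_WAIT_INTERVAL * 2);

	bnxt_free_tx_mbufs(bp);
	bnxt_free_rx_mbufs(bp);

	/* drain notifications still queued on the default completion ring */
	bnxt_int_handler(eth_dev);

	bnxt_shutdown_nic(bp);
	bnxt_hwrm_if_change(bp, 0);
	bp->dev_stopped = 1;
}
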
	/* Timed out or success */
	if (new.link_status != eth_dev->data->dev_link.link_status ||
	    new.link_speed != eth_dev->data->dev_link.link_speed) {
-		memcpy(&eth_dev->data->dev_link, &new,
-		       sizeof(struct rte_eth_link));
+		rte_eth_linkstatus_set(eth_dev, &new);
		_rte_eth_dev_callback_process(eth_dev,
					      RTE_ETH_EVENT_INTR_LSC,
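
Note: the final hunk, in what looks like bnxt_link_update_op(), replaces the open-coded memcpy() into eth_dev->data->dev_link with rte_eth_linkstatus_set(). The helper publishes the whole struct rte_eth_link as a single atomic 64-bit store, so concurrent readers (e.g. via rte_eth_link_get_nowait()) do not observe a half-updated link, and it returns 0 only when the link status actually changed. A minimal usage sketch, with the hardware query elided:

/* Sketch only, not part of the patch. */
#include <string.h>
#include <rte_ethdev_driver.h>	/* rte_eth_linkstatus_set(), pre-20.11 header name */

static int
example_report_link(struct rte_eth_dev *dev)
{
	struct rte_eth_link link;

	memset(&link, 0, sizeof(link));
	/* ... fill `link` from the hardware/firmware (elided) ... */

	/* Atomic publish: returns 0 if link_status changed, -1 if unchanged. */
	if (rte_eth_linkstatus_set(dev, &link) == 0)
		_rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC, NULL);

	return 0;
}
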