X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=drivers%2Fnet%2Fbnxt%2Fbnxt_ethdev.c;h=6bc006a71981d54a91249b1044b1a2e1a04c5f24;hb=df6cd7c1f73a;hp=2a8b502967b646dc67fa4270df99544af8e1f3c7;hpb=bd0a14c99f6588ea4c60a234e9e053fd2b329964;p=dpdk.git diff --git a/drivers/net/bnxt/bnxt_ethdev.c b/drivers/net/bnxt/bnxt_ethdev.c index 2a8b502967..6bc006a719 100644 --- a/drivers/net/bnxt/bnxt_ethdev.c +++ b/drivers/net/bnxt/bnxt_ethdev.c @@ -11,6 +11,7 @@ #include #include #include +#include #include "bnxt.h" #include "bnxt_cpr.h" @@ -166,6 +167,18 @@ static int bnxt_vlan_offload_set_op(struct rte_eth_dev *dev, int mask); static void bnxt_print_link_info(struct rte_eth_dev *eth_dev); static int bnxt_mtu_set_op(struct rte_eth_dev *eth_dev, uint16_t new_mtu); static int bnxt_dev_uninit(struct rte_eth_dev *eth_dev); +static int bnxt_init_resources(struct bnxt *bp, bool reconfig_dev); +static int bnxt_uninit_resources(struct bnxt *bp, bool reconfig_dev); + +int is_bnxt_in_error(struct bnxt *bp) +{ + if (bp->flags & BNXT_FLAG_FATAL_ERROR) + return -EIO; + if (bp->flags & BNXT_FLAG_FW_RESET) + return -EBUSY; + + return 0; +} /***********************/ @@ -191,22 +204,32 @@ static uint16_t bnxt_rss_hash_tbl_size(const struct bnxt *bp) return bnxt_rss_ctxts(bp) * BNXT_RSS_ENTRIES_PER_CTX_THOR; } -static void bnxt_free_mem(struct bnxt *bp) +static void bnxt_free_mem(struct bnxt *bp, bool reconfig) { bnxt_free_filter_mem(bp); bnxt_free_vnic_attributes(bp); bnxt_free_vnic_mem(bp); - bnxt_free_stats(bp); - bnxt_free_tx_rings(bp); - bnxt_free_rx_rings(bp); + /* tx/rx rings are configured as part of *_queue_setup callbacks. + * If the number of rings change across fw update, + * we don't have much choice except to warn the user. + */ + if (!reconfig) { + bnxt_free_stats(bp); + bnxt_free_tx_rings(bp); + bnxt_free_rx_rings(bp); + } bnxt_free_async_cp_ring(bp); } -static int bnxt_alloc_mem(struct bnxt *bp) +static int bnxt_alloc_mem(struct bnxt *bp, bool reconfig) { int rc; + rc = bnxt_alloc_ring_grps(bp); + if (rc) + goto alloc_mem_err; + rc = bnxt_alloc_async_ring_struct(bp); if (rc) goto alloc_mem_err; @@ -230,7 +253,7 @@ static int bnxt_alloc_mem(struct bnxt *bp) return 0; alloc_mem_err: - bnxt_free_mem(bp); + bnxt_free_mem(bp, reconfig); return rc; } @@ -494,12 +517,17 @@ static int bnxt_init_nic(struct bnxt *bp) * Device configuration and status function */ -static void bnxt_dev_info_get_op(struct rte_eth_dev *eth_dev, - struct rte_eth_dev_info *dev_info) +static int bnxt_dev_info_get_op(struct rte_eth_dev *eth_dev, + struct rte_eth_dev_info *dev_info) { struct bnxt *bp = eth_dev->data->dev_private; uint16_t max_vnics, i, j, vpool, vrxq; unsigned int max_rx_rings; + int rc; + + rc = is_bnxt_in_error(bp); + if (rc) + return rc; /* MAC Specifics */ dev_info->max_mac_addrs = bp->max_l2_ctx; @@ -588,6 +616,8 @@ found: dev_info->vmdq_pool_base = 0; dev_info->vmdq_queue_base = 0; + + return 0; } /* Configure the device based on the configuration provided */ @@ -602,6 +632,10 @@ static int bnxt_dev_configure_op(struct rte_eth_dev *eth_dev) bp->tx_nr_rings = eth_dev->data->nb_tx_queues; bp->rx_nr_rings = eth_dev->data->nb_rx_queues; + rc = is_bnxt_in_error(bp); + if (rc) + return rc; + if (BNXT_VF(bp) && (bp->flags & BNXT_FLAG_NEW_RM)) { rc = bnxt_hwrm_check_vf_rings(bp); if (rc) { @@ -791,8 +825,10 @@ static int bnxt_dev_start_op(struct rte_eth_dev *eth_dev) eth_dev->rx_pkt_burst = bnxt_receive_function(eth_dev); eth_dev->tx_pkt_burst = bnxt_transmit_function(eth_dev); + bnxt_enable_int(bp); bp->flags |= 
BNXT_FLAG_INIT_DONE; + eth_dev->data->dev_started = 1; bp->dev_stopped = 0; return 0; @@ -835,6 +871,11 @@ static void bnxt_dev_stop_op(struct rte_eth_dev *eth_dev) struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev); struct rte_intr_handle *intr_handle = &pci_dev->intr_handle; + eth_dev->data->dev_started = 0; + /* Prevent crashes when queues are still in use */ + eth_dev->rx_pkt_burst = &bnxt_dummy_recv_pkts; + eth_dev->tx_pkt_burst = &bnxt_dummy_xmit_pkts; + bnxt_disable_int(bp); /* disable uio/vfio intr/eventfd mapping */ @@ -889,6 +930,9 @@ static void bnxt_mac_addr_remove_op(struct rte_eth_dev *eth_dev, struct bnxt_filter_info *filter, *temp_filter; uint32_t i; + if (is_bnxt_in_error(bp)) + return; + /* * Loop through all VNICs from the specified filter flow pools to * remove the corresponding MAC addr filter @@ -924,6 +968,10 @@ static int bnxt_mac_addr_add_op(struct rte_eth_dev *eth_dev, struct bnxt_filter_info *filter; int rc = 0; + rc = is_bnxt_in_error(bp); + if (rc) + return rc; + if (BNXT_VF(bp) & !BNXT_VF_IS_TRUSTED(bp)) { PMD_DRV_LOG(ERR, "Cannot add MAC address to a VF interface\n"); return -ENOTSUP; @@ -969,6 +1017,10 @@ int bnxt_link_update_op(struct rte_eth_dev *eth_dev, int wait_to_complete) struct rte_eth_link new; unsigned int cnt = BNXT_LINK_WAIT_CNT; + rc = is_bnxt_in_error(bp); + if (rc) + return rc; + memset(&new, 0, sizeof(new)); do { /* Retrieve link info from hardware */ @@ -1004,60 +1056,104 @@ out: return rc; } -static void bnxt_promiscuous_enable_op(struct rte_eth_dev *eth_dev) +static int bnxt_promiscuous_enable_op(struct rte_eth_dev *eth_dev) { struct bnxt *bp = eth_dev->data->dev_private; struct bnxt_vnic_info *vnic; + uint32_t old_flags; + int rc; + + rc = is_bnxt_in_error(bp); + if (rc) + return rc; if (bp->vnic_info == NULL) - return; + return 0; vnic = &bp->vnic_info[0]; + old_flags = vnic->flags; vnic->flags |= BNXT_VNIC_INFO_PROMISC; - bnxt_hwrm_cfa_l2_set_rx_mask(bp, vnic, 0, NULL); + rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, vnic, 0, NULL); + if (rc != 0) + vnic->flags = old_flags; + + return rc; } -static void bnxt_promiscuous_disable_op(struct rte_eth_dev *eth_dev) +static int bnxt_promiscuous_disable_op(struct rte_eth_dev *eth_dev) { struct bnxt *bp = eth_dev->data->dev_private; struct bnxt_vnic_info *vnic; + uint32_t old_flags; + int rc; + + rc = is_bnxt_in_error(bp); + if (rc) + return rc; if (bp->vnic_info == NULL) - return; + return 0; vnic = &bp->vnic_info[0]; + old_flags = vnic->flags; vnic->flags &= ~BNXT_VNIC_INFO_PROMISC; - bnxt_hwrm_cfa_l2_set_rx_mask(bp, vnic, 0, NULL); + rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, vnic, 0, NULL); + if (rc != 0) + vnic->flags = old_flags; + + return rc; } -static void bnxt_allmulticast_enable_op(struct rte_eth_dev *eth_dev) +static int bnxt_allmulticast_enable_op(struct rte_eth_dev *eth_dev) { struct bnxt *bp = eth_dev->data->dev_private; struct bnxt_vnic_info *vnic; + uint32_t old_flags; + int rc; + + rc = is_bnxt_in_error(bp); + if (rc) + return rc; if (bp->vnic_info == NULL) - return; + return 0; vnic = &bp->vnic_info[0]; + old_flags = vnic->flags; vnic->flags |= BNXT_VNIC_INFO_ALLMULTI; - bnxt_hwrm_cfa_l2_set_rx_mask(bp, vnic, 0, NULL); + rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, vnic, 0, NULL); + if (rc != 0) + vnic->flags = old_flags; + + return rc; } -static void bnxt_allmulticast_disable_op(struct rte_eth_dev *eth_dev) +static int bnxt_allmulticast_disable_op(struct rte_eth_dev *eth_dev) { struct bnxt *bp = eth_dev->data->dev_private; struct bnxt_vnic_info *vnic; + uint32_t old_flags; + int rc; + 
+ rc = is_bnxt_in_error(bp); + if (rc) + return rc; if (bp->vnic_info == NULL) - return; + return 0; vnic = &bp->vnic_info[0]; + old_flags = vnic->flags; vnic->flags &= ~BNXT_VNIC_INFO_ALLMULTI; - bnxt_hwrm_cfa_l2_set_rx_mask(bp, vnic, 0, NULL); + rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, vnic, 0, NULL); + if (rc != 0) + vnic->flags = old_flags; + + return rc; } /* Return bnxt_rx_queue pointer corresponding to a given rxq. */ @@ -1100,7 +1196,11 @@ static int bnxt_reta_update_op(struct rte_eth_dev *eth_dev, struct bnxt_vnic_info *vnic = &bp->vnic_info[0]; uint16_t tbl_size = bnxt_rss_hash_tbl_size(bp); uint16_t idx, sft; - int i; + int i, rc; + + rc = is_bnxt_in_error(bp); + if (rc) + return rc; if (!vnic->rss_table) return -EINVAL; @@ -1156,6 +1256,11 @@ static int bnxt_reta_query_op(struct rte_eth_dev *eth_dev, struct bnxt_vnic_info *vnic = &bp->vnic_info[0]; uint16_t tbl_size = bnxt_rss_hash_tbl_size(bp); uint16_t idx, sft, i; + int rc; + + rc = is_bnxt_in_error(bp); + if (rc) + return rc; /* Retrieve from the default VNIC */ if (!vnic) @@ -1202,6 +1307,11 @@ static int bnxt_rss_hash_update_op(struct rte_eth_dev *eth_dev, struct bnxt_vnic_info *vnic; uint16_t hash_type = 0; unsigned int i; + int rc; + + rc = is_bnxt_in_error(bp); + if (rc) + return rc; /* * If RSS enablement were different than dev_configure, @@ -1255,9 +1365,13 @@ static int bnxt_rss_hash_conf_get_op(struct rte_eth_dev *eth_dev, { struct bnxt *bp = eth_dev->data->dev_private; struct bnxt_vnic_info *vnic = &bp->vnic_info[0]; - int len; + int len, rc; uint32_t hash_types; + rc = is_bnxt_in_error(bp); + if (rc) + return rc; + /* RSS configuration is the same for all VNICs */ if (vnic && vnic->rss_hash_key) { if (rss_conf->rss_key) { @@ -1315,6 +1429,10 @@ static int bnxt_flow_ctrl_get_op(struct rte_eth_dev *dev, struct rte_eth_link link_info; int rc; + rc = is_bnxt_in_error(bp); + if (rc) + return rc; + rc = bnxt_get_hwrm_link_config(bp, &link_info); if (rc) return rc; @@ -1344,6 +1462,11 @@ static int bnxt_flow_ctrl_set_op(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf) { struct bnxt *bp = dev->data->dev_private; + int rc; + + rc = is_bnxt_in_error(bp); + if (rc) + return rc; if (!BNXT_SINGLE_PF(bp) || BNXT_VF(bp)) { PMD_DRV_LOG(ERR, "Flow Control Settings cannot be modified\n"); @@ -1403,6 +1526,10 @@ bnxt_udp_tunnel_port_add_op(struct rte_eth_dev *eth_dev, uint16_t tunnel_type = 0; int rc = 0; + rc = is_bnxt_in_error(bp); + if (rc) + return rc; + switch (udp_tunnel->prot_type) { case RTE_TUNNEL_TYPE_VXLAN: if (bp->vxlan_port_cnt) { @@ -1452,6 +1579,10 @@ bnxt_udp_tunnel_port_del_op(struct rte_eth_dev *eth_dev, uint16_t port = 0; int rc = 0; + rc = is_bnxt_in_error(bp); + if (rc) + return rc; + switch (udp_tunnel->prot_type) { case RTE_TUNNEL_TYPE_VXLAN: if (!bp->vxlan_port_cnt) { @@ -1605,6 +1736,11 @@ static int bnxt_vlan_filter_set_op(struct rte_eth_dev *eth_dev, uint16_t vlan_id, int on) { struct bnxt *bp = eth_dev->data->dev_private; + int rc; + + rc = is_bnxt_in_error(bp); + if (rc) + return rc; /* These operations apply to ALL existing MAC/VLAN filters */ if (on) @@ -1619,6 +1755,11 @@ bnxt_vlan_offload_set_op(struct rte_eth_dev *dev, int mask) struct bnxt *bp = dev->data->dev_private; uint64_t rx_offloads = dev->data->dev_conf.rxmode.offloads; unsigned int i; + int rc; + + rc = is_bnxt_in_error(bp); + if (rc) + return rc; if (mask & ETH_VLAN_FILTER_MASK) { if (!(rx_offloads & DEV_RX_OFFLOAD_VLAN_FILTER)) { @@ -1660,6 +1801,10 @@ bnxt_set_default_mac_addr_op(struct rte_eth_dev *dev, struct bnxt_filter_info 
*filter; int rc; + rc = is_bnxt_in_error(bp); + if (rc) + return rc; + if (BNXT_VF(bp) && !BNXT_VF_IS_TRUSTED(bp)) return -EPERM; @@ -1699,6 +1844,11 @@ bnxt_dev_set_mc_addr_list_op(struct rte_eth_dev *eth_dev, char *mc_addr_list = (char *)mc_addr_set; struct bnxt_vnic_info *vnic; uint32_t off = 0, i = 0; + int rc; + + rc = is_bnxt_in_error(bp); + if (rc) + return rc; vnic = &bp->vnic_info[0]; @@ -1784,10 +1934,18 @@ static int bnxt_mtu_set_op(struct rte_eth_dev *eth_dev, uint16_t new_mtu) uint32_t rc = 0; uint32_t i; + rc = is_bnxt_in_error(bp); + if (rc) + return rc; + new_pkt_size = new_mtu + RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN + VLAN_TAG_SIZE * BNXT_NUM_VLANS; - bnxt_dev_info_get_op(eth_dev, &dev_info); + rc = bnxt_dev_info_get_op(eth_dev, &dev_info); + if (rc != 0) { + PMD_DRV_LOG(ERR, "Error during getting ethernet device info\n"); + return rc; + } if (new_mtu < RTE_ETHER_MIN_MTU || new_mtu > BNXT_MAX_MTU) { PMD_DRV_LOG(ERR, "MTU requested must be within (%d, %d)\n", @@ -1857,6 +2015,10 @@ bnxt_vlan_pvid_set_op(struct rte_eth_dev *dev, uint16_t pvid, int on) uint16_t vlan = bp->vlan; int rc; + rc = is_bnxt_in_error(bp); + if (rc) + return rc; + if (!BNXT_SINGLE_PF(bp) || BNXT_VF(bp)) { PMD_DRV_LOG(ERR, "PVID cannot be modified for this function\n"); @@ -1874,6 +2036,11 @@ static int bnxt_dev_led_on_op(struct rte_eth_dev *dev) { struct bnxt *bp = dev->data->dev_private; + int rc; + + rc = is_bnxt_in_error(bp); + if (rc) + return rc; return bnxt_hwrm_port_led_cfg(bp, true); } @@ -1882,6 +2049,11 @@ static int bnxt_dev_led_off_op(struct rte_eth_dev *dev) { struct bnxt *bp = dev->data->dev_private; + int rc; + + rc = is_bnxt_in_error(bp); + if (rc) + return rc; return bnxt_hwrm_port_led_cfg(bp, false); } @@ -1889,6 +2061,7 @@ bnxt_dev_led_off_op(struct rte_eth_dev *dev) static uint32_t bnxt_rx_queue_count_op(struct rte_eth_dev *dev, uint16_t rx_queue_id) { + struct bnxt *bp = (struct bnxt *)dev->data->dev_private; uint32_t desc = 0, raw_cons = 0, cons; struct bnxt_cp_ring_info *cpr; struct bnxt_rx_queue *rxq; @@ -1896,6 +2069,11 @@ bnxt_rx_queue_count_op(struct rte_eth_dev *dev, uint16_t rx_queue_id) uint16_t cmp_type; uint8_t cmp = 1; bool valid; + int rc; + + rc = is_bnxt_in_error(bp); + if (rc) + return rc; rxq = dev->data->rx_queues[rx_queue_id]; cpr = rxq->cp_ring; @@ -1940,10 +2118,15 @@ bnxt_rx_descriptor_status_op(void *rx_queue, uint16_t offset) struct bnxt_sw_rx_bd *rx_buf; struct rx_pkt_cmpl *rxcmp; uint32_t cons, cp_cons; + int rc; if (!rxq) return -EINVAL; + rc = is_bnxt_in_error(rxq->bp); + if (rc) + return rc; + cpr = rxq->cp_ring; rxr = rxq->rx_ring; @@ -1978,10 +2161,15 @@ bnxt_tx_descriptor_status_op(void *tx_queue, uint16_t offset) struct bnxt_sw_tx_bd *tx_buf; struct tx_pkt_cmpl *txcmp; uint32_t cons, cp_cons; + int rc; if (!txq) return -EINVAL; + rc = is_bnxt_in_error(txq->bp); + if (rc) + return rc; + cpr = txq->cp_ring; txr = txq->tx_ring; @@ -2811,6 +2999,10 @@ bnxt_filter_ctrl_op(struct rte_eth_dev *dev __rte_unused, { int ret = 0; + ret = is_bnxt_in_error(dev->data->dev_private); + if (ret) + return ret; + switch (filter_type) { case RTE_ETH_FILTER_TUNNEL: PMD_DRV_LOG(ERR, @@ -3126,6 +3318,10 @@ bnxt_get_eeprom_length_op(struct rte_eth_dev *dev) uint32_t dir_entries; uint32_t entry_length; + rc = is_bnxt_in_error(bp); + if (rc) + return rc; + PMD_DRV_LOG(INFO, "%04x:%02x:%02x:%02x\n", bp->pdev->addr.domain, bp->pdev->addr.bus, bp->pdev->addr.devid, bp->pdev->addr.function); @@ -3144,6 +3340,11 @@ bnxt_get_eeprom_op(struct rte_eth_dev *dev, struct bnxt *bp = 
dev->data->dev_private; uint32_t index; uint32_t offset; + int rc; + + rc = is_bnxt_in_error(bp); + if (rc) + return rc; PMD_DRV_LOG(INFO, "%04x:%02x:%02x:%02x in_eeprom->offset = %d " "len = %d\n", bp->pdev->addr.domain, @@ -3215,6 +3416,11 @@ bnxt_set_eeprom_op(struct rte_eth_dev *dev, struct bnxt *bp = dev->data->dev_private; uint8_t index, dir_op; uint16_t type, ext, ordinal, attr; + int rc; + + rc = is_bnxt_in_error(bp); + if (rc) + return rc; PMD_DRV_LOG(INFO, "%04x:%02x:%02x:%02x in_eeprom->offset = %d " "len = %d\n", bp->pdev->addr.domain, @@ -3326,6 +3532,89 @@ static const struct eth_dev_ops bnxt_dev_ops = { .timesync_read_tx_timestamp = bnxt_timesync_read_tx_timestamp, }; +static void bnxt_dev_cleanup(struct bnxt *bp) +{ + bnxt_set_hwrm_link_config(bp, false); + bp->link_info.link_up = 0; + if (bp->dev_stopped == 0) + bnxt_dev_stop_op(bp->eth_dev); + + bnxt_uninit_resources(bp, true); +} + +static int bnxt_restore_filters(struct bnxt *bp) +{ + struct rte_eth_dev *dev = bp->eth_dev; + int ret = 0; + + if (dev->data->all_multicast) + ret = bnxt_allmulticast_enable_op(dev); + if (dev->data->promiscuous) + ret = bnxt_promiscuous_enable_op(dev); + + /* TODO restore other filters as well */ + return ret; +} + +static void bnxt_dev_recover(void *arg) +{ + struct bnxt *bp = arg; + int timeout = bp->fw_reset_max_msecs; + int rc = 0; + + do { + rc = bnxt_hwrm_ver_get(bp); + if (rc == 0) + break; + rte_delay_ms(BNXT_FW_READY_WAIT_INTERVAL); + timeout -= BNXT_FW_READY_WAIT_INTERVAL; + } while (rc && timeout); + + if (rc) { + PMD_DRV_LOG(ERR, "FW is not Ready after reset\n"); + goto err; + } + + rc = bnxt_init_resources(bp, true); + if (rc) { + PMD_DRV_LOG(ERR, + "Failed to initialize resources after reset\n"); + goto err; + } + /* clear reset flag as the device is initialized now */ + bp->flags &= ~BNXT_FLAG_FW_RESET; + + rc = bnxt_dev_start_op(bp->eth_dev); + if (rc) { + PMD_DRV_LOG(ERR, "Failed to start port after reset\n"); + goto err; + } + + rc = bnxt_restore_filters(bp); + if (rc) + goto err; + + PMD_DRV_LOG(INFO, "Recovered from FW reset\n"); + return; +err: + bp->flags |= BNXT_FLAG_FATAL_ERROR; + bnxt_uninit_resources(bp, false); + PMD_DRV_LOG(ERR, "Failed to recover from FW reset\n"); +} + +void bnxt_dev_reset_and_resume(void *arg) +{ + struct bnxt *bp = arg; + int rc; + + bnxt_dev_cleanup(bp); + + rc = rte_eal_alarm_set(US_PER_MS * bp->fw_reset_min_msecs, + bnxt_dev_recover, (void *)bp); + if (rc) + PMD_DRV_LOG(ERR, "Error setting recovery alarm"); +} + static bool bnxt_vf_pciid(uint16_t id) { if (id == BROADCOM_DEV_ID_57304_VF || @@ -3399,8 +3688,9 @@ static int bnxt_alloc_ctx_mem_blk(__rte_unused struct bnxt *bp, valid_bits = PTU_PTE_VALID; if (rmem->nr_pages > 1) { - snprintf(mz_name, RTE_MEMZONE_NAMESIZE, "bnxt_ctx_pg_tbl%s_%x", - suffix, idx); + snprintf(mz_name, RTE_MEMZONE_NAMESIZE, + "bnxt_ctx_pg_tbl%s_%x_%d", + suffix, idx, bp->eth_dev->data->port_id); mz_name[RTE_MEMZONE_NAMESIZE - 1] = 0; mz = rte_memzone_lookup(mz_name); if (!mz) { @@ -3436,7 +3726,8 @@ static int bnxt_alloc_ctx_mem_blk(__rte_unused struct bnxt *bp, rmem->pg_tbl_mz = mz; } - snprintf(mz_name, RTE_MEMZONE_NAMESIZE, "bnxt_ctx_%s_%x", suffix, idx); + snprintf(mz_name, RTE_MEMZONE_NAMESIZE, "bnxt_ctx_%s_%x_%d", + suffix, idx, bp->eth_dev->data->port_id); mz = rte_memzone_lookup(mz_name); if (!mz) { mz = rte_memzone_reserve_aligned(mz_name, @@ -3766,19 +4057,161 @@ static int bnxt_setup_mac_addr(struct rte_eth_dev *eth_dev) return rc; } +static int bnxt_restore_dflt_mac(struct bnxt *bp) +{ + int rc = 0; + + 
/* MAC is already configured in FW */ + if (!bnxt_check_zero_bytes(bp->dflt_mac_addr, RTE_ETHER_ADDR_LEN)) + return 0; + + /* Restore the old MAC configured */ + rc = bnxt_hwrm_set_mac(bp); + if (rc) + PMD_DRV_LOG(ERR, "Failed to restore MAC address\n"); + + return rc; +} + +static void bnxt_config_vf_req_fwd(struct bnxt *bp) +{ + if (!BNXT_PF(bp)) + return; + #define ALLOW_FUNC(x) \ { \ uint32_t arg = (x); \ bp->pf.vf_req_fwd[((arg) >> 5)] &= \ ~rte_cpu_to_le_32(1 << ((arg) & 0x1f)); \ } + + /* Forward all requests if firmware is new enough */ + if (((bp->fw_ver >= ((20 << 24) | (6 << 16) | (100 << 8))) && + (bp->fw_ver < ((20 << 24) | (7 << 16)))) || + ((bp->fw_ver >= ((20 << 24) | (8 << 16))))) { + memset(bp->pf.vf_req_fwd, 0xff, sizeof(bp->pf.vf_req_fwd)); + } else { + PMD_DRV_LOG(WARNING, + "Firmware too old for VF mailbox functionality\n"); + memset(bp->pf.vf_req_fwd, 0, sizeof(bp->pf.vf_req_fwd)); + } + + /* + * The following are used for driver cleanup. If we disallow these, + * VF drivers can't clean up cleanly. + */ + ALLOW_FUNC(HWRM_FUNC_DRV_UNRGTR); + ALLOW_FUNC(HWRM_VNIC_FREE); + ALLOW_FUNC(HWRM_RING_FREE); + ALLOW_FUNC(HWRM_RING_GRP_FREE); + ALLOW_FUNC(HWRM_VNIC_RSS_COS_LB_CTX_FREE); + ALLOW_FUNC(HWRM_CFA_L2_FILTER_FREE); + ALLOW_FUNC(HWRM_STAT_CTX_FREE); + ALLOW_FUNC(HWRM_PORT_PHY_QCFG); + ALLOW_FUNC(HWRM_VNIC_TPA_CFG); +} + +static int bnxt_init_fw(struct bnxt *bp) +{ + uint16_t mtu; + int rc = 0; + + rc = bnxt_hwrm_ver_get(bp); + if (rc) + return rc; + + rc = bnxt_hwrm_func_reset(bp); + if (rc) + return -EIO; + + rc = bnxt_hwrm_queue_qportcfg(bp); + if (rc) + return rc; + + /* Get the MAX capabilities for this function */ + rc = bnxt_hwrm_func_qcaps(bp); + if (rc) + return rc; + + rc = bnxt_hwrm_func_qcfg(bp, &mtu); + if (rc) + return rc; + + if (mtu >= RTE_ETHER_MIN_MTU && mtu <= BNXT_MAX_MTU && + mtu != bp->eth_dev->data->mtu) + bp->eth_dev->data->mtu = mtu; + + bnxt_hwrm_port_led_qcaps(bp); + + return 0; +} + +static int bnxt_init_resources(struct bnxt *bp, bool reconfig_dev) +{ + int rc; + + rc = bnxt_init_fw(bp); + if (rc) + return rc; + + if (!reconfig_dev) { + rc = bnxt_setup_mac_addr(bp->eth_dev); + if (rc) + return rc; + } else { + rc = bnxt_restore_dflt_mac(bp); + if (rc) + return rc; + } + + bnxt_config_vf_req_fwd(bp); + + rc = bnxt_hwrm_func_driver_register(bp); + if (rc) { + PMD_DRV_LOG(ERR, "Failed to register driver"); + return -EBUSY; + } + + if (BNXT_PF(bp)) { + if (bp->pdev->max_vfs) { + rc = bnxt_hwrm_allocate_vfs(bp, bp->pdev->max_vfs); + if (rc) { + PMD_DRV_LOG(ERR, "Failed to allocate VFs\n"); + return rc; + } + } else { + rc = bnxt_hwrm_allocate_pf_only(bp); + if (rc) { + PMD_DRV_LOG(ERR, + "Failed to allocate PF resources"); + return rc; + } + } + } + + rc = bnxt_alloc_mem(bp, reconfig_dev); + if (rc) + return rc; + + rc = bnxt_setup_int(bp); + if (rc) + return rc; + + bnxt_init_nic(bp); + + rc = bnxt_request_int(bp); + if (rc) + return rc; + + return 0; +} + static int bnxt_dev_init(struct rte_eth_dev *eth_dev) { struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev); static int version_printed; struct bnxt *bp; - uint16_t mtu; int rc; if (version_printed++ == 0) @@ -3820,166 +4253,51 @@ bnxt_dev_init(struct rte_eth_dev *eth_dev) rc = bnxt_init_board(eth_dev); if (rc) { PMD_DRV_LOG(ERR, - "Board initialization failed rc: %x\n", rc); - goto error; + "Failed to initialize board rc: %x\n", rc); + return rc; } rc = bnxt_alloc_hwrm_resources(bp); if (rc) { PMD_DRV_LOG(ERR, - "hwrm resource allocation failure rc: %x\n", rc); + "Failed to allocate 
hwrm resource rc: %x\n", rc); goto error_free; } - rc = bnxt_hwrm_ver_get(bp); + rc = bnxt_init_resources(bp, false); if (rc) goto error_free; - rc = bnxt_hwrm_func_reset(bp); - if (rc) { - PMD_DRV_LOG(ERR, "hwrm chip reset failure rc: %x\n", rc); - rc = -EIO; - goto error_free; - } - - rc = bnxt_hwrm_queue_qportcfg(bp); - if (rc) { - PMD_DRV_LOG(ERR, "hwrm queue qportcfg failed\n"); - goto error_free; - } - /* Get the MAX capabilities for this function */ - rc = bnxt_hwrm_func_qcaps(bp); - if (rc) { - PMD_DRV_LOG(ERR, "hwrm query capability failure rc: %x\n", rc); - goto error_free; - } - rc = bnxt_alloc_stats_mem(bp); if (rc) goto error_free; - if (bp->max_tx_rings == 0) { - PMD_DRV_LOG(ERR, "No TX rings available!\n"); - rc = -EBUSY; - goto error_free; - } - - rc = bnxt_setup_mac_addr(eth_dev); - if (rc) - goto error_free; - - /* THOR does not support ring groups. - * But we will use the array to save RSS context IDs. - */ - if (BNXT_CHIP_THOR(bp)) { - bp->max_ring_grps = BNXT_MAX_RSS_CTXTS_THOR; - } else if (bp->max_ring_grps < bp->rx_cp_nr_rings) { - /* 1 ring is for default completion ring */ - PMD_DRV_LOG(ERR, "Insufficient resource: Ring Group\n"); - rc = -ENOSPC; - goto error_free; - } - - if (BNXT_HAS_RING_GRPS(bp)) { - bp->grp_info = rte_zmalloc("bnxt_grp_info", - sizeof(*bp->grp_info) * - bp->max_ring_grps, 0); - if (!bp->grp_info) { - PMD_DRV_LOG(ERR, - "Failed to alloc %zu bytes for grp info tbl.\n", - sizeof(*bp->grp_info) * bp->max_ring_grps); - rc = -ENOMEM; - goto error_free; - } - } - - /* Forward all requests if firmware is new enough */ - if (((bp->fw_ver >= ((20 << 24) | (6 << 16) | (100 << 8))) && - (bp->fw_ver < ((20 << 24) | (7 << 16)))) || - ((bp->fw_ver >= ((20 << 24) | (8 << 16))))) { - memset(bp->pf.vf_req_fwd, 0xff, sizeof(bp->pf.vf_req_fwd)); - } else { - PMD_DRV_LOG(WARNING, - "Firmware too old for VF mailbox functionality\n"); - memset(bp->pf.vf_req_fwd, 0, sizeof(bp->pf.vf_req_fwd)); - } - - /* - * The following are used for driver cleanup. If we disallow these, - * VF drivers can't clean up cleanly. - */ - ALLOW_FUNC(HWRM_FUNC_DRV_UNRGTR); - ALLOW_FUNC(HWRM_VNIC_FREE); - ALLOW_FUNC(HWRM_RING_FREE); - ALLOW_FUNC(HWRM_RING_GRP_FREE); - ALLOW_FUNC(HWRM_VNIC_RSS_COS_LB_CTX_FREE); - ALLOW_FUNC(HWRM_CFA_L2_FILTER_FREE); - ALLOW_FUNC(HWRM_STAT_CTX_FREE); - ALLOW_FUNC(HWRM_PORT_PHY_QCFG); - ALLOW_FUNC(HWRM_VNIC_TPA_CFG); - rc = bnxt_hwrm_func_driver_register(bp); - if (rc) { - PMD_DRV_LOG(ERR, - "Failed to register driver"); - rc = -EBUSY; - goto error_free; - } - PMD_DRV_LOG(INFO, - DRV_MODULE_NAME " found at mem %" PRIx64 ", node addr %pM\n", - pci_dev->mem_resource[0].phys_addr, - pci_dev->mem_resource[0].addr); - - rc = bnxt_hwrm_func_qcfg(bp, &mtu); - if (rc) { - PMD_DRV_LOG(ERR, "hwrm func qcfg failed\n"); - goto error_free; - } - - if (mtu >= RTE_ETHER_MIN_MTU && mtu <= BNXT_MAX_MTU && - mtu != eth_dev->data->mtu) - eth_dev->data->mtu = mtu; - - if (BNXT_PF(bp)) { - //if (bp->pf.active_vfs) { - // TODO: Deallocate VF resources? 
- //} - if (bp->pdev->max_vfs) { - rc = bnxt_hwrm_allocate_vfs(bp, bp->pdev->max_vfs); - if (rc) { - PMD_DRV_LOG(ERR, "Failed to allocate VFs\n"); - goto error_free; - } - } else { - rc = bnxt_hwrm_allocate_pf_only(bp); - if (rc) { - PMD_DRV_LOG(ERR, - "Failed to allocate PF resources\n"); - goto error_free; - } - } - } - - bnxt_hwrm_port_led_qcaps(bp); - - rc = bnxt_setup_int(bp); - if (rc) - goto error_free; - - rc = bnxt_alloc_mem(bp); - if (rc) - goto error_free; - - bnxt_init_nic(bp); - - rc = bnxt_request_int(bp); - if (rc) - goto error_free; + DRV_MODULE_NAME "found at mem %" PRIX64 ", node addr %pM\n", + pci_dev->mem_resource[0].phys_addr, + pci_dev->mem_resource[0].addr); return 0; error_free: bnxt_dev_uninit(eth_dev); -error: + return rc; +} + +static int +bnxt_uninit_resources(struct bnxt *bp, bool reconfig_dev) +{ + int rc; + + bnxt_disable_int(bp); + bnxt_free_int(bp); + bnxt_free_mem(bp, reconfig_dev); + bnxt_hwrm_func_buf_unrgtr(bp); + rc = bnxt_hwrm_func_driver_unregister(bp, 0); + bp->flags &= ~BNXT_FLAG_REGISTERED; + bnxt_free_ctx_mem(bp); + if (!reconfig_dev) + bnxt_free_hwrm_resources(bp); + return rc; } @@ -3993,18 +4311,13 @@ bnxt_dev_uninit(struct rte_eth_dev *eth_dev) return -EPERM; PMD_DRV_LOG(DEBUG, "Calling Device uninit\n"); - bnxt_disable_int(bp); - bnxt_free_int(bp); - bnxt_free_mem(bp); - bnxt_hwrm_func_buf_unrgtr(bp); + rc = bnxt_uninit_resources(bp, false); if (bp->grp_info != NULL) { rte_free(bp->grp_info); bp->grp_info = NULL; } - rc = bnxt_hwrm_func_driver_unregister(bp, 0); - bnxt_free_hwrm_resources(bp); if (bp->tx_mem_zone) { rte_memzone_free((const struct rte_memzone *)bp->tx_mem_zone); @@ -4020,7 +4333,6 @@ bnxt_dev_uninit(struct rte_eth_dev *eth_dev) bnxt_dev_close_op(eth_dev); if (bp->pf.vf_info) rte_free(bp->pf.vf_info); - bnxt_free_ctx_mem(bp); eth_dev->dev_ops = NULL; eth_dev->rx_pkt_burst = NULL; eth_dev->tx_pkt_burst = NULL;
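
A minimal standalone sketch of the health-check guard this patch threads through the dev_ops: is_bnxt_in_error() reports -EIO once a fatal error is latched and -EBUSY while a firmware reset is in flight, and each control-path callback now returns early on either. The flag values, struct, and sample op below are illustrative stand-ins (the real definitions live in bnxt.h); this is not the driver's code, only the shape of the checks added above.

#include <errno.h>
#include <stdint.h>
#include <stdio.h>

/* Illustrative stand-ins; the real BNXT_FLAG_* values are defined in bnxt.h. */
#define BNXT_FLAG_FW_RESET     (1U << 0)
#define BNXT_FLAG_FATAL_ERROR  (1U << 1)

struct bnxt_sketch {
	uint32_t flags;
};

/* Same contract as is_bnxt_in_error(): 0 when healthy, negative errno otherwise. */
static int sketch_in_error(struct bnxt_sketch *bp)
{
	if (bp->flags & BNXT_FLAG_FATAL_ERROR)
		return -EIO;    /* unrecoverable, fail the op */
	if (bp->flags & BNXT_FLAG_FW_RESET)
		return -EBUSY;  /* firmware reset in progress, caller may retry */
	return 0;
}

/* Every control-path op checks the guard before touching device state. */
static int sketch_dev_op(struct bnxt_sketch *bp)
{
	int rc = sketch_in_error(bp);

	if (rc)
		return rc;
	/* ... real work against firmware/hardware would go here ... */
	return 0;
}

int main(void)
{
	struct bnxt_sketch bp = { .flags = BNXT_FLAG_FW_RESET };

	printf("op while resetting -> %d\n", sketch_dev_op(&bp)); /* -EBUSY (-16) */
	bp.flags = 0;
	printf("op when healthy   -> %d\n", sketch_dev_op(&bp));  /* 0 */
	return 0;
}

The recovery side of the patch uses the same flags: bnxt_dev_reset_and_resume() tears the port down, arms an rte_eal_alarm_set() callback that polls bnxt_hwrm_ver_get() until firmware responds, then re-initializes resources, clears BNXT_FLAG_FW_RESET, restarts the port and restores promiscuous/allmulticast state; if any step fails it sets BNXT_FLAG_FATAL_ERROR, so the guard above keeps failing subsequent ops.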