X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=drivers%2Fnet%2Fbnxt%2Fbnxt_ethdev.c;h=e8b4c058a16377c761480756c1acacd08c161b47;hb=9311beeea4963d8b97330c20ff1736b006486c5d;hp=d29671d64ff043329b4c1a202d5525de44232e61;hpb=74bcfc062489186c72ccddcfd7cca6c8e7a32286;p=dpdk.git diff --git a/drivers/net/bnxt/bnxt_ethdev.c b/drivers/net/bnxt/bnxt_ethdev.c index d29671d64f..e8b4c058a1 100644 --- a/drivers/net/bnxt/bnxt_ethdev.c +++ b/drivers/net/bnxt/bnxt_ethdev.c @@ -155,6 +155,7 @@ static int bnxt_uninit_resources(struct bnxt *bp, bool reconfig_dev); static void bnxt_cancel_fw_health_check(struct bnxt *bp); static int bnxt_restore_vlan_filters(struct bnxt *bp); static void bnxt_dev_recover(void *arg); +static void bnxt_free_error_recovery_info(struct bnxt *bp); int is_bnxt_in_error(struct bnxt *bp) { @@ -190,6 +191,34 @@ static uint16_t bnxt_rss_hash_tbl_size(const struct bnxt *bp) return bnxt_rss_ctxts(bp) * BNXT_RSS_ENTRIES_PER_CTX_THOR; } +static void bnxt_free_pf_info(struct bnxt *bp) +{ + rte_free(bp->pf); +} + +static void bnxt_free_link_info(struct bnxt *bp) +{ + rte_free(bp->link_info); +} + +static void bnxt_free_leds_info(struct bnxt *bp) +{ + rte_free(bp->leds); + bp->leds = NULL; +} + +static void bnxt_free_flow_stats_info(struct bnxt *bp) +{ + rte_free(bp->flow_stat); + bp->flow_stat = NULL; +} + +static void bnxt_free_cos_queues(struct bnxt *bp) +{ + rte_free(bp->rx_cos_queue); + rte_free(bp->tx_cos_queue); +} + static void bnxt_free_mem(struct bnxt *bp, bool reconfig) { bnxt_free_filter_mem(bp); @@ -212,6 +241,67 @@ static void bnxt_free_mem(struct bnxt *bp, bool reconfig) bp->grp_info = NULL; } +static int bnxt_alloc_pf_info(struct bnxt *bp) +{ + bp->pf = rte_zmalloc("bnxt_pf_info", sizeof(struct bnxt_pf_info), 0); + if (bp->pf == NULL) + return -ENOMEM; + + return 0; +} + +static int bnxt_alloc_link_info(struct bnxt *bp) +{ + bp->link_info = + rte_zmalloc("bnxt_link_info", sizeof(struct bnxt_link_info), 0); + if (bp->link_info == NULL) + return -ENOMEM; + + return 0; +} + +static int bnxt_alloc_leds_info(struct bnxt *bp) +{ + bp->leds = rte_zmalloc("bnxt_leds", + BNXT_MAX_LED * sizeof(struct bnxt_led_info), + 0); + if (bp->leds == NULL) + return -ENOMEM; + + return 0; +} + +static int bnxt_alloc_cos_queues(struct bnxt *bp) +{ + bp->rx_cos_queue = + rte_zmalloc("bnxt_rx_cosq", + BNXT_COS_QUEUE_COUNT * + sizeof(struct bnxt_cos_queue_info), + 0); + if (bp->rx_cos_queue == NULL) + return -ENOMEM; + + bp->tx_cos_queue = + rte_zmalloc("bnxt_tx_cosq", + BNXT_COS_QUEUE_COUNT * + sizeof(struct bnxt_cos_queue_info), + 0); + if (bp->tx_cos_queue == NULL) + return -ENOMEM; + + return 0; +} + +static int bnxt_alloc_flow_stats_info(struct bnxt *bp) +{ + bp->flow_stat = rte_zmalloc("bnxt_flow_xstat", + sizeof(struct bnxt_flow_stat_info), 0); + if (bp->flow_stat == NULL) + return -ENOMEM; + + return 0; +} + static int bnxt_alloc_mem(struct bnxt *bp, bool reconfig) { int rc; @@ -244,6 +334,12 @@ static int bnxt_alloc_mem(struct bnxt *bp, bool reconfig) if (rc) goto alloc_mem_err; + if (BNXT_FLOW_XSTATS_EN(bp)) { + rc = bnxt_alloc_flow_stats_info(bp); + if (rc) + goto alloc_mem_err; + } + return 0; alloc_mem_err: @@ -345,68 +441,72 @@ static int bnxt_register_fc_ctx_mem(struct bnxt *bp) { int rc = 0; - rc = bnxt_hwrm_ctx_rgtr(bp, bp->rx_fc_in_tbl.dma, - &bp->rx_fc_in_tbl.ctx_id); + rc = bnxt_hwrm_ctx_rgtr(bp, bp->flow_stat->rx_fc_in_tbl.dma, + &bp->flow_stat->rx_fc_in_tbl.ctx_id); if (rc) return rc; PMD_DRV_LOG(DEBUG, "rx_fc_in_tbl.va = %p rx_fc_in_tbl.dma = %p" " rx_fc_in_tbl.ctx_id = 
%d\n", - bp->rx_fc_in_tbl.va, - (void *)((uintptr_t)bp->rx_fc_in_tbl.dma), - bp->rx_fc_in_tbl.ctx_id); + bp->flow_stat->rx_fc_in_tbl.va, + (void *)((uintptr_t)bp->flow_stat->rx_fc_in_tbl.dma), + bp->flow_stat->rx_fc_in_tbl.ctx_id); - rc = bnxt_hwrm_ctx_rgtr(bp, bp->rx_fc_out_tbl.dma, - &bp->rx_fc_out_tbl.ctx_id); + rc = bnxt_hwrm_ctx_rgtr(bp, bp->flow_stat->rx_fc_out_tbl.dma, + &bp->flow_stat->rx_fc_out_tbl.ctx_id); if (rc) return rc; PMD_DRV_LOG(DEBUG, "rx_fc_out_tbl.va = %p rx_fc_out_tbl.dma = %p" " rx_fc_out_tbl.ctx_id = %d\n", - bp->rx_fc_out_tbl.va, - (void *)((uintptr_t)bp->rx_fc_out_tbl.dma), - bp->rx_fc_out_tbl.ctx_id); + bp->flow_stat->rx_fc_out_tbl.va, + (void *)((uintptr_t)bp->flow_stat->rx_fc_out_tbl.dma), + bp->flow_stat->rx_fc_out_tbl.ctx_id); - rc = bnxt_hwrm_ctx_rgtr(bp, bp->tx_fc_in_tbl.dma, - &bp->tx_fc_in_tbl.ctx_id); + rc = bnxt_hwrm_ctx_rgtr(bp, bp->flow_stat->tx_fc_in_tbl.dma, + &bp->flow_stat->tx_fc_in_tbl.ctx_id); if (rc) return rc; PMD_DRV_LOG(DEBUG, "tx_fc_in_tbl.va = %p tx_fc_in_tbl.dma = %p" " tx_fc_in_tbl.ctx_id = %d\n", - bp->tx_fc_in_tbl.va, - (void *)((uintptr_t)bp->tx_fc_in_tbl.dma), - bp->tx_fc_in_tbl.ctx_id); + bp->flow_stat->tx_fc_in_tbl.va, + (void *)((uintptr_t)bp->flow_stat->tx_fc_in_tbl.dma), + bp->flow_stat->tx_fc_in_tbl.ctx_id); - rc = bnxt_hwrm_ctx_rgtr(bp, bp->tx_fc_out_tbl.dma, - &bp->tx_fc_out_tbl.ctx_id); + rc = bnxt_hwrm_ctx_rgtr(bp, bp->flow_stat->tx_fc_out_tbl.dma, + &bp->flow_stat->tx_fc_out_tbl.ctx_id); if (rc) return rc; PMD_DRV_LOG(DEBUG, "tx_fc_out_tbl.va = %p tx_fc_out_tbl.dma = %p" " tx_fc_out_tbl.ctx_id = %d\n", - bp->tx_fc_out_tbl.va, - (void *)((uintptr_t)bp->tx_fc_out_tbl.dma), - bp->tx_fc_out_tbl.ctx_id); + bp->flow_stat->tx_fc_out_tbl.va, + (void *)((uintptr_t)bp->flow_stat->tx_fc_out_tbl.dma), + bp->flow_stat->tx_fc_out_tbl.ctx_id); - memset(bp->rx_fc_out_tbl.va, 0, bp->rx_fc_out_tbl.size); + memset(bp->flow_stat->rx_fc_out_tbl.va, + 0, + bp->flow_stat->rx_fc_out_tbl.size); rc = bnxt_hwrm_cfa_counter_cfg(bp, BNXT_DIR_RX, CFA_COUNTER_CFG_IN_COUNTER_TYPE_FC, - bp->rx_fc_out_tbl.ctx_id, - bp->max_fc, + bp->flow_stat->rx_fc_out_tbl.ctx_id, + bp->flow_stat->max_fc, true); if (rc) return rc; - memset(bp->tx_fc_out_tbl.va, 0, bp->tx_fc_out_tbl.size); + memset(bp->flow_stat->tx_fc_out_tbl.va, + 0, + bp->flow_stat->tx_fc_out_tbl.size); rc = bnxt_hwrm_cfa_counter_cfg(bp, BNXT_DIR_TX, CFA_COUNTER_CFG_IN_COUNTER_TYPE_FC, - bp->tx_fc_out_tbl.ctx_id, - bp->max_fc, + bp->flow_stat->tx_fc_out_tbl.ctx_id, + bp->flow_stat->max_fc, true); return rc; @@ -437,33 +537,41 @@ static int bnxt_init_fc_ctx_mem(struct bnxt *bp) uint16_t max_fc; int rc = 0; - max_fc = bp->max_fc; + max_fc = bp->flow_stat->max_fc; sprintf(type, "bnxt_rx_fc_in_" PCI_PRI_FMT, pdev->addr.domain, pdev->addr.bus, pdev->addr.devid, pdev->addr.function); /* 4 bytes for each counter-id */ - rc = bnxt_alloc_ctx_mem_buf(type, max_fc * 4, &bp->rx_fc_in_tbl); + rc = bnxt_alloc_ctx_mem_buf(type, + max_fc * 4, + &bp->flow_stat->rx_fc_in_tbl); if (rc) return rc; sprintf(type, "bnxt_rx_fc_out_" PCI_PRI_FMT, pdev->addr.domain, pdev->addr.bus, pdev->addr.devid, pdev->addr.function); /* 16 bytes for each counter - 8 bytes pkt_count, 8 bytes byte_count */ - rc = bnxt_alloc_ctx_mem_buf(type, max_fc * 16, &bp->rx_fc_out_tbl); + rc = bnxt_alloc_ctx_mem_buf(type, + max_fc * 16, + &bp->flow_stat->rx_fc_out_tbl); if (rc) return rc; sprintf(type, "bnxt_tx_fc_in_" PCI_PRI_FMT, pdev->addr.domain, pdev->addr.bus, pdev->addr.devid, pdev->addr.function); /* 4 bytes for each counter-id */ - rc = 
bnxt_alloc_ctx_mem_buf(type, max_fc * 4, &bp->tx_fc_in_tbl); + rc = bnxt_alloc_ctx_mem_buf(type, + max_fc * 4, + &bp->flow_stat->tx_fc_in_tbl); if (rc) return rc; sprintf(type, "bnxt_tx_fc_out_" PCI_PRI_FMT, pdev->addr.domain, pdev->addr.bus, pdev->addr.devid, pdev->addr.function); /* 16 bytes for each counter - 8 bytes pkt_count, 8 bytes byte_count */ - rc = bnxt_alloc_ctx_mem_buf(type, max_fc * 16, &bp->tx_fc_out_tbl); + rc = bnxt_alloc_ctx_mem_buf(type, + max_fc * 16, + &bp->flow_stat->tx_fc_out_tbl); if (rc) return rc; @@ -477,10 +585,11 @@ static int bnxt_init_ctx_mem(struct bnxt *bp) int rc = 0; if (!(bp->fw_cap & BNXT_FW_CAP_ADV_FLOW_COUNTERS) || - !(BNXT_PF(bp) || BNXT_VF_IS_TRUSTED(bp))) + !(BNXT_PF(bp) || BNXT_VF_IS_TRUSTED(bp)) || + !BNXT_FLOW_XSTATS_EN(bp)) return 0; - rc = bnxt_hwrm_cfa_counter_qcaps(bp, &bp->max_fc); + rc = bnxt_hwrm_cfa_counter_qcaps(bp, &bp->flow_stat->max_fc); if (rc) return rc; @@ -627,7 +736,7 @@ skip_cosq_cfg: goto err_free; } - if (!bp->link_info.link_up) { + if (!bp->link_info->link_up) { rc = bnxt_set_hwrm_link_config(bp, true); if (rc) { PMD_DRV_LOG(ERR, @@ -667,6 +776,43 @@ static int bnxt_shutdown_nic(struct bnxt *bp) * Device configuration and status function */ +uint32_t bnxt_get_speed_capabilities(struct bnxt *bp) +{ + uint32_t link_speed = bp->link_info->support_speeds; + uint32_t speed_capa = 0; + + if (link_speed & HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_100MB) + speed_capa |= ETH_LINK_SPEED_100M; + if (link_speed & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_100MBHD) + speed_capa |= ETH_LINK_SPEED_100M_HD; + if (link_speed & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_1GB) + speed_capa |= ETH_LINK_SPEED_1G; + if (link_speed & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_2_5GB) + speed_capa |= ETH_LINK_SPEED_2_5G; + if (link_speed & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_10GB) + speed_capa |= ETH_LINK_SPEED_10G; + if (link_speed & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_20GB) + speed_capa |= ETH_LINK_SPEED_20G; + if (link_speed & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_25GB) + speed_capa |= ETH_LINK_SPEED_25G; + if (link_speed & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_40GB) + speed_capa |= ETH_LINK_SPEED_40G; + if (link_speed & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_50GB) + speed_capa |= ETH_LINK_SPEED_50G; + if (link_speed & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_100GB) + speed_capa |= ETH_LINK_SPEED_100G; + if (link_speed & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_200GB) + speed_capa |= ETH_LINK_SPEED_200G; + + if (bp->link_info->auto_mode == + HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_MODE_NONE) + speed_capa |= ETH_LINK_SPEED_FIXED; + else + speed_capa |= ETH_LINK_SPEED_AUTONEG; + + return speed_capa; +} + static int bnxt_dev_info_get_op(struct rte_eth_dev *eth_dev, struct rte_eth_dev_info *dev_info) { @@ -710,6 +856,8 @@ static int bnxt_dev_info_get_op(struct rte_eth_dev *eth_dev, dev_info->tx_offload_capa = BNXT_DEV_TX_OFFLOAD_SUPPORT; dev_info->flow_type_rss_offloads = BNXT_ETH_RSS_SUPPORT; + dev_info->speed_capa = bnxt_get_speed_capabilities(bp); + /* *INDENT-OFF* */ dev_info->default_rxconf = (struct rte_eth_rxconf) { .rx_thresh = { @@ -945,7 +1093,7 @@ bnxt_receive_function(struct rte_eth_dev *eth_dev) DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM | DEV_RX_OFFLOAD_RSS_HASH | DEV_RX_OFFLOAD_VLAN_FILTER)) && - !bp->truflow) { + !BNXT_TRUFLOW_EN(bp)) { PMD_DRV_LOG(INFO, "Using vector mode receive for port %d\n", eth_dev->data->port_id); bp->flags |= BNXT_FLAG_RX_VECTOR_PKT_MODE; @@ -1015,7 +1163,7 @@ static int bnxt_dev_start_op(struct rte_eth_dev *eth_dev) struct 
bnxt *bp = eth_dev->data->dev_private; uint64_t rx_offloads = eth_dev->data->dev_conf.rxmode.offloads; int vlan_mask = 0; - int rc; + int rc, retry_cnt = BNXT_IF_CHANGE_RETRY_COUNT; if (!eth_dev->data->nb_tx_queues || !eth_dev->data->nb_rx_queues) { PMD_DRV_LOG(ERR, "Queues are not configured yet!\n"); @@ -1028,14 +1176,23 @@ static int bnxt_dev_start_op(struct rte_eth_dev *eth_dev) bp->rx_cp_nr_rings, RTE_ETHDEV_QUEUE_STAT_CNTRS); } - rc = bnxt_hwrm_if_change(bp, 1); - if (!rc) { - if (bp->flags & BNXT_FLAG_IF_CHANGE_HOT_FW_RESET_DONE) { - rc = bnxt_handle_if_change_status(bp); - if (rc) - return rc; - } + do { + rc = bnxt_hwrm_if_change(bp, true); + if (rc == 0 || rc != -EAGAIN) + break; + + rte_delay_ms(BNXT_IF_CHANGE_RETRY_INTERVAL); + } while (retry_cnt--); + + if (rc) + return rc; + + if (bp->flags & BNXT_FLAG_IF_CHANGE_HOT_FW_RESET_DONE) { + rc = bnxt_handle_if_change_status(bp); + if (rc) + return rc; } + bnxt_enable_int(bp); rc = bnxt_init_chip(bp); @@ -1062,16 +1219,16 @@ static int bnxt_dev_start_op(struct rte_eth_dev *eth_dev) bnxt_schedule_fw_health_check(bp); pthread_mutex_unlock(&bp->def_cp_lock); - if (bp->truflow) + if (BNXT_TRUFLOW_EN(bp)) bnxt_ulp_init(bp); return 0; error: - bnxt_hwrm_if_change(bp, 0); bnxt_shutdown_nic(bp); bnxt_free_tx_mbufs(bp); bnxt_free_rx_mbufs(bp); + bnxt_hwrm_if_change(bp, false); eth_dev->data->dev_started = 0; return rc; } @@ -1081,7 +1238,7 @@ static int bnxt_dev_set_link_up_op(struct rte_eth_dev *eth_dev) struct bnxt *bp = eth_dev->data->dev_private; int rc = 0; - if (!bp->link_info.link_up) + if (!bp->link_info->link_up) rc = bnxt_set_hwrm_link_config(bp, true); if (!rc) eth_dev->data->dev_link.link_status = 1; @@ -1096,7 +1253,7 @@ static int bnxt_dev_set_link_down_op(struct rte_eth_dev *eth_dev) eth_dev->data->dev_link.link_status = 0; bnxt_set_hwrm_link_config(bp, false); - bp->link_info.link_up = 0; + bp->link_info->link_up = 0; return 0; } @@ -1108,7 +1265,7 @@ static void bnxt_dev_stop_op(struct rte_eth_dev *eth_dev) struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev); struct rte_intr_handle *intr_handle = &pci_dev->intr_handle; - if (bp->truflow) + if (BNXT_TRUFLOW_EN(bp)) bnxt_ulp_deinit(bp); eth_dev->data->dev_started = 0; @@ -1145,13 +1302,16 @@ static void bnxt_dev_stop_op(struct rte_eth_dev *eth_dev) /* Process any remaining notifications in default completion queue */ bnxt_int_handler(eth_dev); bnxt_shutdown_nic(bp); - bnxt_hwrm_if_change(bp, 0); + bnxt_hwrm_if_change(bp, false); rte_free(bp->mark_table); bp->mark_table = NULL; bp->flags &= ~BNXT_FLAG_RX_VECTOR_PKT_MODE; bp->rx_cosq_cnt = 0; + /* All filters are deleted on a port stop. 
*/ + if (BNXT_FLOW_XSTATS_EN(bp)) + bp->flow_stat->flow_count = 0; } static void bnxt_dev_close_op(struct rte_eth_dev *eth_dev) @@ -1168,6 +1328,11 @@ static void bnxt_dev_close_op(struct rte_eth_dev *eth_dev) bnxt_uninit_resources(bp, false); + bnxt_free_leds_info(bp); + bnxt_free_cos_queues(bp); + bnxt_free_link_info(bp); + bnxt_free_pf_info(bp); + eth_dev->dev_ops = NULL; eth_dev->rx_pkt_burst = NULL; eth_dev->tx_pkt_burst = NULL; @@ -1177,8 +1342,8 @@ static void bnxt_dev_close_op(struct rte_eth_dev *eth_dev) rte_memzone_free((const struct rte_memzone *)bp->rx_mem_zone); bp->rx_mem_zone = NULL; - rte_free(bp->pf.vf_info); - bp->pf.vf_info = NULL; + rte_free(bp->pf->vf_info); + bp->pf->vf_info = NULL; rte_free(bp->grp_info); bp->grp_info = NULL; @@ -1629,7 +1794,9 @@ static int bnxt_rss_hash_update_op(struct rte_eth_dev *eth_dev, } bp->flags |= BNXT_FLAG_UPDATE_HASH; - memcpy(&bp->rss_conf, rss_conf, sizeof(*rss_conf)); + memcpy(ð_dev->data->dev_conf.rx_adv_conf.rss_conf, + rss_conf, + sizeof(*rss_conf)); /* Update the default RSS VNIC(s) */ vnic = BNXT_GET_DEFAULT_VNIC(bp); @@ -1706,7 +1873,7 @@ static int bnxt_rss_hash_conf_get_op(struct rte_eth_dev *eth_dev, } if (hash_types) { PMD_DRV_LOG(ERR, - "Unknwon RSS config from firmware (%08x), RSS disabled", + "Unknown RSS config from firmware (%08x), RSS disabled", vnic->hash_type); return -ENOTSUP; } @@ -1732,9 +1899,9 @@ static int bnxt_flow_ctrl_get_op(struct rte_eth_dev *dev, return rc; memset(fc_conf, 0, sizeof(*fc_conf)); - if (bp->link_info.auto_pause) + if (bp->link_info->auto_pause) fc_conf->autoneg = 1; - switch (bp->link_info.pause) { + switch (bp->link_info->pause) { case 0: fc_conf->mode = RTE_FC_NONE; break; @@ -1769,40 +1936,40 @@ static int bnxt_flow_ctrl_set_op(struct rte_eth_dev *dev, switch (fc_conf->mode) { case RTE_FC_NONE: - bp->link_info.auto_pause = 0; - bp->link_info.force_pause = 0; + bp->link_info->auto_pause = 0; + bp->link_info->force_pause = 0; break; case RTE_FC_RX_PAUSE: if (fc_conf->autoneg) { - bp->link_info.auto_pause = + bp->link_info->auto_pause = HWRM_PORT_PHY_CFG_INPUT_AUTO_PAUSE_RX; - bp->link_info.force_pause = 0; + bp->link_info->force_pause = 0; } else { - bp->link_info.auto_pause = 0; - bp->link_info.force_pause = + bp->link_info->auto_pause = 0; + bp->link_info->force_pause = HWRM_PORT_PHY_CFG_INPUT_FORCE_PAUSE_RX; } break; case RTE_FC_TX_PAUSE: if (fc_conf->autoneg) { - bp->link_info.auto_pause = + bp->link_info->auto_pause = HWRM_PORT_PHY_CFG_INPUT_AUTO_PAUSE_TX; - bp->link_info.force_pause = 0; + bp->link_info->force_pause = 0; } else { - bp->link_info.auto_pause = 0; - bp->link_info.force_pause = + bp->link_info->auto_pause = 0; + bp->link_info->force_pause = HWRM_PORT_PHY_CFG_INPUT_FORCE_PAUSE_TX; } break; case RTE_FC_FULL: if (fc_conf->autoneg) { - bp->link_info.auto_pause = + bp->link_info->auto_pause = HWRM_PORT_PHY_CFG_INPUT_AUTO_PAUSE_TX | HWRM_PORT_PHY_CFG_INPUT_AUTO_PAUSE_RX; - bp->link_info.force_pause = 0; + bp->link_info->force_pause = 0; } else { - bp->link_info.auto_pause = 0; - bp->link_info.force_pause = + bp->link_info->auto_pause = 0; + bp->link_info->force_pause = HWRM_PORT_PHY_CFG_INPUT_FORCE_PAUSE_TX | HWRM_PORT_PHY_CFG_INPUT_FORCE_PAUSE_RX; } @@ -2037,6 +2204,11 @@ static int bnxt_vlan_filter_set_op(struct rte_eth_dev *eth_dev, if (rc) return rc; + if (!eth_dev->data->dev_started) { + PMD_DRV_LOG(ERR, "port must be started before setting vlan\n"); + return -EINVAL; + } + /* These operations apply to ALL existing MAC/VLAN filters */ if (on) return 
bnxt_add_vlan_filter(bp, vlan_id); @@ -2122,6 +2294,8 @@ static int bnxt_free_one_vnic(struct bnxt *bp, uint16_t vnic_id) rte_free(vnic->fw_grp_ids); vnic->fw_grp_ids = NULL; + vnic->rx_queue_cnt = 0; + return 0; } @@ -2351,10 +2525,11 @@ bnxt_fw_version_get(struct rte_eth_dev *dev, char *fw_version, size_t fw_size) uint8_t fw_major = (bp->fw_ver >> 24) & 0xff; uint8_t fw_minor = (bp->fw_ver >> 16) & 0xff; uint8_t fw_updt = (bp->fw_ver >> 8) & 0xff; + uint8_t fw_rsvd = bp->fw_ver & 0xff; int ret; - ret = snprintf(fw_version, fw_size, "%d.%d.%d", - fw_major, fw_minor, fw_updt); + ret = snprintf(fw_version, fw_size, "%d.%d.%d.%d", + fw_major, fw_minor, fw_updt, fw_rsvd); ret += 1; /* add the size of '\0' */ if (fw_size < (uint32_t)ret) @@ -3470,7 +3645,7 @@ bnxt_filter_ctrl_op(struct rte_eth_dev *dev, case RTE_ETH_FILTER_GENERIC: if (filter_op != RTE_ETH_FILTER_GET) return -EINVAL; - if (bp->truflow) + if (BNXT_TRUFLOW_EN(bp)) *(const void **)arg = &bnxt_ulp_rte_flow_ops; else *(const void **)arg = &bnxt_flow_ops; @@ -3591,7 +3766,7 @@ static int bnxt_get_tx_ts(struct bnxt *bp, uint64_t *ts) static int bnxt_get_rx_ts(struct bnxt *bp, uint64_t *ts) { struct bnxt_ptp_cfg *ptp = bp->ptp_cfg; - struct bnxt_pf_info *pf = &bp->pf; + struct bnxt_pf_info *pf = bp->pf; uint16_t port_id; uint32_t fifo; @@ -4086,7 +4261,7 @@ static void bnxt_write_fw_reset_reg(struct bnxt *bp, uint32_t index) static void bnxt_dev_cleanup(struct bnxt *bp) { bnxt_set_hwrm_link_config(bp, false); - bp->link_info.link_up = 0; + bp->link_info->link_up = 0; if (bp->eth_dev->data->dev_started) bnxt_dev_stop_op(bp->eth_dev); @@ -4591,7 +4766,7 @@ static void bnxt_free_ctx_mem(struct bnxt *bp) rte_memzone_free(bp->ctx->vnic_mem.ring_mem.pg_tbl_mz); rte_memzone_free(bp->ctx->stat_mem.ring_mem.pg_tbl_mz); - for (i = 0; i < BNXT_MAX_Q; i++) { + for (i = 0; i < bp->ctx->tqm_fp_rings_count + 1; i++) { if (bp->ctx->tqm_mem[i]) rte_memzone_free(bp->ctx->tqm_mem[i]->ring_mem.mz); } @@ -4619,6 +4794,7 @@ int bnxt_alloc_ctx_mem(struct bnxt *bp) struct bnxt_ctx_pg_info *ctx_pg; struct bnxt_ctx_mem_info *ctx; uint32_t mem_size, ena, entries; + uint32_t entries_sp, min; int i, rc; rc = bnxt_hwrm_func_backing_store_qcaps(bp); @@ -4666,16 +4842,20 @@ int bnxt_alloc_ctx_mem(struct bnxt *bp) if (rc) return rc; - entries = ctx->qp_max_l2_entries + - ctx->vnic_max_vnic_entries + - ctx->tqm_min_entries_per_ring; + min = ctx->tqm_min_entries_per_ring; + + entries_sp = ctx->qp_max_l2_entries + + ctx->vnic_max_vnic_entries + + 2 * ctx->qp_min_qp1_entries + min; + entries_sp = bnxt_roundup(entries_sp, ctx->tqm_entries_multiple); + + entries = ctx->qp_max_l2_entries + ctx->qp_min_qp1_entries; entries = bnxt_roundup(entries, ctx->tqm_entries_multiple); - entries = clamp_t(uint32_t, entries, ctx->tqm_min_entries_per_ring, + entries = clamp_t(uint32_t, entries, min, ctx->tqm_max_entries_per_ring); - for (i = 0, ena = 0; i < BNXT_MAX_Q; i++) { + for (i = 0, ena = 0; i < ctx->tqm_fp_rings_count + 1; i++) { ctx_pg = ctx->tqm_mem[i]; - /* use min tqm entries for now. */ - ctx_pg->entries = entries; + ctx_pg->entries = i ? 
entries : entries_sp; mem_size = ctx->tqm_entry_size * ctx_pg->entries; rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg, mem_size, "tqm_mem", i); if (rc) @@ -4798,7 +4978,7 @@ static int bnxt_setup_mac_addr(struct rte_eth_dev *eth_dev) return -ENOMEM; } - if (bnxt_check_zero_bytes(bp->dflt_mac_addr, RTE_ETHER_ADDR_LEN)) { + if (!BNXT_HAS_DFLT_MAC_SET(bp)) { if (BNXT_PF(bp)) return -EINVAL; @@ -4811,14 +4991,11 @@ static int bnxt_setup_mac_addr(struct rte_eth_dev *eth_dev) bp->mac_addr[3], bp->mac_addr[4], bp->mac_addr[5]); rc = bnxt_hwrm_set_mac(bp); - if (!rc) - memcpy(&bp->eth_dev->data->mac_addrs[0], bp->mac_addr, - RTE_ETHER_ADDR_LEN); - return rc; + if (rc) + return rc; } /* Copy the permanent MAC from the FUNC_QCAPS response */ - memcpy(bp->mac_addr, bp->dflt_mac_addr, RTE_ETHER_ADDR_LEN); memcpy(ð_dev->data->mac_addrs[0], bp->mac_addr, RTE_ETHER_ADDR_LEN); return rc; @@ -4829,7 +5006,7 @@ static int bnxt_restore_dflt_mac(struct bnxt *bp) int rc = 0; /* MAC is already configured in FW */ - if (!bnxt_check_zero_bytes(bp->dflt_mac_addr, RTE_ETHER_ADDR_LEN)) + if (BNXT_HAS_DFLT_MAC_SET(bp)) return 0; /* Restore the old MAC configured */ @@ -4848,7 +5025,7 @@ static void bnxt_config_vf_req_fwd(struct bnxt *bp) #define ALLOW_FUNC(x) \ { \ uint32_t arg = (x); \ - bp->pf.vf_req_fwd[((arg) >> 5)] &= \ + bp->pf->vf_req_fwd[((arg) >> 5)] &= \ ~rte_cpu_to_le_32(1 << ((arg) & 0x1f)); \ } @@ -4856,11 +5033,11 @@ static void bnxt_config_vf_req_fwd(struct bnxt *bp) if (((bp->fw_ver >= ((20 << 24) | (6 << 16) | (100 << 8))) && (bp->fw_ver < ((20 << 24) | (7 << 16)))) || ((bp->fw_ver >= ((20 << 24) | (8 << 16))))) { - memset(bp->pf.vf_req_fwd, 0xff, sizeof(bp->pf.vf_req_fwd)); + memset(bp->pf->vf_req_fwd, 0xff, sizeof(bp->pf->vf_req_fwd)); } else { PMD_DRV_LOG(WARNING, "Firmware too old for VF mailbox functionality\n"); - memset(bp->pf.vf_req_fwd, 0, sizeof(bp->pf.vf_req_fwd)); + memset(bp->pf->vf_req_fwd, 0, sizeof(bp->pf->vf_req_fwd)); } /* @@ -4917,6 +5094,89 @@ bnxt_get_fw_func_id(uint16_t port) return bp->fw_fid; } +static void bnxt_alloc_error_recovery_info(struct bnxt *bp) +{ + struct bnxt_error_recovery_info *info = bp->recovery_info; + + if (info) { + if (!(bp->fw_cap & BNXT_FW_CAP_HCOMM_FW_STATUS)) + memset(info, 0, sizeof(*info)); + return; + } + + if (!(bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY)) + return; + + info = rte_zmalloc("bnxt_hwrm_error_recovery_qcfg", + sizeof(*info), 0); + if (!info) + bp->fw_cap &= ~BNXT_FW_CAP_ERROR_RECOVERY; + + bp->recovery_info = info; +} + +static void bnxt_check_fw_status(struct bnxt *bp) +{ + uint32_t fw_status; + + if (!(bp->recovery_info && + (bp->fw_cap & BNXT_FW_CAP_HCOMM_FW_STATUS))) + return; + + fw_status = bnxt_read_fw_status_reg(bp, BNXT_FW_STATUS_REG); + if (fw_status != BNXT_FW_STATUS_HEALTHY) + PMD_DRV_LOG(ERR, "Firmware not responding, status: %#x\n", + fw_status); +} + +static int bnxt_map_hcomm_fw_status_reg(struct bnxt *bp) +{ + struct bnxt_error_recovery_info *info = bp->recovery_info; + uint32_t status_loc; + uint32_t sig_ver; + + rte_write32(HCOMM_STATUS_STRUCT_LOC, (uint8_t *)bp->bar0 + + BNXT_GRCPF_REG_WINDOW_BASE_OUT + 4); + sig_ver = rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 + + BNXT_GRCP_WINDOW_2_BASE + + offsetof(struct hcomm_status, + sig_ver))); + /* If the signature is absent, then FW does not support this feature */ + if ((sig_ver & HCOMM_STATUS_SIGNATURE_MASK) != + HCOMM_STATUS_SIGNATURE_VAL) + return 0; + + if (!info) { + info = rte_zmalloc("bnxt_hwrm_error_recovery_qcfg", + sizeof(*info), 0); + if (!info) + return -ENOMEM; + 
bp->recovery_info = info; + } else { + memset(info, 0, sizeof(*info)); + } + + status_loc = rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 + + BNXT_GRCP_WINDOW_2_BASE + + offsetof(struct hcomm_status, + fw_status_loc))); + + /* Only pre-map the FW health status GRC register */ + if (BNXT_FW_STATUS_REG_TYPE(status_loc) != BNXT_FW_STATUS_REG_TYPE_GRC) + return 0; + + info->status_regs[BNXT_FW_STATUS_REG] = status_loc; + info->mapped_status_regs[BNXT_FW_STATUS_REG] = + BNXT_GRCP_WINDOW_2_BASE + (status_loc & BNXT_GRCP_OFFSET_MASK); + + rte_write32((status_loc & BNXT_GRCP_BASE_MASK), (uint8_t *)bp->bar0 + + BNXT_GRCPF_REG_WINDOW_BASE_OUT + 4); + + bp->fw_cap |= BNXT_FW_CAP_HCOMM_FW_STATUS; + + return 0; +} + static int bnxt_init_fw(struct bnxt *bp) { uint16_t mtu; @@ -4924,10 +5184,16 @@ static int bnxt_init_fw(struct bnxt *bp) bp->fw_cap = 0; - rc = bnxt_hwrm_ver_get(bp, DFLT_HWRM_CMD_TIMEOUT); + rc = bnxt_map_hcomm_fw_status_reg(bp); if (rc) return rc; + rc = bnxt_hwrm_ver_get(bp, DFLT_HWRM_CMD_TIMEOUT); + if (rc) { + bnxt_check_fw_status(bp); + return rc; + } + rc = bnxt_hwrm_func_reset(bp); if (rc) return -EIO; @@ -4958,6 +5224,7 @@ static int bnxt_init_fw(struct bnxt *bp) if (rc) return rc; + bnxt_alloc_error_recovery_info(bp); /* Get the adapter error recovery support info */ rc = bnxt_hwrm_error_recovery_qcfg(bp); if (rc) @@ -5081,8 +5348,8 @@ bnxt_parse_devarg_truflow(__rte_unused const char *key, return -EINVAL; } - bp->truflow = truflow; - if (bp->truflow) + bp->flags |= BNXT_FLAG_TRUFLOW_EN; + if (BNXT_TRUFLOW_EN(bp)) PMD_DRV_LOG(INFO, "Host-based truflow feature enabled.\n"); return 0; @@ -5116,8 +5383,8 @@ bnxt_parse_devarg_flow_xstat(__rte_unused const char *key, return -EINVAL; } - bp->flow_xstat = flow_xstat; - if (bp->flow_xstat) + bp->flags |= BNXT_FLAG_FLOW_XSTATS_EN; + if (BNXT_FLOW_XSTATS_EN(bp)) PMD_DRV_LOG(INFO, "flow_xstat feature enabled.\n"); return 0; @@ -5202,12 +5469,28 @@ bnxt_dev_init(struct rte_eth_dev *eth_dev) return rc; } + rc = bnxt_alloc_pf_info(bp); + if (rc) + goto error_free; + + rc = bnxt_alloc_link_info(bp); + if (rc) + goto error_free; + rc = bnxt_alloc_hwrm_resources(bp); if (rc) { PMD_DRV_LOG(ERR, "Failed to allocate hwrm resource rc: %x\n", rc); goto error_free; } + rc = bnxt_alloc_leds_info(bp); + if (rc) + goto error_free; + + rc = bnxt_alloc_cos_queues(bp); + if (rc) + goto error_free; + rc = bnxt_init_resources(bp, false); if (rc) goto error_free; @@ -5251,46 +5534,55 @@ static void bnxt_unregister_fc_ctx_mem(struct bnxt *bp) { bnxt_hwrm_cfa_counter_cfg(bp, BNXT_DIR_RX, CFA_COUNTER_CFG_IN_COUNTER_TYPE_FC, - bp->rx_fc_out_tbl.ctx_id, - bp->max_fc, + bp->flow_stat->rx_fc_out_tbl.ctx_id, + bp->flow_stat->max_fc, false); bnxt_hwrm_cfa_counter_cfg(bp, BNXT_DIR_TX, CFA_COUNTER_CFG_IN_COUNTER_TYPE_FC, - bp->tx_fc_out_tbl.ctx_id, - bp->max_fc, + bp->flow_stat->tx_fc_out_tbl.ctx_id, + bp->flow_stat->max_fc, false); - if (bp->rx_fc_in_tbl.ctx_id != BNXT_CTX_VAL_INVAL) - bnxt_hwrm_ctx_unrgtr(bp, bp->rx_fc_in_tbl.ctx_id); - bp->rx_fc_in_tbl.ctx_id = BNXT_CTX_VAL_INVAL; + if (bp->flow_stat->rx_fc_in_tbl.ctx_id != BNXT_CTX_VAL_INVAL) + bnxt_hwrm_ctx_unrgtr(bp, bp->flow_stat->rx_fc_in_tbl.ctx_id); + bp->flow_stat->rx_fc_in_tbl.ctx_id = BNXT_CTX_VAL_INVAL; - if (bp->rx_fc_out_tbl.ctx_id != BNXT_CTX_VAL_INVAL) - bnxt_hwrm_ctx_unrgtr(bp, bp->rx_fc_out_tbl.ctx_id); - bp->rx_fc_out_tbl.ctx_id = BNXT_CTX_VAL_INVAL; + if (bp->flow_stat->rx_fc_out_tbl.ctx_id != BNXT_CTX_VAL_INVAL) + bnxt_hwrm_ctx_unrgtr(bp, bp->flow_stat->rx_fc_out_tbl.ctx_id); + 
bp->flow_stat->rx_fc_out_tbl.ctx_id = BNXT_CTX_VAL_INVAL; - if (bp->tx_fc_in_tbl.ctx_id != BNXT_CTX_VAL_INVAL) - bnxt_hwrm_ctx_unrgtr(bp, bp->tx_fc_in_tbl.ctx_id); - bp->tx_fc_in_tbl.ctx_id = BNXT_CTX_VAL_INVAL; + if (bp->flow_stat->tx_fc_in_tbl.ctx_id != BNXT_CTX_VAL_INVAL) + bnxt_hwrm_ctx_unrgtr(bp, bp->flow_stat->tx_fc_in_tbl.ctx_id); + bp->flow_stat->tx_fc_in_tbl.ctx_id = BNXT_CTX_VAL_INVAL; - if (bp->tx_fc_out_tbl.ctx_id != BNXT_CTX_VAL_INVAL) - bnxt_hwrm_ctx_unrgtr(bp, bp->tx_fc_out_tbl.ctx_id); - bp->tx_fc_out_tbl.ctx_id = BNXT_CTX_VAL_INVAL; + if (bp->flow_stat->tx_fc_out_tbl.ctx_id != BNXT_CTX_VAL_INVAL) + bnxt_hwrm_ctx_unrgtr(bp, bp->flow_stat->tx_fc_out_tbl.ctx_id); + bp->flow_stat->tx_fc_out_tbl.ctx_id = BNXT_CTX_VAL_INVAL; } static void bnxt_uninit_fc_ctx_mem(struct bnxt *bp) { bnxt_unregister_fc_ctx_mem(bp); - bnxt_free_ctx_mem_buf(&bp->rx_fc_in_tbl); - bnxt_free_ctx_mem_buf(&bp->rx_fc_out_tbl); - bnxt_free_ctx_mem_buf(&bp->tx_fc_in_tbl); - bnxt_free_ctx_mem_buf(&bp->tx_fc_out_tbl); + bnxt_free_ctx_mem_buf(&bp->flow_stat->rx_fc_in_tbl); + bnxt_free_ctx_mem_buf(&bp->flow_stat->rx_fc_out_tbl); + bnxt_free_ctx_mem_buf(&bp->flow_stat->tx_fc_in_tbl); + bnxt_free_ctx_mem_buf(&bp->flow_stat->tx_fc_out_tbl); } static void bnxt_uninit_ctx_mem(struct bnxt *bp) { - bnxt_uninit_fc_ctx_mem(bp); + if (BNXT_FLOW_XSTATS_EN(bp)) + bnxt_uninit_fc_ctx_mem(bp); +} + +static void +bnxt_free_error_recovery_info(struct bnxt *bp) +{ + rte_free(bp->recovery_info); + bp->recovery_info = NULL; + bp->fw_cap &= ~BNXT_FW_CAP_ERROR_RECOVERY; } static void @@ -5313,16 +5605,13 @@ bnxt_uninit_resources(struct bnxt *bp, bool reconfig_dev) bnxt_free_ctx_mem(bp); if (!reconfig_dev) { bnxt_free_hwrm_resources(bp); - - if (bp->recovery_info != NULL) { - rte_free(bp->recovery_info); - bp->recovery_info = NULL; - } + bnxt_free_error_recovery_info(bp); } bnxt_uninit_ctx_mem(bp); bnxt_uninit_locks(bp); + bnxt_free_flow_stats_info(bp); rte_free(bp->ptp_cfg); bp->ptp_cfg = NULL; return rc;
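
Illustrative note (not part of the patch): the hunk in bnxt_dev_start_op() above replaces the single bnxt_hwrm_if_change() call with a bounded retry loop that stops on success or on any error other than -EAGAIN, sleeping between attempts. The standalone sketch below shows the same idiom outside the driver; fake_if_change(), RETRY_COUNT and RETRY_INTERVAL_MS are hypothetical stand-ins for bnxt_hwrm_if_change(), BNXT_IF_CHANGE_RETRY_COUNT and BNXT_IF_CHANGE_RETRY_INTERVAL.

/*
 * Standalone sketch of the bounded retry-on-EAGAIN idiom used in
 * bnxt_dev_start_op(). All names below are stand-ins for illustration.
 */
#include <errno.h>
#include <stdio.h>
#include <unistd.h>

#define RETRY_COUNT        5    /* stand-in for BNXT_IF_CHANGE_RETRY_COUNT */
#define RETRY_INTERVAL_MS  50   /* stand-in for BNXT_IF_CHANGE_RETRY_INTERVAL */

/* Pretend the firmware is still busy for the first two calls. */
static int fake_if_change(void)
{
	static int busy = 2;

	return busy-- > 0 ? -EAGAIN : 0;
}

int main(void)
{
	int retry_cnt = RETRY_COUNT;
	int rc;

	do {
		rc = fake_if_change();
		/* Stop on success or on any error other than -EAGAIN. */
		if (rc == 0 || rc != -EAGAIN)
			break;

		usleep(RETRY_INTERVAL_MS * 1000);
	} while (retry_cnt--);

	printf("rc = %d\n", rc); /* 0 once the "firmware" stops returning -EAGAIN */
	return rc ? 1 : 0;
}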