X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=drivers%2Fnet%2Fbnxt%2Fbnxt_hwrm.c;h=82e89b7c8af7d4b091a984e8b49a71663a1d348f;hb=586e6028371764452b7dbae3b1f50adde87be80e;hp=3de5c7e3a1a4c01a7cb7b725e461e51933a49490;hpb=b0764e7c207b0932ecd73655d8f7aa015e6733f4;p=dpdk.git diff --git a/drivers/net/bnxt/bnxt_hwrm.c b/drivers/net/bnxt/bnxt_hwrm.c index 3de5c7e3a1..82e89b7c8a 100644 --- a/drivers/net/bnxt/bnxt_hwrm.c +++ b/drivers/net/bnxt/bnxt_hwrm.c @@ -628,7 +628,7 @@ int bnxt_hwrm_set_l2_filter(struct bnxt *bp, uint16_t j = dst_id - 1; //TODO: Is there a better way to add VLANs to each VNIC in case of VMDQ - if ((dev_conf->rxmode.mq_mode & ETH_MQ_RX_VMDQ_FLAG) && + if ((dev_conf->rxmode.mq_mode & RTE_ETH_MQ_RX_VMDQ_FLAG) && conf->pool_map[j].pools & (1UL << j)) { PMD_DRV_LOG(DEBUG, "Add vlan %u to vmdq pool %u\n", @@ -750,9 +750,13 @@ static int bnxt_hwrm_ptp_qcfg(struct bnxt *bp) HWRM_CHECK_RESULT(); - if (!BNXT_CHIP_P5(bp) && - !(resp->flags & HWRM_PORT_MAC_PTP_QCFG_OUTPUT_FLAGS_DIRECT_ACCESS)) - return 0; + if (BNXT_CHIP_P5(bp)) { + if (!(resp->flags & HWRM_PORT_MAC_PTP_QCFG_OUTPUT_FLAGS_HWRM_ACCESS)) + return 0; + } else { + if (!(resp->flags & HWRM_PORT_MAC_PTP_QCFG_OUTPUT_FLAGS_DIRECT_ACCESS)) + return 0; + } if (resp->flags & HWRM_PORT_MAC_PTP_QCFG_OUTPUT_FLAGS_ONE_STEP_TX_TS) bp->flags |= BNXT_FLAG_FW_CAP_ONE_STEP_TX_TS; @@ -792,6 +796,9 @@ void bnxt_free_vf_info(struct bnxt *bp) { int i; + if (bp->pf == NULL) + return; + if (bp->pf->vf_info == NULL) return; @@ -934,6 +941,10 @@ static int __bnxt_hwrm_func_qcaps(struct bnxt *bp) if (flags & HWRM_FUNC_QCAPS_OUTPUT_FLAGS_LINK_ADMIN_STATUS_SUPPORTED) bp->fw_cap |= BNXT_FW_CAP_LINK_ADMIN; + if (!(flags & HWRM_FUNC_QCAPS_OUTPUT_FLAGS_VLAN_ACCELERATION_TX_DISABLED)) { + bp->fw_cap |= BNXT_FW_CAP_VLAN_TX_INSERT; + PMD_DRV_LOG(DEBUG, "VLAN acceleration for TX is enabled\n"); + } unlock: HWRM_UNLOCK(); @@ -994,6 +1005,11 @@ int bnxt_hwrm_vnic_qcaps(struct bnxt *bp) if (flags & HWRM_VNIC_QCAPS_OUTPUT_FLAGS_RX_CMPL_V2_CAP) bp->vnic_cap_flags |= BNXT_VNIC_CAP_RX_CMPL_V2; + if (flags & HWRM_VNIC_QCAPS_OUTPUT_FLAGS_VLAN_STRIP_CAP) { + bp->vnic_cap_flags |= BNXT_VNIC_CAP_VLAN_RX_STRIP; + PMD_DRV_LOG(DEBUG, "Rx VLAN strip capability enabled\n"); + } + bp->max_tpa_v2 = rte_le_to_cpu_16(resp->max_aggs_supported); HWRM_UNLOCK(); @@ -1043,9 +1059,9 @@ int bnxt_hwrm_func_driver_register(struct bnxt *bp) HWRM_PREP(&req, HWRM_FUNC_DRV_RGTR, BNXT_USE_CHIMP_MB); req.enables = rte_cpu_to_le_32(HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_VER | HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_ASYNC_EVENT_FWD); - req.ver_maj = RTE_VER_YEAR; - req.ver_min = RTE_VER_MONTH; - req.ver_upd = RTE_VER_MINOR; + req.ver_maj_8b = RTE_VER_YEAR; + req.ver_min_8b = RTE_VER_MONTH; + req.ver_upd_8b = RTE_VER_MINOR; if (BNXT_PF(bp)) { req.enables |= rte_cpu_to_le_32( @@ -1078,7 +1094,8 @@ int bnxt_hwrm_func_driver_register(struct bnxt *bp) rte_cpu_to_le_32(ASYNC_CMPL_EVENT_ID_DEFAULT_VNIC_CHANGE); req.async_event_fwd[2] |= - rte_cpu_to_le_32(ASYNC_CMPL_EVENT_ID_ECHO_REQUEST); + rte_cpu_to_le_32(ASYNC_CMPL_EVENT_ID_ECHO_REQUEST | + ASYNC_CMPL_EVENT_ID_ERROR_REPORT); rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB); @@ -1284,28 +1301,8 @@ int bnxt_hwrm_ver_get(struct bnxt *bp, uint32_t timeout) max_resp_len = rte_le_to_cpu_16(resp->max_resp_len); dev_caps_cfg = rte_le_to_cpu_32(resp->dev_caps_cfg); - if (bp->max_resp_len != max_resp_len) { - sprintf(type, "bnxt_hwrm_" PCI_PRI_FMT, - bp->pdev->addr.domain, bp->pdev->addr.bus, - bp->pdev->addr.devid, bp->pdev->addr.function); - 
- rte_free(bp->hwrm_cmd_resp_addr); - - bp->hwrm_cmd_resp_addr = rte_malloc(type, max_resp_len, 0); - if (bp->hwrm_cmd_resp_addr == NULL) { - rc = -ENOMEM; - goto error; - } - bp->hwrm_cmd_resp_dma_addr = - rte_malloc_virt2iova(bp->hwrm_cmd_resp_addr); - if (bp->hwrm_cmd_resp_dma_addr == RTE_BAD_IOVA) { - PMD_DRV_LOG(ERR, - "Unable to map response buffer to physical memory.\n"); - rc = -ENOMEM; - goto error; - } - bp->max_resp_len = max_resp_len; - } + RTE_VERIFY(max_resp_len <= bp->max_resp_len); + bp->max_resp_len = max_resp_len; if ((dev_caps_cfg & HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_SHORT_CMD_SUPPORTED) && @@ -1362,12 +1359,18 @@ int bnxt_hwrm_ver_get(struct bnxt *bp, uint32_t timeout) bp->fw_cap |= BNXT_FW_CAP_ADV_FLOW_COUNTERS; } + if (dev_caps_cfg & + HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_CFA_TRUFLOW_SUPPORTED) { + PMD_DRV_LOG(DEBUG, "Host-based truflow feature enabled.\n"); + bp->fw_cap |= BNXT_FW_CAP_TRUFLOW_EN; + } + error: HWRM_UNLOCK(); return rc; } -int bnxt_hwrm_func_driver_unregister(struct bnxt *bp, uint32_t flags) +int bnxt_hwrm_func_driver_unregister(struct bnxt *bp) { int rc; struct hwrm_func_drv_unrgtr_input req = {.req_type = 0 }; @@ -1377,13 +1380,15 @@ int bnxt_hwrm_func_driver_unregister(struct bnxt *bp, uint32_t flags) return 0; HWRM_PREP(&req, HWRM_FUNC_DRV_UNRGTR, BNXT_USE_CHIMP_MB); - req.flags = flags; rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB); HWRM_CHECK_RESULT(); HWRM_UNLOCK(); + PMD_DRV_LOG(DEBUG, "Port %u: Unregistered with fw\n", + bp->eth_dev->data->port_id); + return rc; } @@ -1468,8 +1473,6 @@ static int bnxt_hwrm_port_phy_cfg(struct bnxt *bp, struct bnxt_link_info *conf) HWRM_CHECK_RESULT(); HWRM_UNLOCK(); - PMD_DRV_LOG(DEBUG, "Port %u: Unregistered with fw\n", - bp->eth_dev->data->port_id); return rc; } @@ -1501,6 +1504,7 @@ static int bnxt_hwrm_port_phy_qcfg(struct bnxt *bp, link_info->support_speeds = rte_le_to_cpu_16(resp->support_speeds); link_info->auto_link_speed = rte_le_to_cpu_16(resp->auto_link_speed); + link_info->auto_link_speed_mask = rte_le_to_cpu_16(resp->auto_link_speed_mask); link_info->preemphasis = rte_le_to_cpu_32(resp->preemphasis); link_info->force_link_speed = rte_le_to_cpu_16(resp->force_link_speed); link_info->phy_ver[0] = resp->phy_maj; @@ -1514,6 +1518,7 @@ static int bnxt_hwrm_port_phy_qcfg(struct bnxt *bp, rte_le_to_cpu_16(resp->support_pam4_speeds); link_info->auto_pam4_link_speeds = rte_le_to_cpu_16(resp->auto_pam4_link_speed_mask); + link_info->module_status = resp->module_status; HWRM_UNLOCK(); PMD_DRV_LOG(DEBUG, "Link Speed:%d,Auto:%d:%x:%x,Support:%x,Force:%x\n", @@ -1542,7 +1547,7 @@ int bnxt_hwrm_port_phy_qcaps(struct bnxt *bp) rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB); - HWRM_CHECK_RESULT(); + HWRM_CHECK_RESULT_SILENT(); bp->port_cnt = resp->port_cnt; if (resp->supported_speeds_auto_mode) @@ -1554,6 +1559,12 @@ int bnxt_hwrm_port_phy_qcaps(struct bnxt *bp) HWRM_UNLOCK(); + /* Older firmware does not have supported_auto_speeds, so assume + * that all supported speeds can be autonegotiated. 
+ */ + if (link_info->auto_link_speed_mask && !link_info->support_auto_speeds) + link_info->support_auto_speeds = link_info->support_speeds; + return 0; } @@ -1808,6 +1819,9 @@ int bnxt_hwrm_ring_free(struct bnxt *bp, struct hwrm_ring_free_input req = {.req_type = 0 }; struct hwrm_ring_free_output *resp = bp->hwrm_cmd_resp_addr; + if (ring->fw_ring_id == INVALID_HW_RING_ID) + return -EINVAL; + HWRM_PREP(&req, HWRM_RING_FREE, BNXT_USE_CHIMP_MB); req.ring_type = ring_type; @@ -1815,6 +1829,7 @@ int bnxt_hwrm_ring_free(struct bnxt *bp, req.cmpl_ring = rte_cpu_to_le_16(cp_ring_id); rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB); + ring->fw_ring_id = INVALID_HW_RING_ID; if (rc || resp->error_code) { if (rc == 0 && resp->error_code) @@ -1857,6 +1872,10 @@ int bnxt_hwrm_ring_grp_alloc(struct bnxt *bp, unsigned int idx) struct hwrm_ring_grp_alloc_input req = {.req_type = 0 }; struct hwrm_ring_grp_alloc_output *resp = bp->hwrm_cmd_resp_addr; + /* Don't attempt to re-create the ring group if it is already created */ + if (bp->grp_info[idx].fw_grp_id != INVALID_HW_RING_ID) + return 0; + HWRM_PREP(&req, HWRM_RING_GRP_ALLOC, BNXT_USE_CHIMP_MB); req.cr = rte_cpu_to_le_16(bp->grp_info[idx].cp_fw_ring_id); @@ -1881,6 +1900,9 @@ int bnxt_hwrm_ring_grp_free(struct bnxt *bp, unsigned int idx) struct hwrm_ring_grp_free_input req = {.req_type = 0 }; struct hwrm_ring_grp_free_output *resp = bp->hwrm_cmd_resp_addr; + if (bp->grp_info[idx].fw_grp_id == INVALID_HW_RING_ID) + return 0; + HWRM_PREP(&req, HWRM_RING_GRP_FREE, BNXT_USE_CHIMP_MB); req.ring_group_id = rte_cpu_to_le_16(bp->grp_info[idx].fw_grp_id); @@ -1900,7 +1922,7 @@ int bnxt_hwrm_stat_clear(struct bnxt *bp, struct bnxt_cp_ring_info *cpr) struct hwrm_stat_ctx_clr_stats_input req = {.req_type = 0 }; struct hwrm_stat_ctx_clr_stats_output *resp = bp->hwrm_cmd_resp_addr; - if (cpr->hw_stats_ctx_id == (uint32_t)HWRM_NA_SIGNATURE) + if (cpr->hw_stats_ctx_id == HWRM_NA_SIGNATURE) return rc; HWRM_PREP(&req, HWRM_STAT_CTX_CLR_STATS, BNXT_USE_CHIMP_MB); @@ -1915,13 +1937,15 @@ int bnxt_hwrm_stat_clear(struct bnxt *bp, struct bnxt_cp_ring_info *cpr) return rc; } -int bnxt_hwrm_stat_ctx_alloc(struct bnxt *bp, struct bnxt_cp_ring_info *cpr, - unsigned int idx __rte_unused) +int bnxt_hwrm_stat_ctx_alloc(struct bnxt *bp, struct bnxt_cp_ring_info *cpr) { int rc; struct hwrm_stat_ctx_alloc_input req = {.req_type = 0 }; struct hwrm_stat_ctx_alloc_output *resp = bp->hwrm_cmd_resp_addr; + if (cpr->hw_stats_ctx_id != HWRM_NA_SIGNATURE) + return 0; + HWRM_PREP(&req, HWRM_STAT_CTX_ALLOC, BNXT_USE_CHIMP_MB); req.update_period_ms = rte_cpu_to_le_32(0); @@ -1939,13 +1963,15 @@ int bnxt_hwrm_stat_ctx_alloc(struct bnxt *bp, struct bnxt_cp_ring_info *cpr, return rc; } -int bnxt_hwrm_stat_ctx_free(struct bnxt *bp, struct bnxt_cp_ring_info *cpr, - unsigned int idx __rte_unused) +static int bnxt_hwrm_stat_ctx_free(struct bnxt *bp, struct bnxt_cp_ring_info *cpr) { int rc; struct hwrm_stat_ctx_free_input req = {.req_type = 0 }; struct hwrm_stat_ctx_free_output *resp = bp->hwrm_cmd_resp_addr; + if (cpr->hw_stats_ctx_id == HWRM_NA_SIGNATURE) + return 0; + HWRM_PREP(&req, HWRM_STAT_CTX_FREE, BNXT_USE_CHIMP_MB); req.stat_ctx_id = rte_cpu_to_le_32(cpr->hw_stats_ctx_id); @@ -1955,6 +1981,8 @@ int bnxt_hwrm_stat_ctx_free(struct bnxt *bp, struct bnxt_cp_ring_info *cpr, HWRM_CHECK_RESULT(); HWRM_UNLOCK(); + cpr->hw_stats_ctx_id = HWRM_NA_SIGNATURE; + return rc; } @@ -2600,48 +2628,54 @@ bnxt_free_all_hwrm_stat_ctxs(struct bnxt *bp) unsigned int i; struct bnxt_cp_ring_info 
*cpr; - for (i = 0; i < bp->rx_cp_nr_rings + bp->tx_cp_nr_rings; i++) { + for (i = 0; i < bp->rx_cp_nr_rings; i++) { - if (i >= bp->rx_cp_nr_rings) { - cpr = bp->tx_queues[i - bp->rx_cp_nr_rings]->cp_ring; - } else { - cpr = bp->rx_queues[i]->cp_ring; - if (BNXT_HAS_RING_GRPS(bp)) - bp->grp_info[i].fw_stats_ctx = -1; - } - if (cpr->hw_stats_ctx_id != HWRM_NA_SIGNATURE) { - rc = bnxt_hwrm_stat_ctx_free(bp, cpr, i); - cpr->hw_stats_ctx_id = HWRM_NA_SIGNATURE; - if (rc) - return rc; - } + cpr = bp->rx_queues[i]->cp_ring; + if (BNXT_HAS_RING_GRPS(bp)) + bp->grp_info[i].fw_stats_ctx = -1; + rc = bnxt_hwrm_stat_ctx_free(bp, cpr); + if (rc) + return rc; + } + + for (i = 0; i < bp->tx_cp_nr_rings; i++) { + cpr = bp->tx_queues[i]->cp_ring; + rc = bnxt_hwrm_stat_ctx_free(bp, cpr); + if (rc) + return rc; } + return 0; } int bnxt_alloc_all_hwrm_stat_ctxs(struct bnxt *bp) { + struct bnxt_cp_ring_info *cpr; unsigned int i; int rc = 0; - for (i = 0; i < bp->rx_cp_nr_rings + bp->tx_cp_nr_rings; i++) { - struct bnxt_tx_queue *txq; - struct bnxt_rx_queue *rxq; - struct bnxt_cp_ring_info *cpr; + for (i = 0; i < bp->rx_cp_nr_rings; i++) { + struct bnxt_rx_queue *rxq = bp->rx_queues[i]; - if (i >= bp->rx_cp_nr_rings) { - txq = bp->tx_queues[i - bp->rx_cp_nr_rings]; - cpr = txq->cp_ring; - } else { - rxq = bp->rx_queues[i]; - cpr = rxq->cp_ring; + cpr = rxq->cp_ring; + if (cpr->hw_stats_ctx_id == HWRM_NA_SIGNATURE) { + rc = bnxt_hwrm_stat_ctx_alloc(bp, cpr); + if (rc) + return rc; } + } - rc = bnxt_hwrm_stat_ctx_alloc(bp, cpr, i); + for (i = 0; i < bp->tx_cp_nr_rings; i++) { + struct bnxt_tx_queue *txq = bp->tx_queues[i]; - if (rc) - return rc; + cpr = txq->cp_ring; + if (cpr->hw_stats_ctx_id == HWRM_NA_SIGNATURE) { + rc = bnxt_hwrm_stat_ctx_alloc(bp, cpr); + if (rc) + return rc; + } } + return rc; } @@ -2674,11 +2708,9 @@ void bnxt_free_nq_ring(struct bnxt *bp, struct bnxt_cp_ring_info *cpr) bnxt_hwrm_ring_free(bp, cp_ring, HWRM_RING_FREE_INPUT_RING_TYPE_NQ, INVALID_HW_RING_ID); - cp_ring->fw_ring_id = INVALID_HW_RING_ID; - memset(cpr->cp_desc_ring, 0, cpr->cp_ring_struct->ring_size * - sizeof(*cpr->cp_desc_ring)); + memset(cpr->cp_desc_ring, 0, + cpr->cp_ring_struct->ring_size * sizeof(*cpr->cp_desc_ring)); cpr->cp_raw_cons = 0; - cpr->valid = 0; } void bnxt_free_cp_ring(struct bnxt *bp, struct bnxt_cp_ring_info *cpr) @@ -2686,13 +2718,11 @@ void bnxt_free_cp_ring(struct bnxt *bp, struct bnxt_cp_ring_info *cpr) struct bnxt_ring *cp_ring = cpr->cp_ring_struct; bnxt_hwrm_ring_free(bp, cp_ring, - HWRM_RING_FREE_INPUT_RING_TYPE_L2_CMPL, - INVALID_HW_RING_ID); - cp_ring->fw_ring_id = INVALID_HW_RING_ID; - memset(cpr->cp_desc_ring, 0, cpr->cp_ring_struct->ring_size * - sizeof(*cpr->cp_desc_ring)); + HWRM_RING_FREE_INPUT_RING_TYPE_L2_CMPL, + INVALID_HW_RING_ID); + memset(cpr->cp_desc_ring, 0, + cpr->cp_ring_struct->ring_size * sizeof(*cpr->cp_desc_ring)); cpr->cp_raw_cons = 0; - cpr->valid = 0; } void bnxt_free_hwrm_rx_ring(struct bnxt *bp, int queue_index) @@ -2702,63 +2732,67 @@ void bnxt_free_hwrm_rx_ring(struct bnxt *bp, int queue_index) struct bnxt_ring *ring = rxr->rx_ring_struct; struct bnxt_cp_ring_info *cpr = rxq->cp_ring; - if (ring->fw_ring_id != INVALID_HW_RING_ID) { - bnxt_hwrm_ring_free(bp, ring, - HWRM_RING_FREE_INPUT_RING_TYPE_RX, - cpr->cp_ring_struct->fw_ring_id); - ring->fw_ring_id = INVALID_HW_RING_ID; - if (BNXT_HAS_RING_GRPS(bp)) - bp->grp_info[queue_index].rx_fw_ring_id = - INVALID_HW_RING_ID; - } + if (BNXT_HAS_RING_GRPS(bp)) + bnxt_hwrm_ring_grp_free(bp, queue_index); + + 
bnxt_hwrm_ring_free(bp, ring, + HWRM_RING_FREE_INPUT_RING_TYPE_RX, + cpr->cp_ring_struct->fw_ring_id); + if (BNXT_HAS_RING_GRPS(bp)) + bp->grp_info[queue_index].rx_fw_ring_id = INVALID_HW_RING_ID; + + /* Check agg ring struct explicitly. + * bnxt_need_agg_ring() returns the current state of offload flags, + * but we may have to deal with agg ring struct before the offload + * flags are updated. + */ + if (!bnxt_need_agg_ring(bp->eth_dev) || rxr->ag_ring_struct == NULL) + goto no_agg; + ring = rxr->ag_ring_struct; - if (ring->fw_ring_id != INVALID_HW_RING_ID) { - bnxt_hwrm_ring_free(bp, ring, - BNXT_CHIP_P5(bp) ? - HWRM_RING_FREE_INPUT_RING_TYPE_RX_AGG : - HWRM_RING_FREE_INPUT_RING_TYPE_RX, - cpr->cp_ring_struct->fw_ring_id); - if (BNXT_HAS_RING_GRPS(bp)) - bp->grp_info[queue_index].ag_fw_ring_id = - INVALID_HW_RING_ID; - } - if (cpr->cp_ring_struct->fw_ring_id != INVALID_HW_RING_ID) - bnxt_free_cp_ring(bp, cpr); + bnxt_hwrm_ring_free(bp, ring, + BNXT_CHIP_P5(bp) ? + HWRM_RING_FREE_INPUT_RING_TYPE_RX_AGG : + HWRM_RING_FREE_INPUT_RING_TYPE_RX, + cpr->cp_ring_struct->fw_ring_id); + if (BNXT_HAS_RING_GRPS(bp)) + bp->grp_info[queue_index].ag_fw_ring_id = INVALID_HW_RING_ID; + +no_agg: + bnxt_hwrm_stat_ctx_free(bp, cpr); + + bnxt_free_cp_ring(bp, cpr); if (BNXT_HAS_RING_GRPS(bp)) bp->grp_info[queue_index].cp_fw_ring_id = INVALID_HW_RING_ID; } +int bnxt_hwrm_rx_ring_reset(struct bnxt *bp, int queue_index) +{ + int rc; + struct hwrm_ring_reset_input req = {.req_type = 0 }; + struct hwrm_ring_reset_output *resp = bp->hwrm_cmd_resp_addr; + + HWRM_PREP(&req, HWRM_RING_RESET, BNXT_USE_CHIMP_MB); + + req.ring_type = HWRM_RING_RESET_INPUT_RING_TYPE_RX_RING_GRP; + req.ring_id = rte_cpu_to_le_16(bp->grp_info[queue_index].fw_grp_id); + rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB); + + HWRM_CHECK_RESULT(); + + HWRM_UNLOCK(); + + return rc; +} + static int bnxt_free_all_hwrm_rings(struct bnxt *bp) { unsigned int i; - for (i = 0; i < bp->tx_cp_nr_rings; i++) { - struct bnxt_tx_queue *txq = bp->tx_queues[i]; - struct bnxt_tx_ring_info *txr = txq->tx_ring; - struct bnxt_ring *ring = txr->tx_ring_struct; - struct bnxt_cp_ring_info *cpr = txq->cp_ring; - - if (ring->fw_ring_id != INVALID_HW_RING_ID) { - bnxt_hwrm_ring_free(bp, ring, - HWRM_RING_FREE_INPUT_RING_TYPE_TX, - cpr->cp_ring_struct->fw_ring_id); - ring->fw_ring_id = INVALID_HW_RING_ID; - memset(txr->tx_desc_ring, 0, - txr->tx_ring_struct->ring_size * - sizeof(*txr->tx_desc_ring)); - memset(txr->tx_buf_ring, 0, - txr->tx_ring_struct->ring_size * - sizeof(*txr->tx_buf_ring)); - txr->tx_raw_prod = 0; - txr->tx_raw_cons = 0; - } - if (cpr->cp_ring_struct->fw_ring_id != INVALID_HW_RING_ID) { - bnxt_free_cp_ring(bp, cpr); - cpr->cp_ring_struct->fw_ring_id = INVALID_HW_RING_ID; - } - } + for (i = 0; i < bp->tx_cp_nr_rings; i++) + bnxt_free_hwrm_tx_ring(bp, i); for (i = 0; i < bp->rx_cp_nr_rings; i++) bnxt_free_hwrm_rx_ring(bp, i); @@ -2804,7 +2838,7 @@ int bnxt_alloc_hwrm_resources(struct bnxt *bp) sprintf(type, "bnxt_hwrm_" PCI_PRI_FMT, pdev->addr.domain, pdev->addr.bus, pdev->addr.devid, pdev->addr.function); - bp->max_resp_len = HWRM_MAX_RESP_LEN; + bp->max_resp_len = BNXT_PAGE_SIZE; bp->hwrm_cmd_resp_addr = rte_malloc(type, bp->max_resp_len, 0); if (bp->hwrm_cmd_resp_addr == NULL) return -ENOMEM; @@ -2945,12 +2979,12 @@ static uint16_t bnxt_parse_eth_link_duplex(uint32_t conf_link_speed) { uint8_t hw_link_duplex = HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_BOTH; - if ((conf_link_speed & ETH_LINK_SPEED_FIXED) == ETH_LINK_SPEED_AUTONEG) + if 
((conf_link_speed & RTE_ETH_LINK_SPEED_FIXED) == RTE_ETH_LINK_SPEED_AUTONEG) return HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_BOTH; switch (conf_link_speed) { - case ETH_LINK_SPEED_10M_HD: - case ETH_LINK_SPEED_100M_HD: + case RTE_ETH_LINK_SPEED_10M_HD: + case RTE_ETH_LINK_SPEED_100M_HD: /* FALLTHROUGH */ return HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_HALF; } @@ -2967,51 +3001,51 @@ static uint16_t bnxt_parse_eth_link_speed(uint32_t conf_link_speed, { uint16_t eth_link_speed = 0; - if (conf_link_speed == ETH_LINK_SPEED_AUTONEG) - return ETH_LINK_SPEED_AUTONEG; + if (conf_link_speed == RTE_ETH_LINK_SPEED_AUTONEG) + return RTE_ETH_LINK_SPEED_AUTONEG; - switch (conf_link_speed & ~ETH_LINK_SPEED_FIXED) { - case ETH_LINK_SPEED_100M: - case ETH_LINK_SPEED_100M_HD: + switch (conf_link_speed & ~RTE_ETH_LINK_SPEED_FIXED) { + case RTE_ETH_LINK_SPEED_100M: + case RTE_ETH_LINK_SPEED_100M_HD: /* FALLTHROUGH */ eth_link_speed = HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_100MB; break; - case ETH_LINK_SPEED_1G: + case RTE_ETH_LINK_SPEED_1G: eth_link_speed = HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_1GB; break; - case ETH_LINK_SPEED_2_5G: + case RTE_ETH_LINK_SPEED_2_5G: eth_link_speed = HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_2_5GB; break; - case ETH_LINK_SPEED_10G: + case RTE_ETH_LINK_SPEED_10G: eth_link_speed = HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_10GB; break; - case ETH_LINK_SPEED_20G: + case RTE_ETH_LINK_SPEED_20G: eth_link_speed = HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_20GB; break; - case ETH_LINK_SPEED_25G: + case RTE_ETH_LINK_SPEED_25G: eth_link_speed = HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_25GB; break; - case ETH_LINK_SPEED_40G: + case RTE_ETH_LINK_SPEED_40G: eth_link_speed = HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_40GB; break; - case ETH_LINK_SPEED_50G: + case RTE_ETH_LINK_SPEED_50G: eth_link_speed = pam4_link ? HWRM_PORT_PHY_CFG_INPUT_FORCE_PAM4_LINK_SPEED_50GB : HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_50GB; break; - case ETH_LINK_SPEED_100G: + case RTE_ETH_LINK_SPEED_100G: eth_link_speed = pam4_link ? 
HWRM_PORT_PHY_CFG_INPUT_FORCE_PAM4_LINK_SPEED_100GB : HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_100GB; break; - case ETH_LINK_SPEED_200G: + case RTE_ETH_LINK_SPEED_200G: eth_link_speed = HWRM_PORT_PHY_CFG_INPUT_FORCE_PAM4_LINK_SPEED_200GB; break; @@ -3024,11 +3058,11 @@ static uint16_t bnxt_parse_eth_link_speed(uint32_t conf_link_speed, return eth_link_speed; } -#define BNXT_SUPPORTED_SPEEDS (ETH_LINK_SPEED_100M | ETH_LINK_SPEED_100M_HD | \ - ETH_LINK_SPEED_1G | ETH_LINK_SPEED_2_5G | \ - ETH_LINK_SPEED_10G | ETH_LINK_SPEED_20G | ETH_LINK_SPEED_25G | \ - ETH_LINK_SPEED_40G | ETH_LINK_SPEED_50G | \ - ETH_LINK_SPEED_100G | ETH_LINK_SPEED_200G) +#define BNXT_SUPPORTED_SPEEDS (RTE_ETH_LINK_SPEED_100M | RTE_ETH_LINK_SPEED_100M_HD | \ + RTE_ETH_LINK_SPEED_1G | RTE_ETH_LINK_SPEED_2_5G | \ + RTE_ETH_LINK_SPEED_10G | RTE_ETH_LINK_SPEED_20G | RTE_ETH_LINK_SPEED_25G | \ + RTE_ETH_LINK_SPEED_40G | RTE_ETH_LINK_SPEED_50G | \ + RTE_ETH_LINK_SPEED_100G | RTE_ETH_LINK_SPEED_200G) static int bnxt_validate_link_speed(struct bnxt *bp) { @@ -3037,13 +3071,13 @@ static int bnxt_validate_link_speed(struct bnxt *bp) uint32_t link_speed_capa; uint32_t one_speed; - if (link_speed == ETH_LINK_SPEED_AUTONEG) + if (link_speed == RTE_ETH_LINK_SPEED_AUTONEG) return 0; link_speed_capa = bnxt_get_speed_capabilities(bp); - if (link_speed & ETH_LINK_SPEED_FIXED) { - one_speed = link_speed & ~ETH_LINK_SPEED_FIXED; + if (link_speed & RTE_ETH_LINK_SPEED_FIXED) { + one_speed = link_speed & ~RTE_ETH_LINK_SPEED_FIXED; if (one_speed & (one_speed - 1)) { PMD_DRV_LOG(ERR, @@ -3073,71 +3107,71 @@ bnxt_parse_eth_link_speed_mask(struct bnxt *bp, uint32_t link_speed) { uint16_t ret = 0; - if (link_speed == ETH_LINK_SPEED_AUTONEG) { + if (link_speed == RTE_ETH_LINK_SPEED_AUTONEG) { if (bp->link_info->support_speeds) return bp->link_info->support_speeds; link_speed = BNXT_SUPPORTED_SPEEDS; } - if (link_speed & ETH_LINK_SPEED_100M) + if (link_speed & RTE_ETH_LINK_SPEED_100M) ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_100MB; - if (link_speed & ETH_LINK_SPEED_100M_HD) + if (link_speed & RTE_ETH_LINK_SPEED_100M_HD) ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_100MB; - if (link_speed & ETH_LINK_SPEED_1G) + if (link_speed & RTE_ETH_LINK_SPEED_1G) ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_1GB; - if (link_speed & ETH_LINK_SPEED_2_5G) + if (link_speed & RTE_ETH_LINK_SPEED_2_5G) ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_2_5GB; - if (link_speed & ETH_LINK_SPEED_10G) + if (link_speed & RTE_ETH_LINK_SPEED_10G) ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_10GB; - if (link_speed & ETH_LINK_SPEED_20G) + if (link_speed & RTE_ETH_LINK_SPEED_20G) ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_20GB; - if (link_speed & ETH_LINK_SPEED_25G) + if (link_speed & RTE_ETH_LINK_SPEED_25G) ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_25GB; - if (link_speed & ETH_LINK_SPEED_40G) + if (link_speed & RTE_ETH_LINK_SPEED_40G) ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_40GB; - if (link_speed & ETH_LINK_SPEED_50G) + if (link_speed & RTE_ETH_LINK_SPEED_50G) ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_50GB; - if (link_speed & ETH_LINK_SPEED_100G) + if (link_speed & RTE_ETH_LINK_SPEED_100G) ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_100GB; - if (link_speed & ETH_LINK_SPEED_200G) + if (link_speed & RTE_ETH_LINK_SPEED_200G) ret |= HWRM_PORT_PHY_CFG_INPUT_FORCE_PAM4_LINK_SPEED_200GB; return ret; } static uint32_t bnxt_parse_hw_link_speed(uint16_t hw_link_speed) { - uint32_t eth_link_speed = 
ETH_SPEED_NUM_NONE; + uint32_t eth_link_speed = RTE_ETH_SPEED_NUM_NONE; switch (hw_link_speed) { case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_100MB: - eth_link_speed = ETH_SPEED_NUM_100M; + eth_link_speed = RTE_ETH_SPEED_NUM_100M; break; case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_1GB: - eth_link_speed = ETH_SPEED_NUM_1G; + eth_link_speed = RTE_ETH_SPEED_NUM_1G; break; case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_2_5GB: - eth_link_speed = ETH_SPEED_NUM_2_5G; + eth_link_speed = RTE_ETH_SPEED_NUM_2_5G; break; case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_10GB: - eth_link_speed = ETH_SPEED_NUM_10G; + eth_link_speed = RTE_ETH_SPEED_NUM_10G; break; case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_20GB: - eth_link_speed = ETH_SPEED_NUM_20G; + eth_link_speed = RTE_ETH_SPEED_NUM_20G; break; case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_25GB: - eth_link_speed = ETH_SPEED_NUM_25G; + eth_link_speed = RTE_ETH_SPEED_NUM_25G; break; case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_40GB: - eth_link_speed = ETH_SPEED_NUM_40G; + eth_link_speed = RTE_ETH_SPEED_NUM_40G; break; case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_50GB: - eth_link_speed = ETH_SPEED_NUM_50G; + eth_link_speed = RTE_ETH_SPEED_NUM_50G; break; case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_100GB: - eth_link_speed = ETH_SPEED_NUM_100G; + eth_link_speed = RTE_ETH_SPEED_NUM_100G; break; case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_200GB: - eth_link_speed = ETH_SPEED_NUM_200G; + eth_link_speed = RTE_ETH_SPEED_NUM_200G; break; case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_2GB: default: @@ -3150,16 +3184,16 @@ static uint32_t bnxt_parse_hw_link_speed(uint16_t hw_link_speed) static uint16_t bnxt_parse_hw_link_duplex(uint16_t hw_link_duplex) { - uint16_t eth_link_duplex = ETH_LINK_FULL_DUPLEX; + uint16_t eth_link_duplex = RTE_ETH_LINK_FULL_DUPLEX; switch (hw_link_duplex) { case HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_BOTH: case HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_FULL: /* FALLTHROUGH */ - eth_link_duplex = ETH_LINK_FULL_DUPLEX; + eth_link_duplex = RTE_ETH_LINK_FULL_DUPLEX; break; case HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_HALF: - eth_link_duplex = ETH_LINK_HALF_DUPLEX; + eth_link_duplex = RTE_ETH_LINK_HALF_DUPLEX; break; default: PMD_DRV_LOG(ERR, "HWRM link duplex %d not defined\n", @@ -3188,12 +3222,12 @@ int bnxt_get_hwrm_link_config(struct bnxt *bp, struct rte_eth_link *link) link->link_speed = bnxt_parse_hw_link_speed(link_info->link_speed); else - link->link_speed = ETH_SPEED_NUM_NONE; + link->link_speed = RTE_ETH_SPEED_NUM_NONE; link->link_duplex = bnxt_parse_hw_link_duplex(link_info->duplex); link->link_status = link_info->link_up; link->link_autoneg = link_info->auto_mode == HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_MODE_NONE ? - ETH_LINK_FIXED : ETH_LINK_AUTONEG; + RTE_ETH_LINK_FIXED : RTE_ETH_LINK_AUTONEG; exit: return rc; } @@ -3219,7 +3253,7 @@ int bnxt_set_hwrm_link_config(struct bnxt *bp, bool link_up) autoneg = bnxt_check_eth_link_autoneg(dev_conf->link_speeds); if (BNXT_CHIP_P5(bp) && - dev_conf->link_speeds == ETH_LINK_SPEED_40G) { + dev_conf->link_speeds == RTE_ETH_LINK_SPEED_40G) { /* 40G is not supported as part of media auto detect. * The speed should be forced and autoneg disabled * to configure 40G speed. @@ -3237,15 +3271,8 @@ int bnxt_set_hwrm_link_config(struct bnxt *bp, bool link_up) speed = bnxt_parse_eth_link_speed(dev_conf->link_speeds, bp->link_info->link_signal_mode); link_req.phy_flags = HWRM_PORT_PHY_CFG_INPUT_FLAGS_RESET_PHY; - /* Autoneg can be done only when the FW allows. 
- * When user configures fixed speed of 40G and later changes to - * any other speed, auto_link_speed/force_link_speed is still set - * to 40G until link comes up at new speed. - */ - if (autoneg == 1 && - !(!BNXT_CHIP_P5(bp) && - (bp->link_info->auto_link_speed || - bp->link_info->force_link_speed))) { + /* Autoneg can be done only when the FW allows. */ + if (autoneg == 1 && bp->link_info->support_auto_speeds) { link_req.phy_flags |= HWRM_PORT_PHY_CFG_INPUT_FLAGS_RESTART_AUTONEG; link_req.auto_link_speed_mask = @@ -3301,7 +3328,6 @@ error: return rc; } -/* JIRA 22088 */ int bnxt_hwrm_func_qcfg(struct bnxt *bp, uint16_t *mtu) { struct hwrm_func_qcfg_input req = {0}; @@ -3318,8 +3344,7 @@ int bnxt_hwrm_func_qcfg(struct bnxt *bp, uint16_t *mtu) HWRM_CHECK_RESULT(); - /* Hard Coded.. 0xfff VLAN ID mask */ - bp->vlan = rte_le_to_cpu_16(resp->vlan) & 0xfff; + bp->vlan = rte_le_to_cpu_16(resp->vlan) & RTE_ETH_VLAN_ID_MAX; svif_info = rte_le_to_cpu_16(resp->svif_info); if (svif_info & HWRM_FUNC_QCFG_OUTPUT_SVIF_INFO_SVIF_VALID) @@ -3343,7 +3368,7 @@ int bnxt_hwrm_func_qcfg(struct bnxt *bp, uint16_t *mtu) } if (mtu) - *mtu = rte_le_to_cpu_16(resp->mtu); + *mtu = rte_le_to_cpu_16(resp->admin_mtu); switch (resp->port_partition_type) { case HWRM_FUNC_QCFG_OUTPUT_PORT_PARTITION_TYPE_NPAR1_0: @@ -3385,23 +3410,13 @@ int bnxt_hwrm_parent_pf_qcfg(struct bnxt *bp) rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB); - HWRM_CHECK_RESULT(); + HWRM_CHECK_RESULT_SILENT(); memcpy(bp->parent->mac_addr, resp->mac_address, RTE_ETHER_ADDR_LEN); bp->parent->vnic = rte_le_to_cpu_16(resp->dflt_vnic_id); bp->parent->fid = rte_le_to_cpu_16(resp->fid); bp->parent->port_id = rte_le_to_cpu_16(resp->port_id); - /* FIXME: Temporary workaround - remove when firmware issue is fixed. */ - if (bp->parent->vnic == 0) { - PMD_DRV_LOG(ERR, "Error: parent VNIC unavailable.\n"); - /* Use hard-coded values appropriate for current Wh+ fw. 
*/ - if (bp->parent->fid == 2) - bp->parent->vnic = 0x100; - else - bp->parent->vnic = 1; - } - HWRM_UNLOCK(); return 0; @@ -3471,7 +3486,8 @@ static int bnxt_hwrm_pf_func_cfg(struct bnxt *bp, uint32_t enables; int rc; - enables = HWRM_FUNC_CFG_INPUT_ENABLES_MTU | + enables = HWRM_FUNC_CFG_INPUT_ENABLES_ADMIN_MTU | + HWRM_FUNC_CFG_INPUT_ENABLES_HOST_MTU | HWRM_FUNC_CFG_INPUT_ENABLES_MRU | HWRM_FUNC_CFG_INPUT_ENABLES_NUM_RSSCOS_CTXS | HWRM_FUNC_CFG_INPUT_ENABLES_NUM_STAT_CTXS | @@ -3491,7 +3507,8 @@ static int bnxt_hwrm_pf_func_cfg(struct bnxt *bp, } req.flags = rte_cpu_to_le_32(bp->pf->func_cfg_flags); - req.mtu = rte_cpu_to_le_16(BNXT_MAX_MTU); + req.admin_mtu = rte_cpu_to_le_16(BNXT_MAX_MTU); + req.host_mtu = rte_cpu_to_le_16(bp->eth_dev->data->mtu); req.mru = rte_cpu_to_le_16(BNXT_VNIC_MRU(bp->eth_dev->data->mtu)); req.num_rsscos_ctxs = rte_cpu_to_le_16(pf_resc->num_rsscos_ctxs); req.num_stat_ctxs = rte_cpu_to_le_16(pf_resc->num_stat_ctxs); @@ -3551,7 +3568,7 @@ bnxt_fill_vf_func_cfg_req_old(struct bnxt *bp, struct hwrm_func_cfg_input *req, int num_vfs) { - req->enables = rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_MTU | + req->enables = rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_ADMIN_MTU | HWRM_FUNC_CFG_INPUT_ENABLES_MRU | HWRM_FUNC_CFG_INPUT_ENABLES_NUM_RSSCOS_CTXS | HWRM_FUNC_CFG_INPUT_ENABLES_NUM_STAT_CTXS | @@ -3562,9 +3579,9 @@ bnxt_fill_vf_func_cfg_req_old(struct bnxt *bp, HWRM_FUNC_CFG_INPUT_ENABLES_NUM_VNICS | HWRM_FUNC_CFG_INPUT_ENABLES_NUM_HW_RING_GRPS); - req->mtu = rte_cpu_to_le_16(bp->eth_dev->data->mtu + RTE_ETHER_HDR_LEN + - RTE_ETHER_CRC_LEN + VLAN_TAG_SIZE * - BNXT_NUM_VLANS); + req->admin_mtu = rte_cpu_to_le_16(bp->eth_dev->data->mtu + RTE_ETHER_HDR_LEN + + RTE_ETHER_CRC_LEN + VLAN_TAG_SIZE * + BNXT_NUM_VLANS); req->mru = rte_cpu_to_le_16(BNXT_VNIC_MRU(bp->eth_dev->data->mtu)); req->num_rsscos_ctxs = rte_cpu_to_le_16(bp->max_rsscos_ctx / (num_vfs + 1)); @@ -4304,8 +4321,20 @@ int bnxt_hwrm_exec_fwd_resp(struct bnxt *bp, uint16_t target_id, return rc; } -int bnxt_hwrm_ctx_qstats(struct bnxt *bp, uint32_t cid, int idx, - struct rte_eth_stats *stats, uint8_t rx) +static void bnxt_update_prev_stat(uint64_t *cntr, uint64_t *prev_cntr) +{ + /* One of the HW stat values that make up this counter was zero as + * returned by HW in this iteration, so use the previous + * iteration's counter value + */ + if (*prev_cntr && *cntr == 0) + *cntr = *prev_cntr; + else + *prev_cntr = *cntr; +} + +int bnxt_hwrm_ring_stats(struct bnxt *bp, uint32_t cid, int idx, + struct bnxt_ring_stats *ring_stats, bool rx) { int rc = 0; struct hwrm_stat_ctx_query_input req = {.req_type = 0}; @@ -4320,21 +4349,85 @@ int bnxt_hwrm_ctx_qstats(struct bnxt *bp, uint32_t cid, int idx, HWRM_CHECK_RESULT(); if (rx) { - stats->q_ipackets[idx] = rte_le_to_cpu_64(resp->rx_ucast_pkts); - stats->q_ipackets[idx] += rte_le_to_cpu_64(resp->rx_mcast_pkts); - stats->q_ipackets[idx] += rte_le_to_cpu_64(resp->rx_bcast_pkts); - stats->q_ibytes[idx] = rte_le_to_cpu_64(resp->rx_ucast_bytes); - stats->q_ibytes[idx] += rte_le_to_cpu_64(resp->rx_mcast_bytes); - stats->q_ibytes[idx] += rte_le_to_cpu_64(resp->rx_bcast_bytes); - stats->q_errors[idx] = rte_le_to_cpu_64(resp->rx_discard_pkts); - stats->q_errors[idx] += rte_le_to_cpu_64(resp->rx_error_pkts); + struct bnxt_ring_stats *prev_stats = &bp->prev_rx_ring_stats[idx]; + + ring_stats->rx_ucast_pkts = rte_le_to_cpu_64(resp->rx_ucast_pkts); + bnxt_update_prev_stat(&ring_stats->rx_ucast_pkts, + &prev_stats->rx_ucast_pkts); + + ring_stats->rx_mcast_pkts = 
rte_le_to_cpu_64(resp->rx_mcast_pkts); + bnxt_update_prev_stat(&ring_stats->rx_mcast_pkts, + &prev_stats->rx_mcast_pkts); + + ring_stats->rx_bcast_pkts = rte_le_to_cpu_64(resp->rx_bcast_pkts); + bnxt_update_prev_stat(&ring_stats->rx_bcast_pkts, + &prev_stats->rx_bcast_pkts); + + ring_stats->rx_ucast_bytes = rte_le_to_cpu_64(resp->rx_ucast_bytes); + bnxt_update_prev_stat(&ring_stats->rx_ucast_bytes, + &prev_stats->rx_ucast_bytes); + + ring_stats->rx_mcast_bytes = rte_le_to_cpu_64(resp->rx_mcast_bytes); + bnxt_update_prev_stat(&ring_stats->rx_mcast_bytes, + &prev_stats->rx_mcast_bytes); + + ring_stats->rx_bcast_bytes = rte_le_to_cpu_64(resp->rx_bcast_bytes); + bnxt_update_prev_stat(&ring_stats->rx_bcast_bytes, + &prev_stats->rx_bcast_bytes); + + ring_stats->rx_discard_pkts = rte_le_to_cpu_64(resp->rx_discard_pkts); + bnxt_update_prev_stat(&ring_stats->rx_discard_pkts, + &prev_stats->rx_discard_pkts); + + ring_stats->rx_error_pkts = rte_le_to_cpu_64(resp->rx_error_pkts); + bnxt_update_prev_stat(&ring_stats->rx_error_pkts, + &prev_stats->rx_error_pkts); + + ring_stats->rx_agg_pkts = rte_le_to_cpu_64(resp->rx_agg_pkts); + bnxt_update_prev_stat(&ring_stats->rx_agg_pkts, + &prev_stats->rx_agg_pkts); + + ring_stats->rx_agg_bytes = rte_le_to_cpu_64(resp->rx_agg_bytes); + bnxt_update_prev_stat(&ring_stats->rx_agg_bytes, + &prev_stats->rx_agg_bytes); + + ring_stats->rx_agg_events = rte_le_to_cpu_64(resp->rx_agg_events); + bnxt_update_prev_stat(&ring_stats->rx_agg_events, + &prev_stats->rx_agg_events); + + ring_stats->rx_agg_aborts = rte_le_to_cpu_64(resp->rx_agg_aborts); + bnxt_update_prev_stat(&ring_stats->rx_agg_aborts, + &prev_stats->rx_agg_aborts); } else { - stats->q_opackets[idx] = rte_le_to_cpu_64(resp->tx_ucast_pkts); - stats->q_opackets[idx] += rte_le_to_cpu_64(resp->tx_mcast_pkts); - stats->q_opackets[idx] += rte_le_to_cpu_64(resp->tx_bcast_pkts); - stats->q_obytes[idx] = rte_le_to_cpu_64(resp->tx_ucast_bytes); - stats->q_obytes[idx] += rte_le_to_cpu_64(resp->tx_mcast_bytes); - stats->q_obytes[idx] += rte_le_to_cpu_64(resp->tx_bcast_bytes); + struct bnxt_ring_stats *prev_stats = &bp->prev_tx_ring_stats[idx]; + + ring_stats->tx_ucast_pkts = rte_le_to_cpu_64(resp->tx_ucast_pkts); + bnxt_update_prev_stat(&ring_stats->tx_ucast_pkts, + &prev_stats->tx_ucast_pkts); + + ring_stats->tx_mcast_pkts = rte_le_to_cpu_64(resp->tx_mcast_pkts); + bnxt_update_prev_stat(&ring_stats->tx_mcast_pkts, + &prev_stats->tx_mcast_pkts); + + ring_stats->tx_bcast_pkts = rte_le_to_cpu_64(resp->tx_bcast_pkts); + bnxt_update_prev_stat(&ring_stats->tx_bcast_pkts, + &prev_stats->tx_bcast_pkts); + + ring_stats->tx_ucast_bytes = rte_le_to_cpu_64(resp->tx_ucast_bytes); + bnxt_update_prev_stat(&ring_stats->tx_ucast_bytes, + &prev_stats->tx_ucast_bytes); + + ring_stats->tx_mcast_bytes = rte_le_to_cpu_64(resp->tx_mcast_bytes); + bnxt_update_prev_stat(&ring_stats->tx_mcast_bytes, + &prev_stats->tx_mcast_bytes); + + ring_stats->tx_bcast_bytes = rte_le_to_cpu_64(resp->tx_bcast_bytes); + bnxt_update_prev_stat(&ring_stats->tx_bcast_bytes, + &prev_stats->tx_bcast_bytes); + + ring_stats->tx_discard_pkts = rte_le_to_cpu_64(resp->tx_discard_pkts); + bnxt_update_prev_stat(&ring_stats->tx_discard_pkts, + &prev_stats->tx_discard_pkts); } HWRM_UNLOCK(); @@ -4398,7 +4491,7 @@ int bnxt_hwrm_port_led_qcaps(struct bnxt *bp) req.port_id = bp->pf->port_id; rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB); - HWRM_CHECK_RESULT(); + HWRM_CHECK_RESULT_SILENT(); if (resp->num_leds > 0 && resp->num_leds < BNXT_MAX_LED) { unsigned int 
i; @@ -4579,7 +4672,6 @@ int bnxt_hwrm_erase_nvram_directory(struct bnxt *bp, uint8_t index) return rc; } - int bnxt_hwrm_flash_nvram(struct bnxt *bp, uint16_t dir_type, uint16_t dir_ordinal, uint16_t dir_ext, uint16_t dir_attr, const uint8_t *data, @@ -5010,7 +5102,7 @@ static int bnxt_vnic_rss_configure_p5(struct bnxt *bp, struct bnxt_vnic_info *vnic) { struct hwrm_vnic_rss_cfg_output *resp = bp->hwrm_cmd_resp_addr; - uint8_t *rx_queue_state = bp->eth_dev->data->rx_queue_state; + uint8_t *rxq_state = bp->eth_dev->data->rx_queue_state; struct hwrm_vnic_rss_cfg_input req = {.req_type = 0 }; struct bnxt_rx_queue **rxqs = bp->rx_queues; uint16_t *ring_tbl = vnic->rss_table; @@ -5044,8 +5136,7 @@ bnxt_vnic_rss_configure_p5(struct bnxt *bp, struct bnxt_vnic_info *vnic) /* Find next active ring. */ for (cnt = 0; cnt < max_rings; cnt++) { - if (rx_queue_state[k] != - RTE_ETH_QUEUE_STATE_STOPPED) + if (rxq_state[k] != RTE_ETH_QUEUE_STATE_STOPPED) break; if (++k == max_rings) k = 0; @@ -5634,11 +5725,11 @@ int bnxt_hwrm_error_recovery_qcfg(struct bnxt *bp) /* FW returned values are in units of 100msec */ info->driver_polling_freq = rte_le_to_cpu_32(resp->driver_polling_freq) * 100; - info->master_func_wait_period = + info->primary_func_wait_period = rte_le_to_cpu_32(resp->master_func_wait_period) * 100; info->normal_func_wait_period = rte_le_to_cpu_32(resp->normal_func_wait_period) * 100; - info->master_func_wait_period_after_reset = + info->primary_func_wait_period_after_reset = rte_le_to_cpu_32(resp->master_func_wait_period_after_reset) * 100; info->max_bailout_time_after_reset = rte_le_to_cpu_32(resp->max_bailout_time_after_reset) * 100; @@ -6057,6 +6148,7 @@ int bnxt_hwrm_poll_ver_get(struct bnxt *bp) int rc = 0; bp->max_req_len = HWRM_MAX_REQ_LEN; + bp->max_resp_len = BNXT_PAGE_SIZE; bp->hwrm_cmd_timeout = SHORT_HWRM_CMD_TIMEOUT; HWRM_PREP(&req, HWRM_VER_GET, BNXT_USE_CHIMP_MB); @@ -6075,3 +6167,82 @@ int bnxt_hwrm_poll_ver_get(struct bnxt *bp) return rc; } + +int bnxt_hwrm_read_sfp_module_eeprom_info(struct bnxt *bp, uint16_t i2c_addr, + uint16_t page_number, uint16_t start_addr, + uint16_t data_length, uint8_t *buf) +{ + struct hwrm_port_phy_i2c_read_output *resp = bp->hwrm_cmd_resp_addr; + struct hwrm_port_phy_i2c_read_input req = {0}; + uint32_t enables = HWRM_PORT_PHY_I2C_READ_INPUT_ENABLES_PAGE_OFFSET; + int rc, byte_offset = 0; + + do { + uint16_t xfer_size; + + HWRM_PREP(&req, HWRM_PORT_PHY_I2C_READ, BNXT_USE_CHIMP_MB); + req.i2c_slave_addr = i2c_addr; + req.page_number = rte_cpu_to_le_16(page_number); + req.port_id = rte_cpu_to_le_16(bp->pf->port_id); + + xfer_size = RTE_MIN(data_length, BNXT_MAX_PHY_I2C_RESP_SIZE); + req.page_offset = rte_cpu_to_le_16(start_addr + byte_offset); + req.data_length = xfer_size; + req.enables = rte_cpu_to_le_32(start_addr + byte_offset ? 
enables : 0); + rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB); + HWRM_CHECK_RESULT(); + + memcpy(buf + byte_offset, resp->data, xfer_size); + + data_length -= xfer_size; + byte_offset += xfer_size; + + HWRM_UNLOCK(); + } while (data_length > 0); + + return rc; +} + +void bnxt_free_hwrm_tx_ring(struct bnxt *bp, int queue_index) +{ + struct bnxt_tx_queue *txq = bp->tx_queues[queue_index]; + struct bnxt_tx_ring_info *txr = txq->tx_ring; + struct bnxt_ring *ring = txr->tx_ring_struct; + struct bnxt_cp_ring_info *cpr = txq->cp_ring; + + bnxt_hwrm_ring_free(bp, ring, + HWRM_RING_FREE_INPUT_RING_TYPE_TX, + cpr->cp_ring_struct->fw_ring_id); + txr->tx_raw_prod = 0; + txr->tx_raw_cons = 0; + memset(txr->tx_desc_ring, 0, + txr->tx_ring_struct->ring_size * sizeof(*txr->tx_desc_ring)); + memset(txr->tx_buf_ring, 0, + txr->tx_ring_struct->ring_size * sizeof(*txr->tx_buf_ring)); + + bnxt_hwrm_stat_ctx_free(bp, cpr); + + bnxt_free_cp_ring(bp, cpr); +} + +int bnxt_hwrm_config_host_mtu(struct bnxt *bp) +{ + struct hwrm_func_cfg_input req = {0}; + struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr; + int rc; + + if (!BNXT_PF(bp)) + return 0; + + HWRM_PREP(&req, HWRM_FUNC_CFG, BNXT_USE_CHIMP_MB); + + req.fid = rte_cpu_to_le_16(0xffff); + req.enables = rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_HOST_MTU); + req.host_mtu = rte_cpu_to_le_16(bp->eth_dev->data->mtu); + + rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB); + HWRM_CHECK_RESULT(); + HWRM_UNLOCK(); + + return rc; +}
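
Note on the teardown changes above: the patch makes ring, ring-group, and stat-context teardown idempotent. Each free helper (bnxt_hwrm_ring_free(), bnxt_hwrm_ring_grp_free(), bnxt_hwrm_stat_ctx_free()) now bails out early when the firmware ID already holds the "not allocated" sentinel, and resets that sentinel after issuing the free, so double frees and frees of never-allocated resources become harmless. Below is a minimal, self-contained C sketch of that idiom; the names fw_resource, fw_resource_free, release_fn, and INVALID_FW_ID are illustrative stand-ins, not driver symbols:

	#include <stdint.h>

	#define INVALID_FW_ID	UINT16_MAX	/* "not allocated" sentinel */

	struct fw_resource {
		uint16_t fw_id;
	};

	/* Idempotent free: safe to call any number of times. release_fn is
	 * a hypothetical stand-in for one HWRM free request. */
	static int fw_resource_free(struct fw_resource *res,
				    int (*release_fn)(uint16_t fw_id))
	{
		int rc;

		if (res->fw_id == INVALID_FW_ID)
			return 0;	/* never allocated, or already freed */

		rc = release_fn(res->fw_id);
		res->fw_id = INVALID_FW_ID;	/* mark freed even on error */
		return rc;
	}

This is what lets the reworked bnxt_free_all_hwrm_rings() simply invoke the per-queue free helpers for every queue without tracking which rings were actually created.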
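The bnxt_update_prev_stat() helper added in this patch carries a counter forward when the hardware transiently reports zero, so per-ring statistics never appear to jump backwards; bnxt_hwrm_ring_stats() applies it to every RX and TX counter it reads. A minimal runnable sketch of the same carry-forward idiom (the sample values and the main() harness are illustrative only):

	#include <inttypes.h>
	#include <stdio.h>

	/* If the current hardware reading is zero but a previous non-zero
	 * value exists, report the previous value; otherwise remember the
	 * new reading for next time. Mirrors bnxt_update_prev_stat(). */
	static void update_prev_stat(uint64_t *cntr, uint64_t *prev_cntr)
	{
		if (*prev_cntr && *cntr == 0)
			*cntr = *prev_cntr;
		else
			*prev_cntr = *cntr;
	}

	int main(void)
	{
		uint64_t samples[] = { 10, 25, 0 /* transient zero */, 30 };
		uint64_t prev = 0;
		unsigned int i;

		for (i = 0; i < 4; i++) {
			uint64_t cur = samples[i];

			update_prev_stat(&cur, &prev);
			printf("reported=%" PRIu64 "\n", cur);
		}
		/* Prints 10, 25, 25, 30: the transient zero is smoothed over. */
		return 0;
	}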
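Finally, the new bnxt_hwrm_read_sfp_module_eeprom_info() splits a large EEPROM read into response-sized requests, advancing the page offset by the transferred size each iteration (and, as visible above, setting the PAGE_OFFSET enable only for non-zero offsets). A sketch of that loop shape under stated assumptions: read_chunk() is a hypothetical stand-in for one HWRM PORT_PHY_I2C_READ exchange, and MAX_XFER stands in for BNXT_MAX_PHY_I2C_RESP_SIZE:

	#include <stdint.h>
	#include <string.h>

	#define MAX_XFER	64	/* per-request payload limit (illustrative) */

	/* Stub for one bounded transfer; a real implementation would issue
	 * a single firmware request here. */
	static int read_chunk(uint16_t offset, uint16_t len, uint8_t *dst)
	{
		(void)offset;
		memset(dst, 0, len);
		return 0;
	}

	/* Issue back-to-back requests, each capped at the response-buffer
	 * limit, until the full range has been copied out. */
	static int read_range(uint16_t start, uint16_t len, uint8_t *buf)
	{
		uint16_t done = 0;

		while (done < len) {
			uint16_t left = len - done;
			uint16_t xfer = left < MAX_XFER ? left : MAX_XFER;
			int rc = read_chunk(start + done, xfer, buf + done);

			if (rc)
				return rc;
			done += xfer;
		}
		return 0;
	}

	int main(void)
	{
		uint8_t eeprom[256];

		return read_range(0, sizeof(eeprom), eeprom);
	}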