diff --git a/drivers/net/bnxt/bnxt_hwrm.c b/drivers/net/bnxt/bnxt_hwrm.c
index 57d1026f96..d583839cb0 100644
--- a/drivers/net/bnxt/bnxt_hwrm.c
+++ b/drivers/net/bnxt/bnxt_hwrm.c
@@ -1,5 +1,5 @@
 /* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2014-2018 Broadcom
+ * Copyright(c) 2014-2021 Broadcom
  * All rights reserved.
  */
 
@@ -27,7 +27,7 @@
 #define HWRM_SPEC_CODE_1_8_3		0x10803
 #define HWRM_VERSION_1_9_1		0x10901
 #define HWRM_VERSION_1_9_2		0x10903
-
+#define HWRM_VERSION_1_10_2_13		0x10a020d
 struct bnxt_plcmodes_cfg {
 	uint32_t	flags;
 	uint16_t	jumbo_thresh;
@@ -52,7 +52,7 @@ static int page_getenum(size_t size)
 	if (size <= 1 << 30)
 		return 30;
 	PMD_DRV_LOG(ERR, "Page size %zu out of range\n", size);
-	return sizeof(void *) * 8 - 1;
+	return sizeof(int) * 8 - 1;
 }
 
 static int page_roundup(size_t size)
@@ -64,6 +64,9 @@ static void bnxt_hwrm_set_pg_attr(struct bnxt_ring_mem_info *rmem,
 				  uint8_t *pg_attr,
 				  uint64_t *pg_dir)
 {
+	if (rmem->nr_pages == 0)
+		return;
+
 	if (rmem->nr_pages > 1) {
 		*pg_attr = 1;
 		*pg_dir = rte_cpu_to_le_64(rmem->pg_tbl_map);
@@ -72,6 +75,82 @@ static void bnxt_hwrm_set_pg_attr(struct bnxt_ring_mem_info *rmem,
 	}
 }
 
+static struct bnxt_cp_ring_info*
+bnxt_get_ring_info_by_id(struct bnxt *bp, uint16_t rid, uint16_t type)
+{
+	struct bnxt_cp_ring_info *cp_ring = NULL;
+	uint16_t i;
+
+	switch (type) {
+	case HWRM_RING_FREE_INPUT_RING_TYPE_RX:
+	case HWRM_RING_FREE_INPUT_RING_TYPE_RX_AGG:
+		/* FALLTHROUGH */
+		for (i = 0; i < bp->rx_cp_nr_rings; i++) {
+			struct bnxt_rx_queue *rxq = bp->rx_queues[i];
+
+			if (rxq->cp_ring->cp_ring_struct->fw_ring_id ==
+			    rte_cpu_to_le_16(rid)) {
+				return rxq->cp_ring;
+			}
+		}
+		break;
+	case HWRM_RING_FREE_INPUT_RING_TYPE_TX:
+		for (i = 0; i < bp->tx_cp_nr_rings; i++) {
+			struct bnxt_tx_queue *txq = bp->tx_queues[i];
+
+			if (txq->cp_ring->cp_ring_struct->fw_ring_id ==
+			    rte_cpu_to_le_16(rid)) {
+				return txq->cp_ring;
+			}
+		}
+		break;
+	default:
+		return cp_ring;
+	}
+	return cp_ring;
+}
+
+/* Complete a sweep of the CQ ring for the corresponding Tx/Rx/AGG ring.
+ * If CMPL_BASE_TYPE_HWRM_DONE is not encountered by the last pass
+ * before the timeout, force the done bit so the cleanup can proceed.
+ * If cpr is NULL, do nothing: the HWRM command is not for a
+ * Tx/Rx/AGG ring cleanup.
+ */
+static int
+bnxt_check_cq_hwrm_done(struct bnxt_cp_ring_info *cpr,
+			bool tx, bool rx, bool timeout)
+{
+	int done = 0;
+
+	if (cpr != NULL) {
+		if (tx)
+			done = bnxt_flush_tx_cmp(cpr);
+
+		if (rx)
+			done = bnxt_flush_rx_cmp(cpr);
+
+		if (done)
+			PMD_DRV_LOG(DEBUG, "HWRM DONE for %s ring\n",
+				    rx ? "Rx" : "Tx");
+
+		/* We are about to time out and still have not seen the
+		 * HWRM DONE for the ring free. Force the cleanup.
+		 */
+		if (!done && timeout) {
+			done = 1;
+			PMD_DRV_LOG(DEBUG, "Timing out for %s ring\n",
+				    rx ? "Rx" : "Tx");
+		}
+	} else {
+		/* This HWRM command is not for a Tx/Rx/AGG ring cleanup.
+		 * Otherwise the cpr would have been valid. So do nothing.
+ */ + done = 1; + } + + return done; +} + /* * HWRM Functions (sent to HWRM) * These are named bnxt_hwrm_*() and return 0 on success or -110 if the @@ -94,6 +173,9 @@ static int bnxt_hwrm_send_message(struct bnxt *bp, void *msg, GRCPF_REG_KONG_CHANNEL_OFFSET : GRCPF_REG_CHIMP_CHANNEL_OFFSET; uint16_t mb_trigger_offset = use_kong_mb ? GRCPF_REG_KONG_COMM_TRIGGER : GRCPF_REG_CHIMP_COMM_TRIGGER; + struct bnxt_cp_ring_info *cpr = NULL; + bool is_rx = false; + bool is_tx = false; uint32_t timeout; /* Do not send HWRM commands to firmware in error state */ @@ -102,6 +184,11 @@ static int bnxt_hwrm_send_message(struct bnxt *bp, void *msg, timeout = bp->hwrm_cmd_timeout; + /* Update the message length for backing store config for new FW. */ + if (bp->fw_ver >= HWRM_VERSION_1_10_2_13 && + rte_cpu_to_le_16(req->req_type) == HWRM_FUNC_BACKING_STORE_CFG) + msg_len = BNXT_BACKING_STORE_CFG_LEGACY_LEN; + if (bp->flags & BNXT_FLAG_SHORT_CMD || msg_len > bp->max_req_len) { void *short_cmd_req = bp->hwrm_short_cmd_req_addr; @@ -145,14 +232,42 @@ static int bnxt_hwrm_send_message(struct bnxt *bp, void *msg, */ rte_io_mb(); + /* Check ring flush is done. + * This is valid only for Tx and Rx rings (including AGG rings). + * The Tx and Rx rings should be freed once the HW confirms all + * the internal buffers and BDs associated with the rings are + * consumed and the corresponding DMA is handled. + */ + if (rte_cpu_to_le_16(req->cmpl_ring) != INVALID_HW_RING_ID) { + /* Check if the TxCQ matches. If that fails check if RxCQ + * matches. And if neither match, is_rx = false, is_tx = false. + */ + cpr = bnxt_get_ring_info_by_id(bp, req->cmpl_ring, + HWRM_RING_FREE_INPUT_RING_TYPE_TX); + if (cpr == NULL) { + /* Not a TxCQ. Check if the RxCQ matches. */ + cpr = + bnxt_get_ring_info_by_id(bp, req->cmpl_ring, + HWRM_RING_FREE_INPUT_RING_TYPE_RX); + if (cpr != NULL) + is_rx = true; + } else { + is_tx = true; + } + } + /* Poll for the valid bit */ for (i = 0; i < timeout; i++) { + int done; + + done = bnxt_check_cq_hwrm_done(cpr, is_tx, is_rx, + i == timeout - 1); /* Sanity check on the resp->resp_len */ - rte_cio_rmb(); + rte_io_rmb(); if (resp->resp_len && resp->resp_len <= bp->max_resp_len) { /* Last byte of resp contains the valid key */ valid = (uint8_t *)resp + resp->resp_len - 1; - if (*valid == HWRM_RESP_VALID_KEY) + if (*valid == HWRM_RESP_VALID_KEY && done) break; } rte_delay_us(1); @@ -635,9 +750,13 @@ static int bnxt_hwrm_ptp_qcfg(struct bnxt *bp) HWRM_CHECK_RESULT(); - if (!BNXT_CHIP_THOR(bp) && - !(resp->flags & HWRM_PORT_MAC_PTP_QCFG_OUTPUT_FLAGS_DIRECT_ACCESS)) - return 0; + if (BNXT_CHIP_P5(bp)) { + if (!(resp->flags & HWRM_PORT_MAC_PTP_QCFG_OUTPUT_FLAGS_HWRM_ACCESS)) + return 0; + } else { + if (!(resp->flags & HWRM_PORT_MAC_PTP_QCFG_OUTPUT_FLAGS_DIRECT_ACCESS)) + return 0; + } if (resp->flags & HWRM_PORT_MAC_PTP_QCFG_OUTPUT_FLAGS_ONE_STEP_TX_TS) bp->flags |= BNXT_FLAG_FW_CAP_ONE_STEP_TX_TS; @@ -646,7 +765,7 @@ static int bnxt_hwrm_ptp_qcfg(struct bnxt *bp) if (!ptp) return -ENOMEM; - if (!BNXT_CHIP_THOR(bp)) { + if (!BNXT_CHIP_P5(bp)) { ptp->rx_regs[BNXT_PTP_RX_TS_L] = rte_le_to_cpu_32(resp->rx_ts_reg_off_lower); ptp->rx_regs[BNXT_PTP_RX_TS_H] = @@ -673,10 +792,16 @@ static int bnxt_hwrm_ptp_qcfg(struct bnxt *bp) return 0; } -void bnxt_hwrm_free_vf_info(struct bnxt *bp) +void bnxt_free_vf_info(struct bnxt *bp) { int i; + if (bp->pf == NULL) + return; + + if (bp->pf->vf_info == NULL) + return; + for (i = 0; i < bp->pf->max_vfs; i++) { rte_free(bp->pf->vf_info[i].vlan_table); 
bp->pf->vf_info[i].vlan_table = NULL; @@ -687,6 +812,50 @@ void bnxt_hwrm_free_vf_info(struct bnxt *bp) bp->pf->vf_info = NULL; } +static int bnxt_alloc_vf_info(struct bnxt *bp, uint16_t max_vfs) +{ + struct bnxt_child_vf_info *vf_info = bp->pf->vf_info; + int i; + + if (vf_info) + bnxt_free_vf_info(bp); + + vf_info = rte_zmalloc("bnxt_vf_info", sizeof(*vf_info) * max_vfs, 0); + if (vf_info == NULL) { + PMD_DRV_LOG(ERR, "Failed to alloc vf info\n"); + return -ENOMEM; + } + + bp->pf->max_vfs = max_vfs; + for (i = 0; i < max_vfs; i++) { + vf_info[i].fid = bp->pf->first_vf_id + i; + vf_info[i].vlan_table = rte_zmalloc("VF VLAN table", + getpagesize(), getpagesize()); + if (vf_info[i].vlan_table == NULL) { + PMD_DRV_LOG(ERR, "Failed to alloc VLAN table for VF %d\n", i); + goto err; + } + rte_mem_lock_page(vf_info[i].vlan_table); + + vf_info[i].vlan_as_table = rte_zmalloc("VF VLAN AS table", + getpagesize(), getpagesize()); + if (vf_info[i].vlan_as_table == NULL) { + PMD_DRV_LOG(ERR, "Failed to alloc VLAN AS table for VF %d\n", i); + goto err; + } + rte_mem_lock_page(vf_info[i].vlan_as_table); + + STAILQ_INIT(&vf_info[i].filter); + } + + bp->pf->vf_info = vf_info; + + return 0; +err: + bnxt_free_vf_info(bp); + return -ENOMEM; +} + static int __bnxt_hwrm_func_qcaps(struct bnxt *bp) { int rc = 0; @@ -694,7 +863,6 @@ static int __bnxt_hwrm_func_qcaps(struct bnxt *bp) struct hwrm_func_qcaps_output *resp = bp->hwrm_cmd_resp_addr; uint16_t new_max_vfs; uint32_t flags; - int i; HWRM_PREP(&req, HWRM_FUNC_QCAPS, BNXT_USE_CHIMP_MB); @@ -712,42 +880,9 @@ static int __bnxt_hwrm_func_qcaps(struct bnxt *bp) bp->pf->total_vfs = rte_le_to_cpu_16(resp->max_vfs); new_max_vfs = bp->pdev->max_vfs; if (new_max_vfs != bp->pf->max_vfs) { - if (bp->pf->vf_info) - bnxt_hwrm_free_vf_info(bp); - bp->pf->vf_info = rte_zmalloc("bnxt_vf_info", - sizeof(bp->pf->vf_info[0]) * new_max_vfs, 0); - if (bp->pf->vf_info == NULL) { - PMD_DRV_LOG(ERR, "Alloc vf info fail\n"); - return -ENOMEM; - } - bp->pf->max_vfs = new_max_vfs; - for (i = 0; i < new_max_vfs; i++) { - bp->pf->vf_info[i].fid = - bp->pf->first_vf_id + i; - bp->pf->vf_info[i].vlan_table = - rte_zmalloc("VF VLAN table", - getpagesize(), - getpagesize()); - if (bp->pf->vf_info[i].vlan_table == NULL) - PMD_DRV_LOG(ERR, - "Fail to alloc VLAN table for VF %d\n", - i); - else - rte_mem_lock_page( - bp->pf->vf_info[i].vlan_table); - bp->pf->vf_info[i].vlan_as_table = - rte_zmalloc("VF VLAN AS table", - getpagesize(), - getpagesize()); - if (bp->pf->vf_info[i].vlan_as_table == NULL) - PMD_DRV_LOG(ERR, - "Alloc VLAN AS table for VF %d fail\n", - i); - else - rte_mem_lock_page( - bp->pf->vf_info[i].vlan_as_table); - STAILQ_INIT(&bp->pf->vf_info[i].filter); - } + rc = bnxt_alloc_vf_info(bp, new_max_vfs); + if (rc) + goto unlock; } } @@ -765,7 +900,7 @@ static int __bnxt_hwrm_func_qcaps(struct bnxt *bp) bp->first_vf_id = rte_le_to_cpu_16(resp->first_vf_id); bp->max_rx_em_flows = rte_le_to_cpu_16(resp->max_rx_em_flows); bp->max_l2_ctx = rte_le_to_cpu_16(resp->max_l2_ctxs); - if (!BNXT_CHIP_THOR(bp)) + if (!BNXT_CHIP_P5(bp) && !bp->pdev->max_vfs) bp->max_l2_ctx += bp->max_rx_em_flows; /* TODO: For now, do not support VMDq/RFS on VFs. 
*/ if (BNXT_PF(bp)) { @@ -803,6 +938,10 @@ static int __bnxt_hwrm_func_qcaps(struct bnxt *bp) if (flags & HWRM_FUNC_QCAPS_OUTPUT_FLAGS_HOT_RESET_CAPABLE) bp->fw_cap |= BNXT_FW_CAP_HOT_RESET; + if (flags & HWRM_FUNC_QCAPS_OUTPUT_FLAGS_LINK_ADMIN_STATUS_SUPPORTED) + bp->fw_cap |= BNXT_FW_CAP_LINK_ADMIN; + +unlock: HWRM_UNLOCK(); return rc; @@ -813,21 +952,23 @@ int bnxt_hwrm_func_qcaps(struct bnxt *bp) int rc; rc = __bnxt_hwrm_func_qcaps(bp); + if (rc == -ENOMEM) + return rc; + if (!rc && bp->hwrm_spec_code >= HWRM_SPEC_CODE_1_8_3) { rc = bnxt_alloc_ctx_mem(bp); if (rc) return rc; + /* On older FW, + * bnxt_hwrm_func_resc_qcaps can fail and cause init failure. + * But the error can be ignored. Return success. + */ rc = bnxt_hwrm_func_resc_qcaps(bp); if (!rc) bp->flags |= BNXT_FLAG_NEW_RM; } - /* On older FW, - * bnxt_hwrm_func_resc_qcaps can fail and cause init failure. - * But the error can be ignored. Return success. - */ - return 0; } @@ -835,6 +976,7 @@ int bnxt_hwrm_func_qcaps(struct bnxt *bp) int bnxt_hwrm_vnic_qcaps(struct bnxt *bp) { int rc = 0; + uint32_t flags; struct hwrm_vnic_qcaps_input req = {.req_type = 0 }; struct hwrm_vnic_qcaps_output *resp = bp->hwrm_cmd_resp_addr; @@ -846,12 +988,19 @@ int bnxt_hwrm_vnic_qcaps(struct bnxt *bp) HWRM_CHECK_RESULT(); - if (rte_le_to_cpu_32(resp->flags) & - HWRM_VNIC_QCAPS_OUTPUT_FLAGS_COS_ASSIGNMENT_CAP) { + flags = rte_le_to_cpu_32(resp->flags); + + if (flags & HWRM_VNIC_QCAPS_OUTPUT_FLAGS_COS_ASSIGNMENT_CAP) { bp->vnic_cap_flags |= BNXT_VNIC_CAP_COS_CLASSIFY; PMD_DRV_LOG(INFO, "CoS assignment capability enabled\n"); } + if (flags & HWRM_VNIC_QCAPS_OUTPUT_FLAGS_OUTERMOST_RSS_CAP) + bp->vnic_cap_flags |= BNXT_VNIC_CAP_OUTER_RSS; + + if (flags & HWRM_VNIC_QCAPS_OUTPUT_FLAGS_RX_CMPL_V2_CAP) + bp->vnic_cap_flags |= BNXT_VNIC_CAP_RX_CMPL_V2; + bp->max_tpa_v2 = rte_le_to_cpu_16(resp->max_aggs_supported); HWRM_UNLOCK(); @@ -911,14 +1060,6 @@ int bnxt_hwrm_func_driver_register(struct bnxt *bp) memcpy(req.vf_req_fwd, bp->pf->vf_req_fwd, RTE_MIN(sizeof(req.vf_req_fwd), sizeof(bp->pf->vf_req_fwd))); - - /* - * PF can sniff HWRM API issued by VF. This can be set up by - * linux driver and inherited by the DPDK PF driver. Clear - * this HWRM sniffer list in FW because DPDK PF driver does - * not support this. - */ - flags |= HWRM_FUNC_DRV_RGTR_INPUT_FLAGS_FWD_NONE_MODE; } req.flags = rte_cpu_to_le_32(flags); @@ -939,10 +1080,13 @@ int bnxt_hwrm_func_driver_register(struct bnxt *bp) req.async_event_fwd[1] |= rte_cpu_to_le_32(ASYNC_CMPL_EVENT_ID_DBG_NOTIFICATION); - if (BNXT_VF_IS_TRUSTED(bp)) + if (BNXT_PF(bp) || BNXT_VF_IS_TRUSTED(bp)) req.async_event_fwd[1] |= rte_cpu_to_le_32(ASYNC_CMPL_EVENT_ID_DEFAULT_VNIC_CHANGE); + req.async_event_fwd[2] |= + rte_cpu_to_le_32(ASYNC_CMPL_EVENT_ID_ECHO_REQUEST); + rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB); HWRM_CHECK_RESULT(); @@ -1047,21 +1191,19 @@ int bnxt_hwrm_func_resc_qcaps(struct bnxt *bp) HWRM_CHECK_RESULT_SILENT(); - if (BNXT_VF(bp)) { - bp->max_rsscos_ctx = rte_le_to_cpu_16(resp->max_rsscos_ctx); - bp->max_cp_rings = rte_le_to_cpu_16(resp->max_cmpl_rings); - bp->max_tx_rings = rte_le_to_cpu_16(resp->max_tx_rings); - bp->max_rx_rings = rte_le_to_cpu_16(resp->max_rx_rings); - bp->max_ring_grps = rte_le_to_cpu_32(resp->max_hw_ring_grps); - /* func_resource_qcaps does not return max_rx_em_flows. - * So use the value provided by func_qcaps. 
- */ - bp->max_l2_ctx = rte_le_to_cpu_16(resp->max_l2_ctxs); - if (!BNXT_CHIP_THOR(bp)) - bp->max_l2_ctx += bp->max_rx_em_flows; - bp->max_vnics = rte_le_to_cpu_16(resp->max_vnics); - bp->max_stat_ctx = rte_le_to_cpu_16(resp->max_stat_ctx); - } + bp->max_rsscos_ctx = rte_le_to_cpu_16(resp->max_rsscos_ctx); + bp->max_cp_rings = rte_le_to_cpu_16(resp->max_cmpl_rings); + bp->max_tx_rings = rte_le_to_cpu_16(resp->max_tx_rings); + bp->max_rx_rings = rte_le_to_cpu_16(resp->max_rx_rings); + bp->max_ring_grps = rte_le_to_cpu_32(resp->max_hw_ring_grps); + /* func_resource_qcaps does not return max_rx_em_flows. + * So use the value provided by func_qcaps. + */ + bp->max_l2_ctx = rte_le_to_cpu_16(resp->max_l2_ctxs); + if (!BNXT_CHIP_P5(bp) && !bp->pdev->max_vfs) + bp->max_l2_ctx += bp->max_rx_em_flows; + bp->max_vnics = rte_le_to_cpu_16(resp->max_vnics); + bp->max_stat_ctx = rte_le_to_cpu_16(resp->max_stat_ctx); bp->max_nq_rings = rte_le_to_cpu_16(resp->max_msix); bp->vf_resv_strategy = rte_le_to_cpu_16(resp->vf_reservation_strategy); if (bp->vf_resv_strategy > @@ -1098,10 +1240,16 @@ int bnxt_hwrm_ver_get(struct bnxt *bp, uint32_t timeout) else HWRM_CHECK_RESULT(); - PMD_DRV_LOG(INFO, "%d.%d.%d:%d.%d.%d\n", + if (resp->flags & HWRM_VER_GET_OUTPUT_FLAGS_DEV_NOT_RDY) { + rc = -EAGAIN; + goto error; + } + + PMD_DRV_LOG(INFO, "%d.%d.%d:%d.%d.%d.%d\n", resp->hwrm_intf_maj_8b, resp->hwrm_intf_min_8b, resp->hwrm_intf_upd_8b, resp->hwrm_fw_maj_8b, - resp->hwrm_fw_min_8b, resp->hwrm_fw_bld_8b); + resp->hwrm_fw_min_8b, resp->hwrm_fw_bld_8b, + resp->hwrm_fw_rsvd_8b); bp->fw_ver = (resp->hwrm_fw_maj_8b << 24) | (resp->hwrm_fw_min_8b << 16) | (resp->hwrm_fw_bld_8b << 8) | @@ -1130,7 +1278,11 @@ int bnxt_hwrm_ver_get(struct bnxt *bp, uint32_t timeout) if (bp->max_req_len > resp->max_req_win_len) { PMD_DRV_LOG(ERR, "Unsupported request length\n"); rc = -EINVAL; + goto error; } + + bp->chip_num = rte_le_to_cpu_16(resp->chip_num); + bp->max_req_len = rte_le_to_cpu_16(resp->max_req_win_len); bp->hwrm_max_ext_req_len = rte_le_to_cpu_16(resp->max_ext_req_len); if (bp->hwrm_max_ext_req_len < HWRM_MAX_REQ_LEN) @@ -1139,28 +1291,8 @@ int bnxt_hwrm_ver_get(struct bnxt *bp, uint32_t timeout) max_resp_len = rte_le_to_cpu_16(resp->max_resp_len); dev_caps_cfg = rte_le_to_cpu_32(resp->dev_caps_cfg); - if (bp->max_resp_len != max_resp_len) { - sprintf(type, "bnxt_hwrm_" PCI_PRI_FMT, - bp->pdev->addr.domain, bp->pdev->addr.bus, - bp->pdev->addr.devid, bp->pdev->addr.function); - - rte_free(bp->hwrm_cmd_resp_addr); - - bp->hwrm_cmd_resp_addr = rte_malloc(type, max_resp_len, 0); - if (bp->hwrm_cmd_resp_addr == NULL) { - rc = -ENOMEM; - goto error; - } - bp->hwrm_cmd_resp_dma_addr = - rte_malloc_virt2iova(bp->hwrm_cmd_resp_addr); - if (bp->hwrm_cmd_resp_dma_addr == RTE_BAD_IOVA) { - PMD_DRV_LOG(ERR, - "Unable to map response buffer to physical memory.\n"); - rc = -ENOMEM; - goto error; - } - bp->max_resp_len = max_resp_len; - } + RTE_VERIFY(max_resp_len <= bp->max_resp_len); + bp->max_resp_len = max_resp_len; if ((dev_caps_cfg & HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_SHORT_CMD_SUPPORTED) && @@ -1217,6 +1349,11 @@ int bnxt_hwrm_ver_get(struct bnxt *bp, uint32_t timeout) bp->fw_cap |= BNXT_FW_CAP_ADV_FLOW_COUNTERS; } + if (dev_caps_cfg & + HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_CFA_TRUFLOW_SUPPORTED) { + PMD_DRV_LOG(DEBUG, "Host-based truflow feature enabled.\n"); + bp->fw_cap |= BNXT_FW_CAP_TRUFLOW_EN; + } error: HWRM_UNLOCK(); @@ -1240,6 +1377,9 @@ int bnxt_hwrm_func_driver_unregister(struct bnxt *bp, uint32_t flags) HWRM_CHECK_RESULT(); 
HWRM_UNLOCK(); + PMD_DRV_LOG(DEBUG, "Port %u: Unregistered with fw\n", + bp->eth_dev->data->port_id); + return rc; } @@ -1260,16 +1400,25 @@ static int bnxt_hwrm_port_phy_cfg(struct bnxt *bp, struct bnxt_link_info *conf) } req.flags = rte_cpu_to_le_32(conf->phy_flags); - req.force_link_speed = rte_cpu_to_le_16(conf->link_speed); - enables |= HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_MODE; /* * Note, ChiMP FW 20.2.1 and 20.2.2 return an error when we set * any auto mode, even "none". */ if (!conf->link_speed) { /* No speeds specified. Enable AutoNeg - all speeds */ + enables |= HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_MODE; req.auto_mode = HWRM_PORT_PHY_CFG_INPUT_AUTO_MODE_ALL_SPEEDS; + } else { + if (bp->link_info->link_signal_mode) { + enables |= + HWRM_PORT_PHY_CFG_IN_EN_FORCE_PAM4_LINK_SPEED; + req.force_pam4_link_speed = + rte_cpu_to_le_16(conf->link_speed); + } else { + req.force_link_speed = + rte_cpu_to_le_16(conf->link_speed); + } } /* AutoNeg - Advertise speeds specified. */ if (conf->auto_link_speed_mask && @@ -1278,9 +1427,20 @@ static int bnxt_hwrm_port_phy_cfg(struct bnxt *bp, struct bnxt_link_info *conf) HWRM_PORT_PHY_CFG_INPUT_AUTO_MODE_SPEED_MASK; req.auto_link_speed_mask = conf->auto_link_speed_mask; - enables |= - HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_LINK_SPEED_MASK; + if (conf->auto_pam4_link_speeds) { + enables |= + HWRM_PORT_PHY_CFG_IN_EN_AUTO_PAM4_LINK_SPD_MASK; + req.auto_link_pam4_speed_mask = + conf->auto_pam4_link_speeds; + } else { + enables |= + HWRM_PORT_PHY_CFG_IN_EN_AUTO_LINK_SPEED_MASK; + } } + if (conf->auto_link_speed && + !(conf->phy_flags & HWRM_PORT_PHY_CFG_INPUT_FLAGS_FORCE)) + enables |= + HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_LINK_SPEED; req.auto_duplex = conf->duplex; enables |= HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_DUPLEX; @@ -1335,18 +1495,32 @@ static int bnxt_hwrm_port_phy_qcfg(struct bnxt *bp, link_info->support_speeds = rte_le_to_cpu_16(resp->support_speeds); link_info->auto_link_speed = rte_le_to_cpu_16(resp->auto_link_speed); + link_info->auto_link_speed_mask = rte_le_to_cpu_16(resp->auto_link_speed_mask); link_info->preemphasis = rte_le_to_cpu_32(resp->preemphasis); link_info->force_link_speed = rte_le_to_cpu_16(resp->force_link_speed); link_info->phy_ver[0] = resp->phy_maj; link_info->phy_ver[1] = resp->phy_min; link_info->phy_ver[2] = resp->phy_bld; - + link_info->link_signal_mode = + rte_le_to_cpu_16(resp->active_fec_signal_mode); + link_info->force_pam4_link_speed = + rte_le_to_cpu_16(resp->force_pam4_link_speed); + link_info->support_pam4_speeds = + rte_le_to_cpu_16(resp->support_pam4_speeds); + link_info->auto_pam4_link_speeds = + rte_le_to_cpu_16(resp->auto_pam4_link_speed_mask); + link_info->module_status = resp->module_status; HWRM_UNLOCK(); PMD_DRV_LOG(DEBUG, "Link Speed:%d,Auto:%d:%x:%x,Support:%x,Force:%x\n", link_info->link_speed, link_info->auto_mode, link_info->auto_link_speed, link_info->auto_link_speed_mask, link_info->support_speeds, link_info->force_link_speed); + PMD_DRV_LOG(DEBUG, "Link Signal:%d,PAM::Auto:%x,Support:%x,Force:%x\n", + link_info->link_signal_mode, + link_info->auto_pam4_link_speeds, + link_info->support_pam4_speeds, + link_info->force_pam4_link_speed); return rc; } @@ -1355,6 +1529,7 @@ int bnxt_hwrm_port_phy_qcaps(struct bnxt *bp) int rc = 0; struct hwrm_port_phy_qcaps_input req = {0}; struct hwrm_port_phy_qcaps_output *resp = bp->hwrm_cmd_resp_addr; + struct bnxt_link_info *link_info = bp->link_info; if (BNXT_VF(bp) && !BNXT_VF_IS_TRUSTED(bp)) return 0; @@ -1363,12 +1538,24 @@ int bnxt_hwrm_port_phy_qcaps(struct 
bnxt *bp) rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB); - HWRM_CHECK_RESULT(); + HWRM_CHECK_RESULT_SILENT(); bp->port_cnt = resp->port_cnt; + if (resp->supported_speeds_auto_mode) + link_info->support_auto_speeds = + rte_le_to_cpu_16(resp->supported_speeds_auto_mode); + if (resp->supported_pam4_speeds_auto_mode) + link_info->support_pam4_auto_speeds = + rte_le_to_cpu_16(resp->supported_pam4_speeds_auto_mode); HWRM_UNLOCK(); + /* Older firmware does not have supported_auto_speeds, so assume + * that all supported speeds can be autonegotiated. + */ + if (link_info->auto_link_speed_mask && !link_info->support_auto_speeds) + link_info->support_auto_speeds = link_info->support_speeds; + return 0; } @@ -1520,7 +1707,7 @@ int bnxt_hwrm_ring_alloc(struct bnxt *bp, req.ring_type = ring_type; req.cmpl_ring_id = rte_cpu_to_le_16(cmpl_ring_id); req.stat_ctx_id = rte_cpu_to_le_32(stats_ctx_id); - if (BNXT_CHIP_THOR(bp)) { + if (BNXT_CHIP_P5(bp)) { mb_pool = bp->rx_queues[0]->mb_pool; rx_buf_size = rte_pktmbuf_data_room_size(mb_pool) - RTE_PKTMBUF_HEADROOM; @@ -1616,18 +1803,24 @@ int bnxt_hwrm_ring_alloc(struct bnxt *bp, } int bnxt_hwrm_ring_free(struct bnxt *bp, - struct bnxt_ring *ring, uint32_t ring_type) + struct bnxt_ring *ring, uint32_t ring_type, + uint16_t cp_ring_id) { int rc; struct hwrm_ring_free_input req = {.req_type = 0 }; struct hwrm_ring_free_output *resp = bp->hwrm_cmd_resp_addr; + if (ring->fw_ring_id == INVALID_HW_RING_ID) + return -EINVAL; + HWRM_PREP(&req, HWRM_RING_FREE, BNXT_USE_CHIMP_MB); req.ring_type = ring_type; req.ring_id = rte_cpu_to_le_16(ring->fw_ring_id); + req.cmpl_ring = rte_cpu_to_le_16(cp_ring_id); rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB); + ring->fw_ring_id = INVALID_HW_RING_ID; if (rc || resp->error_code) { if (rc == 0 && resp->error_code) @@ -1713,7 +1906,7 @@ int bnxt_hwrm_stat_clear(struct bnxt *bp, struct bnxt_cp_ring_info *cpr) struct hwrm_stat_ctx_clr_stats_input req = {.req_type = 0 }; struct hwrm_stat_ctx_clr_stats_output *resp = bp->hwrm_cmd_resp_addr; - if (cpr->hw_stats_ctx_id == (uint32_t)HWRM_NA_SIGNATURE) + if (cpr->hw_stats_ctx_id == HWRM_NA_SIGNATURE) return rc; HWRM_PREP(&req, HWRM_STAT_CTX_CLR_STATS, BNXT_USE_CHIMP_MB); @@ -1728,13 +1921,15 @@ int bnxt_hwrm_stat_clear(struct bnxt *bp, struct bnxt_cp_ring_info *cpr) return rc; } -int bnxt_hwrm_stat_ctx_alloc(struct bnxt *bp, struct bnxt_cp_ring_info *cpr, - unsigned int idx __rte_unused) +int bnxt_hwrm_stat_ctx_alloc(struct bnxt *bp, struct bnxt_cp_ring_info *cpr) { int rc; struct hwrm_stat_ctx_alloc_input req = {.req_type = 0 }; struct hwrm_stat_ctx_alloc_output *resp = bp->hwrm_cmd_resp_addr; + if (cpr->hw_stats_ctx_id != HWRM_NA_SIGNATURE) + return 0; + HWRM_PREP(&req, HWRM_STAT_CTX_ALLOC, BNXT_USE_CHIMP_MB); req.update_period_ms = rte_cpu_to_le_32(0); @@ -1752,13 +1947,15 @@ int bnxt_hwrm_stat_ctx_alloc(struct bnxt *bp, struct bnxt_cp_ring_info *cpr, return rc; } -int bnxt_hwrm_stat_ctx_free(struct bnxt *bp, struct bnxt_cp_ring_info *cpr, - unsigned int idx __rte_unused) +static int bnxt_hwrm_stat_ctx_free(struct bnxt *bp, struct bnxt_cp_ring_info *cpr) { int rc; struct hwrm_stat_ctx_free_input req = {.req_type = 0 }; struct hwrm_stat_ctx_free_output *resp = bp->hwrm_cmd_resp_addr; + if (cpr->hw_stats_ctx_id == HWRM_NA_SIGNATURE) + return 0; + HWRM_PREP(&req, HWRM_STAT_CTX_FREE, BNXT_USE_CHIMP_MB); req.stat_ctx_id = rte_cpu_to_le_32(cpr->hw_stats_ctx_id); @@ -1768,6 +1965,8 @@ int bnxt_hwrm_stat_ctx_free(struct bnxt *bp, struct 
bnxt_cp_ring_info *cpr, HWRM_CHECK_RESULT(); HWRM_UNLOCK(); + cpr->hw_stats_ctx_id = HWRM_NA_SIGNATURE; + return rc; } @@ -1890,7 +2089,7 @@ int bnxt_hwrm_vnic_cfg(struct bnxt *bp, struct bnxt_vnic_info *vnic) HWRM_PREP(&req, HWRM_VNIC_CFG, BNXT_USE_CHIMP_MB); - if (BNXT_CHIP_THOR(bp)) { + if (BNXT_CHIP_P5(bp)) { int dflt_rxq = vnic->start_grp_id; struct bnxt_rx_ring_info *rxr; struct bnxt_cp_ring_info *cpr; @@ -1921,6 +2120,11 @@ int bnxt_hwrm_vnic_cfg(struct bnxt *bp, struct bnxt_vnic_info *vnic) rte_cpu_to_le_16(cpr->cp_ring_struct->fw_ring_id); enables = HWRM_VNIC_CFG_INPUT_ENABLES_DEFAULT_RX_RING_ID | HWRM_VNIC_CFG_INPUT_ENABLES_DEFAULT_CMPL_RING_ID; + if (bp->vnic_cap_flags & BNXT_VNIC_CAP_RX_CMPL_V2) { + enables |= HWRM_VNIC_CFG_INPUT_ENABLES_RX_CSUM_V2_MODE; + req.rx_csum_v2_mode = + HWRM_VNIC_CFG_INPUT_RX_CSUM_V2_MODE_ALL_OK; + } goto config_mru; } @@ -1961,12 +2165,6 @@ config_mru: if (vnic->bd_stall) req.flags |= rte_cpu_to_le_32(HWRM_VNIC_CFG_INPUT_FLAGS_BD_STALL_MODE); - if (vnic->roce_dual) - req.flags |= rte_cpu_to_le_32( - HWRM_VNIC_QCFG_OUTPUT_FLAGS_ROCE_DUAL_VNIC_MODE); - if (vnic->roce_only) - req.flags |= rte_cpu_to_le_32( - HWRM_VNIC_QCFG_OUTPUT_FLAGS_ROCE_ONLY_VNIC_MODE); if (vnic->rss_dflt_cr) req.flags |= rte_cpu_to_le_32( HWRM_VNIC_QCFG_OUTPUT_FLAGS_RSS_DFLT_CR_MODE); @@ -2014,10 +2212,6 @@ int bnxt_hwrm_vnic_qcfg(struct bnxt *bp, struct bnxt_vnic_info *vnic, HWRM_VNIC_QCFG_OUTPUT_FLAGS_VLAN_STRIP_MODE; vnic->bd_stall = rte_le_to_cpu_32(resp->flags) & HWRM_VNIC_QCFG_OUTPUT_FLAGS_BD_STALL_MODE; - vnic->roce_dual = rte_le_to_cpu_32(resp->flags) & - HWRM_VNIC_QCFG_OUTPUT_FLAGS_ROCE_DUAL_VNIC_MODE; - vnic->roce_only = rte_le_to_cpu_32(resp->flags) & - HWRM_VNIC_QCFG_OUTPUT_FLAGS_ROCE_ONLY_VNIC_MODE; vnic->rss_dflt_cr = rte_le_to_cpu_32(resp->flags) & HWRM_VNIC_QCFG_OUTPUT_FLAGS_RSS_DFLT_CR_MODE; @@ -2080,7 +2274,7 @@ int bnxt_hwrm_vnic_ctx_free(struct bnxt *bp, struct bnxt_vnic_info *vnic) { int rc = 0; - if (BNXT_CHIP_THOR(bp)) { + if (BNXT_CHIP_P5(bp)) { int j; for (j = 0; j < vnic->num_lb_ctxts; j++) { @@ -2127,7 +2321,7 @@ int bnxt_hwrm_vnic_free(struct bnxt *bp, struct bnxt_vnic_info *vnic) } static int -bnxt_hwrm_vnic_rss_cfg_thor(struct bnxt *bp, struct bnxt_vnic_info *vnic) +bnxt_hwrm_vnic_rss_cfg_p5(struct bnxt *bp, struct bnxt_vnic_info *vnic) { int i; int rc = 0; @@ -2171,8 +2365,8 @@ int bnxt_hwrm_vnic_rss_cfg(struct bnxt *bp, if (!vnic->rss_table) return 0; - if (BNXT_CHIP_THOR(bp)) - return bnxt_hwrm_vnic_rss_cfg_thor(bp, vnic); + if (BNXT_CHIP_P5(bp)) + return bnxt_hwrm_vnic_rss_cfg_p5(bp, vnic); HWRM_PREP(&req, HWRM_VNIC_RSS_CFG, BNXT_USE_CHIMP_MB); @@ -2237,7 +2431,7 @@ int bnxt_hwrm_vnic_tpa_cfg(struct bnxt *bp, struct hwrm_vnic_tpa_cfg_input req = {.req_type = 0 }; struct hwrm_vnic_tpa_cfg_output *resp = bp->hwrm_cmd_resp_addr; - if (BNXT_CHIP_THOR(bp) && !bp->max_tpa_v2) { + if (BNXT_CHIP_P5(bp) && !bp->max_tpa_v2) { if (enable) PMD_DRV_LOG(ERR, "No HW support for LRO\n"); return -ENOTSUP; @@ -2418,48 +2612,54 @@ bnxt_free_all_hwrm_stat_ctxs(struct bnxt *bp) unsigned int i; struct bnxt_cp_ring_info *cpr; - for (i = 0; i < bp->rx_cp_nr_rings + bp->tx_cp_nr_rings; i++) { + for (i = 0; i < bp->rx_cp_nr_rings; i++) { - if (i >= bp->rx_cp_nr_rings) { - cpr = bp->tx_queues[i - bp->rx_cp_nr_rings]->cp_ring; - } else { - cpr = bp->rx_queues[i]->cp_ring; - if (BNXT_HAS_RING_GRPS(bp)) - bp->grp_info[i].fw_stats_ctx = -1; - } - if (cpr->hw_stats_ctx_id != HWRM_NA_SIGNATURE) { - rc = bnxt_hwrm_stat_ctx_free(bp, cpr, i); - cpr->hw_stats_ctx_id = 
HWRM_NA_SIGNATURE; - if (rc) - return rc; - } + cpr = bp->rx_queues[i]->cp_ring; + if (BNXT_HAS_RING_GRPS(bp)) + bp->grp_info[i].fw_stats_ctx = -1; + rc = bnxt_hwrm_stat_ctx_free(bp, cpr); + if (rc) + return rc; + } + + for (i = 0; i < bp->tx_cp_nr_rings; i++) { + cpr = bp->tx_queues[i]->cp_ring; + rc = bnxt_hwrm_stat_ctx_free(bp, cpr); + if (rc) + return rc; } + return 0; } int bnxt_alloc_all_hwrm_stat_ctxs(struct bnxt *bp) { + struct bnxt_cp_ring_info *cpr; unsigned int i; int rc = 0; - for (i = 0; i < bp->rx_cp_nr_rings + bp->tx_cp_nr_rings; i++) { - struct bnxt_tx_queue *txq; - struct bnxt_rx_queue *rxq; - struct bnxt_cp_ring_info *cpr; + for (i = 0; i < bp->rx_cp_nr_rings; i++) { + struct bnxt_rx_queue *rxq = bp->rx_queues[i]; - if (i >= bp->rx_cp_nr_rings) { - txq = bp->tx_queues[i - bp->rx_cp_nr_rings]; - cpr = txq->cp_ring; - } else { - rxq = bp->rx_queues[i]; - cpr = rxq->cp_ring; + cpr = rxq->cp_ring; + if (cpr->hw_stats_ctx_id == HWRM_NA_SIGNATURE) { + rc = bnxt_hwrm_stat_ctx_alloc(bp, cpr); + if (rc) + return rc; } + } - rc = bnxt_hwrm_stat_ctx_alloc(bp, cpr, i); + for (i = 0; i < bp->tx_cp_nr_rings; i++) { + struct bnxt_tx_queue *txq = bp->tx_queues[i]; - if (rc) - return rc; + cpr = txq->cp_ring; + if (cpr->hw_stats_ctx_id == HWRM_NA_SIGNATURE) { + rc = bnxt_hwrm_stat_ctx_alloc(bp, cpr); + if (rc) + return rc; + } } + return rc; } @@ -2490,12 +2690,11 @@ void bnxt_free_nq_ring(struct bnxt *bp, struct bnxt_cp_ring_info *cpr) struct bnxt_ring *cp_ring = cpr->cp_ring_struct; bnxt_hwrm_ring_free(bp, cp_ring, - HWRM_RING_FREE_INPUT_RING_TYPE_NQ); - cp_ring->fw_ring_id = INVALID_HW_RING_ID; - memset(cpr->cp_desc_ring, 0, cpr->cp_ring_struct->ring_size * - sizeof(*cpr->cp_desc_ring)); + HWRM_RING_FREE_INPUT_RING_TYPE_NQ, + INVALID_HW_RING_ID); + memset(cpr->cp_desc_ring, 0, + cpr->cp_ring_struct->ring_size * sizeof(*cpr->cp_desc_ring)); cpr->cp_raw_cons = 0; - cpr->valid = 0; } void bnxt_free_cp_ring(struct bnxt *bp, struct bnxt_cp_ring_info *cpr) @@ -2503,12 +2702,11 @@ void bnxt_free_cp_ring(struct bnxt *bp, struct bnxt_cp_ring_info *cpr) struct bnxt_ring *cp_ring = cpr->cp_ring_struct; bnxt_hwrm_ring_free(bp, cp_ring, - HWRM_RING_FREE_INPUT_RING_TYPE_L2_CMPL); - cp_ring->fw_ring_id = INVALID_HW_RING_ID; - memset(cpr->cp_desc_ring, 0, cpr->cp_ring_struct->ring_size * - sizeof(*cpr->cp_desc_ring)); + HWRM_RING_FREE_INPUT_RING_TYPE_L2_CMPL, + INVALID_HW_RING_ID); + memset(cpr->cp_desc_ring, 0, + cpr->cp_ring_struct->ring_size * sizeof(*cpr->cp_desc_ring)); cpr->cp_raw_cons = 0; - cpr->valid = 0; } void bnxt_free_hwrm_rx_ring(struct bnxt *bp, int queue_index) @@ -2518,60 +2716,55 @@ void bnxt_free_hwrm_rx_ring(struct bnxt *bp, int queue_index) struct bnxt_ring *ring = rxr->rx_ring_struct; struct bnxt_cp_ring_info *cpr = rxq->cp_ring; - if (ring->fw_ring_id != INVALID_HW_RING_ID) { - bnxt_hwrm_ring_free(bp, ring, - HWRM_RING_FREE_INPUT_RING_TYPE_RX); - ring->fw_ring_id = INVALID_HW_RING_ID; - if (BNXT_HAS_RING_GRPS(bp)) - bp->grp_info[queue_index].rx_fw_ring_id = - INVALID_HW_RING_ID; - } + bnxt_hwrm_ring_free(bp, ring, + HWRM_RING_FREE_INPUT_RING_TYPE_RX, + cpr->cp_ring_struct->fw_ring_id); + if (BNXT_HAS_RING_GRPS(bp)) + bp->grp_info[queue_index].rx_fw_ring_id = INVALID_HW_RING_ID; + ring = rxr->ag_ring_struct; - if (ring->fw_ring_id != INVALID_HW_RING_ID) { - bnxt_hwrm_ring_free(bp, ring, - BNXT_CHIP_THOR(bp) ? 
- HWRM_RING_FREE_INPUT_RING_TYPE_RX_AGG : - HWRM_RING_FREE_INPUT_RING_TYPE_RX); - if (BNXT_HAS_RING_GRPS(bp)) - bp->grp_info[queue_index].ag_fw_ring_id = - INVALID_HW_RING_ID; - } - if (cpr->cp_ring_struct->fw_ring_id != INVALID_HW_RING_ID) - bnxt_free_cp_ring(bp, cpr); + bnxt_hwrm_ring_free(bp, ring, + BNXT_CHIP_P5(bp) ? + HWRM_RING_FREE_INPUT_RING_TYPE_RX_AGG : + HWRM_RING_FREE_INPUT_RING_TYPE_RX, + cpr->cp_ring_struct->fw_ring_id); + if (BNXT_HAS_RING_GRPS(bp)) + bp->grp_info[queue_index].ag_fw_ring_id = INVALID_HW_RING_ID; + + bnxt_hwrm_stat_ctx_free(bp, cpr); + + bnxt_free_cp_ring(bp, cpr); if (BNXT_HAS_RING_GRPS(bp)) bp->grp_info[queue_index].cp_fw_ring_id = INVALID_HW_RING_ID; } +int bnxt_hwrm_rx_ring_reset(struct bnxt *bp, int queue_index) +{ + int rc; + struct hwrm_ring_reset_input req = {.req_type = 0 }; + struct hwrm_ring_reset_output *resp = bp->hwrm_cmd_resp_addr; + + HWRM_PREP(&req, HWRM_RING_RESET, BNXT_USE_CHIMP_MB); + + req.ring_type = HWRM_RING_RESET_INPUT_RING_TYPE_RX_RING_GRP; + req.ring_id = rte_cpu_to_le_16(bp->grp_info[queue_index].fw_grp_id); + rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB); + + HWRM_CHECK_RESULT(); + + HWRM_UNLOCK(); + + return rc; +} + static int bnxt_free_all_hwrm_rings(struct bnxt *bp) { unsigned int i; - for (i = 0; i < bp->tx_cp_nr_rings; i++) { - struct bnxt_tx_queue *txq = bp->tx_queues[i]; - struct bnxt_tx_ring_info *txr = txq->tx_ring; - struct bnxt_ring *ring = txr->tx_ring_struct; - struct bnxt_cp_ring_info *cpr = txq->cp_ring; - - if (ring->fw_ring_id != INVALID_HW_RING_ID) { - bnxt_hwrm_ring_free(bp, ring, - HWRM_RING_FREE_INPUT_RING_TYPE_TX); - ring->fw_ring_id = INVALID_HW_RING_ID; - memset(txr->tx_desc_ring, 0, - txr->tx_ring_struct->ring_size * - sizeof(*txr->tx_desc_ring)); - memset(txr->tx_buf_ring, 0, - txr->tx_ring_struct->ring_size * - sizeof(*txr->tx_buf_ring)); - txr->tx_prod = 0; - txr->tx_cons = 0; - } - if (cpr->cp_ring_struct->fw_ring_id != INVALID_HW_RING_ID) { - bnxt_free_cp_ring(bp, cpr); - cpr->cp_ring_struct->fw_ring_id = INVALID_HW_RING_ID; - } - } + for (i = 0; i < bp->tx_cp_nr_rings; i++) + bnxt_free_hwrm_tx_ring(bp, i); for (i = 0; i < bp->rx_cp_nr_rings; i++) bnxt_free_hwrm_rx_ring(bp, i); @@ -2617,7 +2810,7 @@ int bnxt_alloc_hwrm_resources(struct bnxt *bp) sprintf(type, "bnxt_hwrm_" PCI_PRI_FMT, pdev->addr.domain, pdev->addr.bus, pdev->addr.devid, pdev->addr.function); - bp->max_resp_len = HWRM_MAX_RESP_LEN; + bp->max_resp_len = BNXT_PAGE_SIZE; bp->hwrm_cmd_resp_addr = rte_malloc(type, bp->max_resp_len, 0); if (bp->hwrm_cmd_resp_addr == NULL) return -ENOMEM; @@ -2712,11 +2905,10 @@ bnxt_free_tunnel_ports(struct bnxt *bp) if (bp->vxlan_port_cnt) bnxt_hwrm_tunnel_dst_port_free(bp, bp->vxlan_fw_dst_port_id, HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_VXLAN); - bp->vxlan_port = 0; + if (bp->geneve_port_cnt) bnxt_hwrm_tunnel_dst_port_free(bp, bp->geneve_fw_dst_port_id, HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_GENEVE); - bp->geneve_port = 0; } void bnxt_free_all_hwrm_resources(struct bnxt *bp) @@ -2773,10 +2965,11 @@ static uint16_t bnxt_parse_eth_link_duplex(uint32_t conf_link_speed) static uint16_t bnxt_check_eth_link_autoneg(uint32_t conf_link) { - return (conf_link & ETH_LINK_SPEED_FIXED) ? 
0 : 1; + return !conf_link; } -static uint16_t bnxt_parse_eth_link_speed(uint32_t conf_link_speed) +static uint16_t bnxt_parse_eth_link_speed(uint32_t conf_link_speed, + uint16_t pam4_link) { uint16_t eth_link_speed = 0; @@ -2815,16 +3008,18 @@ static uint16_t bnxt_parse_eth_link_speed(uint32_t conf_link_speed) HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_40GB; break; case ETH_LINK_SPEED_50G: - eth_link_speed = + eth_link_speed = pam4_link ? + HWRM_PORT_PHY_CFG_INPUT_FORCE_PAM4_LINK_SPEED_50GB : HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_50GB; break; case ETH_LINK_SPEED_100G: - eth_link_speed = + eth_link_speed = pam4_link ? + HWRM_PORT_PHY_CFG_INPUT_FORCE_PAM4_LINK_SPEED_100GB : HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_100GB; break; case ETH_LINK_SPEED_200G: eth_link_speed = - HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_200GB; + HWRM_PORT_PHY_CFG_INPUT_FORCE_PAM4_LINK_SPEED_200GB; break; default: PMD_DRV_LOG(ERR, @@ -2911,7 +3106,7 @@ bnxt_parse_eth_link_speed_mask(struct bnxt *bp, uint32_t link_speed) if (link_speed & ETH_LINK_SPEED_100G) ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_100GB; if (link_speed & ETH_LINK_SPEED_200G) - ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_200GB; + ret |= HWRM_PORT_PHY_CFG_INPUT_FORCE_PAM4_LINK_SPEED_200GB; return ret; } @@ -2985,12 +3180,16 @@ int bnxt_get_hwrm_link_config(struct bnxt *bp, struct rte_eth_link *link) int rc = 0; struct bnxt_link_info *link_info = bp->link_info; + rc = bnxt_hwrm_port_phy_qcaps(bp); + if (rc) + PMD_DRV_LOG(ERR, "Get link config failed with rc %d\n", rc); + rc = bnxt_hwrm_port_phy_qcfg(bp, link_info); if (rc) { - PMD_DRV_LOG(ERR, - "Get link config failed with rc %d\n", rc); + PMD_DRV_LOG(ERR, "Get link config failed with rc %d\n", rc); goto exit; } + if (link_info->link_speed) link->link_speed = bnxt_parse_hw_link_speed(link_info->link_speed); @@ -3025,7 +3224,7 @@ int bnxt_set_hwrm_link_config(struct bnxt *bp, bool link_up) goto port_phy_cfg; autoneg = bnxt_check_eth_link_autoneg(dev_conf->link_speeds); - if (BNXT_CHIP_THOR(bp) && + if (BNXT_CHIP_P5(bp) && dev_conf->link_speeds == ETH_LINK_SPEED_40G) { /* 40G is not supported as part of media auto detect. * The speed should be forced and autoneg disabled @@ -3035,17 +3234,17 @@ int bnxt_set_hwrm_link_config(struct bnxt *bp, bool link_up) autoneg = 0; } - speed = bnxt_parse_eth_link_speed(dev_conf->link_speeds); + /* No auto speeds and no auto_pam4_link. Disable autoneg */ + if (bp->link_info->auto_link_speed == 0 && + bp->link_info->link_signal_mode && + bp->link_info->auto_pam4_link_speeds == 0) + autoneg = 0; + + speed = bnxt_parse_eth_link_speed(dev_conf->link_speeds, + bp->link_info->link_signal_mode); link_req.phy_flags = HWRM_PORT_PHY_CFG_INPUT_FLAGS_RESET_PHY; - /* Autoneg can be done only when the FW allows. - * When user configures fixed speed of 40G and later changes to - * any other speed, auto_link_speed/force_link_speed is still set - * to 40G until link comes up at new speed. - */ - if (autoneg == 1 && - !(!BNXT_CHIP_THOR(bp) && - (bp->link_info->auto_link_speed || - bp->link_info->force_link_speed))) { + /* Autoneg can be done only when the FW allows. */ + if (autoneg == 1 && bp->link_info->support_auto_speeds) { link_req.phy_flags |= HWRM_PORT_PHY_CFG_INPUT_FLAGS_RESTART_AUTONEG; link_req.auto_link_speed_mask = @@ -3066,10 +3265,25 @@ int bnxt_set_hwrm_link_config(struct bnxt *bp, bool link_up) /* If user wants a particular speed try that first. 
*/ if (speed) link_req.link_speed = speed; + else if (bp->link_info->force_pam4_link_speed) + link_req.link_speed = + bp->link_info->force_pam4_link_speed; + else if (bp->link_info->auto_pam4_link_speeds) + link_req.link_speed = + bp->link_info->auto_pam4_link_speeds; + else if (bp->link_info->support_pam4_speeds) + link_req.link_speed = + bp->link_info->support_pam4_speeds; else if (bp->link_info->force_link_speed) link_req.link_speed = bp->link_info->force_link_speed; else link_req.link_speed = bp->link_info->auto_link_speed; + /* Auto PAM4 link speed is zero, but auto_link_speed is not + * zero. Use the auto_link_speed. + */ + if (bp->link_info->auto_link_speed != 0 && + bp->link_info->auto_pam4_link_speeds == 0) + link_req.link_speed = bp->link_info->auto_link_speed; } link_req.duplex = bnxt_parse_eth_link_duplex(dev_conf->link_speeds); link_req.auto_pause = bp->link_info->auto_pause; @@ -3086,7 +3300,6 @@ error: return rc; } -/* JIRA 22088 */ int bnxt_hwrm_func_qcfg(struct bnxt *bp, uint16_t *mtu) { struct hwrm_func_qcfg_input req = {0}; @@ -3103,8 +3316,7 @@ int bnxt_hwrm_func_qcfg(struct bnxt *bp, uint16_t *mtu) HWRM_CHECK_RESULT(); - /* Hard Coded.. 0xfff VLAN ID mask */ - bp->vlan = rte_le_to_cpu_16(resp->vlan) & 0xfff; + bp->vlan = rte_le_to_cpu_16(resp->vlan) & ETH_VLAN_ID_MAX; svif_info = rte_le_to_cpu_16(resp->svif_info); if (svif_info & HWRM_FUNC_QCFG_OUTPUT_SVIF_INFO_SVIF_VALID) @@ -3128,7 +3340,7 @@ int bnxt_hwrm_func_qcfg(struct bnxt *bp, uint16_t *mtu) } if (mtu) - *mtu = rte_le_to_cpu_16(resp->mtu); + *mtu = rte_le_to_cpu_16(resp->admin_mtu); switch (resp->port_partition_type) { case HWRM_FUNC_QCFG_OUTPUT_PORT_PARTITION_TYPE_NPAR1_0: @@ -3142,6 +3354,9 @@ int bnxt_hwrm_func_qcfg(struct bnxt *bp, uint16_t *mtu) break; } + bp->legacy_db_size = + rte_le_to_cpu_16(resp->legacy_l2_db_size_kb) * 1024; + HWRM_UNLOCK(); return rc; @@ -3167,7 +3382,7 @@ int bnxt_hwrm_parent_pf_qcfg(struct bnxt *bp) rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB); - HWRM_CHECK_RESULT(); + HWRM_CHECK_RESULT_SILENT(); memcpy(bp->parent->mac_addr, resp->mac_address, RTE_ETHER_ADDR_LEN); bp->parent->vnic = rte_le_to_cpu_16(resp->dflt_vnic_id); @@ -3176,7 +3391,7 @@ int bnxt_hwrm_parent_pf_qcfg(struct bnxt *bp) /* FIXME: Temporary workaround - remove when firmware issue is fixed. */ if (bp->parent->vnic == 0) { - PMD_DRV_LOG(ERR, "Error: parent VNIC unavailable.\n"); + PMD_DRV_LOG(DEBUG, "parent VNIC unavailable.\n"); /* Use hard-coded values appropriate for current Wh+ fw. 
*/ if (bp->parent->fid == 2) bp->parent->vnic = 0x100; @@ -3245,40 +3460,16 @@ int bnxt_hwrm_port_mac_qcfg(struct bnxt *bp) return 0; } -static void copy_func_cfg_to_qcaps(struct hwrm_func_cfg_input *fcfg, - struct hwrm_func_qcaps_output *qcaps) -{ - qcaps->max_rsscos_ctx = fcfg->num_rsscos_ctxs; - memcpy(qcaps->mac_address, fcfg->dflt_mac_addr, - sizeof(qcaps->mac_address)); - qcaps->max_l2_ctxs = fcfg->num_l2_ctxs; - qcaps->max_rx_rings = fcfg->num_rx_rings; - qcaps->max_tx_rings = fcfg->num_tx_rings; - qcaps->max_cmpl_rings = fcfg->num_cmpl_rings; - qcaps->max_stat_ctx = fcfg->num_stat_ctxs; - qcaps->max_vfs = 0; - qcaps->first_vf_id = 0; - qcaps->max_vnics = fcfg->num_vnics; - qcaps->max_decap_records = 0; - qcaps->max_encap_records = 0; - qcaps->max_tx_wm_flows = 0; - qcaps->max_tx_em_flows = 0; - qcaps->max_rx_wm_flows = 0; - qcaps->max_rx_em_flows = 0; - qcaps->max_flow_id = 0; - qcaps->max_mcast_filters = fcfg->num_mcast_filters; - qcaps->max_sp_tx_rings = 0; - qcaps->max_hw_ring_grps = fcfg->num_hw_ring_grps; -} - -static int bnxt_hwrm_pf_func_cfg(struct bnxt *bp, int tx_rings) +static int bnxt_hwrm_pf_func_cfg(struct bnxt *bp, + struct bnxt_pf_resource_info *pf_resc) { struct hwrm_func_cfg_input req = {0}; struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr; uint32_t enables; int rc; - enables = HWRM_FUNC_CFG_INPUT_ENABLES_MTU | + enables = HWRM_FUNC_CFG_INPUT_ENABLES_ADMIN_MTU | + HWRM_FUNC_CFG_INPUT_ENABLES_HOST_MTU | HWRM_FUNC_CFG_INPUT_ENABLES_MRU | HWRM_FUNC_CFG_INPUT_ENABLES_NUM_RSSCOS_CTXS | HWRM_FUNC_CFG_INPUT_ENABLES_NUM_STAT_CTXS | @@ -3290,21 +3481,23 @@ static int bnxt_hwrm_pf_func_cfg(struct bnxt *bp, int tx_rings) if (BNXT_HAS_RING_GRPS(bp)) { enables |= HWRM_FUNC_CFG_INPUT_ENABLES_NUM_HW_RING_GRPS; - req.num_hw_ring_grps = rte_cpu_to_le_16(bp->max_ring_grps); + req.num_hw_ring_grps = + rte_cpu_to_le_16(pf_resc->num_hw_ring_grps); } else if (BNXT_HAS_NQ(bp)) { enables |= HWRM_FUNC_CFG_INPUT_ENABLES_NUM_MSIX; req.num_msix = rte_cpu_to_le_16(bp->max_nq_rings); } req.flags = rte_cpu_to_le_32(bp->pf->func_cfg_flags); - req.mtu = rte_cpu_to_le_16(BNXT_MAX_MTU); + req.admin_mtu = rte_cpu_to_le_16(BNXT_MAX_MTU); + req.host_mtu = rte_cpu_to_le_16(bp->eth_dev->data->mtu); req.mru = rte_cpu_to_le_16(BNXT_VNIC_MRU(bp->eth_dev->data->mtu)); - req.num_rsscos_ctxs = rte_cpu_to_le_16(bp->max_rsscos_ctx); - req.num_stat_ctxs = rte_cpu_to_le_16(bp->max_stat_ctx); - req.num_cmpl_rings = rte_cpu_to_le_16(bp->max_cp_rings); - req.num_tx_rings = rte_cpu_to_le_16(tx_rings); - req.num_rx_rings = rte_cpu_to_le_16(bp->max_rx_rings); - req.num_l2_ctxs = rte_cpu_to_le_16(bp->max_l2_ctx); + req.num_rsscos_ctxs = rte_cpu_to_le_16(pf_resc->num_rsscos_ctxs); + req.num_stat_ctxs = rte_cpu_to_le_16(pf_resc->num_stat_ctxs); + req.num_cmpl_rings = rte_cpu_to_le_16(pf_resc->num_cp_rings); + req.num_tx_rings = rte_cpu_to_le_16(pf_resc->num_tx_rings); + req.num_rx_rings = rte_cpu_to_le_16(pf_resc->num_rx_rings); + req.num_l2_ctxs = rte_cpu_to_le_16(pf_resc->num_l2_ctxs); req.num_vnics = rte_cpu_to_le_16(bp->max_vnics); req.fid = rte_cpu_to_le_16(0xffff); req.enables = rte_cpu_to_le_32(enables); @@ -3319,11 +3512,45 @@ static int bnxt_hwrm_pf_func_cfg(struct bnxt *bp, int tx_rings) return rc; } -static void populate_vf_func_cfg_req(struct bnxt *bp, - struct hwrm_func_cfg_input *req, - int num_vfs) +/* min values are the guaranteed resources and max values are subject + * to availability. The strategy for now is to keep both min & max + * values the same. 
+ */ +static void +bnxt_fill_vf_func_cfg_req_new(struct bnxt *bp, + struct hwrm_func_vf_resource_cfg_input *req, + int num_vfs) +{ + req->max_rsscos_ctx = rte_cpu_to_le_16(bp->max_rsscos_ctx / + (num_vfs + 1)); + req->min_rsscos_ctx = req->max_rsscos_ctx; + req->max_stat_ctx = rte_cpu_to_le_16(bp->max_stat_ctx / (num_vfs + 1)); + req->min_stat_ctx = req->max_stat_ctx; + req->max_cmpl_rings = rte_cpu_to_le_16(bp->max_cp_rings / + (num_vfs + 1)); + req->min_cmpl_rings = req->max_cmpl_rings; + req->max_tx_rings = rte_cpu_to_le_16(bp->max_tx_rings / (num_vfs + 1)); + req->min_tx_rings = req->max_tx_rings; + req->max_rx_rings = rte_cpu_to_le_16(bp->max_rx_rings / (num_vfs + 1)); + req->min_rx_rings = req->max_rx_rings; + req->max_l2_ctxs = rte_cpu_to_le_16(bp->max_l2_ctx / (num_vfs + 1)); + req->min_l2_ctxs = req->max_l2_ctxs; + /* TODO: For now, do not support VMDq/RFS on VFs. */ + req->max_vnics = rte_cpu_to_le_16(1); + req->min_vnics = req->max_vnics; + req->max_hw_ring_grps = rte_cpu_to_le_16(bp->max_ring_grps / + (num_vfs + 1)); + req->min_hw_ring_grps = req->max_hw_ring_grps; + req->flags = + rte_cpu_to_le_16(HWRM_FUNC_VF_RESOURCE_CFG_INPUT_FLAGS_MIN_GUARANTEED); +} + +static void +bnxt_fill_vf_func_cfg_req_old(struct bnxt *bp, + struct hwrm_func_cfg_input *req, + int num_vfs) { - req->enables = rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_MTU | + req->enables = rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_ADMIN_MTU | HWRM_FUNC_CFG_INPUT_ENABLES_MRU | HWRM_FUNC_CFG_INPUT_ENABLES_NUM_RSSCOS_CTXS | HWRM_FUNC_CFG_INPUT_ENABLES_NUM_STAT_CTXS | @@ -3334,9 +3561,9 @@ static void populate_vf_func_cfg_req(struct bnxt *bp, HWRM_FUNC_CFG_INPUT_ENABLES_NUM_VNICS | HWRM_FUNC_CFG_INPUT_ENABLES_NUM_HW_RING_GRPS); - req->mtu = rte_cpu_to_le_16(bp->eth_dev->data->mtu + RTE_ETHER_HDR_LEN + - RTE_ETHER_CRC_LEN + VLAN_TAG_SIZE * - BNXT_NUM_VLANS); + req->admin_mtu = rte_cpu_to_le_16(bp->eth_dev->data->mtu + RTE_ETHER_HDR_LEN + + RTE_ETHER_CRC_LEN + VLAN_TAG_SIZE * + BNXT_NUM_VLANS); req->mru = rte_cpu_to_le_16(BNXT_VNIC_MRU(bp->eth_dev->data->mtu)); req->num_rsscos_ctxs = rte_cpu_to_le_16(bp->max_rsscos_ctx / (num_vfs + 1)); @@ -3352,75 +3579,73 @@ static void populate_vf_func_cfg_req(struct bnxt *bp, (num_vfs + 1)); } -static void add_random_mac_if_needed(struct bnxt *bp, - struct hwrm_func_cfg_input *cfg_req, - int vf) -{ - struct rte_ether_addr mac; - - if (bnxt_hwrm_func_qcfg_vf_default_mac(bp, vf, &mac)) - return; - - if (memcmp(mac.addr_bytes, "\x00\x00\x00\x00\x00", 6) == 0) { - cfg_req->enables |= - rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_DFLT_MAC_ADDR); - rte_eth_random_addr(cfg_req->dflt_mac_addr); - bp->pf->vf_info[vf].random_mac = true; - } else { - memcpy(cfg_req->dflt_mac_addr, mac.addr_bytes, - RTE_ETHER_ADDR_LEN); - } -} - -static int reserve_resources_from_vf(struct bnxt *bp, - struct hwrm_func_cfg_input *cfg_req, +/* Update the port wide resource values based on how many resources + * got allocated to the VF. 
+ */ +static int bnxt_update_max_resources(struct bnxt *bp, int vf) { - struct hwrm_func_qcaps_input req = {0}; - struct hwrm_func_qcaps_output *resp = bp->hwrm_cmd_resp_addr; + struct hwrm_func_qcfg_input req = {0}; + struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr; int rc; /* Get the actual allocated values now */ - HWRM_PREP(&req, HWRM_FUNC_QCAPS, BNXT_USE_CHIMP_MB); + HWRM_PREP(&req, HWRM_FUNC_QCFG, BNXT_USE_CHIMP_MB); req.fid = rte_cpu_to_le_16(bp->pf->vf_info[vf].fid); rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB); + HWRM_CHECK_RESULT(); - if (rc) { - PMD_DRV_LOG(ERR, "hwrm_func_qcaps failed rc:%d\n", rc); - copy_func_cfg_to_qcaps(cfg_req, resp); - } else if (resp->error_code) { - rc = rte_le_to_cpu_16(resp->error_code); - PMD_DRV_LOG(ERR, "hwrm_func_qcaps error %d\n", rc); - copy_func_cfg_to_qcaps(cfg_req, resp); - } - - bp->max_rsscos_ctx -= rte_le_to_cpu_16(resp->max_rsscos_ctx); - bp->max_stat_ctx -= rte_le_to_cpu_16(resp->max_stat_ctx); - bp->max_cp_rings -= rte_le_to_cpu_16(resp->max_cmpl_rings); - bp->max_tx_rings -= rte_le_to_cpu_16(resp->max_tx_rings); - bp->max_rx_rings -= rte_le_to_cpu_16(resp->max_rx_rings); - bp->max_l2_ctx -= rte_le_to_cpu_16(resp->max_l2_ctxs); - /* - * TODO: While not supporting VMDq with VFs, max_vnics is always - * forced to 1 in this case - */ - //bp->max_vnics -= rte_le_to_cpu_16(esp->max_vnics); - bp->max_ring_grps -= rte_le_to_cpu_16(resp->max_hw_ring_grps); + bp->max_rsscos_ctx -= rte_le_to_cpu_16(resp->alloc_rsscos_ctx); + bp->max_stat_ctx -= rte_le_to_cpu_16(resp->alloc_stat_ctx); + bp->max_cp_rings -= rte_le_to_cpu_16(resp->alloc_cmpl_rings); + bp->max_tx_rings -= rte_le_to_cpu_16(resp->alloc_tx_rings); + bp->max_rx_rings -= rte_le_to_cpu_16(resp->alloc_rx_rings); + bp->max_l2_ctx -= rte_le_to_cpu_16(resp->alloc_l2_ctx); + bp->max_ring_grps -= rte_le_to_cpu_16(resp->alloc_hw_ring_grps); HWRM_UNLOCK(); return 0; } -int bnxt_hwrm_func_qcfg_current_vf_vlan(struct bnxt *bp, int vf) +/* Update the PF resource values based on how many resources + * got allocated to it. 
+ */ +static int bnxt_update_max_resources_pf_only(struct bnxt *bp) { struct hwrm_func_qcfg_input req = {0}; struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr; int rc; - /* Check for zero MAC address */ + /* Get the actual allocated values now */ HWRM_PREP(&req, HWRM_FUNC_QCFG, BNXT_USE_CHIMP_MB); - req.fid = rte_cpu_to_le_16(bp->pf->vf_info[vf].fid); + req.fid = rte_cpu_to_le_16(0xffff); + rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB); + HWRM_CHECK_RESULT(); + + bp->max_rsscos_ctx = rte_le_to_cpu_16(resp->alloc_rsscos_ctx); + bp->max_stat_ctx = rte_le_to_cpu_16(resp->alloc_stat_ctx); + bp->max_cp_rings = rte_le_to_cpu_16(resp->alloc_cmpl_rings); + bp->max_tx_rings = rte_le_to_cpu_16(resp->alloc_tx_rings); + bp->max_rx_rings = rte_le_to_cpu_16(resp->alloc_rx_rings); + bp->max_l2_ctx = rte_le_to_cpu_16(resp->alloc_l2_ctx); + bp->max_ring_grps = rte_le_to_cpu_16(resp->alloc_hw_ring_grps); + bp->max_vnics = rte_le_to_cpu_16(resp->alloc_vnics); + + HWRM_UNLOCK(); + + return 0; +} + +int bnxt_hwrm_func_qcfg_current_vf_vlan(struct bnxt *bp, int vf) +{ + struct hwrm_func_qcfg_input req = {0}; + struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr; + int rc; + + /* Check for zero MAC address */ + HWRM_PREP(&req, HWRM_FUNC_QCFG, BNXT_USE_CHIMP_MB); + req.fid = rte_cpu_to_le_16(bp->pf->vf_info[vf].fid); rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB); HWRM_CHECK_RESULT(); rc = rte_le_to_cpu_16(resp->vlan); @@ -3430,7 +3655,8 @@ int bnxt_hwrm_func_qcfg_current_vf_vlan(struct bnxt *bp, int vf) return rc; } -static int update_pf_resource_max(struct bnxt *bp) +static int bnxt_query_pf_resources(struct bnxt *bp, + struct bnxt_pf_resource_info *pf_resc) { struct hwrm_func_qcfg_input req = {0}; struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr; @@ -3442,8 +3668,13 @@ static int update_pf_resource_max(struct bnxt *bp) rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB); HWRM_CHECK_RESULT(); - /* Only TX ring value reflects actual allocation? 
TODO */ - bp->max_tx_rings = rte_le_to_cpu_16(resp->alloc_tx_rings); + pf_resc->num_tx_rings = rte_le_to_cpu_16(resp->alloc_tx_rings); + pf_resc->num_rsscos_ctxs = rte_le_to_cpu_16(resp->alloc_rsscos_ctx); + pf_resc->num_stat_ctxs = rte_le_to_cpu_16(resp->alloc_stat_ctx); + pf_resc->num_cp_rings = rte_le_to_cpu_16(resp->alloc_cmpl_rings); + pf_resc->num_rx_rings = rte_le_to_cpu_16(resp->alloc_rx_rings); + pf_resc->num_l2_ctxs = rte_le_to_cpu_16(resp->alloc_l2_ctx); + pf_resc->num_hw_ring_grps = rte_le_to_cpu_32(resp->alloc_hw_ring_grps); bp->pf->evb_mode = resp->evb_mode; HWRM_UNLOCK(); @@ -3451,37 +3682,43 @@ static int update_pf_resource_max(struct bnxt *bp) return rc; } -int bnxt_hwrm_allocate_pf_only(struct bnxt *bp) -{ - int rc; +static void +bnxt_calculate_pf_resources(struct bnxt *bp, + struct bnxt_pf_resource_info *pf_resc, + int num_vfs) +{ + if (!num_vfs) { + pf_resc->num_rsscos_ctxs = bp->max_rsscos_ctx; + pf_resc->num_stat_ctxs = bp->max_stat_ctx; + pf_resc->num_cp_rings = bp->max_cp_rings; + pf_resc->num_tx_rings = bp->max_tx_rings; + pf_resc->num_rx_rings = bp->max_rx_rings; + pf_resc->num_l2_ctxs = bp->max_l2_ctx; + pf_resc->num_hw_ring_grps = bp->max_ring_grps; - if (!BNXT_PF(bp)) { - PMD_DRV_LOG(ERR, "Attempt to allcoate VFs on a VF!\n"); - return -EINVAL; + return; } - rc = bnxt_hwrm_func_qcaps(bp); - if (rc) - return rc; - - bp->pf->func_cfg_flags &= - ~(HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_ENABLE | - HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_DISABLE); - bp->pf->func_cfg_flags |= - HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_DISABLE; - rc = bnxt_hwrm_pf_func_cfg(bp, bp->max_tx_rings); - rc = __bnxt_hwrm_func_qcaps(bp); - return rc; + pf_resc->num_rsscos_ctxs = bp->max_rsscos_ctx / (num_vfs + 1) + + bp->max_rsscos_ctx % (num_vfs + 1); + pf_resc->num_stat_ctxs = bp->max_stat_ctx / (num_vfs + 1) + + bp->max_stat_ctx % (num_vfs + 1); + pf_resc->num_cp_rings = bp->max_cp_rings / (num_vfs + 1) + + bp->max_cp_rings % (num_vfs + 1); + pf_resc->num_tx_rings = bp->max_tx_rings / (num_vfs + 1) + + bp->max_tx_rings % (num_vfs + 1); + pf_resc->num_rx_rings = bp->max_rx_rings / (num_vfs + 1) + + bp->max_rx_rings % (num_vfs + 1); + pf_resc->num_l2_ctxs = bp->max_l2_ctx / (num_vfs + 1) + + bp->max_l2_ctx % (num_vfs + 1); + pf_resc->num_hw_ring_grps = bp->max_ring_grps / (num_vfs + 1) + + bp->max_ring_grps % (num_vfs + 1); } -int bnxt_hwrm_allocate_vfs(struct bnxt *bp, int num_vfs) +int bnxt_hwrm_allocate_pf_only(struct bnxt *bp) { - struct hwrm_func_cfg_input req = {0}; - struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr; - int i; - size_t sz; - int rc = 0; - size_t req_buf_sz; + struct bnxt_pf_resource_info pf_resc = { 0 }; + int rc; if (!BNXT_PF(bp)) { PMD_DRV_LOG(ERR, "Attempt to allcoate VFs on a VF!\n"); @@ -3489,57 +3726,108 @@ int bnxt_hwrm_allocate_vfs(struct bnxt *bp, int num_vfs) } rc = bnxt_hwrm_func_qcaps(bp); - if (rc) return rc; - bp->pf->active_vfs = num_vfs; + bnxt_calculate_pf_resources(bp, &pf_resc, 0); - /* - * First, configure the PF to only use one TX ring. This ensures that - * there are enough rings for all VFs. - * - * If we don't do this, when we call func_alloc() later, we will lock - * extra rings to the PF that won't be available during func_cfg() of - * the VFs. 
- * - * This has been fixed with firmware versions above 20.6.54 - */ bp->pf->func_cfg_flags &= ~(HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_ENABLE | HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_DISABLE); bp->pf->func_cfg_flags |= - HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_ENABLE; - rc = bnxt_hwrm_pf_func_cfg(bp, 1); + HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_DISABLE; + + rc = bnxt_hwrm_pf_func_cfg(bp, &pf_resc); if (rc) return rc; - /* - * Now, create and register a buffer to hold forwarded VF requests - */ + rc = bnxt_update_max_resources_pf_only(bp); + + return rc; +} + +static int +bnxt_configure_vf_req_buf(struct bnxt *bp, int num_vfs) +{ + size_t req_buf_sz, sz; + int i, rc; + req_buf_sz = num_vfs * HWRM_MAX_REQ_LEN; bp->pf->vf_req_buf = rte_malloc("bnxt_vf_fwd", req_buf_sz, page_roundup(num_vfs * HWRM_MAX_REQ_LEN)); if (bp->pf->vf_req_buf == NULL) { - rc = -ENOMEM; - goto error_free; + return -ENOMEM; } + for (sz = 0; sz < req_buf_sz; sz += getpagesize()) rte_mem_lock_page(((char *)bp->pf->vf_req_buf) + sz); + for (i = 0; i < num_vfs; i++) bp->pf->vf_info[i].req_buf = ((char *)bp->pf->vf_req_buf) + - (i * HWRM_MAX_REQ_LEN); + (i * HWRM_MAX_REQ_LEN); - rc = bnxt_hwrm_func_buf_rgtr(bp); + rc = bnxt_hwrm_func_buf_rgtr(bp, num_vfs); if (rc) - goto error_free; + rte_free(bp->pf->vf_req_buf); + + return rc; +} - populate_vf_func_cfg_req(bp, &req, num_vfs); +static int +bnxt_process_vf_resc_config_new(struct bnxt *bp, int num_vfs) +{ + struct hwrm_func_vf_resource_cfg_output *resp = bp->hwrm_cmd_resp_addr; + struct hwrm_func_vf_resource_cfg_input req = {0}; + int i, rc = 0; + bnxt_fill_vf_func_cfg_req_new(bp, &req, num_vfs); bp->pf->active_vfs = 0; for (i = 0; i < num_vfs; i++) { - add_random_mac_if_needed(bp, &req, i); + HWRM_PREP(&req, HWRM_FUNC_VF_RESOURCE_CFG, BNXT_USE_CHIMP_MB); + req.vf_id = rte_cpu_to_le_16(bp->pf->vf_info[i].fid); + rc = bnxt_hwrm_send_message(bp, + &req, + sizeof(req), + BNXT_USE_CHIMP_MB); + if (rc || resp->error_code) { + PMD_DRV_LOG(ERR, + "Failed to initialize VF %d\n", i); + PMD_DRV_LOG(ERR, + "Not all VFs available. (%d, %d)\n", + rc, resp->error_code); + HWRM_UNLOCK(); + + /* If the first VF configuration itself fails, + * unregister the vf_fwd_request buffer. + */ + if (i == 0) + bnxt_hwrm_func_buf_unrgtr(bp); + break; + } + HWRM_UNLOCK(); + + /* Update the max resource values based on the resource values + * allocated to the VF. + */ + bnxt_update_max_resources(bp, i); + bp->pf->active_vfs++; + bnxt_hwrm_func_clr_stats(bp, bp->pf->vf_info[i].fid); + } + + return 0; +} + +static int +bnxt_process_vf_resc_config_old(struct bnxt *bp, int num_vfs) +{ + struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr; + struct hwrm_func_cfg_input req = {0}; + int i, rc; + + bnxt_fill_vf_func_cfg_req_old(bp, &req, num_vfs); + bp->pf->active_vfs = 0; + for (i = 0; i < num_vfs; i++) { HWRM_PREP(&req, HWRM_FUNC_CFG, BNXT_USE_CHIMP_MB); req.flags = rte_cpu_to_le_32(bp->pf->vf_info[i].func_cfg_flags); req.fid = rte_cpu_to_le_16(bp->pf->vf_info[i].fid); @@ -3554,40 +3842,107 @@ int bnxt_hwrm_allocate_vfs(struct bnxt *bp, int num_vfs) if (rc || resp->error_code) { PMD_DRV_LOG(ERR, - "Failed to initizlie VF %d\n", i); + "Failed to initialize VF %d\n", i); PMD_DRV_LOG(ERR, "Not all VFs available. (%d, %d)\n", rc, resp->error_code); HWRM_UNLOCK(); + + /* If the first VF configuration itself fails, + * unregister the vf_fwd_request buffer. 
-	populate_vf_func_cfg_req(bp, &req, num_vfs);
+static int
+bnxt_process_vf_resc_config_new(struct bnxt *bp, int num_vfs)
+{
+	struct hwrm_func_vf_resource_cfg_output *resp = bp->hwrm_cmd_resp_addr;
+	struct hwrm_func_vf_resource_cfg_input req = {0};
+	int i, rc = 0;
 
+	bnxt_fill_vf_func_cfg_req_new(bp, &req, num_vfs);
 	bp->pf->active_vfs = 0;
 	for (i = 0; i < num_vfs; i++) {
-		add_random_mac_if_needed(bp, &req, i);
+		HWRM_PREP(&req, HWRM_FUNC_VF_RESOURCE_CFG, BNXT_USE_CHIMP_MB);
+		req.vf_id = rte_cpu_to_le_16(bp->pf->vf_info[i].fid);
+		rc = bnxt_hwrm_send_message(bp,
+					    &req,
+					    sizeof(req),
+					    BNXT_USE_CHIMP_MB);
+		if (rc || resp->error_code) {
+			PMD_DRV_LOG(ERR,
+				    "Failed to initialize VF %d\n", i);
+			PMD_DRV_LOG(ERR,
+				    "Not all VFs available. (%d, %d)\n",
+				    rc, resp->error_code);
+			HWRM_UNLOCK();
+
+			/* If the first VF configuration itself fails,
+			 * unregister the vf_fwd_request buffer.
+			 */
+			if (i == 0)
+				bnxt_hwrm_func_buf_unrgtr(bp);
+			break;
+		}
+		HWRM_UNLOCK();
+
+		/* Update the max resource values based on the resource values
+		 * allocated to the VF.
+		 */
+		bnxt_update_max_resources(bp, i);
+		bp->pf->active_vfs++;
+		bnxt_hwrm_func_clr_stats(bp, bp->pf->vf_info[i].fid);
+	}
+
+	return 0;
+}
+
+static int
+bnxt_process_vf_resc_config_old(struct bnxt *bp, int num_vfs)
+{
+	struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
+	struct hwrm_func_cfg_input req = {0};
+	int i, rc;
+
+	bnxt_fill_vf_func_cfg_req_old(bp, &req, num_vfs);
+	bp->pf->active_vfs = 0;
+	for (i = 0; i < num_vfs; i++) {
 		HWRM_PREP(&req, HWRM_FUNC_CFG, BNXT_USE_CHIMP_MB);
 		req.flags = rte_cpu_to_le_32(bp->pf->vf_info[i].func_cfg_flags);
 		req.fid = rte_cpu_to_le_16(bp->pf->vf_info[i].fid);
@@ -3554,40 +3842,107 @@ int bnxt_hwrm_allocate_vfs(struct bnxt *bp, int num_vfs)
 
 		if (rc || resp->error_code) {
 			PMD_DRV_LOG(ERR,
-				"Failed to initizlie VF %d\n", i);
+				"Failed to initialize VF %d\n", i);
 			PMD_DRV_LOG(ERR,
 				"Not all VFs available. (%d, %d)\n",
 				rc, resp->error_code);
 			HWRM_UNLOCK();
+
+			/* If the first VF configuration itself fails,
+			 * unregister the vf_fwd_request buffer.
+			 */
+			if (i == 0)
+				bnxt_hwrm_func_buf_unrgtr(bp);
 			break;
 		}
 		HWRM_UNLOCK();
 
-		reserve_resources_from_vf(bp, &req, i);
+		/* Update the max resource values based on the resource values
+		 * allocated to the VF.
+		 */
+		bnxt_update_max_resources(bp, i);
 		bp->pf->active_vfs++;
 		bnxt_hwrm_func_clr_stats(bp, bp->pf->vf_info[i].fid);
 	}
+
+	return 0;
+}
+
+static void
+bnxt_configure_vf_resources(struct bnxt *bp, int num_vfs)
+{
+	if (bp->flags & BNXT_FLAG_NEW_RM)
+		bnxt_process_vf_resc_config_new(bp, num_vfs);
+	else
+		bnxt_process_vf_resc_config_old(bp, num_vfs);
+}
+
+static void
+bnxt_update_pf_resources(struct bnxt *bp,
+			 struct bnxt_pf_resource_info *pf_resc)
+{
+	bp->max_rsscos_ctx = pf_resc->num_rsscos_ctxs;
+	bp->max_stat_ctx = pf_resc->num_stat_ctxs;
+	bp->max_cp_rings = pf_resc->num_cp_rings;
+	bp->max_tx_rings = pf_resc->num_tx_rings;
+	bp->max_rx_rings = pf_resc->num_rx_rings;
+	bp->max_ring_grps = pf_resc->num_hw_ring_grps;
+}
+
+static int32_t
+bnxt_configure_pf_resources(struct bnxt *bp,
+			    struct bnxt_pf_resource_info *pf_resc)
+{
 	/*
-	 * Now configure the PF to use "the rest" of the resources
-	 * We're using STD_TX_RING_MODE here though which will limit the TX
-	 * rings. This will allow QoS to function properly. Not setting this
+	 * We're using STD_TX_RING_MODE here which will limit the TX
+	 * rings. This will allow QoS to function properly. Not setting this
 	 * will cause PF rings to break bandwidth settings.
 	 */
-	rc = bnxt_hwrm_pf_func_cfg(bp, bp->max_tx_rings);
+	bp->pf->func_cfg_flags &=
+		~(HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_ENABLE |
+		  HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_DISABLE);
+	bp->pf->func_cfg_flags |=
+		HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_ENABLE;
+	return bnxt_hwrm_pf_func_cfg(bp, pf_resc);
+}
+
+int bnxt_hwrm_allocate_vfs(struct bnxt *bp, int num_vfs)
+{
+	struct bnxt_pf_resource_info pf_resc = { 0 };
+	int rc;
+
+	if (!BNXT_PF(bp)) {
+		PMD_DRV_LOG(ERR, "Attempt to allocate VFs on a VF!\n");
+		return -EINVAL;
+	}
+
+	rc = bnxt_hwrm_func_qcaps(bp);
 	if (rc)
-		goto error_free;
+		return rc;
 
-	rc = update_pf_resource_max(bp);
+	bnxt_calculate_pf_resources(bp, &pf_resc, num_vfs);
+
+	rc = bnxt_configure_pf_resources(bp, &pf_resc);
 	if (rc)
-		goto error_free;
+		return rc;
 
-	return rc;
+	rc = bnxt_query_pf_resources(bp, &pf_resc);
+	if (rc)
+		return rc;
 
-error_free:
-	bnxt_hwrm_func_buf_unrgtr(bp);
-	return rc;
+	/*
+	 * Now, create and register a buffer to hold forwarded VF requests
	 */
+	rc = bnxt_configure_vf_req_buf(bp, num_vfs);
+	if (rc)
+		return rc;
+
+	bnxt_configure_vf_resources(bp, num_vfs);
+
+	bnxt_update_pf_resources(bp, &pf_resc);
+
+	return 0;
 }
 
 int bnxt_hwrm_pf_evb_mode(struct bnxt *bp)
@@ -3658,6 +4013,18 @@ int bnxt_hwrm_tunnel_dst_port_free(struct bnxt *bp, uint16_t port,
 	HWRM_CHECK_RESULT();
 	HWRM_UNLOCK();
 
+	if (tunnel_type ==
+	    HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_VXLAN) {
+		bp->vxlan_port = 0;
+		bp->vxlan_port_cnt = 0;
+	}
+
+	if (tunnel_type ==
+	    HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_GENEVE) {
+		bp->geneve_port = 0;
+		bp->geneve_port_cnt = 0;
+	}
+
 	return rc;
 }
 
@@ -3692,23 +4059,24 @@ int bnxt_set_rx_mask_no_vlan(struct bnxt *bp, struct bnxt_vnic_info *vnic)
 	return bnxt_hwrm_cfa_l2_set_rx_mask(bp, vnic, 0, NULL);
 }
 
-int bnxt_hwrm_func_buf_rgtr(struct bnxt *bp)
+int bnxt_hwrm_func_buf_rgtr(struct bnxt *bp, int num_vfs)
 {
-	int rc = 0;
-	struct hwrm_func_buf_rgtr_input req = {.req_type = 0 };
 	struct hwrm_func_buf_rgtr_output *resp = bp->hwrm_cmd_resp_addr;
+	struct hwrm_func_buf_rgtr_input req = {.req_type = 0 };
+	int rc;
 
 	HWRM_PREP(&req, HWRM_FUNC_BUF_RGTR, BNXT_USE_CHIMP_MB);
 
 	req.req_buf_num_pages = rte_cpu_to_le_16(1);
-	req.req_buf_page_size = rte_cpu_to_le_16(
-			page_getenum(bp->pf->active_vfs * HWRM_MAX_REQ_LEN));
+	req.req_buf_page_size =
+		rte_cpu_to_le_16(page_getenum(num_vfs * HWRM_MAX_REQ_LEN));
 	req.req_buf_len = rte_cpu_to_le_16(HWRM_MAX_REQ_LEN);
 	req.req_buf_page_addr0 =
 		rte_cpu_to_le_64(rte_malloc_virt2iova(bp->pf->vf_req_buf));
 	if (req.req_buf_page_addr0 == RTE_BAD_IOVA) {
 		PMD_DRV_LOG(ERR,
 			"unable to map buffer address to physical memory\n");
+		HWRM_UNLOCK();
 		return -ENOMEM;
 	}
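A hedged worked example of the sizing above, assuming HWRM_MAX_REQ_LEN is 128 bytes and that page_getenum() returns the log2 order of the smallest power-of-two covering the buffer:

	/* num_vfs = 64: the forward buffer is 64 * 128 = 8192 bytes, so
	 * page_getenum(8192) = 13 (one 8 KiB "page", req_buf_num_pages = 1),
	 * while each forwarded request still occupies HWRM_MAX_REQ_LEN bytes.
	 */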
@@ -3935,8 +4303,20 @@ int bnxt_hwrm_exec_fwd_resp(struct bnxt *bp, uint16_t target_id,
 	return rc;
 }
 
-int bnxt_hwrm_ctx_qstats(struct bnxt *bp, uint32_t cid, int idx,
-			 struct rte_eth_stats *stats, uint8_t rx)
+static void bnxt_update_prev_stat(uint64_t *cntr, uint64_t *prev_cntr)
+{
+	/* One of the HW stat values that make up this counter was zero as
+	 * returned by HW in this iteration, so use the previous
+	 * iteration's counter value
+	 */
+	if (*prev_cntr && *cntr == 0)
+		*cntr = *prev_cntr;
+	else
+		*prev_cntr = *cntr;
+}
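The counter smoothing above is easiest to see with concrete values; a minimal usage sketch (function name and numbers invented for illustration):

	void stat_smoothing_example(void)
	{
		uint64_t cntr, prev = 0;

		cntr = 42; bnxt_update_prev_stat(&cntr, &prev); /* prev becomes 42 */
		cntr = 0;  bnxt_update_prev_stat(&cntr, &prev); /* HW glitch: cntr reads back as 42 */
		cntr = 57; bnxt_update_prev_stat(&cntr, &prev); /* normal case: prev becomes 57 */
	}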
+
+int bnxt_hwrm_ring_stats(struct bnxt *bp, uint32_t cid, int idx,
+			 struct bnxt_ring_stats *ring_stats, bool rx)
 {
 	int rc = 0;
 	struct hwrm_stat_ctx_query_input req = {.req_type = 0};
@@ -3951,21 +4331,85 @@ int bnxt_hwrm_ctx_qstats(struct bnxt *bp, uint32_t cid, int idx,
 	HWRM_CHECK_RESULT();
 
 	if (rx) {
-		stats->q_ipackets[idx] = rte_le_to_cpu_64(resp->rx_ucast_pkts);
-		stats->q_ipackets[idx] += rte_le_to_cpu_64(resp->rx_mcast_pkts);
-		stats->q_ipackets[idx] += rte_le_to_cpu_64(resp->rx_bcast_pkts);
-		stats->q_ibytes[idx] = rte_le_to_cpu_64(resp->rx_ucast_bytes);
-		stats->q_ibytes[idx] += rte_le_to_cpu_64(resp->rx_mcast_bytes);
-		stats->q_ibytes[idx] += rte_le_to_cpu_64(resp->rx_bcast_bytes);
-		stats->q_errors[idx] = rte_le_to_cpu_64(resp->rx_err_pkts);
-		stats->q_errors[idx] += rte_le_to_cpu_64(resp->rx_drop_pkts);
+		struct bnxt_ring_stats *prev_stats = &bp->prev_rx_ring_stats[idx];
+
+		ring_stats->rx_ucast_pkts = rte_le_to_cpu_64(resp->rx_ucast_pkts);
+		bnxt_update_prev_stat(&ring_stats->rx_ucast_pkts,
+				      &prev_stats->rx_ucast_pkts);
+
+		ring_stats->rx_mcast_pkts = rte_le_to_cpu_64(resp->rx_mcast_pkts);
+		bnxt_update_prev_stat(&ring_stats->rx_mcast_pkts,
+				      &prev_stats->rx_mcast_pkts);
+
+		ring_stats->rx_bcast_pkts = rte_le_to_cpu_64(resp->rx_bcast_pkts);
+		bnxt_update_prev_stat(&ring_stats->rx_bcast_pkts,
+				      &prev_stats->rx_bcast_pkts);
+
+		ring_stats->rx_ucast_bytes = rte_le_to_cpu_64(resp->rx_ucast_bytes);
+		bnxt_update_prev_stat(&ring_stats->rx_ucast_bytes,
+				      &prev_stats->rx_ucast_bytes);
+
+		ring_stats->rx_mcast_bytes = rte_le_to_cpu_64(resp->rx_mcast_bytes);
+		bnxt_update_prev_stat(&ring_stats->rx_mcast_bytes,
+				      &prev_stats->rx_mcast_bytes);
+
+		ring_stats->rx_bcast_bytes = rte_le_to_cpu_64(resp->rx_bcast_bytes);
+		bnxt_update_prev_stat(&ring_stats->rx_bcast_bytes,
+				      &prev_stats->rx_bcast_bytes);
+
+		ring_stats->rx_discard_pkts = rte_le_to_cpu_64(resp->rx_discard_pkts);
+		bnxt_update_prev_stat(&ring_stats->rx_discard_pkts,
+				      &prev_stats->rx_discard_pkts);
+
+		ring_stats->rx_error_pkts = rte_le_to_cpu_64(resp->rx_error_pkts);
+		bnxt_update_prev_stat(&ring_stats->rx_error_pkts,
+				      &prev_stats->rx_error_pkts);
+
+		ring_stats->rx_agg_pkts = rte_le_to_cpu_64(resp->rx_agg_pkts);
+		bnxt_update_prev_stat(&ring_stats->rx_agg_pkts,
+				      &prev_stats->rx_agg_pkts);
+
+		ring_stats->rx_agg_bytes = rte_le_to_cpu_64(resp->rx_agg_bytes);
+		bnxt_update_prev_stat(&ring_stats->rx_agg_bytes,
+				      &prev_stats->rx_agg_bytes);
+
+		ring_stats->rx_agg_events = rte_le_to_cpu_64(resp->rx_agg_events);
+		bnxt_update_prev_stat(&ring_stats->rx_agg_events,
+				      &prev_stats->rx_agg_events);
+
+		ring_stats->rx_agg_aborts = rte_le_to_cpu_64(resp->rx_agg_aborts);
+		bnxt_update_prev_stat(&ring_stats->rx_agg_aborts,
+				      &prev_stats->rx_agg_aborts);
 	} else {
-		stats->q_opackets[idx] = rte_le_to_cpu_64(resp->tx_ucast_pkts);
-		stats->q_opackets[idx] += rte_le_to_cpu_64(resp->tx_mcast_pkts);
-		stats->q_opackets[idx] += rte_le_to_cpu_64(resp->tx_bcast_pkts);
-		stats->q_obytes[idx] = rte_le_to_cpu_64(resp->tx_ucast_bytes);
-		stats->q_obytes[idx] += rte_le_to_cpu_64(resp->tx_mcast_bytes);
-		stats->q_obytes[idx] += rte_le_to_cpu_64(resp->tx_bcast_bytes);
+		struct bnxt_ring_stats *prev_stats = &bp->prev_tx_ring_stats[idx];
+
+		ring_stats->tx_ucast_pkts = rte_le_to_cpu_64(resp->tx_ucast_pkts);
+		bnxt_update_prev_stat(&ring_stats->tx_ucast_pkts,
+				      &prev_stats->tx_ucast_pkts);
+
+		ring_stats->tx_mcast_pkts = rte_le_to_cpu_64(resp->tx_mcast_pkts);
+		bnxt_update_prev_stat(&ring_stats->tx_mcast_pkts,
+				      &prev_stats->tx_mcast_pkts);
+
+		ring_stats->tx_bcast_pkts = rte_le_to_cpu_64(resp->tx_bcast_pkts);
+		bnxt_update_prev_stat(&ring_stats->tx_bcast_pkts,
+				      &prev_stats->tx_bcast_pkts);
+
+		ring_stats->tx_ucast_bytes = rte_le_to_cpu_64(resp->tx_ucast_bytes);
+		bnxt_update_prev_stat(&ring_stats->tx_ucast_bytes,
+				      &prev_stats->tx_ucast_bytes);
+
+		ring_stats->tx_mcast_bytes = rte_le_to_cpu_64(resp->tx_mcast_bytes);
+		bnxt_update_prev_stat(&ring_stats->tx_mcast_bytes,
+				      &prev_stats->tx_mcast_bytes);
+
+		ring_stats->tx_bcast_bytes = rte_le_to_cpu_64(resp->tx_bcast_bytes);
+		bnxt_update_prev_stat(&ring_stats->tx_bcast_bytes,
+				      &prev_stats->tx_bcast_bytes);
+
+		ring_stats->tx_discard_pkts = rte_le_to_cpu_64(resp->tx_discard_pkts);
+		bnxt_update_prev_stat(&ring_stats->tx_discard_pkts,
+				      &prev_stats->tx_discard_pkts);
 	}
 
 	HWRM_UNLOCK();
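bnxt_hwrm_ring_stats() now returns raw per-ring counters instead of filling rte_eth_stats directly; the removed q_ipackets arithmetic is equivalent to this sketch (helper name illustrative, left to the caller in the new scheme):

	static uint64_t ring_rx_pkts(const struct bnxt_ring_stats *rs)
	{
		/* The sum the old bnxt_hwrm_ctx_qstats() stored in q_ipackets[idx]. */
		return rs->rx_ucast_pkts + rs->rx_mcast_pkts + rs->rx_bcast_pkts;
	}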
@@ -4029,7 +4473,7 @@ int bnxt_hwrm_port_led_qcaps(struct bnxt *bp)
 	req.port_id = bp->pf->port_id;
 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
 
-	HWRM_CHECK_RESULT();
+	HWRM_CHECK_RESULT_SILENT();
 
 	if (resp->num_leds > 0 && resp->num_leds < BNXT_MAX_LED) {
 		unsigned int i;
@@ -4139,6 +4583,7 @@ int bnxt_get_nvram_directory(struct bnxt *bp, uint32_t len, uint8_t *data)
 		return -ENOMEM;
 	dma_handle = rte_malloc_virt2iova(buf);
 	if (dma_handle == RTE_BAD_IOVA) {
+		rte_free(buf);
 		PMD_DRV_LOG(ERR,
 			"unable to map response address to physical memory\n");
 		return -ENOMEM;
@@ -4173,6 +4618,7 @@ int bnxt_hwrm_get_nvram_item(struct bnxt *bp, uint32_t index,
 
 	dma_handle = rte_malloc_virt2iova(buf);
 	if (dma_handle == RTE_BAD_IOVA) {
+		rte_free(buf);
 		PMD_DRV_LOG(ERR,
 			"unable to map response address to physical memory\n");
 		return -ENOMEM;
@@ -4208,7 +4654,6 @@ int bnxt_hwrm_erase_nvram_directory(struct bnxt *bp, uint8_t index)
 	return rc;
 }
 
-
 int bnxt_hwrm_flash_nvram(struct bnxt *bp, uint16_t dir_type,
 			  uint16_t dir_ordinal, uint16_t dir_ext,
 			  uint16_t dir_attr, const uint8_t *data,
@@ -4226,6 +4671,7 @@ int bnxt_hwrm_flash_nvram(struct bnxt *bp, uint16_t dir_type,
 
 	dma_handle = rte_malloc_virt2iova(buf);
 	if (dma_handle == RTE_BAD_IOVA) {
+		rte_free(buf);
 		PMD_DRV_LOG(ERR,
 			"unable to map response address to physical memory\n");
 		return -ENOMEM;
@@ -4635,10 +5081,9 @@ int bnxt_hwrm_clear_ntuple_filter(struct bnxt *bp,
 }
 
 static int
-bnxt_vnic_rss_configure_thor(struct bnxt *bp, struct bnxt_vnic_info *vnic)
+bnxt_vnic_rss_configure_p5(struct bnxt *bp, struct bnxt_vnic_info *vnic)
 {
 	struct hwrm_vnic_rss_cfg_output *resp = bp->hwrm_cmd_resp_addr;
-	uint8_t *rx_queue_state = bp->eth_dev->data->rx_queue_state;
 	struct hwrm_vnic_rss_cfg_input req = {.req_type = 0 };
 	struct bnxt_rx_queue **rxqs = bp->rx_queues;
 	uint16_t *ring_tbl = vnic->rss_table;
@@ -4659,7 +5104,7 @@ bnxt_vnic_rss_configure_thor(struct bnxt *bp, struct bnxt_vnic_info *vnic)
 
 		req.ring_grp_tbl_addr =
 		    rte_cpu_to_le_64(vnic->rss_table_dma_addr +
-				     i * BNXT_RSS_ENTRIES_PER_CTX_THOR *
+				     i * BNXT_RSS_ENTRIES_PER_CTX_P5 *
 				     2 * sizeof(*ring_tbl));
 		req.hash_key_tbl_addr =
 		    rte_cpu_to_le_64(vnic->rss_hash_key_dma_addr);
@@ -4672,8 +5117,7 @@ bnxt_vnic_rss_configure_thor(struct bnxt *bp, struct bnxt_vnic_info *vnic)
 
 			/* Find next active ring. */
 			for (cnt = 0; cnt < max_rings; cnt++) {
-				if (rx_queue_state[k] !=
-						RTE_ETH_QUEUE_STATE_STOPPED)
+				if (rxqs[k]->rx_started)
 					break;
 				if (++k == max_rings)
 					k = 0;
@@ -4711,37 +5155,35 @@ int bnxt_vnic_rss_configure(struct bnxt *bp, struct bnxt_vnic_info *vnic)
 {
 	unsigned int rss_idx, fw_idx, i;
 
-	if (!(vnic->rss_table && vnic->hash_type))
+	if (vnic->fw_vnic_id == INVALID_HW_RING_ID)
 		return 0;
 
-	if (BNXT_CHIP_THOR(bp))
-		return bnxt_vnic_rss_configure_thor(bp, vnic);
-
-	if (vnic->fw_vnic_id == INVALID_HW_RING_ID)
+	if (!(vnic->rss_table && vnic->hash_type))
 		return 0;
 
-	if (vnic->rss_table && vnic->hash_type) {
-		/*
-		 * Fill the RSS hash & redirection table with
-		 * ring group ids for all VNICs
-		 */
-		for (rss_idx = 0, fw_idx = 0; rss_idx < HW_HASH_INDEX_SIZE;
-			rss_idx++, fw_idx++) {
-			for (i = 0; i < bp->rx_cp_nr_rings; i++) {
-				fw_idx %= bp->rx_cp_nr_rings;
-				if (vnic->fw_grp_ids[fw_idx] !=
-				    INVALID_HW_RING_ID)
-					break;
-				fw_idx++;
-			}
-			if (i == bp->rx_cp_nr_rings)
-				return 0;
-			vnic->rss_table[rss_idx] = vnic->fw_grp_ids[fw_idx];
+	if (BNXT_CHIP_P5(bp))
+		return bnxt_vnic_rss_configure_p5(bp, vnic);
+
+	/*
+	 * Fill the RSS hash & redirection table with
+	 * ring group ids for all VNICs
+	 */
+	for (rss_idx = 0, fw_idx = 0; rss_idx < HW_HASH_INDEX_SIZE;
+	     rss_idx++, fw_idx++) {
+		for (i = 0; i < bp->rx_cp_nr_rings; i++) {
+			fw_idx %= bp->rx_cp_nr_rings;
+			if (vnic->fw_grp_ids[fw_idx] != INVALID_HW_RING_ID)
+				break;
+			fw_idx++;
 		}
-		return bnxt_hwrm_vnic_rss_cfg(bp, vnic);
+
+		if (i == bp->rx_cp_nr_rings)
+			return 0;
+
+		vnic->rss_table[rss_idx] = vnic->fw_grp_ids[fw_idx];
 	}
 
-	return 0;
+	return bnxt_hwrm_vnic_rss_cfg(bp, vnic);
 }
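A small self-contained sketch of the redirection-table fill above (example group ids invented; the real loop walks HW_HASH_INDEX_SIZE entries over bp->rx_cp_nr_rings groups):

	void rss_fill_example(void)
	{
		uint16_t grp[4] = { 5, INVALID_HW_RING_ID, 9, 12 };	/* one stopped queue */
		uint16_t tbl[128];	/* stands in for HW_HASH_INDEX_SIZE entries */
		unsigned int t, g;

		for (t = 0, g = 0; t < 128; t++, g++) {
			while (grp[g % 4] == INVALID_HW_RING_ID)
				g++;			/* skip invalid ring groups */
			tbl[t] = grp[g % 4];		/* 5, 9, 12, 5, 9, 12, ... */
		}
	}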
 
 static void bnxt_hwrm_set_coal_params(struct bnxt_coal *hw_coal,
@@ -4774,7 +5216,7 @@ static void bnxt_hwrm_set_coal_params(struct bnxt_coal *hw_coal,
 	req->flags = rte_cpu_to_le_16(flags);
 }
 
-static int bnxt_hwrm_set_coal_params_thor(struct bnxt *bp,
+static int bnxt_hwrm_set_coal_params_p5(struct bnxt *bp,
 		struct hwrm_ring_cmpl_ring_cfg_aggint_params_input *agg_req)
 {
 	struct hwrm_ring_aggint_qcaps_input req = {0};
@@ -4811,8 +5253,8 @@ int bnxt_hwrm_set_ring_coal(struct bnxt *bp,
 	int rc;
 
 	/* Set ring coalesce parameters only for 100G NICs */
-	if (BNXT_CHIP_THOR(bp)) {
-		if (bnxt_hwrm_set_coal_params_thor(bp, &req))
+	if (BNXT_CHIP_P5(bp)) {
+		if (bnxt_hwrm_set_coal_params_p5(bp, &req))
 			return -1;
 	} else if (bnxt_stratus_device(bp)) {
 		bnxt_hwrm_set_coal_params(coal, &req);
@@ -4841,7 +5283,7 @@ int bnxt_hwrm_func_backing_store_qcaps(struct bnxt *bp)
 	int total_alloc_len;
 	int rc, i, tqm_rings;
 
-	if (!BNXT_CHIP_THOR(bp) ||
+	if (!BNXT_CHIP_P5(bp) ||
 	    bp->hwrm_spec_code < HWRM_VERSION_1_9_2 ||
 	    BNXT_VF(bp) ||
 	    bp->ctx)
@@ -4896,8 +5338,21 @@ int bnxt_hwrm_func_backing_store_qcaps(struct bnxt *bp)
 	ctx->tim_max_entries = rte_le_to_cpu_32(resp->tim_max_entries);
 	ctx->tqm_fp_rings_count = resp->tqm_fp_rings_count;
-	if (!ctx->tqm_fp_rings_count)
-		ctx->tqm_fp_rings_count = bp->max_q;
+	ctx->tqm_fp_rings_count = ctx->tqm_fp_rings_count ?
+				  RTE_MIN(ctx->tqm_fp_rings_count,
+					  BNXT_MAX_TQM_FP_LEGACY_RINGS) :
+				  bp->max_q;
+
+	/* Check if the ext ring count needs to be counted.
+	 * Ext ring count is available only with new FW so we should not
+	 * look at the field on older FW.
+	 */
+	if (ctx->tqm_fp_rings_count == BNXT_MAX_TQM_FP_LEGACY_RINGS &&
+	    bp->hwrm_max_ext_req_len >= BNXT_BACKING_STORE_CFG_LEN) {
+		ctx->tqm_fp_rings_count += resp->tqm_fp_rings_count_ext;
+		ctx->tqm_fp_rings_count = RTE_MIN(BNXT_MAX_TQM_FP_RINGS,
+						  ctx->tqm_fp_rings_count);
+	}
 
 	tqm_rings = ctx->tqm_fp_rings_count + 1;
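Hedged worked cases for the clamp above, assuming the bnxt.h limits are BNXT_MAX_TQM_FP_LEGACY_RINGS == 8 and BNXT_MAX_TQM_FP_RINGS == 9 (values not restated by this patch):

	/* FW reports 0 (old FW)             -> fall back to bp->max_q.
	 * FW reports 10 in the legacy field -> clamped to 8.
	 * FW reports 8, ext field = 1, and the FW accepts long requests
	 *                                   -> 8 + 1 = 9, clamped to 9.
	 * tqm_rings = count + 1 then accounts for the slow-path ring.
	 */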
@@ -5008,6 +5463,18 @@ int bnxt_hwrm_func_backing_store_cfg(struct bnxt *bp, uint32_t enables)
 		bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem, pg_attr, pg_dir);
 	}
 
+	if (enables & HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_TQM_RING8) {
+		/* DPDK does not need to configure MRAV and TIM type.
+		 * So we are skipping over MRAV and TIM. Skip to configure
+		 * HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_TQM_RING8.
+		 */
+		ctx_pg = ctx->tqm_mem[BNXT_MAX_TQM_LEGACY_RINGS];
+		req.tqm_ring8_num_entries = rte_cpu_to_le_16(ctx_pg->entries);
+		bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
+				      &req.tqm_ring8_pg_size_tqm_ring_lvl,
+				      &req.tqm_ring8_page_dir);
+	}
+
 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
 	HWRM_CHECK_RESULT();
 	HWRM_UNLOCK();
@@ -5508,10 +5975,33 @@ int bnxt_hwrm_cfa_counter_qstats(struct bnxt *bp,
 	return 0;
 }
 
-int bnxt_hwrm_cfa_vfr_alloc(struct bnxt *bp, uint16_t vf_idx)
+int bnxt_hwrm_first_vf_id_query(struct bnxt *bp, uint16_t fid,
+				uint16_t *first_vf_id)
 {
-	struct hwrm_cfa_vfr_alloc_output *resp = bp->hwrm_cmd_resp_addr;
-	struct hwrm_cfa_vfr_alloc_input req = {0};
+	int rc = 0;
+	struct hwrm_func_qcaps_input req = {.req_type = 0 };
+	struct hwrm_func_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
+
+	HWRM_PREP(&req, HWRM_FUNC_QCAPS, BNXT_USE_CHIMP_MB);
+
+	req.fid = rte_cpu_to_le_16(fid);
+
+	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
+
+	HWRM_CHECK_RESULT();
+
+	if (first_vf_id)
+		*first_vf_id = rte_le_to_cpu_16(resp->first_vf_id);
+
+	HWRM_UNLOCK();
+
+	return rc;
+}
+
+int bnxt_hwrm_cfa_pair_alloc(struct bnxt *bp, struct bnxt_representor *rep_bp)
+{
+	struct hwrm_cfa_pair_alloc_output *resp = bp->hwrm_cmd_resp_addr;
+	struct hwrm_cfa_pair_alloc_input req = {0};
 	int rc;
 
 	if (!(BNXT_PF(bp) || BNXT_VF_IS_TRUSTED(bp))) {
@@ -5520,23 +6010,44 @@ int bnxt_hwrm_cfa_vfr_alloc(struct bnxt *bp, uint16_t vf_idx)
 		return 0;
 	}
 
-	HWRM_PREP(&req, HWRM_CFA_VFR_ALLOC, BNXT_USE_CHIMP_MB);
-	req.vf_id = rte_cpu_to_le_16(vf_idx);
-	snprintf(req.vfr_name, sizeof(req.vfr_name), "%svfr%d",
-		 bp->eth_dev->data->name, vf_idx);
+	HWRM_PREP(&req, HWRM_CFA_PAIR_ALLOC, BNXT_USE_CHIMP_MB);
+	req.pair_mode = HWRM_CFA_PAIR_FREE_INPUT_PAIR_MODE_REP2FN_TRUFLOW;
+	snprintf(req.pair_name, sizeof(req.pair_name), "%svfr%d",
+		 bp->eth_dev->data->name, rep_bp->vf_id);
+
+	req.pf_b_id = rep_bp->parent_pf_idx;
+	req.vf_b_id = BNXT_REP_PF(rep_bp) ? rte_cpu_to_le_16(((uint16_t)-1)) :
+					    rte_cpu_to_le_16(rep_bp->vf_id);
+	req.vf_a_id = rte_cpu_to_le_16(bp->fw_fid);
+	req.host_b_id = 1; /* TBD - Confirm if this is OK */
+
+	req.enables |= rep_bp->flags & BNXT_REP_Q_R2F_VALID ?
+		HWRM_CFA_PAIR_ALLOC_INPUT_ENABLES_Q_AB_VALID : 0;
+	req.enables |= rep_bp->flags & BNXT_REP_Q_F2R_VALID ?
+		HWRM_CFA_PAIR_ALLOC_INPUT_ENABLES_Q_BA_VALID : 0;
+	req.enables |= rep_bp->flags & BNXT_REP_FC_R2F_VALID ?
+		HWRM_CFA_PAIR_ALLOC_INPUT_ENABLES_FC_AB_VALID : 0;
+	req.enables |= rep_bp->flags & BNXT_REP_FC_F2R_VALID ?
+		HWRM_CFA_PAIR_ALLOC_INPUT_ENABLES_FC_BA_VALID : 0;
+
+	req.q_ab = rep_bp->rep_q_r2f;
+	req.q_ba = rep_bp->rep_q_f2r;
+	req.fc_ab = rep_bp->rep_fc_r2f;
+	req.fc_ba = rep_bp->rep_fc_f2r;
 
 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
 	HWRM_CHECK_RESULT();
 
 	HWRM_UNLOCK();
-	PMD_DRV_LOG(DEBUG, "VFR %d allocated\n", vf_idx);
+	PMD_DRV_LOG(DEBUG, "%s %d allocated\n",
+		    BNXT_REP_PF(rep_bp) ? "PFR" : "VFR", rep_bp->vf_id);
 	return rc;
 }
 
-int bnxt_hwrm_cfa_vfr_free(struct bnxt *bp, uint16_t vf_idx)
+int bnxt_hwrm_cfa_pair_free(struct bnxt *bp, struct bnxt_representor *rep_bp)
 {
-	struct hwrm_cfa_vfr_free_output *resp = bp->hwrm_cmd_resp_addr;
-	struct hwrm_cfa_vfr_free_input req = {0};
+	struct hwrm_cfa_pair_free_output *resp = bp->hwrm_cmd_resp_addr;
+	struct hwrm_cfa_pair_free_input req = {0};
 	int rc;
 
 	if (!(BNXT_PF(bp) || BNXT_VF_IS_TRUSTED(bp))) {
@@ -5545,14 +6056,174 @@ int bnxt_hwrm_cfa_vfr_free(struct bnxt *bp, uint16_t vf_idx)
 		return 0;
 	}
 
-	HWRM_PREP(&req, HWRM_CFA_VFR_FREE, BNXT_USE_CHIMP_MB);
-	req.vf_id = rte_cpu_to_le_16(vf_idx);
-	snprintf(req.vfr_name, sizeof(req.vfr_name), "%svfr%d",
-		 bp->eth_dev->data->name, vf_idx);
+	HWRM_PREP(&req, HWRM_CFA_PAIR_FREE, BNXT_USE_CHIMP_MB);
+	snprintf(req.pair_name, sizeof(req.pair_name), "%svfr%d",
+		 bp->eth_dev->data->name, rep_bp->vf_id);
+	req.pf_b_id = rep_bp->parent_pf_idx;
+	req.pair_mode = HWRM_CFA_PAIR_FREE_INPUT_PAIR_MODE_REP2FN_TRUFLOW;
+	req.vf_id = BNXT_REP_PF(rep_bp) ? rte_cpu_to_le_16(((uint16_t)-1)) :
+					  rte_cpu_to_le_16(rep_bp->vf_id);
+	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
+	HWRM_CHECK_RESULT();
+	HWRM_UNLOCK();
+	PMD_DRV_LOG(DEBUG, "%s %d freed\n", BNXT_REP_PF(rep_bp) ? "PFR" : "VFR",
+		    rep_bp->vf_id);
+	return rc;
+}
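Both directions rely on the same pair_name string built above; an illustrative value (device name and VF id invented, real buffer size comes from the HWRM request struct):

	char pair_name[sizeof("0000:03:00.0vfr2")];

	snprintf(pair_name, sizeof(pair_name), "%svfr%d", "0000:03:00.0", 2);
	/* pair_name == "0000:03:00.0vfr2" for both CFA_PAIR_ALLOC and CFA_PAIR_FREE */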
Command not supported\n"); + return 0; + } + + HWRM_PREP(&req, HWRM_CFA_ADV_FLOW_MGNT_QCAPS, BNXT_USE_CHIMP_MB); + rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB); + + HWRM_CHECK_RESULT(); + flags = rte_le_to_cpu_32(resp->flags); + HWRM_UNLOCK(); + + if (flags & HWRM_CFA_ADV_FLOW_MGNT_QCAPS_RFS_RING_TBL_IDX_V2_SUPPORTED) + bp->flags |= BNXT_FLAG_FLOW_CFA_RFS_RING_TBL_IDX_V2; + else + bp->flags |= BNXT_FLAG_RFS_NEEDS_VNIC; + + return rc; +} + +int bnxt_hwrm_fw_echo_reply(struct bnxt *bp, uint32_t echo_req_data1, + uint32_t echo_req_data2) +{ + struct hwrm_func_echo_response_input req = {0}; + struct hwrm_func_echo_response_output *resp = bp->hwrm_cmd_resp_addr; + int rc; + + HWRM_PREP(&req, HWRM_FUNC_ECHO_RESPONSE, BNXT_USE_CHIMP_MB); + req.event_data1 = rte_cpu_to_le_32(echo_req_data1); + req.event_data2 = rte_cpu_to_le_32(echo_req_data2); rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB); + HWRM_CHECK_RESULT(); HWRM_UNLOCK(); - PMD_DRV_LOG(DEBUG, "VFR %d freed\n", vf_idx); + + return rc; +} + +int bnxt_hwrm_poll_ver_get(struct bnxt *bp) +{ + struct hwrm_ver_get_input req = {.req_type = 0 }; + struct hwrm_ver_get_output *resp = bp->hwrm_cmd_resp_addr; + int rc = 0; + + bp->max_req_len = HWRM_MAX_REQ_LEN; + bp->max_resp_len = BNXT_PAGE_SIZE; + bp->hwrm_cmd_timeout = SHORT_HWRM_CMD_TIMEOUT; + + HWRM_PREP(&req, HWRM_VER_GET, BNXT_USE_CHIMP_MB); + req.hwrm_intf_maj = HWRM_VERSION_MAJOR; + req.hwrm_intf_min = HWRM_VERSION_MINOR; + req.hwrm_intf_upd = HWRM_VERSION_UPDATE; + + rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB); + + HWRM_CHECK_RESULT_SILENT(); + + if (resp->flags & HWRM_VER_GET_OUTPUT_FLAGS_DEV_NOT_RDY) + rc = -EAGAIN; + + HWRM_UNLOCK(); + + return rc; +} + +int bnxt_hwrm_read_sfp_module_eeprom_info(struct bnxt *bp, uint16_t i2c_addr, + uint16_t page_number, uint16_t start_addr, + uint16_t data_length, uint8_t *buf) +{ + struct hwrm_port_phy_i2c_read_output *resp = bp->hwrm_cmd_resp_addr; + struct hwrm_port_phy_i2c_read_input req = {0}; + uint32_t enables = HWRM_PORT_PHY_I2C_READ_INPUT_ENABLES_PAGE_OFFSET; + int rc, byte_offset = 0; + + do { + uint16_t xfer_size; + + HWRM_PREP(&req, HWRM_PORT_PHY_I2C_READ, BNXT_USE_CHIMP_MB); + req.i2c_slave_addr = i2c_addr; + req.page_number = rte_cpu_to_le_16(page_number); + req.port_id = rte_cpu_to_le_16(bp->pf->port_id); + + xfer_size = RTE_MIN(data_length, BNXT_MAX_PHY_I2C_RESP_SIZE); + req.page_offset = rte_cpu_to_le_16(start_addr + byte_offset); + req.data_length = xfer_size; + req.enables = rte_cpu_to_le_32(start_addr + byte_offset ? 
+
+void bnxt_free_hwrm_tx_ring(struct bnxt *bp, int queue_index)
+{
+	struct bnxt_tx_queue *txq = bp->tx_queues[queue_index];
+	struct bnxt_tx_ring_info *txr = txq->tx_ring;
+	struct bnxt_ring *ring = txr->tx_ring_struct;
+	struct bnxt_cp_ring_info *cpr = txq->cp_ring;
+
+	bnxt_hwrm_ring_free(bp, ring,
+			    HWRM_RING_FREE_INPUT_RING_TYPE_TX,
+			    cpr->cp_ring_struct->fw_ring_id);
+	txr->tx_raw_prod = 0;
+	txr->tx_raw_cons = 0;
+	memset(txr->tx_desc_ring, 0,
+	       txr->tx_ring_struct->ring_size * sizeof(*txr->tx_desc_ring));
+	memset(txr->tx_buf_ring, 0,
+	       txr->tx_ring_struct->ring_size * sizeof(*txr->tx_buf_ring));
+
+	bnxt_hwrm_stat_ctx_free(bp, cpr);
+
+	bnxt_free_cp_ring(bp, cpr);
+}
+
+int bnxt_hwrm_config_host_mtu(struct bnxt *bp)
+{
+	struct hwrm_func_cfg_input req = {0};
+	struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
+	int rc;
+
+	if (!BNXT_PF(bp))
+		return 0;
+
+	HWRM_PREP(&req, HWRM_FUNC_CFG, BNXT_USE_CHIMP_MB);
+
+	req.fid = rte_cpu_to_le_16(0xffff);
+	req.enables = rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_HOST_MTU);
+	req.host_mtu = rte_cpu_to_le_16(bp->eth_dev->data->mtu);
+
+	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
+	HWRM_CHECK_RESULT();
+	HWRM_UNLOCK();
+
+	return rc;
+}