diff --git a/drivers/net/bnxt/bnxt_hwrm.c b/drivers/net/bnxt/bnxt_hwrm.c
index 0e96d3c4d7..69c83cdab9 100644
--- a/drivers/net/bnxt/bnxt_hwrm.c
+++ b/drivers/net/bnxt/bnxt_hwrm.c
@@ -1,34 +1,6 @@
-/*-
- *   BSD LICENSE
- *
- *   Copyright(c) Broadcom Limited.
- *   All rights reserved.
- *
- *   Redistribution and use in source and binary forms, with or without
- *   modification, are permitted provided that the following conditions
- *   are met:
- *
- *     * Redistributions of source code must retain the above copyright
- *       notice, this list of conditions and the following disclaimer.
- *     * Redistributions in binary form must reproduce the above copyright
- *       notice, this list of conditions and the following disclaimer in
- *       the documentation and/or other materials provided with the
- *       distribution.
- *     * Neither the name of Broadcom Corporation nor the names of its
- *       contributors may be used to endorse or promote products derived
- *       from this software without specific prior written permission.
- *
- *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2014-2018 Broadcom
+ * All rights reserved.
  */
 
 #include <unistd.h>
@@ -55,6 +27,8 @@
 #include <rte_io.h>
 
 #define HWRM_CMD_TIMEOUT		10000
+#define HWRM_SPEC_CODE_1_8_3		0x10803
+#define HWRM_VERSION_1_9_1		0x10901
 
 struct bnxt_plcmodes_cfg {
 	uint32_t	flags;
@@ -79,7 +53,7 @@ static int page_getenum(size_t size)
 		return 22;
 	if (size <= 1 << 30)
 		return 30;
-	RTE_LOG(ERR, PMD, "Page size %zu out of range\n", size);
+	PMD_DRV_LOG(ERR, "Page size %zu out of range\n", size);
 	return sizeof(void *) * 8 - 1;
 }
 
@@ -115,7 +89,7 @@ static int bnxt_hwrm_send_message(struct bnxt *bp, void *msg,
 
 		short_input.req_type = rte_cpu_to_le_16(req->req_type);
 		short_input.signature = rte_cpu_to_le_16(
-					HWRM_SHORT_REQ_SIGNATURE_SHORT_CMD);
+					HWRM_SHORT_INPUT_SIGNATURE_SHORT_CMD);
 		short_input.size = rte_cpu_to_le_16(msg_len);
 		short_input.req_addr =
 			rte_cpu_to_le_64(bp->hwrm_short_cmd_req_dma_addr);
@@ -161,7 +135,7 @@ static int bnxt_hwrm_send_message(struct bnxt *bp, void *msg,
 	}
 
 	if (i >= HWRM_CMD_TIMEOUT) {
-		RTE_LOG(ERR, PMD, "Error sending msg 0x%04x\n",
+		PMD_DRV_LOG(ERR, "Error sending msg 0x%04x\n",
 			req->req_type);
 		goto err_ret;
 	}
@@ -192,10 +166,21 @@ err_ret:
 	req.resp_addr = rte_cpu_to_le_64(bp->hwrm_cmd_resp_dma_addr); \
 } while (0)
 
+#define HWRM_CHECK_RESULT_SILENT() do {\
+	if (rc) { \
+		rte_spinlock_unlock(&bp->hwrm_lock); \
+		return rc; \
+	} \
+	if (resp->error_code) { \
+		rc = rte_le_to_cpu_16(resp->error_code); \
+		rte_spinlock_unlock(&bp->hwrm_lock); \
+		return rc; \
+	} \
+} while (0)
+
 #define HWRM_CHECK_RESULT() do {\
 	if (rc) { \
-		RTE_LOG(ERR, PMD, "%s failed rc:%d\n", \
-			__func__, rc); \
+		PMD_DRV_LOG(ERR, "failed rc:%d\n", rc); \
 		rte_spinlock_unlock(&bp->hwrm_lock); \
 		return rc; \
 	} \
@@ -204,18 +189,15 @@ err_ret:
 		if (resp->resp_len >= 16) { \
 			struct hwrm_err_output *tmp_hwrm_err_op = \
 						(void *)resp; \
-			RTE_LOG(ERR, PMD, \
-				"%s error %d:%d:%08x:%04x\n", \
-				__func__, \
+			PMD_DRV_LOG(ERR, \
+				"error %d:%d:%08x:%04x\n", \
 				rc, tmp_hwrm_err_op->cmd_err, \
 				rte_le_to_cpu_32(\
 					tmp_hwrm_err_op->opaque_0), \
 				rte_le_to_cpu_16(\
 					tmp_hwrm_err_op->opaque_1)); \
-		} \
-		else { \
-			RTE_LOG(ERR, PMD, \
-				"%s error %d\n", __func__, rc); \
+		} else { \
+			PMD_DRV_LOG(ERR, "error %d\n", rc); \
 		} \
 		rte_spinlock_unlock(&bp->hwrm_lock); \
 		return rc; \
@@ -252,6 +234,9 @@ int bnxt_hwrm_cfa_l2_set_rx_mask(struct bnxt *bp,
 	struct hwrm_cfa_l2_set_rx_mask_output *resp = bp->hwrm_cmd_resp_addr;
 	uint32_t mask = 0;
 
+	if (vnic->fw_vnic_id == INVALID_HW_RING_ID)
+		return rc;
+
 	HWRM_PREP(req, CFA_L2_SET_RX_MASK);
 	req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
 
@@ -277,7 +262,7 @@ int bnxt_hwrm_cfa_l2_set_rx_mask(struct bnxt *bp,
 		if (!(mask & HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_VLAN_NONVLAN))
 			mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_VLANONLY;
 		req.vlan_tag_tbl_addr = rte_cpu_to_le_64(
-					 rte_mem_virt2phy(vlan_table));
+					 rte_mem_virt2iova(vlan_table));
 		req.num_vlan_tags = rte_cpu_to_le_32((uint32_t)vlan_count);
 	}
 	req.mask = rte_cpu_to_le_32(mask);
@@ -318,7 +303,7 @@ int bnxt_hwrm_cfa_vlan_antispoof_cfg(struct bnxt *bp, uint16_t fid,
 
 	req.fid = rte_cpu_to_le_16(fid);
 	req.vlan_tag_mask_tbl_addr =
-		rte_cpu_to_le_64(rte_mem_virt2phy(vlan_table));
+		rte_cpu_to_le_64(rte_mem_virt2iova(vlan_table));
 	req.num_vlan_entries = rte_cpu_to_le_32((uint32_t)vlan_count);
 
 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
@@ -348,7 +333,7 @@ int bnxt_hwrm_clear_l2_filter(struct bnxt *bp,
 	HWRM_CHECK_RESULT();
 	HWRM_UNLOCK();
 
-	filter->fw_l2_filter_id = -1;
+	filter->fw_l2_filter_id = UINT64_MAX;
 
 	return 0;
 }
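The hunks above rework the send/poll/check path around PMD_DRV_LOG and add a silent variant of the result check for requests that are allowed to fail. For readers unfamiliar with the pattern, a minimal standalone sketch of the poll-with-timeout idiom used by bnxt_hwrm_send_message follows; the device_done() helper is a hypothetical stand-in for testing the valid bit of the HWRM response in DMA memory, not part of the driver.

#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

#define CMD_TIMEOUT 10000 /* poll iterations, mirroring HWRM_CMD_TIMEOUT */

/* Hypothetical completion check standing in for the real
 * valid-bit test on the DMA'd response buffer. */
static bool device_done(void) { return true; }

static int send_and_poll(void)
{
	int i;

	for (i = 0; i < CMD_TIMEOUT; i++) {
		if (device_done())
			break;
		usleep(1); /* give firmware time to respond */
	}
	if (i >= CMD_TIMEOUT) {
		fprintf(stderr, "Error sending msg: timeout\n");
		return -1; /* caller maps this to an errno */
	}
	return 0;
}

int main(void) { return send_and_poll(); }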
@@ -367,8 +352,9 @@ int bnxt_hwrm_set_l2_filter(struct bnxt *bp,
 	uint16_t j = dst_id - 1;
 
 	//TODO: Is there a better way to add VLANs to each VNIC in case of VMDQ
-	if (conf->pool_map[j].pools & (1UL << j)) {
-		RTE_LOG(DEBUG, PMD,
+	if ((dev_conf->rxmode.mq_mode & ETH_MQ_RX_VMDQ_FLAG) &&
+	    conf->pool_map[j].pools & (1UL << j)) {
+		PMD_DRV_LOG(DEBUG,
 			"Add vlan %u to vmdq pool %u\n",
 			conf->pool_map[j].vlan_id, j);
 
@@ -402,13 +388,13 @@ int bnxt_hwrm_set_l2_filter(struct bnxt *bp,
 		req.l2_ovlan = filter->l2_ovlan;
 	if (enables &
 	    HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_IVLAN)
-		req.l2_ovlan = filter->l2_ivlan;
+		req.l2_ivlan = filter->l2_ivlan;
 	if (enables &
 	    HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_OVLAN_MASK)
 		req.l2_ovlan_mask = filter->l2_ovlan_mask;
 	if (enables &
 	    HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_IVLAN_MASK)
-		req.l2_ovlan_mask = filter->l2_ivlan_mask;
+		req.l2_ivlan_mask = filter->l2_ivlan_mask;
 	if (enables & HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_SRC_ID)
 		req.src_id = rte_cpu_to_le_32(filter->src_id);
 	if (enables & HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_SRC_TYPE)
@@ -426,12 +412,97 @@ int bnxt_hwrm_set_l2_filter(struct bnxt *bp,
 	return rc;
 }
 
-int bnxt_hwrm_func_qcaps(struct bnxt *bp)
+int bnxt_hwrm_ptp_cfg(struct bnxt *bp)
+{
+	struct hwrm_port_mac_cfg_input req = {.req_type = 0};
+	struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
+	uint32_t flags = 0;
+	int rc;
+
+	if (!ptp)
+		return 0;
+
+	HWRM_PREP(req, PORT_MAC_CFG);
+
+	if (ptp->rx_filter)
+		flags |= HWRM_PORT_MAC_CFG_INPUT_FLAGS_PTP_RX_TS_CAPTURE_ENABLE;
+	else
+		flags |=
+			HWRM_PORT_MAC_CFG_INPUT_FLAGS_PTP_RX_TS_CAPTURE_DISABLE;
+	if (ptp->tx_tstamp_en)
+		flags |= HWRM_PORT_MAC_CFG_INPUT_FLAGS_PTP_TX_TS_CAPTURE_ENABLE;
+	else
+		flags |=
+			HWRM_PORT_MAC_CFG_INPUT_FLAGS_PTP_TX_TS_CAPTURE_DISABLE;
+	req.flags = rte_cpu_to_le_32(flags);
+	req.enables = rte_cpu_to_le_32
+		(HWRM_PORT_MAC_CFG_INPUT_ENABLES_RX_TS_CAPTURE_PTP_MSG_TYPE);
+	req.rx_ts_capture_ptp_msg_type = rte_cpu_to_le_16(ptp->rxctl);
+
+	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+	HWRM_UNLOCK();
+
+	return rc;
+}
+
+static int bnxt_hwrm_ptp_qcfg(struct bnxt *bp)
+{
+	int rc = 0;
+	struct hwrm_port_mac_ptp_qcfg_input req = {.req_type = 0};
+	struct hwrm_port_mac_ptp_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
+	struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
+
+/*	if (bp->hwrm_spec_code < 0x10801 || ptp)  TBD  */
+	if (ptp)
+		return 0;
+
+	HWRM_PREP(req, PORT_MAC_PTP_QCFG);
+
+	req.port_id = rte_cpu_to_le_16(bp->pf.port_id);
+
+	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+
+	HWRM_CHECK_RESULT();
+
+	if (!(resp->flags & HWRM_PORT_MAC_PTP_QCFG_OUTPUT_FLAGS_DIRECT_ACCESS))
+		return 0;
+
+	ptp = rte_zmalloc("ptp_cfg", sizeof(*ptp), 0);
+	if (!ptp)
+		return -ENOMEM;
+
+	ptp->rx_regs[BNXT_PTP_RX_TS_L] =
+		rte_le_to_cpu_32(resp->rx_ts_reg_off_lower);
+	ptp->rx_regs[BNXT_PTP_RX_TS_H] =
+		rte_le_to_cpu_32(resp->rx_ts_reg_off_upper);
+	ptp->rx_regs[BNXT_PTP_RX_SEQ] =
+		rte_le_to_cpu_32(resp->rx_ts_reg_off_seq_id);
+	ptp->rx_regs[BNXT_PTP_RX_FIFO] =
+		rte_le_to_cpu_32(resp->rx_ts_reg_off_fifo);
+	ptp->rx_regs[BNXT_PTP_RX_FIFO_ADV] =
+		rte_le_to_cpu_32(resp->rx_ts_reg_off_fifo_adv);
+	ptp->tx_regs[BNXT_PTP_TX_TS_L] =
+		rte_le_to_cpu_32(resp->tx_ts_reg_off_lower);
+	ptp->tx_regs[BNXT_PTP_TX_TS_H] =
+		rte_le_to_cpu_32(resp->tx_ts_reg_off_upper);
+	ptp->tx_regs[BNXT_PTP_TX_SEQ] =
+		rte_le_to_cpu_32(resp->tx_ts_reg_off_seq_id);
+	ptp->tx_regs[BNXT_PTP_TX_FIFO] =
+		rte_le_to_cpu_32(resp->tx_ts_reg_off_fifo);
+
+	ptp->bp = bp;
+	bp->ptp_cfg = ptp;
+
+	return 0;
+}
+
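bnxt_hwrm_ptp_qcfg above only records the register offsets for the RX/TX timestamp halves; reading a timestamp later means combining a 32-bit lower and a 32-bit upper half. A hedged sketch of that combination follows; the regs[] array and read32() accessor are illustrative stand-ins, not the driver's actual register I/O API.

#include <stdint.h>

/* Illustrative register file standing in for MMIO. */
static uint32_t regs[64];

static uint32_t read32(uint32_t off) { return regs[off]; }

/* Combine the _L and _H halves advertised by PORT_MAC_PTP_QCFG
 * into one 64-bit timestamp. */
static uint64_t read_ts(uint32_t lo_off, uint32_t hi_off)
{
	uint64_t ts = read32(lo_off);          /* lower 32 bits */
	ts |= (uint64_t)read32(hi_off) << 32;  /* upper 32 bits */
	return ts;
}

int main(void)
{
	regs[0] = 0x89abcdef;
	regs[1] = 0x01234567;
	return read_ts(0, 1) != 0x0123456789abcdefULL; /* 0 on success */
}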
+static int __bnxt_hwrm_func_qcaps(struct bnxt *bp)
 {
 	int rc = 0;
 	struct hwrm_func_qcaps_input req = {.req_type = 0 };
 	struct hwrm_func_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
 	uint16_t new_max_vfs;
+	uint32_t flags;
 	int i;
 
 	HWRM_PREP(req, FUNC_QCAPS);
@@ -443,9 +514,11 @@ int bnxt_hwrm_func_qcaps(struct bnxt *bp)
 	HWRM_CHECK_RESULT();
 
 	bp->max_ring_grps = rte_le_to_cpu_32(resp->max_hw_ring_grps);
+	flags = rte_le_to_cpu_32(resp->flags);
 	if (BNXT_PF(bp)) {
 		bp->pf.port_id = resp->port_id;
 		bp->pf.first_vf_id = rte_le_to_cpu_16(resp->first_vf_id);
+		bp->pf.total_vfs = rte_le_to_cpu_16(resp->max_vfs);
 		new_max_vfs = bp->pdev->max_vfs;
 		if (new_max_vfs != bp->pf.max_vfs) {
 			if (bp->pf.vf_info)
@@ -460,7 +533,7 @@ int bnxt_hwrm_func_qcaps(struct bnxt *bp)
 					getpagesize(), getpagesize());
 				if (bp->pf.vf_info[i].vlan_table == NULL)
-					RTE_LOG(ERR, PMD,
+					PMD_DRV_LOG(ERR,
 					"Fail to alloc VLAN table for VF %d\n",
 					i);
 				else
@@ -471,7 +544,7 @@ int bnxt_hwrm_func_qcaps(struct bnxt *bp)
 					getpagesize(), getpagesize());
 				if (bp->pf.vf_info[i].vlan_as_table == NULL)
-					RTE_LOG(ERR, PMD,
+					PMD_DRV_LOG(ERR,
 					"Alloc VLAN AS table for VF %d fail\n",
 					i);
 				else
@@ -499,13 +572,35 @@ int bnxt_hwrm_func_qcaps(struct bnxt *bp)
 		bp->max_vnics = 1;
 	}
 	bp->max_stat_ctx = rte_le_to_cpu_16(resp->max_stat_ctx);
-	if (BNXT_PF(bp))
+	if (BNXT_PF(bp)) {
 		bp->pf.total_vnics = rte_le_to_cpu_16(resp->max_vnics);
+		if (flags & HWRM_FUNC_QCAPS_OUTPUT_FLAGS_PTP_SUPPORTED) {
+			bp->flags |= BNXT_FLAG_PTP_SUPPORTED;
+			PMD_DRV_LOG(INFO, "PTP SUPPORTED\n");
+			HWRM_UNLOCK();
+			bnxt_hwrm_ptp_qcfg(bp);
+		}
+	}
+
 	HWRM_UNLOCK();
 
 	return rc;
 }
 
+int bnxt_hwrm_func_qcaps(struct bnxt *bp)
+{
+	int rc;
+
+	rc = __bnxt_hwrm_func_qcaps(bp);
+	if (!rc && bp->hwrm_spec_code >= HWRM_SPEC_CODE_1_8_3) {
+		rc = bnxt_hwrm_func_resc_qcaps(bp);
+		if (!rc)
+			bp->flags |= BNXT_FLAG_NEW_RM;
+	}
+
+	return rc;
+}
+
 int bnxt_hwrm_func_reset(struct bnxt *bp)
 {
 	int rc = 0;
@@ -542,14 +637,28 @@ int bnxt_hwrm_func_driver_register(struct bnxt *bp)
 
 	if (BNXT_PF(bp)) {
 		req.enables |= rte_cpu_to_le_32(
-			HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_VF_INPUT_FWD);
+			HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_VF_REQ_FWD);
 		memcpy(req.vf_req_fwd, bp->pf.vf_req_fwd,
 		       RTE_MIN(sizeof(req.vf_req_fwd),
 			       sizeof(bp->pf.vf_req_fwd)));
+
+		/*
+		 * PF can sniff HWRM API issued by VF. This can be set up by
+		 * linux driver and inherited by the DPDK PF driver. Clear
+		 * this HWRM sniffer list in FW because DPDK PF driver does
+		 * not support this.
+		 */
+		req.flags =
+		rte_cpu_to_le_32(HWRM_FUNC_DRV_RGTR_INPUT_FLAGS_FWD_NONE_MODE);
 	}
 
-	req.async_event_fwd[0] |= rte_cpu_to_le_32(0x1);   /* TODO: Use MACRO */
-	memset(req.async_event_fwd, 0xff, sizeof(req.async_event_fwd));
+	req.async_event_fwd[0] |=
+		rte_cpu_to_le_32(ASYNC_CMPL_EVENT_ID_LINK_STATUS_CHANGE |
+				 ASYNC_CMPL_EVENT_ID_PORT_CONN_NOT_ALLOWED |
+				 ASYNC_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE);
+	req.async_event_fwd[1] |=
+		rte_cpu_to_le_32(ASYNC_CMPL_EVENT_ID_PF_DRVR_UNLOAD |
+				 ASYNC_CMPL_EVENT_ID_VF_CFG_CHANGE);
 
 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
 
@@ -561,6 +670,105 @@ int bnxt_hwrm_func_driver_register(struct bnxt *bp)
 	return rc;
 }
 
+int bnxt_hwrm_check_vf_rings(struct bnxt *bp)
+{
+	if (!(BNXT_VF(bp) && (bp->flags & BNXT_FLAG_NEW_RM)))
+		return 0;
+
+	return bnxt_hwrm_func_reserve_vf_resc(bp, true);
+}
+
+int bnxt_hwrm_func_reserve_vf_resc(struct bnxt *bp, bool test)
+{
+	int rc;
+	uint32_t flags = 0;
+	uint32_t enables;
+	struct hwrm_func_vf_cfg_output *resp = bp->hwrm_cmd_resp_addr;
+	struct hwrm_func_vf_cfg_input req = {0};
+
+	HWRM_PREP(req, FUNC_VF_CFG);
+
+	req.enables = rte_cpu_to_le_32
+			(HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_RX_RINGS |
+			HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_TX_RINGS |
+			HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_STAT_CTXS |
+			HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_CMPL_RINGS |
+			HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_HW_RING_GRPS |
+			HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_VNICS);
+
+	req.num_tx_rings = rte_cpu_to_le_16(bp->tx_nr_rings);
+	req.num_rx_rings = rte_cpu_to_le_16(bp->rx_nr_rings *
+					    AGG_RING_MULTIPLIER);
+	req.num_stat_ctxs = rte_cpu_to_le_16(bp->rx_nr_rings + bp->tx_nr_rings);
+	req.num_cmpl_rings = rte_cpu_to_le_16(bp->rx_nr_rings +
+					      bp->tx_nr_rings);
+	req.num_hw_ring_grps = rte_cpu_to_le_16(bp->rx_nr_rings);
+	req.num_vnics = rte_cpu_to_le_16(bp->rx_nr_rings);
+	if (bp->vf_resv_strategy ==
+	    HWRM_FUNC_RESOURCE_QCAPS_OUTPUT_VF_RESV_STRATEGY_MINIMAL_STATIC) {
+		enables = HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_VNICS |
+				HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_L2_CTXS |
+				HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_RSSCOS_CTXS;
+		req.enables |= rte_cpu_to_le_32(enables);
+		req.num_rsscos_ctxs = rte_cpu_to_le_16(BNXT_VF_RSV_NUM_RSS_CTX);
+		req.num_l2_ctxs = rte_cpu_to_le_16(BNXT_VF_RSV_NUM_L2_CTX);
+		req.num_vnics = rte_cpu_to_le_16(BNXT_VF_RSV_NUM_VNIC);
+	}
+
+	if (test)
+		flags = HWRM_FUNC_VF_CFG_INPUT_FLAGS_TX_ASSETS_TEST |
+			HWRM_FUNC_VF_CFG_INPUT_FLAGS_RX_ASSETS_TEST |
+			HWRM_FUNC_VF_CFG_INPUT_FLAGS_CMPL_ASSETS_TEST |
+			HWRM_FUNC_VF_CFG_INPUT_FLAGS_RING_GRP_ASSETS_TEST |
+			HWRM_FUNC_VF_CFG_INPUT_FLAGS_STAT_CTX_ASSETS_TEST |
+			HWRM_FUNC_VF_CFG_INPUT_FLAGS_VNIC_ASSETS_TEST;
+
+	req.flags = rte_cpu_to_le_32(flags);
+
+	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+
+	if (test)
+		HWRM_CHECK_RESULT_SILENT();
+	else
+		HWRM_CHECK_RESULT();
+
+	HWRM_UNLOCK();
+	return rc;
+}
+
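The driver-register hunk above forwards selected async completion events by OR-ing per-event bits into async_event_fwd[0] and [1]. Assuming the usual convention that event ID n maps to bit (n % 32) of word (n / 32) — an assumption here, since the ASYNC_CMPL_EVENT_ID_* macros are defined elsewhere in the driver — the mapping can be sketched as:

#include <stdint.h>

#define FWD_WORDS 2

/* Set the forwarding bit for one async event ID in a two-word
 * bitmap (hypothetical layout: word = id / 32, bit = id % 32). */
static void fwd_event(uint32_t fwd[FWD_WORDS], unsigned int event_id)
{
	unsigned int word = event_id / 32;

	if (word < FWD_WORDS)
		fwd[word] |= UINT32_C(1) << (event_id % 32);
}

int main(void)
{
	uint32_t fwd[FWD_WORDS] = {0, 0};

	fwd_event(fwd, 0);  /* word 0, bit 0 */
	fwd_event(fwd, 33); /* word 1, bit 1 */
	return !(fwd[0] == 0x1 && fwd[1] == 0x2); /* 0 on success */
}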
+int bnxt_hwrm_func_resc_qcaps(struct bnxt *bp)
+{
+	int rc;
+	struct hwrm_func_resource_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
+	struct hwrm_func_resource_qcaps_input req = {0};
+
+	HWRM_PREP(req, FUNC_RESOURCE_QCAPS);
+	req.fid = rte_cpu_to_le_16(0xffff);
+
+	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+
+	HWRM_CHECK_RESULT();
+
+	if (BNXT_VF(bp)) {
+		bp->max_rsscos_ctx = rte_le_to_cpu_16(resp->max_rsscos_ctx);
+		bp->max_cp_rings = rte_le_to_cpu_16(resp->max_cmpl_rings);
+		bp->max_tx_rings = rte_le_to_cpu_16(resp->max_tx_rings);
+		bp->max_rx_rings = rte_le_to_cpu_16(resp->max_rx_rings);
+		bp->max_ring_grps = rte_le_to_cpu_32(resp->max_hw_ring_grps);
+		bp->max_l2_ctx = rte_le_to_cpu_16(resp->max_l2_ctxs);
+		bp->max_vnics = rte_le_to_cpu_16(resp->max_vnics);
+		bp->max_stat_ctx = rte_le_to_cpu_16(resp->max_stat_ctx);
+	}
+	bp->vf_resv_strategy = rte_le_to_cpu_16(resp->vf_reservation_strategy);
+	if (bp->vf_resv_strategy >
+	    HWRM_FUNC_RESOURCE_QCAPS_OUTPUT_VF_RESV_STRATEGY_MINIMAL_STATIC)
+		bp->vf_resv_strategy =
+		HWRM_FUNC_RESOURCE_QCAPS_OUTPUT_VF_RESERVATION_STRATEGY_MAXIMAL;
+
+	HWRM_UNLOCK();
+	return rc;
+}
+
 int bnxt_hwrm_ver_get(struct bnxt *bp)
 {
 	int rc = 0;
@@ -583,46 +791,49 @@ int bnxt_hwrm_ver_get(struct bnxt *bp)
 
 	HWRM_CHECK_RESULT();
 
-	RTE_LOG(INFO, PMD, "%d.%d.%d:%d.%d.%d\n",
-		resp->hwrm_intf_maj, resp->hwrm_intf_min,
-		resp->hwrm_intf_upd,
-		resp->hwrm_fw_maj, resp->hwrm_fw_min, resp->hwrm_fw_bld);
-	bp->fw_ver = (resp->hwrm_fw_maj << 24) | (resp->hwrm_fw_min << 16) |
-			(resp->hwrm_fw_bld << 8) | resp->hwrm_fw_rsvd;
-	RTE_LOG(INFO, PMD, "Driver HWRM version: %d.%d.%d\n",
+	PMD_DRV_LOG(INFO, "%d.%d.%d:%d.%d.%d\n",
+		resp->hwrm_intf_maj_8b, resp->hwrm_intf_min_8b,
+		resp->hwrm_intf_upd_8b, resp->hwrm_fw_maj_8b,
+		resp->hwrm_fw_min_8b, resp->hwrm_fw_bld_8b);
+	bp->fw_ver = (resp->hwrm_fw_maj_8b << 24) |
+		     (resp->hwrm_fw_min_8b << 16) |
+		     (resp->hwrm_fw_bld_8b << 8) |
+		     resp->hwrm_fw_rsvd_8b;
+	PMD_DRV_LOG(INFO, "Driver HWRM version: %d.%d.%d\n",
 		HWRM_VERSION_MAJOR, HWRM_VERSION_MINOR, HWRM_VERSION_UPDATE);
 
 	my_version = HWRM_VERSION_MAJOR << 16;
 	my_version |= HWRM_VERSION_MINOR << 8;
 	my_version |= HWRM_VERSION_UPDATE;
 
-	fw_version = resp->hwrm_intf_maj << 16;
-	fw_version |= resp->hwrm_intf_min << 8;
-	fw_version |= resp->hwrm_intf_upd;
+	fw_version = resp->hwrm_intf_maj_8b << 16;
+	fw_version |= resp->hwrm_intf_min_8b << 8;
+	fw_version |= resp->hwrm_intf_upd_8b;
+	bp->hwrm_spec_code = fw_version;
 
-	if (resp->hwrm_intf_maj != HWRM_VERSION_MAJOR) {
-		RTE_LOG(ERR, PMD, "Unsupported firmware API version\n");
+	if (resp->hwrm_intf_maj_8b != HWRM_VERSION_MAJOR) {
+		PMD_DRV_LOG(ERR, "Unsupported firmware API version\n");
 		rc = -EINVAL;
 		goto error;
 	}
 
 	if (my_version != fw_version) {
-		RTE_LOG(INFO, PMD, "BNXT Driver/HWRM API mismatch.\n");
+		PMD_DRV_LOG(INFO, "BNXT Driver/HWRM API mismatch.\n");
 		if (my_version < fw_version) {
-			RTE_LOG(INFO, PMD,
+			PMD_DRV_LOG(INFO,
 				"Firmware API version is newer than driver.\n");
-			RTE_LOG(INFO, PMD,
+			PMD_DRV_LOG(INFO,
 				"The driver may be missing features.\n");
 		} else {
-			RTE_LOG(INFO, PMD,
+			PMD_DRV_LOG(INFO,
 				"Firmware API version is older than driver.\n");
-			RTE_LOG(INFO, PMD,
+			PMD_DRV_LOG(INFO,
 				"Not all driver features may be functional.\n");
 		}
 	}
 
 	if (bp->max_req_len > resp->max_req_win_len) {
-		RTE_LOG(ERR, PMD, "Unsupported request length\n");
+		PMD_DRV_LOG(ERR, "Unsupported request length\n");
 		rc = -EINVAL;
 	}
 	bp->max_req_len = rte_le_to_cpu_16(resp->max_req_win_len);
@@ -643,9 +854,9 @@ int bnxt_hwrm_ver_get(struct bnxt *bp)
 	}
 	rte_mem_lock_page(bp->hwrm_cmd_resp_addr);
 	bp->hwrm_cmd_resp_dma_addr =
-		rte_mem_virt2phy(bp->hwrm_cmd_resp_addr);
+		rte_mem_virt2iova(bp->hwrm_cmd_resp_addr);
 	if (bp->hwrm_cmd_resp_dma_addr == 0) {
-		RTE_LOG(ERR, PMD,
+		PMD_DRV_LOG(ERR,
 			"Unable to map response buffer to physical memory.\n");
 		rc = -ENOMEM;
 		goto error;
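bnxt_hwrm_ver_get above packs the three 8-bit interface version fields into one comparable integer (major << 16 | minor << 8 | update), which is cached as hwrm_spec_code and later compared against constants such as HWRM_SPEC_CODE_1_8_3 (0x10803). A tiny worked example of the packing:

#include <stdint.h>
#include <stdio.h>

static uint32_t pack_ver(uint8_t maj, uint8_t min, uint8_t upd)
{
	return ((uint32_t)maj << 16) | ((uint32_t)min << 8) | upd;
}

int main(void)
{
	/* 1.8.3 packs to 0x10803, matching HWRM_SPEC_CODE_1_8_3 */
	printf("0x%x\n", pack_ver(1, 8, 3));
	return 0;
}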
@@ -656,8 +867,8 @@ int bnxt_hwrm_ver_get(struct bnxt *bp)
 	if ((dev_caps_cfg &
 		HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_SHORT_CMD_SUPPORTED) &&
 	    (dev_caps_cfg &
-		HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_SHORT_CMD_INPUTUIRED)) {
-		RTE_LOG(DEBUG, PMD, "Short command supported\n");
+		HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_SHORT_CMD_REQUIRED)) {
+		PMD_DRV_LOG(DEBUG, "Short command supported\n");
 
 		rte_free(bp->hwrm_short_cmd_req_addr);
 
@@ -669,10 +880,10 @@ int bnxt_hwrm_ver_get(struct bnxt *bp)
 		}
 		rte_mem_lock_page(bp->hwrm_short_cmd_req_addr);
 		bp->hwrm_short_cmd_req_dma_addr =
-			rte_mem_virt2phy(bp->hwrm_short_cmd_req_addr);
+			rte_mem_virt2iova(bp->hwrm_short_cmd_req_addr);
 		if (bp->hwrm_short_cmd_req_dma_addr == 0) {
 			rte_free(bp->hwrm_short_cmd_req_addr);
-			RTE_LOG(ERR, PMD,
+			PMD_DRV_LOG(ERR,
 				"Unable to map buffer to physical memory.\n");
 			rc = -ENOMEM;
 			goto error;
@@ -714,34 +925,39 @@ static int bnxt_hwrm_port_phy_cfg(struct bnxt *bp, struct bnxt_link_info *conf)
 	struct hwrm_port_phy_cfg_input req = {0};
 	struct hwrm_port_phy_cfg_output *resp = bp->hwrm_cmd_resp_addr;
 	uint32_t enables = 0;
-	uint32_t link_speed_mask =
-		HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_LINK_SPEED_MASK;
 
 	HWRM_PREP(req, PORT_PHY_CFG);
 
 	if (conf->link_up) {
+		/* Setting Fixed Speed. But AutoNeg is ON, So disable it */
+		if (bp->link_info.auto_mode && conf->link_speed) {
+			req.auto_mode = HWRM_PORT_PHY_CFG_INPUT_AUTO_MODE_NONE;
+			PMD_DRV_LOG(DEBUG, "Disabling AutoNeg\n");
+		}
+
 		req.flags = rte_cpu_to_le_32(conf->phy_flags);
 		req.force_link_speed = rte_cpu_to_le_16(conf->link_speed);
+		enables |= HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_MODE;
 		/*
 		 * Note, ChiMP FW 20.2.1 and 20.2.2 return an error when we set
 		 * any auto mode, even "none".
 		 */
 		if (!conf->link_speed) {
-			req.auto_mode = conf->auto_mode;
-			enables |= HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_MODE;
-			if (conf->auto_mode ==
-			    HWRM_PORT_PHY_CFG_INPUT_AUTO_MODE_SPEED_MASK) {
-				req.auto_link_speed_mask =
-					conf->auto_link_speed_mask;
-				enables |= link_speed_mask;
-			}
-			if (bp->link_info.auto_link_speed) {
-				req.auto_link_speed =
-					bp->link_info.auto_link_speed;
-				enables |=
-				HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_LINK_SPEED;
-			}
+			/* No speeds specified. Enable AutoNeg - all speeds */
+			req.auto_mode =
+				HWRM_PORT_PHY_CFG_INPUT_AUTO_MODE_ALL_SPEEDS;
 		}
+		/* AutoNeg - Advertise speeds specified. */
+		if (conf->auto_link_speed_mask &&
+		    !(conf->phy_flags & HWRM_PORT_PHY_CFG_INPUT_FLAGS_FORCE)) {
+			req.auto_mode =
+				HWRM_PORT_PHY_CFG_INPUT_AUTO_MODE_SPEED_MASK;
+			req.auto_link_speed_mask =
+				conf->auto_link_speed_mask;
+			enables |=
+			HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_LINK_SPEED_MASK;
+		}
+
 		req.auto_duplex = conf->duplex;
 		enables |= HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_DUPLEX;
 		req.auto_pause = conf->auto_pause;
@@ -756,7 +972,7 @@ static int bnxt_hwrm_port_phy_cfg(struct bnxt *bp, struct bnxt_link_info *conf)
 	} else {
 		req.flags =
 		rte_cpu_to_le_32(HWRM_PORT_PHY_CFG_INPUT_FLAGS_FORCE_LINK_DWN);
-		RTE_LOG(INFO, PMD, "Force Link Down\n");
+		PMD_DRV_LOG(INFO, "Force Link Down\n");
 	}
 
 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
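The reworked bnxt_hwrm_port_phy_cfg chooses between three mutually exclusive modes: disable autoneg when a fixed speed is requested, advertise all speeds when nothing is specified, or advertise only the caller's mask. A condensed sketch of that decision flow — the enum names here are illustrative, not the HWRM constants:

#include <stdint.h>

enum auto_mode { MODE_NONE, MODE_ALL_SPEEDS, MODE_SPEED_MASK };

/* Mirror of the decision in the hunk above: a non-zero fixed
 * speed wins, an empty request autonegotiates everything, and
 * an advertise mask restricts what is offered. */
static enum auto_mode pick_mode(uint16_t fixed_speed, uint16_t adv_mask)
{
	if (fixed_speed)
		return MODE_NONE;       /* force: autoneg off */
	if (!adv_mask)
		return MODE_ALL_SPEEDS; /* autoneg, all speeds */
	return MODE_SPEED_MASK;         /* autoneg, masked */
}

int main(void)
{
	return pick_mode(0, 0) != MODE_ALL_SPEEDS; /* 0 on success */
}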
@@ -785,21 +1001,33 @@ static int bnxt_hwrm_port_phy_qcfg(struct bnxt *bp,
 		(link_info->phy_link_status ==
 		 HWRM_PORT_PHY_QCFG_OUTPUT_LINK_LINK) ? 1 : 0;
 	link_info->link_speed = rte_le_to_cpu_16(resp->link_speed);
-	link_info->duplex = resp->duplex;
+	link_info->duplex = resp->duplex_cfg;
 	link_info->pause = resp->pause;
 	link_info->auto_pause = resp->auto_pause;
 	link_info->force_pause = resp->force_pause;
 	link_info->auto_mode = resp->auto_mode;
+	link_info->phy_type = resp->phy_type;
+	link_info->media_type = resp->media_type;
 
 	link_info->support_speeds = rte_le_to_cpu_16(resp->support_speeds);
 	link_info->auto_link_speed = rte_le_to_cpu_16(resp->auto_link_speed);
 	link_info->preemphasis = rte_le_to_cpu_32(resp->preemphasis);
+	link_info->force_link_speed = rte_le_to_cpu_16(resp->force_link_speed);
 	link_info->phy_ver[0] = resp->phy_maj;
 	link_info->phy_ver[1] = resp->phy_min;
 	link_info->phy_ver[2] = resp->phy_bld;
 
 	HWRM_UNLOCK();
 
+	PMD_DRV_LOG(DEBUG, "Link Speed %d\n", link_info->link_speed);
+	PMD_DRV_LOG(DEBUG, "Auto Mode %d\n", link_info->auto_mode);
+	PMD_DRV_LOG(DEBUG, "Support Speeds %x\n", link_info->support_speeds);
+	PMD_DRV_LOG(DEBUG, "Auto Link Speed %x\n", link_info->auto_link_speed);
+	PMD_DRV_LOG(DEBUG, "Auto Link Speed Mask %x\n",
+		    link_info->auto_link_speed_mask);
+	PMD_DRV_LOG(DEBUG, "Forced Link Speed %x\n",
+		    link_info->force_link_speed);
+
 	return rc;
 }
 
@@ -808,9 +1036,15 @@ int bnxt_hwrm_queue_qportcfg(struct bnxt *bp)
 	int rc = 0;
 	struct hwrm_queue_qportcfg_input req = {.req_type = 0 };
 	struct hwrm_queue_qportcfg_output *resp = bp->hwrm_cmd_resp_addr;
+	int i;
 
 	HWRM_PREP(req, QUEUE_QPORTCFG);
 
+	req.flags = HWRM_QUEUE_QPORTCFG_INPUT_FLAGS_PATH_TX;
+	/* HWRM Version >= 1.9.1 */
+	if (bp->hwrm_spec_code >= HWRM_VERSION_1_9_1)
+		req.drv_qmap_cap =
+			HWRM_QUEUE_QPORTCFG_INPUT_DRV_QMAP_CAP_ENABLED;
 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
 
 	HWRM_CHECK_RESULT();
@@ -830,6 +1064,20 @@ int bnxt_hwrm_queue_qportcfg(struct bnxt *bp)
 
 	HWRM_UNLOCK();
 
+	if (bp->hwrm_spec_code < HWRM_VERSION_1_9_1) {
+		bp->tx_cosq_id = bp->cos_queue[0].id;
+	} else {
+		/* iterate and find the COSq profile to use for Tx */
+		for (i = 0; i < BNXT_COS_QUEUE_COUNT; i++) {
+			if (bp->cos_queue[i].profile ==
+				HWRM_QUEUE_SERVICE_PROFILE_LOSSY) {
+				bp->tx_cosq_id = bp->cos_queue[i].id;
+				break;
+			}
+		}
+	}
+	PMD_DRV_LOG(DEBUG, "Tx Cos Queue to use: %d\n", bp->tx_cosq_id);
+
 	return rc;
 }
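On HWRM 1.9.1+ firmware the qportcfg hunk scans the returned COS queues and uses the first one with a lossy service profile for Tx. A standalone sketch of that linear scan; the PROFILE_LOSSY value and the fallback to queue 0 (matching the pre-1.9.1 behavior) are assumptions for illustration:

#include <stdint.h>

#define COS_QUEUE_COUNT 8
#define PROFILE_LOSSY 0 /* placeholder; the real constant comes
                         * from the HWRM definitions */

struct cos_queue { uint8_t id; uint8_t profile; };

static uint8_t pick_tx_cosq(const struct cos_queue *q, int spec_ge_191)
{
	int i;

	if (!spec_ge_191)
		return q[0].id; /* pre-1.9.1: always queue 0 */
	for (i = 0; i < COS_QUEUE_COUNT; i++)
		if (q[i].profile == PROFILE_LOSSY)
			return q[i].id;
	return q[0].id; /* illustrative fallback */
}

int main(void)
{
	struct cos_queue q[COS_QUEUE_COUNT] = { {4, 1}, {5, PROFILE_LOSSY} };
	return pick_tx_cosq(q, 1) != 5; /* 0 on success */
}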
rc:%d\n", rc); HWRM_UNLOCK(); return rc; } @@ -933,19 +1181,19 @@ int bnxt_hwrm_ring_free(struct bnxt *bp, switch (ring_type) { case HWRM_RING_FREE_INPUT_RING_TYPE_L2_CMPL: - RTE_LOG(ERR, PMD, "hwrm_ring_free cp failed. rc:%d\n", + PMD_DRV_LOG(ERR, "hwrm_ring_free cp failed. rc:%d\n", rc); return rc; case HWRM_RING_FREE_INPUT_RING_TYPE_RX: - RTE_LOG(ERR, PMD, "hwrm_ring_free rx failed. rc:%d\n", + PMD_DRV_LOG(ERR, "hwrm_ring_free rx failed. rc:%d\n", rc); return rc; case HWRM_RING_FREE_INPUT_RING_TYPE_TX: - RTE_LOG(ERR, PMD, "hwrm_ring_free tx failed. rc:%d\n", + PMD_DRV_LOG(ERR, "hwrm_ring_free tx failed. rc:%d\n", rc); return rc; default: - RTE_LOG(ERR, PMD, "Invalid ring, rc:%d\n", rc); + PMD_DRV_LOG(ERR, "Invalid ring, rc:%d\n", rc); return rc; } } @@ -1039,7 +1287,6 @@ int bnxt_hwrm_stat_ctx_alloc(struct bnxt *bp, struct bnxt_cp_ring_info *cpr, cpr->hw_stats_ctx_id = rte_le_to_cpu_16(resp->stat_ctx_id); HWRM_UNLOCK(); - bp->grp_info[idx].fw_stats_ctx = cpr->hw_stats_ctx_id; return rc; } @@ -1070,10 +1317,11 @@ int bnxt_hwrm_vnic_alloc(struct bnxt *bp, struct bnxt_vnic_info *vnic) struct hwrm_vnic_alloc_output *resp = bp->hwrm_cmd_resp_addr; /* map ring groups to this vnic */ - RTE_LOG(DEBUG, PMD, "Alloc VNIC. Start %x, End %x\n", + PMD_DRV_LOG(DEBUG, "Alloc VNIC. Start %x, End %x\n", vnic->start_grp_id, vnic->end_grp_id); - for (i = vnic->start_grp_id, j = 0; i <= vnic->end_grp_id; i++, j++) + for (i = vnic->start_grp_id, j = 0; i < vnic->end_grp_id; i++, j++) vnic->fw_grp_ids[j] = bp->grp_info[i].fw_grp_id; + vnic->dflt_ring_grp = bp->grp_info[vnic->start_grp_id].fw_grp_id; vnic->rss_rule = (uint16_t)HWRM_NA_SIGNATURE; vnic->cos_rule = (uint16_t)HWRM_NA_SIGNATURE; @@ -1083,14 +1331,15 @@ int bnxt_hwrm_vnic_alloc(struct bnxt *bp, struct bnxt_vnic_info *vnic) HWRM_PREP(req, VNIC_ALLOC); if (vnic->func_default) - req.flags = HWRM_VNIC_ALLOC_INPUT_FLAGS_DEFAULT; + req.flags = + rte_cpu_to_le_32(HWRM_VNIC_ALLOC_INPUT_FLAGS_DEFAULT); rc = bnxt_hwrm_send_message(bp, &req, sizeof(req)); HWRM_CHECK_RESULT(); vnic->fw_vnic_id = rte_le_to_cpu_16(resp->vnic_id); HWRM_UNLOCK(); - RTE_LOG(DEBUG, PMD, "VNIC ID %x\n", vnic->fw_vnic_id); + PMD_DRV_LOG(DEBUG, "VNIC ID %x\n", vnic->fw_vnic_id); return rc; } @@ -1104,7 +1353,7 @@ static int bnxt_hwrm_vnic_plcmodes_qcfg(struct bnxt *bp, HWRM_PREP(req, VNIC_PLCMODES_QCFG); - req.vnic_id = rte_cpu_to_le_32(vnic->fw_vnic_id); + req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id); rc = bnxt_hwrm_send_message(bp, &req, sizeof(req)); @@ -1132,7 +1381,7 @@ static int bnxt_hwrm_vnic_plcmodes_cfg(struct bnxt *bp, HWRM_PREP(req, VNIC_PLCMODES_CFG); - req.vnic_id = rte_cpu_to_le_32(vnic->fw_vnic_id); + req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id); req.flags = rte_cpu_to_le_32(pmode->flags); req.jumbo_thresh = rte_cpu_to_le_16(pmode->jumbo_thresh); req.hds_offset = rte_cpu_to_le_16(pmode->hds_offset); @@ -1160,7 +1409,7 @@ int bnxt_hwrm_vnic_cfg(struct bnxt *bp, struct bnxt_vnic_info *vnic) struct bnxt_plcmodes_cfg pmodes; if (vnic->fw_vnic_id == INVALID_HW_RING_ID) { - RTE_LOG(DEBUG, PMD, "VNIC ID %x\n", vnic->fw_vnic_id); + PMD_DRV_LOG(DEBUG, "VNIC ID %x\n", vnic->fw_vnic_id); return rc; } @@ -1225,7 +1474,7 @@ int bnxt_hwrm_vnic_qcfg(struct bnxt *bp, struct bnxt_vnic_info *vnic, struct hwrm_vnic_qcfg_output *resp = bp->hwrm_cmd_resp_addr; if (vnic->fw_vnic_id == INVALID_HW_RING_ID) { - RTE_LOG(DEBUG, PMD, "VNIC QCFG ID %d\n", vnic->fw_vnic_id); + PMD_DRV_LOG(DEBUG, "VNIC QCFG ID %d\n", vnic->fw_vnic_id); return rc; } HWRM_PREP(req, VNIC_QCFG); @@ -1277,7 +1526,7 
@@ -1277,7 +1526,7 @@ int bnxt_hwrm_vnic_ctx_alloc(struct bnxt *bp, struct bnxt_vnic_info *vnic)
 	vnic->rss_rule = rte_le_to_cpu_16(resp->rss_cos_lb_ctx_id);
 	HWRM_UNLOCK();
-	RTE_LOG(DEBUG, PMD, "VNIC RSS Rule %x\n", vnic->rss_rule);
+	PMD_DRV_LOG(DEBUG, "VNIC RSS Rule %x\n", vnic->rss_rule);
 
 	return rc;
 }
@@ -1290,7 +1539,7 @@ int bnxt_hwrm_vnic_ctx_free(struct bnxt *bp, struct bnxt_vnic_info *vnic)
 		bp->hwrm_cmd_resp_addr;
 
 	if (vnic->rss_rule == 0xffff) {
-		RTE_LOG(DEBUG, PMD, "VNIC RSS Rule %x\n", vnic->rss_rule);
+		PMD_DRV_LOG(DEBUG, "VNIC RSS Rule %x\n", vnic->rss_rule);
 		return rc;
 	}
 	HWRM_PREP(req, VNIC_RSS_COS_LB_CTX_FREE);
@@ -1314,7 +1563,7 @@ int bnxt_hwrm_vnic_free(struct bnxt *bp, struct bnxt_vnic_info *vnic)
 	struct hwrm_vnic_free_output *resp = bp->hwrm_cmd_resp_addr;
 
 	if (vnic->fw_vnic_id == INVALID_HW_RING_ID) {
-		RTE_LOG(DEBUG, PMD, "VNIC FREE ID %x\n", vnic->fw_vnic_id);
+		PMD_DRV_LOG(DEBUG, "VNIC FREE ID %x\n", vnic->fw_vnic_id);
 		return rc;
 	}
 
@@ -1341,6 +1590,7 @@ int bnxt_hwrm_vnic_rss_cfg(struct bnxt *bp,
 	HWRM_PREP(req, VNIC_RSS_CFG);
 
 	req.hash_type = rte_cpu_to_le_32(vnic->hash_type);
+	req.hash_mode_flags = vnic->hash_mode;
 
 	req.ring_grp_tbl_addr =
 	    rte_cpu_to_le_64(vnic->rss_table_dma_addr);
@@ -1364,6 +1614,11 @@ int bnxt_hwrm_vnic_plcmode_cfg(struct bnxt *bp,
 	struct hwrm_vnic_plcmodes_cfg_output *resp = bp->hwrm_cmd_resp_addr;
 	uint16_t size;
 
+	if (vnic->fw_vnic_id == INVALID_HW_RING_ID) {
+		PMD_DRV_LOG(DEBUG, "VNIC ID %x\n", vnic->fw_vnic_id);
+		return rc;
+	}
+
 	HWRM_PREP(req, VNIC_PLCMODES_CFG);
 
 	req.flags = rte_cpu_to_le_32(
@@ -1376,7 +1631,7 @@ int bnxt_hwrm_vnic_plcmode_cfg(struct bnxt *bp,
 	size -= RTE_PKTMBUF_HEADROOM;
 
 	req.jumbo_thresh = rte_cpu_to_le_16(size);
-	req.vnic_id = rte_cpu_to_le_32(vnic->fw_vnic_id);
+	req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
 
 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
 
@@ -1407,12 +1662,12 @@ int bnxt_hwrm_vnic_tpa_cfg(struct bnxt *bp,
 			HWRM_VNIC_TPA_CFG_INPUT_FLAGS_GRO |
 			HWRM_VNIC_TPA_CFG_INPUT_FLAGS_AGG_WITH_ECN |
 			HWRM_VNIC_TPA_CFG_INPUT_FLAGS_AGG_WITH_SAME_GRE_SEQ);
-		req.vnic_id = rte_cpu_to_le_32(vnic->fw_vnic_id);
 		req.max_agg_segs = rte_cpu_to_le_16(5);
 		req.max_aggs =
 			rte_cpu_to_le_16(HWRM_VNIC_TPA_CFG_INPUT_MAX_AGGS_MAX);
 		req.min_agg_len = rte_cpu_to_le_32(512);
 	}
+	req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
 
 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
 
@@ -1497,10 +1752,9 @@ int bnxt_hwrm_func_qstats(struct bnxt *bp, uint16_t fid,
 	stats->obytes += rte_le_to_cpu_64(resp->tx_mcast_bytes);
 	stats->obytes += rte_le_to_cpu_64(resp->tx_bcast_bytes);
 
-	stats->ierrors = rte_le_to_cpu_64(resp->rx_err_pkts);
-	stats->oerrors = rte_le_to_cpu_64(resp->tx_err_pkts);
-
-	stats->imissed = rte_le_to_cpu_64(resp->rx_drop_pkts);
+	stats->imissed = rte_le_to_cpu_64(resp->rx_discard_pkts);
+	stats->ierrors = rte_le_to_cpu_64(resp->rx_drop_pkts);
+	stats->oerrors = rte_le_to_cpu_64(resp->tx_discard_pkts);
 
 	HWRM_UNLOCK();
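bnxt_hwrm_vnic_plcmode_cfg above derives the jumbo placement threshold from the mbuf pool: the usable receive buffer is the mempool's data room minus RTE_PKTMBUF_HEADROOM. A small worked example, assuming the common 2176-byte data room (2048 + 128, the DPDK default buffer size):

#include <stdio.h>

#define PKTMBUF_HEADROOM 128 /* DPDK's default RTE_PKTMBUF_HEADROOM */

int main(void)
{
	unsigned int data_room = 2048 + PKTMBUF_HEADROOM;
	unsigned int jumbo_thresh = data_room - PKTMBUF_HEADROOM;

	/* 2048: frames larger than this spill into aggregation buffers */
	printf("%u\n", jumbo_thresh);
	return 0;
}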
@@ -1562,19 +1816,15 @@ int bnxt_free_all_hwrm_stat_ctxs(struct bnxt *bp)
 
 	for (i = 0; i < bp->rx_cp_nr_rings + bp->tx_cp_nr_rings; i++) {
 
-		if (i >= bp->rx_cp_nr_rings)
+		if (i >= bp->rx_cp_nr_rings) {
 			cpr = bp->tx_queues[i - bp->rx_cp_nr_rings]->cp_ring;
-		else
+		} else {
 			cpr = bp->rx_queues[i]->cp_ring;
+			bp->grp_info[i].fw_stats_ctx = -1;
+		}
 		if (cpr->hw_stats_ctx_id != HWRM_NA_SIGNATURE) {
 			rc = bnxt_hwrm_stat_ctx_free(bp, cpr, i);
 			cpr->hw_stats_ctx_id = HWRM_NA_SIGNATURE;
-			/*
-			 * TODO. Need a better way to reset grp_info.stats_ctx
-			 * for Rx rings only. stats_ctx is not saved for Tx
-			 * in grp_info.
-			 */
-			bp->grp_info[i].fw_stats_ctx = cpr->hw_stats_ctx_id;
 			if (rc)
 				return rc;
 		}
@@ -1626,31 +1876,64 @@ int bnxt_free_all_hwrm_ring_grps(struct bnxt *bp)
 	return rc;
 }
 
-static void bnxt_free_cp_ring(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
-				unsigned int idx __rte_unused)
+static void bnxt_free_cp_ring(struct bnxt *bp, struct bnxt_cp_ring_info *cpr)
 {
 	struct bnxt_ring *cp_ring = cpr->cp_ring_struct;
 
 	bnxt_hwrm_ring_free(bp, cp_ring,
 			HWRM_RING_FREE_INPUT_RING_TYPE_L2_CMPL);
 	cp_ring->fw_ring_id = INVALID_HW_RING_ID;
-	bp->grp_info[idx].cp_fw_ring_id = INVALID_HW_RING_ID;
 	memset(cpr->cp_desc_ring, 0, cpr->cp_ring_struct->ring_size *
 			sizeof(*cpr->cp_desc_ring));
 	cpr->cp_raw_cons = 0;
 }
 
+void bnxt_free_hwrm_rx_ring(struct bnxt *bp, int queue_index)
+{
+	struct bnxt_rx_queue *rxq = bp->rx_queues[queue_index];
+	struct bnxt_rx_ring_info *rxr = rxq->rx_ring;
+	struct bnxt_ring *ring = rxr->rx_ring_struct;
+	struct bnxt_cp_ring_info *cpr = rxq->cp_ring;
+
+	if (ring->fw_ring_id != INVALID_HW_RING_ID) {
+		bnxt_hwrm_ring_free(bp, ring,
+				    HWRM_RING_FREE_INPUT_RING_TYPE_RX);
+		ring->fw_ring_id = INVALID_HW_RING_ID;
+		bp->grp_info[queue_index].rx_fw_ring_id = INVALID_HW_RING_ID;
+		memset(rxr->rx_desc_ring, 0,
+		       rxr->rx_ring_struct->ring_size *
+		       sizeof(*rxr->rx_desc_ring));
+		memset(rxr->rx_buf_ring, 0,
+		       rxr->rx_ring_struct->ring_size *
+		       sizeof(*rxr->rx_buf_ring));
+		rxr->rx_prod = 0;
+	}
+	ring = rxr->ag_ring_struct;
+	if (ring->fw_ring_id != INVALID_HW_RING_ID) {
+		bnxt_hwrm_ring_free(bp, ring,
+				    HWRM_RING_FREE_INPUT_RING_TYPE_RX);
+		ring->fw_ring_id = INVALID_HW_RING_ID;
+		memset(rxr->ag_buf_ring, 0,
+		       rxr->ag_ring_struct->ring_size *
+		       sizeof(*rxr->ag_buf_ring));
+		rxr->ag_prod = 0;
+		bp->grp_info[queue_index].ag_fw_ring_id = INVALID_HW_RING_ID;
+	}
+	if (cpr->cp_ring_struct->fw_ring_id != INVALID_HW_RING_ID)
+		bnxt_free_cp_ring(bp, cpr);
+
+	bp->grp_info[queue_index].cp_fw_ring_id = INVALID_HW_RING_ID;
+}
+
 int bnxt_free_all_hwrm_rings(struct bnxt *bp)
 {
 	unsigned int i;
-	int rc = 0;
 
 	for (i = 0; i < bp->tx_cp_nr_rings; i++) {
 		struct bnxt_tx_queue *txq = bp->tx_queues[i];
 		struct bnxt_tx_ring_info *txr = txq->tx_ring;
 		struct bnxt_ring *ring = txr->tx_ring_struct;
 		struct bnxt_cp_ring_info *cpr = txq->cp_ring;
-		unsigned int idx = bp->rx_cp_nr_rings + i + 1;
 
 		if (ring->fw_ring_id != INVALID_HW_RING_ID) {
 			bnxt_hwrm_ring_free(bp, ring,
@@ -1666,53 +1949,15 @@ int bnxt_free_all_hwrm_rings(struct bnxt *bp)
 			txr->tx_cons = 0;
 		}
 		if (cpr->cp_ring_struct->fw_ring_id != INVALID_HW_RING_ID) {
-			bnxt_free_cp_ring(bp, cpr, idx);
+			bnxt_free_cp_ring(bp, cpr);
 			cpr->cp_ring_struct->fw_ring_id = INVALID_HW_RING_ID;
 		}
 	}
 
-	for (i = 0; i < bp->rx_cp_nr_rings; i++) {
-		struct bnxt_rx_queue *rxq = bp->rx_queues[i];
-		struct bnxt_rx_ring_info *rxr = rxq->rx_ring;
-		struct bnxt_ring *ring = rxr->rx_ring_struct;
-		struct bnxt_cp_ring_info *cpr = rxq->cp_ring;
-		unsigned int idx = i + 1;
+	for (i = 0; i < bp->rx_cp_nr_rings; i++)
+		bnxt_free_hwrm_rx_ring(bp, i);
 
-		if (ring->fw_ring_id != INVALID_HW_RING_ID) {
-			bnxt_hwrm_ring_free(bp, ring,
-					HWRM_RING_FREE_INPUT_RING_TYPE_RX);
-			ring->fw_ring_id = INVALID_HW_RING_ID;
-			bp->grp_info[idx].rx_fw_ring_id = INVALID_HW_RING_ID;
-			memset(rxr->rx_desc_ring, 0,
-					rxr->rx_ring_struct->ring_size *
-					sizeof(*rxr->rx_desc_ring));
-			memset(rxr->rx_buf_ring, 0,
-					rxr->rx_ring_struct->ring_size *
-					sizeof(*rxr->rx_buf_ring));
-			rxr->rx_prod = 0;
-			memset(rxr->ag_buf_ring, 0,
-					rxr->ag_ring_struct->ring_size *
-					sizeof(*rxr->ag_buf_ring));
-			rxr->ag_prod = 0;
-		}
-		if (cpr->cp_ring_struct->fw_ring_id != INVALID_HW_RING_ID) {
-			bnxt_free_cp_ring(bp, cpr, idx);
-			bp->grp_info[i].cp_fw_ring_id = INVALID_HW_RING_ID;
-			cpr->cp_ring_struct->fw_ring_id = INVALID_HW_RING_ID;
-		}
-	}
-
-	/* Default completion ring */
-	{
-		struct bnxt_cp_ring_info *cpr = bp->def_cp_ring;
-
-		if (cpr->cp_ring_struct->fw_ring_id != INVALID_HW_RING_ID) {
-			bnxt_free_cp_ring(bp, cpr, 0);
-			cpr->cp_ring_struct->fw_ring_id = INVALID_HW_RING_ID;
-		}
-	}
-
-	return rc;
+	return 0;
 }
 
 int bnxt_alloc_all_hwrm_ring_grps(struct bnxt *bp)
@@ -1752,9 +1997,9 @@ int bnxt_alloc_hwrm_resources(struct bnxt *bp)
 	if (bp->hwrm_cmd_resp_addr == NULL)
 		return -ENOMEM;
 	bp->hwrm_cmd_resp_dma_addr =
-		rte_mem_virt2phy(bp->hwrm_cmd_resp_addr);
+		rte_mem_virt2iova(bp->hwrm_cmd_resp_addr);
 	if (bp->hwrm_cmd_resp_dma_addr == 0) {
-		RTE_LOG(ERR, PMD,
+		PMD_DRV_LOG(ERR,
 			"unable to map response address to physical memory\n");
 		return -ENOMEM;
 	}
@@ -1775,6 +2020,7 @@ int bnxt_clear_hwrm_vnic_filters(struct bnxt *bp, struct bnxt_vnic_info *vnic)
 			rc = bnxt_hwrm_clear_ntuple_filter(bp, filter);
 		else
 			rc = bnxt_hwrm_clear_l2_filter(bp, filter);
+		STAILQ_REMOVE(&vnic->filter, filter, bnxt_filter_info, next);
 		//if (rc)
 			//break;
 	}
@@ -1790,7 +2036,7 @@ bnxt_clear_hwrm_vnic_flows(struct bnxt *bp, struct bnxt_vnic_info *vnic)
 
 	STAILQ_FOREACH(flow, &vnic->flow_list, next) {
 		filter = flow->filter;
-		RTE_LOG(ERR, PMD, "filter type %d\n", filter->filter_type);
+		PMD_DRV_LOG(ERR, "filter type %d\n", filter->filter_type);
 		if (filter->filter_type == HWRM_CFA_EM_FILTER)
 			rc = bnxt_hwrm_clear_em_filter(bp, filter);
 		else if (filter->filter_type == HWRM_CFA_NTUPLE_FILTER)
@@ -1862,6 +2108,8 @@ void bnxt_free_all_hwrm_resources(struct bnxt *bp)
 
 		bnxt_hwrm_vnic_tpa_cfg(bp, vnic, false);
 		bnxt_hwrm_vnic_free(bp, vnic);
+
+		rte_free(vnic->fw_grp_ids);
 	}
 	/* Ring resources */
 	bnxt_free_all_hwrm_rings(bp);
@@ -1880,11 +2128,17 @@ static uint16_t bnxt_parse_eth_link_duplex(uint32_t conf_link_speed)
 	switch (conf_link_speed) {
 	case ETH_LINK_SPEED_10M_HD:
 	case ETH_LINK_SPEED_100M_HD:
+		/* FALLTHROUGH */
 		return HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_HALF;
 	}
 	return hw_link_duplex;
 }
 
+static uint16_t bnxt_check_eth_link_autoneg(uint32_t conf_link)
+{
+	return (conf_link & ETH_LINK_SPEED_FIXED) ? 0 : 1;
+}
+
 static uint16_t bnxt_parse_eth_link_speed(uint32_t conf_link_speed)
 {
 	uint16_t eth_link_speed = 0;
@@ -1895,6 +2149,7 @@ static uint16_t bnxt_parse_eth_link_speed(uint32_t conf_link_speed)
 	switch (conf_link_speed & ~ETH_LINK_SPEED_FIXED) {
 	case ETH_LINK_SPEED_100M:
 	case ETH_LINK_SPEED_100M_HD:
+		/* FALLTHROUGH */
 		eth_link_speed =
 			HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_100MB;
 		break;
@@ -1926,8 +2181,12 @@ static uint16_t bnxt_parse_eth_link_speed(uint32_t conf_link_speed)
 		eth_link_speed =
 			HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_50GB;
 		break;
+	case ETH_LINK_SPEED_100G:
+		eth_link_speed =
+			HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_100GB;
+		break;
 	default:
-		RTE_LOG(ERR, PMD,
+		PMD_DRV_LOG(ERR,
 			"Unsupported link speed %d; default to AUTO\n",
 			conf_link_speed);
 		break;
@@ -1938,7 +2197,7 @@ static uint16_t bnxt_parse_eth_link_speed(uint32_t conf_link_speed)
 #define BNXT_SUPPORTED_SPEEDS (ETH_LINK_SPEED_100M | ETH_LINK_SPEED_100M_HD | \
 	ETH_LINK_SPEED_1G | ETH_LINK_SPEED_2_5G | \
 	ETH_LINK_SPEED_10G | ETH_LINK_SPEED_20G | ETH_LINK_SPEED_25G | \
-	ETH_LINK_SPEED_40G | ETH_LINK_SPEED_50G)
+	ETH_LINK_SPEED_40G | ETH_LINK_SPEED_50G | ETH_LINK_SPEED_100G)
 
 static int bnxt_valid_link_speed(uint32_t link_speed, uint16_t port_id)
 {
@@ -1951,20 +2210,20 @@ static int bnxt_valid_link_speed(uint32_t link_speed, uint16_t port_id)
 		one_speed = link_speed & ~ETH_LINK_SPEED_FIXED;
 
 		if (one_speed & (one_speed - 1)) {
-			RTE_LOG(ERR, PMD,
+			PMD_DRV_LOG(ERR,
 				"Invalid advertised speeds (%u) for port %u\n",
 				link_speed, port_id);
 			return -EINVAL;
 		}
 		if ((one_speed & BNXT_SUPPORTED_SPEEDS) != one_speed) {
-			RTE_LOG(ERR, PMD,
+			PMD_DRV_LOG(ERR,
 				"Unsupported advertised speed (%u) for port %u\n",
 				link_speed, port_id);
 			return -EINVAL;
 		}
 	} else {
 		if (!(link_speed & BNXT_SUPPORTED_SPEEDS)) {
-			RTE_LOG(ERR, PMD,
+			PMD_DRV_LOG(ERR,
 				"Unsupported advertised speeds (%u) for port %u\n",
 				link_speed, port_id);
 			return -EINVAL;
@@ -2002,6 +2261,8 @@ bnxt_parse_eth_link_speed_mask(struct bnxt *bp, uint32_t link_speed)
 		ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_40GB;
 	if (link_speed & ETH_LINK_SPEED_50G)
 		ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_50GB;
+	if (link_speed & ETH_LINK_SPEED_100G)
+		ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_100GB;
 	return ret;
 }
 
@@ -2034,9 +2295,12 @@ static uint32_t bnxt_parse_hw_link_speed(uint16_t hw_link_speed)
 	case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_50GB:
 		eth_link_speed = ETH_SPEED_NUM_50G;
 		break;
+	case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_100GB:
+		eth_link_speed = ETH_SPEED_NUM_100G;
+		break;
 	case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_2GB:
 	default:
-		RTE_LOG(ERR, PMD, "HWRM link speed %d not defined\n",
+		PMD_DRV_LOG(ERR, "HWRM link speed %d not defined\n",
 			hw_link_speed);
 		break;
 	}
@@ -2050,13 +2314,14 @@ static uint16_t bnxt_parse_hw_link_duplex(uint16_t hw_link_duplex)
 	switch (hw_link_duplex) {
 	case HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_BOTH:
 	case HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_FULL:
+		/* FALLTHROUGH */
 		eth_link_duplex = ETH_LINK_FULL_DUPLEX;
 		break;
 	case HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_HALF:
 		eth_link_duplex = ETH_LINK_HALF_DUPLEX;
 		break;
 	default:
-		RTE_LOG(ERR, PMD, "HWRM link duplex %d not defined\n",
+		PMD_DRV_LOG(ERR, "HWRM link duplex %d not defined\n",
 			hw_link_duplex);
 		break;
 	}
@@ -2070,7 +2335,7 @@ int bnxt_get_hwrm_link_config(struct bnxt *bp, struct rte_eth_link *link)
 
 	rc = bnxt_hwrm_port_phy_qcfg(bp, link_info);
 	if (rc) {
-		RTE_LOG(ERR, PMD,
+		PMD_DRV_LOG(ERR,
 			"Get link config failed with rc %d\n", rc);
 		goto exit;
 	}
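bnxt_valid_link_speed above rejects a fixed-speed request that advertises more than one speed using the classic power-of-two test: one_speed & (one_speed - 1) is non-zero exactly when more than one bit is set. A quick demonstration:

#include <stdint.h>
#include <stdio.h>

/* True when exactly one bit is set and the value is non-zero,
 * i.e. the same test the hunk above applies to the speed mask. */
static int one_bit_set(uint32_t v)
{
	return v != 0 && (v & (v - 1)) == 0;
}

int main(void)
{
	printf("%d\n", one_bit_set(0x8));       /* 1: single speed */
	printf("%d\n", one_bit_set(0x8 | 0x4)); /* 0: two speeds */
	return 0;
}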
@@ -2093,9 +2358,9 @@ int bnxt_set_hwrm_link_config(struct bnxt *bp, bool link_up)
 	int rc = 0;
 	struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf;
 	struct bnxt_link_info link_req;
-	uint16_t speed;
+	uint16_t speed, autoneg;
 
-	if (BNXT_NPAR_PF(bp) || BNXT_VF(bp))
+	if (!BNXT_SINGLE_PF(bp) || BNXT_VF(bp))
 		return 0;
 
 	rc = bnxt_valid_link_speed(dev_conf->link_speeds,
@@ -2108,20 +2373,36 @@ int bnxt_set_hwrm_link_config(struct bnxt *bp, bool link_up)
 	if (!link_up)
 		goto port_phy_cfg;
 
+	autoneg = bnxt_check_eth_link_autoneg(dev_conf->link_speeds);
 	speed = bnxt_parse_eth_link_speed(dev_conf->link_speeds);
 	link_req.phy_flags = HWRM_PORT_PHY_CFG_INPUT_FLAGS_RESET_PHY;
-	if (speed == 0) {
+	/* Autoneg can be done only when the FW allows */
+	if (autoneg == 1 && !(bp->link_info.auto_link_speed ||
+			      bp->link_info.force_link_speed)) {
 		link_req.phy_flags |=
 				HWRM_PORT_PHY_CFG_INPUT_FLAGS_RESTART_AUTONEG;
-		link_req.auto_mode =
-				HWRM_PORT_PHY_CFG_INPUT_AUTO_MODE_SPEED_MASK;
 		link_req.auto_link_speed_mask =
 			bnxt_parse_eth_link_speed_mask(bp,
 						       dev_conf->link_speeds);
 	} else {
+		if (bp->link_info.phy_type ==
+		    HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASET ||
+		    bp->link_info.phy_type ==
+		    HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASETE ||
+		    bp->link_info.media_type ==
+		    HWRM_PORT_PHY_QCFG_OUTPUT_MEDIA_TYPE_TP) {
+			PMD_DRV_LOG(ERR, "10GBase-T devices must autoneg\n");
+			return -EINVAL;
+		}
+
 		link_req.phy_flags |= HWRM_PORT_PHY_CFG_INPUT_FLAGS_FORCE;
-		link_req.link_speed = speed;
-		RTE_LOG(INFO, PMD, "Set Link Speed %x\n", speed);
+		/* If user wants a particular speed try that first. */
+		if (speed)
+			link_req.link_speed = speed;
+		else if (bp->link_info.force_link_speed)
+			link_req.link_speed = bp->link_info.force_link_speed;
+		else
+			link_req.link_speed = bp->link_info.auto_link_speed;
 	}
 	link_req.duplex = bnxt_parse_eth_link_duplex(dev_conf->link_speeds);
 	link_req.auto_pause = bp->link_info.auto_pause;
@@ -2130,7 +2411,7 @@ int bnxt_set_hwrm_link_config(struct bnxt *bp, bool link_up)
 port_phy_cfg:
 	rc = bnxt_hwrm_port_phy_cfg(bp, &link_req);
 	if (rc) {
-		RTE_LOG(ERR, PMD,
+		PMD_DRV_LOG(ERR,
 			"Set link config failed with rc %d\n", rc);
 	}
 
@@ -2143,6 +2424,7 @@ int bnxt_hwrm_func_qcfg(struct bnxt *bp)
 {
 	struct hwrm_func_qcfg_input req = {0};
 	struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
+	uint16_t flags;
 	int rc = 0;
 
 	HWRM_PREP(req, FUNC_QCFG);
@@ -2154,11 +2436,15 @@ int bnxt_hwrm_func_qcfg(struct bnxt *bp)
 	/* Hard Coded.. 0xfff VLAN ID mask */
 	bp->vlan = rte_le_to_cpu_16(resp->vlan) & 0xfff;
 
+	flags = rte_le_to_cpu_16(resp->flags);
+	if (BNXT_PF(bp) && (flags & HWRM_FUNC_QCFG_OUTPUT_FLAGS_MULTI_HOST))
+		bp->flags |= BNXT_FLAG_MULTI_HOST;
+
 	switch (resp->port_partition_type) {
 	case HWRM_FUNC_QCFG_OUTPUT_PORT_PARTITION_TYPE_NPAR1_0:
 	case HWRM_FUNC_QCFG_OUTPUT_PORT_PARTITION_TYPE_NPAR1_5:
 	case HWRM_FUNC_QCFG_OUTPUT_PORT_PARTITION_TYPE_NPAR2_0:
+		/* FALLTHROUGH */
 		bp->port_partition_type = resp->port_partition_type;
 		break;
 	default:
@@ -2216,7 +2502,8 @@ static int bnxt_hwrm_pf_func_cfg(struct bnxt *bp, int tx_rings)
 	req.flags = rte_cpu_to_le_32(bp->pf.func_cfg_flags);
 	req.mtu = rte_cpu_to_le_16(BNXT_MAX_MTU);
 	req.mru = rte_cpu_to_le_16(bp->eth_dev->data->mtu + ETHER_HDR_LEN +
-				   ETHER_CRC_LEN + VLAN_TAG_SIZE);
+				   ETHER_CRC_LEN + VLAN_TAG_SIZE *
+				   BNXT_NUM_VLANS);
 	req.num_rsscos_ctxs = rte_cpu_to_le_16(bp->max_rsscos_ctx);
 	req.num_stat_ctxs = rte_cpu_to_le_16(bp->max_stat_ctx);
 	req.num_cmpl_rings = rte_cpu_to_le_16(bp->max_cp_rings);
@@ -2253,9 +2540,11 @@ static void populate_vf_func_cfg_req(struct bnxt *bp,
 			HWRM_FUNC_CFG_INPUT_ENABLES_NUM_HW_RING_GRPS);
 	req->mtu = rte_cpu_to_le_16(bp->eth_dev->data->mtu + ETHER_HDR_LEN +
-				    ETHER_CRC_LEN + VLAN_TAG_SIZE);
+				    ETHER_CRC_LEN + VLAN_TAG_SIZE *
+				    BNXT_NUM_VLANS);
 	req->mru = rte_cpu_to_le_16(bp->eth_dev->data->mtu + ETHER_HDR_LEN +
-				    ETHER_CRC_LEN + VLAN_TAG_SIZE);
+				    ETHER_CRC_LEN + VLAN_TAG_SIZE *
+				    BNXT_NUM_VLANS);
 	req->num_rsscos_ctxs = rte_cpu_to_le_16(bp->max_rsscos_ctx /
 						(num_vfs + 1));
 	req->num_stat_ctxs = rte_cpu_to_le_16(bp->max_stat_ctx / (num_vfs + 1));
@@ -2303,11 +2592,11 @@ static void reserve_resources_from_vf(struct bnxt *bp,
 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
 	if (rc) {
-		RTE_LOG(ERR, PMD, "hwrm_func_qcaps failed rc:%d\n", rc);
+		PMD_DRV_LOG(ERR, "hwrm_func_qcaps failed rc:%d\n", rc);
 		copy_func_cfg_to_qcaps(cfg_req, resp);
 	} else if (resp->error_code) {
 		rc = rte_le_to_cpu_16(resp->error_code);
-		RTE_LOG(ERR, PMD, "hwrm_func_qcaps error %d\n", rc);
+		PMD_DRV_LOG(ERR, "hwrm_func_qcaps error %d\n", rc);
 		copy_func_cfg_to_qcaps(cfg_req, resp);
 	}
 
@@ -2338,11 +2627,11 @@ int bnxt_hwrm_func_qcfg_current_vf_vlan(struct bnxt *bp, int vf)
 	req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
 	if (rc) {
-		RTE_LOG(ERR, PMD, "hwrm_func_qcfg failed rc:%d\n", rc);
+		PMD_DRV_LOG(ERR, "hwrm_func_qcfg failed rc:%d\n", rc);
 		return -1;
 	} else if (resp->error_code) {
 		rc = rte_le_to_cpu_16(resp->error_code);
-		RTE_LOG(ERR, PMD, "hwrm_func_qcfg error %d\n", rc);
+		PMD_DRV_LOG(ERR, "hwrm_func_qcfg error %d\n", rc);
 		return -1;
 	}
 	rc = rte_le_to_cpu_16(resp->vlan);
@@ -2378,7 +2667,7 @@ int bnxt_hwrm_allocate_pf_only(struct bnxt *bp)
 	int rc;
 
 	if (!BNXT_PF(bp)) {
-		RTE_LOG(ERR, PMD, "Attempt to allcoate VFs on a VF!\n");
+		PMD_DRV_LOG(ERR, "Attempt to allcoate VFs on a VF!\n");
 		return -1;
 	}
 
@@ -2405,7 +2694,7 @@ int bnxt_hwrm_allocate_vfs(struct bnxt *bp, int num_vfs)
 	size_t req_buf_sz;
 
 	if (!BNXT_PF(bp)) {
-		RTE_LOG(ERR, PMD, "Attempt to allcoate VFs on a VF!\n");
+		PMD_DRV_LOG(ERR, "Attempt to allcoate VFs on a VF!\n");
 		return -1;
 	}
 
@@ -2471,9 +2760,9 @@ int bnxt_hwrm_allocate_vfs(struct bnxt *bp, int num_vfs)
 			HWRM_FUNC_CFG_INPUT_ENABLES_DFLT_MAC_ADDR);
 
 		if (rc || resp->error_code) {
-			RTE_LOG(ERR, PMD,
+			PMD_DRV_LOG(ERR,
 				"Failed to initizlie VF %d\n", i);
-			RTE_LOG(ERR, PMD,
+			PMD_DRV_LOG(ERR,
 				"Not all VFs available. (%d, %d)\n",
(%d, %d)\n", rc, resp->error_code); HWRM_UNLOCK(); @@ -2620,10 +2909,10 @@ int bnxt_hwrm_func_buf_rgtr(struct bnxt *bp) req.req_buf_page_size = rte_cpu_to_le_16( page_getenum(bp->pf.active_vfs * HWRM_MAX_REQ_LEN)); req.req_buf_len = rte_cpu_to_le_16(HWRM_MAX_REQ_LEN); - req.req_buf_page_addr[0] = - rte_cpu_to_le_64(rte_mem_virt2phy(bp->pf.vf_req_buf)); - if (req.req_buf_page_addr[0] == 0) { - RTE_LOG(ERR, PMD, + req.req_buf_page_addr0 = + rte_cpu_to_le_64(rte_mem_virt2iova(bp->pf.vf_req_buf)); + if (req.req_buf_page_addr0 == 0) { + PMD_DRV_LOG(ERR, "unable to map buffer address to physical memory\n"); return -ENOMEM; } @@ -2769,6 +3058,18 @@ int bnxt_hwrm_set_vf_vlan(struct bnxt *bp, int vf) return rc; } +int bnxt_hwrm_set_async_event_cr(struct bnxt *bp) +{ + int rc; + + if (BNXT_PF(bp)) + rc = bnxt_hwrm_func_cfg_def_cp(bp); + else + rc = bnxt_hwrm_vf_func_cfg_def_cp(bp); + + return rc; +} + int bnxt_hwrm_reject_fwd_resp(struct bnxt *bp, uint16_t target_id, void *encaped, size_t ec_size) { @@ -2883,9 +3184,6 @@ int bnxt_hwrm_port_qstats(struct bnxt *bp) struct bnxt_pf_info *pf = &bp->pf; int rc; - if (!(bp->flags & BNXT_FLAG_PORT_STATS)) - return 0; - HWRM_PREP(req, PORT_QSTATS); req.port_id = rte_cpu_to_le_16(pf->port_id); @@ -2906,7 +3204,9 @@ int bnxt_hwrm_port_clr_stats(struct bnxt *bp) struct bnxt_pf_info *pf = &bp->pf; int rc; - if (!(bp->flags & BNXT_FLAG_PORT_STATS)) + /* Not allowed on NS2 device, NPAR, MultiHost, VF */ + if (!(bp->flags & BNXT_FLAG_PORT_STATS) || BNXT_VF(bp) || + BNXT_NPAR(bp) || BNXT_MH(bp) || BNXT_TOTAL_VFS(bp)) return 0; HWRM_PREP(req, PORT_CLR_STATS); @@ -3025,7 +3325,7 @@ int bnxt_get_nvram_directory(struct bnxt *bp, uint32_t len, uint8_t *data) uint32_t entry_length; uint8_t *buf; size_t buflen; - phys_addr_t dma_handle; + rte_iova_t dma_handle; struct hwrm_nvm_get_dir_entries_input req = {0}; struct hwrm_nvm_get_dir_entries_output *resp = bp->hwrm_cmd_resp_addr; @@ -3043,9 +3343,9 @@ int bnxt_get_nvram_directory(struct bnxt *bp, uint32_t len, uint8_t *data) rte_mem_lock_page(buf); if (buf == NULL) return -ENOMEM; - dma_handle = rte_mem_virt2phy(buf); + dma_handle = rte_mem_virt2iova(buf); if (dma_handle == 0) { - RTE_LOG(ERR, PMD, + PMD_DRV_LOG(ERR, "unable to map response address to physical memory\n"); return -ENOMEM; } @@ -3070,7 +3370,7 @@ int bnxt_hwrm_get_nvram_item(struct bnxt *bp, uint32_t index, { int rc; uint8_t *buf; - phys_addr_t dma_handle; + rte_iova_t dma_handle; struct hwrm_nvm_read_input req = {0}; struct hwrm_nvm_read_output *resp = bp->hwrm_cmd_resp_addr; @@ -3079,9 +3379,9 @@ int bnxt_hwrm_get_nvram_item(struct bnxt *bp, uint32_t index, if (!buf) return -ENOMEM; - dma_handle = rte_mem_virt2phy(buf); + dma_handle = rte_mem_virt2iova(buf); if (dma_handle == 0) { - RTE_LOG(ERR, PMD, + PMD_DRV_LOG(ERR, "unable to map response address to physical memory\n"); return -ENOMEM; } @@ -3124,7 +3424,7 @@ int bnxt_hwrm_flash_nvram(struct bnxt *bp, uint16_t dir_type, int rc; struct hwrm_nvm_write_input req = {0}; struct hwrm_nvm_write_output *resp = bp->hwrm_cmd_resp_addr; - phys_addr_t dma_handle; + rte_iova_t dma_handle; uint8_t *buf; HWRM_PREP(req, NVM_WRITE); @@ -3140,9 +3440,9 @@ int bnxt_hwrm_flash_nvram(struct bnxt *bp, uint16_t dir_type, if (!buf) return -ENOMEM; - dma_handle = rte_mem_virt2phy(buf); + dma_handle = rte_mem_virt2iova(buf); if (dma_handle == 0) { - RTE_LOG(ERR, PMD, + PMD_DRV_LOG(ERR, "unable to map response address to physical memory\n"); return -ENOMEM; } @@ -3195,23 +3495,23 @@ static int bnxt_hwrm_func_vf_vnic_query(struct 
@@ -3195,23 +3495,23 @@ static int bnxt_hwrm_func_vf_vnic_query(struct bnxt *bp, uint16_t vf,
 	req.vf_id = rte_cpu_to_le_16(bp->pf.first_vf_id + vf);
 	req.max_vnic_id_cnt = rte_cpu_to_le_32(bp->pf.total_vnics);
-	req.vnic_id_tbl_addr = rte_cpu_to_le_64(rte_mem_virt2phy(vnic_ids));
+	req.vnic_id_tbl_addr = rte_cpu_to_le_64(rte_mem_virt2iova(vnic_ids));
 
 	if (req.vnic_id_tbl_addr == 0) {
 		HWRM_UNLOCK();
-		RTE_LOG(ERR, PMD,
+		PMD_DRV_LOG(ERR,
 		"unable to map VNIC ID table address to physical memory\n");
 		return -ENOMEM;
 	}
 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
 	if (rc) {
 		HWRM_UNLOCK();
-		RTE_LOG(ERR, PMD, "hwrm_func_vf_vnic_query failed rc:%d\n", rc);
+		PMD_DRV_LOG(ERR, "hwrm_func_vf_vnic_query failed rc:%d\n", rc);
 		return -1;
 	} else if (resp->error_code) {
 		rc = rte_le_to_cpu_16(resp->error_code);
 		HWRM_UNLOCK();
-		RTE_LOG(ERR, PMD, "hwrm_func_vf_vnic_query error %d\n", rc);
+		PMD_DRV_LOG(ERR, "hwrm_func_vf_vnic_query error %d\n", rc);
 		return -1;
 	}
 	rc = rte_le_to_cpu_32(resp->vnic_id_cnt);
@@ -3342,7 +3642,7 @@ int bnxt_hwrm_func_qcfg_vf_dflt_vnic_id(struct bnxt *bp, int vf)
 		}
 	}
 	/* Could not find a default VNIC. */
-	RTE_LOG(ERR, PMD, "No default VNIC\n");
+	PMD_DRV_LOG(ERR, "No default VNIC\n");
 exit:
 	rte_free(vnic_ids);
 	return -1;
@@ -3432,7 +3732,7 @@ int bnxt_hwrm_clear_em_filter(struct bnxt *bp, struct bnxt_filter_info *filter)
 	if (filter->fw_em_filter_id == UINT64_MAX)
 		return 0;
 
-	RTE_LOG(ERR, PMD, "Clear EM filter\n");
+	PMD_DRV_LOG(ERR, "Clear EM filter\n");
 	HWRM_PREP(req, CFA_EM_FLOW_FREE);
 
 	req.em_filter_id = rte_cpu_to_le_64(filter->fw_em_filter_id);
@@ -3442,8 +3742,8 @@ int bnxt_hwrm_clear_em_filter(struct bnxt *bp, struct bnxt_filter_info *filter)
 	HWRM_CHECK_RESULT();
 	HWRM_UNLOCK();
 
-	filter->fw_em_filter_id = -1;
-	filter->fw_l2_filter_id = -1;
+	filter->fw_em_filter_id = UINT64_MAX;
+	filter->fw_l2_filter_id = UINT64_MAX;
 
 	return 0;
 }
@@ -3554,8 +3854,86 @@ int bnxt_hwrm_clear_ntuple_filter(struct bnxt *bp,
 	HWRM_CHECK_RESULT();
 	HWRM_UNLOCK();
 
-	filter->fw_ntuple_filter_id = -1;
-	filter->fw_l2_filter_id = -1;
+	filter->fw_ntuple_filter_id = UINT64_MAX;
+
+	return 0;
+}
+
+int bnxt_vnic_rss_configure(struct bnxt *bp, struct bnxt_vnic_info *vnic)
+{
+	unsigned int rss_idx, fw_idx, i;
+
+	if (vnic->rss_table && vnic->hash_type) {
+		/*
+		 * Fill the RSS hash & redirection table with
+		 * ring group ids for all VNICs
+		 */
+		for (rss_idx = 0, fw_idx = 0; rss_idx < HW_HASH_INDEX_SIZE;
+			rss_idx++, fw_idx++) {
+			for (i = 0; i < bp->rx_cp_nr_rings; i++) {
+				fw_idx %= bp->rx_cp_nr_rings;
+				if (vnic->fw_grp_ids[fw_idx] !=
+				    INVALID_HW_RING_ID)
+					break;
+				fw_idx++;
+			}
+			if (i == bp->rx_cp_nr_rings)
+				return 0;
+			vnic->rss_table[rss_idx] =
+				vnic->fw_grp_ids[fw_idx];
+		}
+		return bnxt_hwrm_vnic_rss_cfg(bp, vnic);
+	}
+	return 0;
+}
+
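bnxt_vnic_rss_configure fills the HW_HASH_INDEX_SIZE-entry redirection table round-robin from the VNIC's ring-group array, skipping invalid groups and bailing out when no valid group remains. A standalone sketch of the same fill; the table size and INVALID_ID value are illustrative stand-ins:

#include <stdint.h>

#define HASH_TABLE_SIZE 128 /* stand-in for HW_HASH_INDEX_SIZE */
#define INVALID_ID 0xffff

/* Round-robin the valid group ids across the redirection table;
 * returns 0 when no valid group exists, mirroring the early
 * return in the hunk above. */
static int fill_rss_table(uint16_t *table, const uint16_t *grp,
			  unsigned int ngrp)
{
	unsigned int rss_idx, fw_idx = 0, i;

	for (rss_idx = 0; rss_idx < HASH_TABLE_SIZE; rss_idx++, fw_idx++) {
		for (i = 0; i < ngrp; i++) {
			fw_idx %= ngrp;
			if (grp[fw_idx] != INVALID_ID)
				break;
			fw_idx++;
		}
		if (i == ngrp)
			return 0;
		table[rss_idx] = grp[fw_idx];
	}
	return 1;
}

int main(void)
{
	uint16_t table[HASH_TABLE_SIZE];
	uint16_t grp[3] = { 7, INVALID_ID, 9 };

	/* valid groups 7 and 9 alternate across the table */
	return !fill_rss_table(table, grp, 3); /* 0 on success */
}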
+static void bnxt_hwrm_set_coal_params(struct bnxt_coal *hw_coal,
+	struct hwrm_ring_cmpl_ring_cfg_aggint_params_input *req)
+{
+	uint16_t flags;
+
+	req->num_cmpl_aggr_int = rte_cpu_to_le_16(hw_coal->num_cmpl_aggr_int);
+
+	/* This is a 6-bit value and must not be 0, or we'll get non stop IRQ */
+	req->num_cmpl_dma_aggr = rte_cpu_to_le_16(hw_coal->num_cmpl_dma_aggr);
+
+	/* This is a 6-bit value and must not be 0, or we'll get non stop IRQ */
+	req->num_cmpl_dma_aggr_during_int =
+		rte_cpu_to_le_16(hw_coal->num_cmpl_dma_aggr_during_int);
+
+	req->int_lat_tmr_max = rte_cpu_to_le_16(hw_coal->int_lat_tmr_max);
+
+	/* min timer set to 1/2 of interrupt timer */
+	req->int_lat_tmr_min = rte_cpu_to_le_16(hw_coal->int_lat_tmr_min);
+
+	/* buf timer set to 1/4 of interrupt timer */
+	req->cmpl_aggr_dma_tmr = rte_cpu_to_le_16(hw_coal->cmpl_aggr_dma_tmr);
+
+	req->cmpl_aggr_dma_tmr_during_int =
+		rte_cpu_to_le_16(hw_coal->cmpl_aggr_dma_tmr_during_int);
+
+	flags = HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS_INPUT_FLAGS_TIMER_RESET |
+		HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS_INPUT_FLAGS_RING_IDLE;
+	req->flags = rte_cpu_to_le_16(flags);
+}
+
+int bnxt_hwrm_set_ring_coal(struct bnxt *bp,
+			struct bnxt_coal *coal, uint16_t ring_id)
+{
+	struct hwrm_ring_cmpl_ring_cfg_aggint_params_input req = {0};
+	struct hwrm_ring_cmpl_ring_cfg_aggint_params_output *resp =
+						bp->hwrm_cmd_resp_addr;
+	int rc;
+
+	/* Set ring coalesce parameters only for Stratus 100G NIC */
+	if (!bnxt_stratus_device(bp))
+		return 0;
+
+	HWRM_PREP(req, RING_CMPL_RING_CFG_AGGINT_PARAMS);
+	bnxt_hwrm_set_coal_params(coal, &req);
+	req.ring_id = rte_cpu_to_le_16(ring_id);
+	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+	HWRM_CHECK_RESULT();
+	HWRM_UNLOCK();
 
 	return 0;
 }
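The comments in bnxt_hwrm_set_coal_params pin the minimum latency timer to half the interrupt timer and the DMA buffer timer to a quarter of it. A hedged sketch of that derivation from a single budget; the usec_to_ticks() helper and the struct layout are hypothetical, since the driver computes the actual tick values elsewhere:

#include <stdint.h>

/* Hypothetical conversion; the driver derives ticks from the
 * device clock elsewhere. */
static uint16_t usec_to_ticks(uint16_t us) { return us; }

struct coal_params {
	uint16_t int_lat_tmr_max;
	uint16_t int_lat_tmr_min;
	uint16_t cmpl_aggr_dma_tmr;
};

/* Derive the dependent timers from one interrupt-latency budget,
 * following the 1/2 and 1/4 ratios noted in the hunk above. */
static void derive_coal(struct coal_params *p, uint16_t budget_us)
{
	p->int_lat_tmr_max = usec_to_ticks(budget_us);
	p->int_lat_tmr_min = p->int_lat_tmr_max / 2;   /* half */
	p->cmpl_aggr_dma_tmr = p->int_lat_tmr_max / 4; /* quarter */
}

int main(void)
{
	struct coal_params p;

	derive_coal(&p, 100);
	return !(p.int_lat_tmr_min == 50 && p.cmpl_aggr_dma_tmr == 25);
}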