diff --git a/drivers/net/bnxt/bnxt_hwrm.c b/drivers/net/bnxt/bnxt_hwrm.c
index e230b468ee..c7a6157d91 100644
--- a/drivers/net/bnxt/bnxt_hwrm.c
+++ b/drivers/net/bnxt/bnxt_hwrm.c
@@ -1,34 +1,6 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) Broadcom Limited.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Broadcom Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2014-2018 Broadcom
+ * All rights reserved.
 */

 #include <unistd.h>

@@ -54,7 +26,8 @@

 #include <rte_io.h>

-#define HWRM_CMD_TIMEOUT		2000
+#define HWRM_CMD_TIMEOUT		10000
+#define HWRM_VERSION_1_9_1		0x10901

 struct bnxt_plcmodes_cfg {
 	uint32_t	flags;
@@ -79,7 +52,7 @@ static int page_getenum(size_t size)
 		return 22;
 	if (size <= 1 << 30)
 		return 30;
-	RTE_LOG(ERR, PMD, "Page size %zu out of range\n", size);
+	PMD_DRV_LOG(ERR, "Page size %zu out of range\n", size);
 	return sizeof(void *) * 8 - 1;
 }

@@ -95,7 +68,7 @@ static int page_roundup(size_t size)

  * command was failed by the ChiMP.
  */

-static int bnxt_hwrm_send_message_locked(struct bnxt *bp, void *msg,
+static int bnxt_hwrm_send_message(struct bnxt *bp, void *msg,
 					uint32_t msg_len)
 {
 	unsigned int i;
@@ -161,7 +134,7 @@ static int bnxt_hwrm_send_message_locked(struct bnxt *bp, void *msg,
 	}

 	if (i >= HWRM_CMD_TIMEOUT) {
-		RTE_LOG(ERR, PMD, "Error sending msg 0x%04x\n",
+		PMD_DRV_LOG(ERR, "Error sending msg 0x%04x\n",
 			req->req_type);
 		goto err_ret;
 	}
@@ -171,52 +144,54 @@ err_ret:
 	return -1;
 }

-static int bnxt_hwrm_send_message(struct bnxt *bp, void *msg, uint32_t msg_len)
-{
-	int rc;
-
-	rte_spinlock_lock(&bp->hwrm_lock);
-	rc = bnxt_hwrm_send_message_locked(bp, msg, msg_len);
-	rte_spinlock_unlock(&bp->hwrm_lock);
-	return rc;
-}
-
-#define HWRM_PREP(req, type, cr, resp) \
+/*
+ * HWRM_PREP() should be used to prepare *ALL* HWRM commands. 
It grabs the
+ * spinlock, and does initial processing.
+ *
+ * HWRM_CHECK_RESULT() returns errors on failure and releases the spinlock
+ * only on that early-return path. If the regular int return codes are not
+ * used by the function, HWRM_CHECK_RESULT() should not be used directly;
+ * rather, it should be copied and modified to suit the function.
+ *
+ * HWRM_UNLOCK() must be called after all response processing is completed.
+ */
+#define HWRM_PREP(req, type) do { \
+	rte_spinlock_lock(&bp->hwrm_lock); \
 	memset(bp->hwrm_cmd_resp_addr, 0, bp->max_resp_len); \
 	req.req_type = rte_cpu_to_le_16(HWRM_##type); \
-	req.cmpl_ring = rte_cpu_to_le_16(cr); \
+	req.cmpl_ring = rte_cpu_to_le_16(-1); \
 	req.seq_id = rte_cpu_to_le_16(bp->hwrm_cmd_seq++); \
 	req.target_id = rte_cpu_to_le_16(0xffff); \
-	req.resp_addr = rte_cpu_to_le_64(bp->hwrm_cmd_resp_dma_addr)
-
-#define HWRM_CHECK_RESULT \
-	{ \
-		if (rc) { \
-			RTE_LOG(ERR, PMD, "%s failed rc:%d\n", \
-				__func__, rc); \
-			return rc; \
-		} \
-		if (resp->error_code) { \
-			rc = rte_le_to_cpu_16(resp->error_code); \
-			if (resp->resp_len >= 16) { \
-				struct hwrm_err_output *tmp_hwrm_err_op = \
-							(void *)resp; \
-				RTE_LOG(ERR, PMD, \
-					"%s error %d:%d:%08x:%04x\n", \
-					__func__, \
-					rc, tmp_hwrm_err_op->cmd_err, \
-					rte_le_to_cpu_32(\
-						tmp_hwrm_err_op->opaque_0), \
-					rte_le_to_cpu_16(\
-						tmp_hwrm_err_op->opaque_1)); \
-			} \
-			else { \
-				RTE_LOG(ERR, PMD, \
-					"%s error %d\n", __func__, rc); \
-			} \
-			return rc; \
+	req.resp_addr = rte_cpu_to_le_64(bp->hwrm_cmd_resp_dma_addr); \
+} while (0)
+
+#define HWRM_CHECK_RESULT() do {\
+	if (rc) { \
+		PMD_DRV_LOG(ERR, "failed rc:%d\n", rc); \
+		rte_spinlock_unlock(&bp->hwrm_lock); \
+		return rc; \
+	} \
+	if (resp->error_code) { \
+		rc = rte_le_to_cpu_16(resp->error_code); \
+		if (resp->resp_len >= 16) { \
+			struct hwrm_err_output *tmp_hwrm_err_op = \
+						(void *)resp; \
+			PMD_DRV_LOG(ERR, \
+				"error %d:%d:%08x:%04x\n", \
+				rc, tmp_hwrm_err_op->cmd_err, \
+				rte_le_to_cpu_32(\
+					tmp_hwrm_err_op->opaque_0), \
+				rte_le_to_cpu_16(\
+					tmp_hwrm_err_op->opaque_1)); \
+		} else { \
+			PMD_DRV_LOG(ERR, "error %d\n", rc); \
 		} \
-	}
+		rte_spinlock_unlock(&bp->hwrm_lock); \
+		return rc; \
+	} \
+} while (0)
+
+#define HWRM_UNLOCK()		rte_spinlock_unlock(&bp->hwrm_lock)

 int bnxt_hwrm_cfa_l2_clear_rx_mask(struct bnxt *bp, struct bnxt_vnic_info *vnic)
 {
@@ -224,13 +199,14 @@ int bnxt_hwrm_cfa_l2_clear_rx_mask(struct bnxt *bp, struct bnxt_vnic_info *vnic)
 	struct hwrm_cfa_l2_set_rx_mask_input req = {.req_type = 0 };
 	struct hwrm_cfa_l2_set_rx_mask_output *resp = bp->hwrm_cmd_resp_addr;

-	HWRM_PREP(req, CFA_L2_SET_RX_MASK, -1, resp);
+	HWRM_PREP(req, CFA_L2_SET_RX_MASK);
 	req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
 	req.mask = 0;

 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

-	HWRM_CHECK_RESULT;
+	HWRM_CHECK_RESULT();
+	HWRM_UNLOCK();

 	return rc;
 }
@@ -245,14 +221,14 @@ int bnxt_hwrm_cfa_l2_set_rx_mask(struct bnxt *bp,
 	struct hwrm_cfa_l2_set_rx_mask_output *resp = bp->hwrm_cmd_resp_addr;
 	uint32_t mask = 0;

-	HWRM_PREP(req, CFA_L2_SET_RX_MASK, -1, resp);
+	HWRM_PREP(req, CFA_L2_SET_RX_MASK);
 	req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);

 	/* FIXME add multicast flag, when multicast adding options are supported
 	 * by ethtool. 
*/ if (vnic->flags & BNXT_VNIC_INFO_BCAST) - mask = HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_BCAST; + mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_BCAST; if (vnic->flags & BNXT_VNIC_INFO_UNTAGGED) mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_VLAN_NONVLAN; if (vnic->flags & BNXT_VNIC_INFO_PROMISC) @@ -266,18 +242,19 @@ int bnxt_hwrm_cfa_l2_set_rx_mask(struct bnxt *bp, req.num_mc_entries = rte_cpu_to_le_32(vnic->mc_addr_cnt); req.mc_tbl_addr = rte_cpu_to_le_64(vnic->mc_list_dma_addr); } - if (vlan_count && vlan_table) { - mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_VLANONLY; - req.vlan_tag_tbl_addr = rte_cpu_to_le_16( - rte_mem_virt2phy(vlan_table)); + if (vlan_table) { + if (!(mask & HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_VLAN_NONVLAN)) + mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_VLANONLY; + req.vlan_tag_tbl_addr = rte_cpu_to_le_64( + rte_mem_virt2iova(vlan_table)); req.num_vlan_tags = rte_cpu_to_le_32((uint32_t)vlan_count); } - req.mask = rte_cpu_to_le_32(HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_BCAST | - mask); + req.mask = rte_cpu_to_le_32(mask); rc = bnxt_hwrm_send_message(bp, &req, sizeof(req)); - HWRM_CHECK_RESULT; + HWRM_CHECK_RESULT(); + HWRM_UNLOCK(); return rc; } @@ -306,21 +283,22 @@ int bnxt_hwrm_cfa_vlan_antispoof_cfg(struct bnxt *bp, uint16_t fid, return 0; } } - HWRM_PREP(req, CFA_VLAN_ANTISPOOF_CFG, -1, resp); + HWRM_PREP(req, CFA_VLAN_ANTISPOOF_CFG); req.fid = rte_cpu_to_le_16(fid); req.vlan_tag_mask_tbl_addr = - rte_cpu_to_le_64(rte_mem_virt2phy(vlan_table)); + rte_cpu_to_le_64(rte_mem_virt2iova(vlan_table)); req.num_vlan_entries = rte_cpu_to_le_32((uint32_t)vlan_count); rc = bnxt_hwrm_send_message(bp, &req, sizeof(req)); - HWRM_CHECK_RESULT; + HWRM_CHECK_RESULT(); + HWRM_UNLOCK(); return rc; } -int bnxt_hwrm_clear_filter(struct bnxt *bp, +int bnxt_hwrm_clear_l2_filter(struct bnxt *bp, struct bnxt_filter_info *filter) { int rc = 0; @@ -330,29 +308,50 @@ int bnxt_hwrm_clear_filter(struct bnxt *bp, if (filter->fw_l2_filter_id == UINT64_MAX) return 0; - HWRM_PREP(req, CFA_L2_FILTER_FREE, -1, resp); + HWRM_PREP(req, CFA_L2_FILTER_FREE); req.l2_filter_id = rte_cpu_to_le_64(filter->fw_l2_filter_id); rc = bnxt_hwrm_send_message(bp, &req, sizeof(req)); - HWRM_CHECK_RESULT; + HWRM_CHECK_RESULT(); + HWRM_UNLOCK(); filter->fw_l2_filter_id = -1; return 0; } -int bnxt_hwrm_set_filter(struct bnxt *bp, +int bnxt_hwrm_set_l2_filter(struct bnxt *bp, uint16_t dst_id, struct bnxt_filter_info *filter) { int rc = 0; struct hwrm_cfa_l2_filter_alloc_input req = {.req_type = 0 }; struct hwrm_cfa_l2_filter_alloc_output *resp = bp->hwrm_cmd_resp_addr; + struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf; + const struct rte_eth_vmdq_rx_conf *conf = + &dev_conf->rx_adv_conf.vmdq_rx_conf; uint32_t enables = 0; + uint16_t j = dst_id - 1; - HWRM_PREP(req, CFA_L2_FILTER_ALLOC, -1, resp); + //TODO: Is there a better way to add VLANs to each VNIC in case of VMDQ + if ((dev_conf->rxmode.mq_mode & ETH_MQ_RX_VMDQ_FLAG) && + conf->pool_map[j].pools & (1UL << j)) { + PMD_DRV_LOG(DEBUG, + "Add vlan %u to vmdq pool %u\n", + conf->pool_map[j].vlan_id, j); + + filter->l2_ivlan = conf->pool_map[j].vlan_id; + filter->enables |= + HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_IVLAN | + HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_IVLAN_MASK; + } + + if (filter->fw_l2_filter_id != UINT64_MAX) + bnxt_hwrm_clear_l2_filter(bp, filter); + + HWRM_PREP(req, CFA_L2_FILTER_ALLOC); req.flags = rte_cpu_to_le_32(filter->flags); @@ -371,9 +370,15 @@ int bnxt_hwrm_set_filter(struct bnxt *bp, if (enables & 
HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_OVLAN) req.l2_ovlan = filter->l2_ovlan; + if (enables & + HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_IVLAN) + req.l2_ovlan = filter->l2_ivlan; if (enables & HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_OVLAN_MASK) req.l2_ovlan_mask = filter->l2_ovlan_mask; + if (enables & + HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_IVLAN_MASK) + req.l2_ovlan_mask = filter->l2_ivlan_mask; if (enables & HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_SRC_ID) req.src_id = rte_cpu_to_le_32(filter->src_id); if (enables & HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_SRC_TYPE) @@ -383,30 +388,115 @@ int bnxt_hwrm_set_filter(struct bnxt *bp, rc = bnxt_hwrm_send_message(bp, &req, sizeof(req)); - HWRM_CHECK_RESULT; + HWRM_CHECK_RESULT(); filter->fw_l2_filter_id = rte_le_to_cpu_64(resp->l2_filter_id); + HWRM_UNLOCK(); + + return rc; +} + +int bnxt_hwrm_ptp_cfg(struct bnxt *bp) +{ + struct hwrm_port_mac_cfg_input req = {.req_type = 0}; + struct bnxt_ptp_cfg *ptp = bp->ptp_cfg; + uint32_t flags = 0; + int rc; + + if (!ptp) + return 0; + + HWRM_PREP(req, PORT_MAC_CFG); + + if (ptp->rx_filter) + flags |= PORT_MAC_CFG_REQ_FLAGS_PTP_RX_TS_CAPTURE_ENABLE; + else + flags |= PORT_MAC_CFG_REQ_FLAGS_PTP_RX_TS_CAPTURE_DISABLE; + if (ptp->tx_tstamp_en) + flags |= PORT_MAC_CFG_REQ_FLAGS_PTP_TX_TS_CAPTURE_ENABLE; + else + flags |= PORT_MAC_CFG_REQ_FLAGS_PTP_TX_TS_CAPTURE_DISABLE; + req.flags = rte_cpu_to_le_32(flags); + req.enables = + rte_cpu_to_le_32(PORT_MAC_CFG_REQ_ENABLES_RX_TS_CAPTURE_PTP_MSG_TYPE); + req.rx_ts_capture_ptp_msg_type = rte_cpu_to_le_16(ptp->rxctl); + + rc = bnxt_hwrm_send_message(bp, &req, sizeof(req)); + HWRM_UNLOCK(); return rc; } +static int bnxt_hwrm_ptp_qcfg(struct bnxt *bp) +{ + int rc = 0; + struct hwrm_port_mac_ptp_qcfg_input req = {.req_type = 0}; + struct hwrm_port_mac_ptp_qcfg_output *resp = bp->hwrm_cmd_resp_addr; + struct bnxt_ptp_cfg *ptp = bp->ptp_cfg; + +/* if (bp->hwrm_spec_code < 0x10801 || ptp) TBD */ + if (ptp) + return 0; + + HWRM_PREP(req, PORT_MAC_PTP_QCFG); + + req.port_id = rte_cpu_to_le_16(bp->pf.port_id); + + rc = bnxt_hwrm_send_message(bp, &req, sizeof(req)); + + HWRM_CHECK_RESULT(); + + if (!(resp->flags & PORT_MAC_PTP_QCFG_RESP_FLAGS_DIRECT_ACCESS)) + return 0; + + ptp = rte_zmalloc("ptp_cfg", sizeof(*ptp), 0); + if (!ptp) + return -ENOMEM; + + ptp->rx_regs[BNXT_PTP_RX_TS_L] = + rte_le_to_cpu_32(resp->rx_ts_reg_off_lower); + ptp->rx_regs[BNXT_PTP_RX_TS_H] = + rte_le_to_cpu_32(resp->rx_ts_reg_off_upper); + ptp->rx_regs[BNXT_PTP_RX_SEQ] = + rte_le_to_cpu_32(resp->rx_ts_reg_off_seq_id); + ptp->rx_regs[BNXT_PTP_RX_FIFO] = + rte_le_to_cpu_32(resp->rx_ts_reg_off_fifo); + ptp->rx_regs[BNXT_PTP_RX_FIFO_ADV] = + rte_le_to_cpu_32(resp->rx_ts_reg_off_fifo_adv); + ptp->tx_regs[BNXT_PTP_TX_TS_L] = + rte_le_to_cpu_32(resp->tx_ts_reg_off_lower); + ptp->tx_regs[BNXT_PTP_TX_TS_H] = + rte_le_to_cpu_32(resp->tx_ts_reg_off_upper); + ptp->tx_regs[BNXT_PTP_TX_SEQ] = + rte_le_to_cpu_32(resp->tx_ts_reg_off_seq_id); + ptp->tx_regs[BNXT_PTP_TX_FIFO] = + rte_le_to_cpu_32(resp->tx_ts_reg_off_fifo); + + ptp->bp = bp; + bp->ptp_cfg = ptp; + + return 0; +} + int bnxt_hwrm_func_qcaps(struct bnxt *bp) { int rc = 0; struct hwrm_func_qcaps_input req = {.req_type = 0 }; struct hwrm_func_qcaps_output *resp = bp->hwrm_cmd_resp_addr; uint16_t new_max_vfs; + uint32_t flags; int i; - HWRM_PREP(req, FUNC_QCAPS, -1, resp); + HWRM_PREP(req, FUNC_QCAPS); req.fid = rte_cpu_to_le_16(0xffff); rc = bnxt_hwrm_send_message(bp, &req, sizeof(req)); - HWRM_CHECK_RESULT; + HWRM_CHECK_RESULT(); bp->max_ring_grps = 
rte_le_to_cpu_32(resp->max_hw_ring_grps); + flags = rte_le_to_cpu_32(resp->flags); if (BNXT_PF(bp)) { bp->pf.port_id = resp->port_id; bp->pf.first_vf_id = rte_le_to_cpu_16(resp->first_vf_id); @@ -424,7 +514,7 @@ int bnxt_hwrm_func_qcaps(struct bnxt *bp) getpagesize(), getpagesize()); if (bp->pf.vf_info[i].vlan_table == NULL) - RTE_LOG(ERR, PMD, + PMD_DRV_LOG(ERR, "Fail to alloc VLAN table for VF %d\n", i); else @@ -435,7 +525,7 @@ int bnxt_hwrm_func_qcaps(struct bnxt *bp) getpagesize(), getpagesize()); if (bp->pf.vf_info[i].vlan_as_table == NULL) - RTE_LOG(ERR, PMD, + PMD_DRV_LOG(ERR, "Alloc VLAN AS table for VF %d fail\n", i); else @@ -463,8 +553,17 @@ int bnxt_hwrm_func_qcaps(struct bnxt *bp) bp->max_vnics = 1; } bp->max_stat_ctx = rte_le_to_cpu_16(resp->max_stat_ctx); - if (BNXT_PF(bp)) + if (BNXT_PF(bp)) { bp->pf.total_vnics = rte_le_to_cpu_16(resp->max_vnics); + if (flags & HWRM_FUNC_QCAPS_OUTPUT_FLAGS_PTP_SUPPORTED) { + bp->flags |= BNXT_FLAG_PTP_SUPPORTED; + PMD_DRV_LOG(INFO, "PTP SUPPORTED\n"); + HWRM_UNLOCK(); + bnxt_hwrm_ptp_qcfg(bp); + } + } + + HWRM_UNLOCK(); return rc; } @@ -475,13 +574,14 @@ int bnxt_hwrm_func_reset(struct bnxt *bp) struct hwrm_func_reset_input req = {.req_type = 0 }; struct hwrm_func_reset_output *resp = bp->hwrm_cmd_resp_addr; - HWRM_PREP(req, FUNC_RESET, -1, resp); + HWRM_PREP(req, FUNC_RESET); req.enables = rte_cpu_to_le_32(0); rc = bnxt_hwrm_send_message(bp, &req, sizeof(req)); - HWRM_CHECK_RESULT; + HWRM_CHECK_RESULT(); + HWRM_UNLOCK(); return rc; } @@ -495,7 +595,7 @@ int bnxt_hwrm_func_driver_register(struct bnxt *bp) if (bp->flags & BNXT_FLAG_REGISTERED) return 0; - HWRM_PREP(req, FUNC_DRV_RGTR, -1, resp); + HWRM_PREP(req, FUNC_DRV_RGTR); req.enables = rte_cpu_to_le_32(HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_VER | HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_ASYNC_EVENT_FWD); req.ver_maj = RTE_VER_YEAR; @@ -510,12 +610,18 @@ int bnxt_hwrm_func_driver_register(struct bnxt *bp) sizeof(bp->pf.vf_req_fwd))); } - req.async_event_fwd[0] |= rte_cpu_to_le_32(0x1); /* TODO: Use MACRO */ - memset(req.async_event_fwd, 0xff, sizeof(req.async_event_fwd)); + req.async_event_fwd[0] |= + rte_cpu_to_le_32(ASYNC_CMPL_EVENT_ID_LINK_STATUS_CHANGE | + ASYNC_CMPL_EVENT_ID_PORT_CONN_NOT_ALLOWED | + ASYNC_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE); + req.async_event_fwd[1] |= + rte_cpu_to_le_32(ASYNC_CMPL_EVENT_ID_PF_DRVR_UNLOAD | + ASYNC_CMPL_EVENT_ID_VF_CFG_CHANGE); rc = bnxt_hwrm_send_message(bp, &req, sizeof(req)); - HWRM_CHECK_RESULT; + HWRM_CHECK_RESULT(); + HWRM_UNLOCK(); bp->flags |= BNXT_FLAG_REGISTERED; @@ -534,27 +640,23 @@ int bnxt_hwrm_ver_get(struct bnxt *bp) uint32_t dev_caps_cfg; bp->max_req_len = HWRM_MAX_REQ_LEN; - HWRM_PREP(req, VER_GET, -1, resp); + HWRM_PREP(req, VER_GET); req.hwrm_intf_maj = HWRM_VERSION_MAJOR; req.hwrm_intf_min = HWRM_VERSION_MINOR; req.hwrm_intf_upd = HWRM_VERSION_UPDATE; - /* - * Hold the lock since we may be adjusting the response pointers. 
-	 */
-	rte_spinlock_lock(&bp->hwrm_lock);
-	rc = bnxt_hwrm_send_message_locked(bp, &req, sizeof(req));
+	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

-	HWRM_CHECK_RESULT;
+	HWRM_CHECK_RESULT();

-	RTE_LOG(INFO, PMD, "%d.%d.%d:%d.%d.%d\n",
+	PMD_DRV_LOG(INFO, "%d.%d.%d:%d.%d.%d\n",
 		resp->hwrm_intf_maj, resp->hwrm_intf_min,
 		resp->hwrm_intf_upd,
 		resp->hwrm_fw_maj, resp->hwrm_fw_min, resp->hwrm_fw_bld);
 	bp->fw_ver = (resp->hwrm_fw_maj << 24) | (resp->hwrm_fw_min << 16) |
 			(resp->hwrm_fw_bld << 8) | resp->hwrm_fw_rsvd;
-	RTE_LOG(INFO, PMD, "Driver HWRM version: %d.%d.%d\n",
+	PMD_DRV_LOG(INFO, "Driver HWRM version: %d.%d.%d\n",
 		HWRM_VERSION_MAJOR, HWRM_VERSION_MINOR, HWRM_VERSION_UPDATE);

 	my_version = HWRM_VERSION_MAJOR << 16;
@@ -564,30 +666,31 @@ int bnxt_hwrm_ver_get(struct bnxt *bp)
 	fw_version = resp->hwrm_intf_maj << 16;
 	fw_version |= resp->hwrm_intf_min << 8;
 	fw_version |= resp->hwrm_intf_upd;
+	bp->hwrm_spec_code = fw_version;

 	if (resp->hwrm_intf_maj != HWRM_VERSION_MAJOR) {
-		RTE_LOG(ERR, PMD, "Unsupported firmware API version\n");
+		PMD_DRV_LOG(ERR, "Unsupported firmware API version\n");
 		rc = -EINVAL;
 		goto error;
 	}

 	if (my_version != fw_version) {
-		RTE_LOG(INFO, PMD, "BNXT Driver/HWRM API mismatch.\n");
+		PMD_DRV_LOG(INFO, "BNXT Driver/HWRM API mismatch.\n");
 		if (my_version < fw_version) {
-			RTE_LOG(INFO, PMD,
+			PMD_DRV_LOG(INFO,
 				"Firmware API version is newer than driver.\n");
-			RTE_LOG(INFO, PMD,
+			PMD_DRV_LOG(INFO,
 				"The driver may be missing features.\n");
 		} else {
-			RTE_LOG(INFO, PMD,
+			PMD_DRV_LOG(INFO,
 				"Firmware API version is older than driver.\n");
-			RTE_LOG(INFO, PMD,
+			PMD_DRV_LOG(INFO,
 				"Not all driver features may be functional.\n");
 		}
 	}

 	if (bp->max_req_len > resp->max_req_win_len) {
-		RTE_LOG(ERR, PMD, "Unsupported request length\n");
+		PMD_DRV_LOG(ERR, "Unsupported request length\n");
 		rc = -EINVAL;
 	}
 	bp->max_req_len = rte_le_to_cpu_16(resp->max_req_win_len);
@@ -608,9 +711,9 @@ int bnxt_hwrm_ver_get(struct bnxt *bp)
 	}
 	rte_mem_lock_page(bp->hwrm_cmd_resp_addr);
 	bp->hwrm_cmd_resp_dma_addr =
-		rte_mem_virt2phy(bp->hwrm_cmd_resp_addr);
+		rte_mem_virt2iova(bp->hwrm_cmd_resp_addr);
 	if (bp->hwrm_cmd_resp_dma_addr == 0) {
-		RTE_LOG(ERR, PMD,
+		PMD_DRV_LOG(ERR,
 			"Unable to map response buffer to physical memory.\n");
 		rc = -ENOMEM;
 		goto error;
@@ -622,7 +725,7 @@ int bnxt_hwrm_ver_get(struct bnxt *bp)
 	     HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_SHORT_CMD_SUPPORTED) &&
 	    (dev_caps_cfg &
 	     HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_SHORT_CMD_REQUIRED)) {
-		RTE_LOG(DEBUG, PMD, "Short command supported\n");
+		PMD_DRV_LOG(DEBUG, "Short command supported\n");

 		rte_free(bp->hwrm_short_cmd_req_addr);

@@ -634,10 +737,10 @@ int bnxt_hwrm_ver_get(struct bnxt *bp)
 		}
 		rte_mem_lock_page(bp->hwrm_short_cmd_req_addr);
 		bp->hwrm_short_cmd_req_dma_addr =
-			rte_mem_virt2phy(bp->hwrm_short_cmd_req_addr);
+			rte_mem_virt2iova(bp->hwrm_short_cmd_req_addr);
 		if (bp->hwrm_short_cmd_req_dma_addr == 0) {
 			rte_free(bp->hwrm_short_cmd_req_addr);
-			RTE_LOG(ERR, PMD,
+			PMD_DRV_LOG(ERR,
 				"Unable to map buffer to physical memory.\n");
 			rc = -ENOMEM;
 			goto error;
@@ -647,7 +750,7 @@ int bnxt_hwrm_ver_get(struct bnxt *bp)
 	}

 error:
-	rte_spinlock_unlock(&bp->hwrm_lock);
+	HWRM_UNLOCK();
 	return rc;
 }

@@ -660,12 +763,13 @@ int bnxt_hwrm_func_driver_unregister(struct bnxt *bp, uint32_t flags)
 	if (!(bp->flags & BNXT_FLAG_REGISTERED))
 		return 0;

-	HWRM_PREP(req, FUNC_DRV_UNRGTR, -1, resp);
+	HWRM_PREP(req, FUNC_DRV_UNRGTR);
 	req.flags = flags;

 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

-	HWRM_CHECK_RESULT;
+	HWRM_CHECK_RESULT();
+	
HWRM_UNLOCK(); bp->flags &= ~BNXT_FLAG_REGISTERED; @@ -678,34 +782,39 @@ static int bnxt_hwrm_port_phy_cfg(struct bnxt *bp, struct bnxt_link_info *conf) struct hwrm_port_phy_cfg_input req = {0}; struct hwrm_port_phy_cfg_output *resp = bp->hwrm_cmd_resp_addr; uint32_t enables = 0; - uint32_t link_speed_mask = - HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_LINK_SPEED_MASK; - HWRM_PREP(req, PORT_PHY_CFG, -1, resp); + HWRM_PREP(req, PORT_PHY_CFG); if (conf->link_up) { + /* Setting Fixed Speed. But AutoNeg is ON, So disable it */ + if (bp->link_info.auto_mode && conf->link_speed) { + req.auto_mode = HWRM_PORT_PHY_CFG_INPUT_AUTO_MODE_NONE; + PMD_DRV_LOG(DEBUG, "Disabling AutoNeg\n"); + } + req.flags = rte_cpu_to_le_32(conf->phy_flags); req.force_link_speed = rte_cpu_to_le_16(conf->link_speed); + enables |= HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_MODE; /* * Note, ChiMP FW 20.2.1 and 20.2.2 return an error when we set * any auto mode, even "none". */ if (!conf->link_speed) { - req.auto_mode = conf->auto_mode; - enables |= HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_MODE; - if (conf->auto_mode == - HWRM_PORT_PHY_CFG_INPUT_AUTO_MODE_SPEED_MASK) { - req.auto_link_speed_mask = - conf->auto_link_speed_mask; - enables |= link_speed_mask; - } - if (bp->link_info.auto_link_speed) { - req.auto_link_speed = - bp->link_info.auto_link_speed; - enables |= - HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_LINK_SPEED; - } + /* No speeds specified. Enable AutoNeg - all speeds */ + req.auto_mode = + HWRM_PORT_PHY_CFG_INPUT_AUTO_MODE_ALL_SPEEDS; + } + /* AutoNeg - Advertise speeds specified. */ + if (conf->auto_link_speed_mask && + !(conf->phy_flags & HWRM_PORT_PHY_CFG_INPUT_FLAGS_FORCE)) { + req.auto_mode = + HWRM_PORT_PHY_CFG_INPUT_AUTO_MODE_SPEED_MASK; + req.auto_link_speed_mask = + conf->auto_link_speed_mask; + enables |= + HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_LINK_SPEED_MASK; } + req.auto_duplex = conf->duplex; enables |= HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_DUPLEX; req.auto_pause = conf->auto_pause; @@ -720,12 +829,13 @@ static int bnxt_hwrm_port_phy_cfg(struct bnxt *bp, struct bnxt_link_info *conf) } else { req.flags = rte_cpu_to_le_32(HWRM_PORT_PHY_CFG_INPUT_FLAGS_FORCE_LINK_DWN); - RTE_LOG(INFO, PMD, "Force Link Down\n"); + PMD_DRV_LOG(INFO, "Force Link Down\n"); } rc = bnxt_hwrm_send_message(bp, &req, sizeof(req)); - HWRM_CHECK_RESULT; + HWRM_CHECK_RESULT(); + HWRM_UNLOCK(); return rc; } @@ -737,30 +847,44 @@ static int bnxt_hwrm_port_phy_qcfg(struct bnxt *bp, struct hwrm_port_phy_qcfg_input req = {0}; struct hwrm_port_phy_qcfg_output *resp = bp->hwrm_cmd_resp_addr; - HWRM_PREP(req, PORT_PHY_QCFG, -1, resp); + HWRM_PREP(req, PORT_PHY_QCFG); rc = bnxt_hwrm_send_message(bp, &req, sizeof(req)); - HWRM_CHECK_RESULT; + HWRM_CHECK_RESULT(); link_info->phy_link_status = resp->link; link_info->link_up = (link_info->phy_link_status == HWRM_PORT_PHY_QCFG_OUTPUT_LINK_LINK) ? 
1 : 0; link_info->link_speed = rte_le_to_cpu_16(resp->link_speed); - link_info->duplex = resp->duplex; + link_info->duplex = resp->duplex_cfg; link_info->pause = resp->pause; link_info->auto_pause = resp->auto_pause; link_info->force_pause = resp->force_pause; link_info->auto_mode = resp->auto_mode; + link_info->phy_type = resp->phy_type; + link_info->media_type = resp->media_type; link_info->support_speeds = rte_le_to_cpu_16(resp->support_speeds); link_info->auto_link_speed = rte_le_to_cpu_16(resp->auto_link_speed); link_info->preemphasis = rte_le_to_cpu_32(resp->preemphasis); + link_info->force_link_speed = rte_le_to_cpu_16(resp->force_link_speed); link_info->phy_ver[0] = resp->phy_maj; link_info->phy_ver[1] = resp->phy_min; link_info->phy_ver[2] = resp->phy_bld; + HWRM_UNLOCK(); + + PMD_DRV_LOG(DEBUG, "Link Speed %d\n", link_info->link_speed); + PMD_DRV_LOG(DEBUG, "Auto Mode %d\n", link_info->auto_mode); + PMD_DRV_LOG(DEBUG, "Support Speeds %x\n", link_info->support_speeds); + PMD_DRV_LOG(DEBUG, "Auto Link Speed %x\n", link_info->auto_link_speed); + PMD_DRV_LOG(DEBUG, "Auto Link Speed Mask %x\n", + link_info->auto_link_speed_mask); + PMD_DRV_LOG(DEBUG, "Forced Link Speed %x\n", + link_info->force_link_speed); + return rc; } @@ -769,12 +893,18 @@ int bnxt_hwrm_queue_qportcfg(struct bnxt *bp) int rc = 0; struct hwrm_queue_qportcfg_input req = {.req_type = 0 }; struct hwrm_queue_qportcfg_output *resp = bp->hwrm_cmd_resp_addr; + int i; - HWRM_PREP(req, QUEUE_QPORTCFG, -1, resp); + HWRM_PREP(req, QUEUE_QPORTCFG); + req.flags = HWRM_QUEUE_QPORTCFG_INPUT_FLAGS_PATH_TX; + /* HWRM Version >= 1.9.1 */ + if (bp->hwrm_spec_code >= HWRM_VERSION_1_9_1) + req.drv_qmap_cap = + HWRM_QUEUE_QPORTCFG_INPUT_DRV_QMAP_CAP_ENABLED; rc = bnxt_hwrm_send_message(bp, &req, sizeof(req)); - HWRM_CHECK_RESULT; + HWRM_CHECK_RESULT(); #define GET_QUEUE_INFO(x) \ bp->cos_queue[x].id = resp->queue_id##x; \ @@ -789,6 +919,22 @@ int bnxt_hwrm_queue_qportcfg(struct bnxt *bp) GET_QUEUE_INFO(6); GET_QUEUE_INFO(7); + HWRM_UNLOCK(); + + if (bp->hwrm_spec_code < HWRM_VERSION_1_9_1) { + bp->tx_cosq_id = bp->cos_queue[0].id; + } else { + /* iterate and find the COSq profile to use for Tx */ + for (i = 0; i < BNXT_COS_QUEUE_COUNT; i++) { + if (bp->cos_queue[i].profile == + HWRM_QUEUE_SERVICE_PROFILE_LOSSY) { + bp->tx_cosq_id = bp->cos_queue[i].id; + break; + } + } + } + PMD_DRV_LOG(DEBUG, "Tx Cos Queue to use: %d\n", bp->tx_cosq_id); + return rc; } @@ -802,7 +948,7 @@ int bnxt_hwrm_ring_alloc(struct bnxt *bp, struct hwrm_ring_alloc_input req = {.req_type = 0 }; struct hwrm_ring_alloc_output *resp = bp->hwrm_cmd_resp_addr; - HWRM_PREP(req, RING_ALLOC, -1, resp); + HWRM_PREP(req, RING_ALLOC); req.page_tbl_addr = rte_cpu_to_le_64(ring->bd_dma); req.fbo = rte_cpu_to_le_32(0); @@ -812,7 +958,7 @@ int bnxt_hwrm_ring_alloc(struct bnxt *bp, switch (ring_type) { case HWRM_RING_ALLOC_INPUT_RING_TYPE_TX: - req.queue_id = bp->cos_queue[0].id; + req.queue_id = rte_cpu_to_le_16(bp->tx_cosq_id); /* FALLTHROUGH */ case HWRM_RING_ALLOC_INPUT_RING_TYPE_RX: req.ring_type = ring_type; @@ -831,8 +977,9 @@ int bnxt_hwrm_ring_alloc(struct bnxt *bp, req.int_mode = HWRM_RING_ALLOC_INPUT_INT_MODE_MSIX; break; default: - RTE_LOG(ERR, PMD, "hwrm alloc invalid ring type %d\n", + PMD_DRV_LOG(ERR, "hwrm alloc invalid ring type %d\n", ring_type); + HWRM_UNLOCK(); return -1; } req.enables = rte_cpu_to_le_32(enables); @@ -844,24 +991,29 @@ int bnxt_hwrm_ring_alloc(struct bnxt *bp, rc = rte_le_to_cpu_16(resp->error_code); switch (ring_type) { case 
HWRM_RING_FREE_INPUT_RING_TYPE_L2_CMPL: - RTE_LOG(ERR, PMD, + PMD_DRV_LOG(ERR, "hwrm_ring_alloc cp failed. rc:%d\n", rc); + HWRM_UNLOCK(); return rc; case HWRM_RING_FREE_INPUT_RING_TYPE_RX: - RTE_LOG(ERR, PMD, + PMD_DRV_LOG(ERR, "hwrm_ring_alloc rx failed. rc:%d\n", rc); + HWRM_UNLOCK(); return rc; case HWRM_RING_FREE_INPUT_RING_TYPE_TX: - RTE_LOG(ERR, PMD, + PMD_DRV_LOG(ERR, "hwrm_ring_alloc tx failed. rc:%d\n", rc); + HWRM_UNLOCK(); return rc; default: - RTE_LOG(ERR, PMD, "Invalid ring. rc:%d\n", rc); + PMD_DRV_LOG(ERR, "Invalid ring. rc:%d\n", rc); + HWRM_UNLOCK(); return rc; } } ring->fw_ring_id = rte_le_to_cpu_16(resp->ring_id); + HWRM_UNLOCK(); return rc; } @@ -872,7 +1024,7 @@ int bnxt_hwrm_ring_free(struct bnxt *bp, struct hwrm_ring_free_input req = {.req_type = 0 }; struct hwrm_ring_free_output *resp = bp->hwrm_cmd_resp_addr; - HWRM_PREP(req, RING_FREE, -1, resp); + HWRM_PREP(req, RING_FREE); req.ring_type = ring_type; req.ring_id = rte_cpu_to_le_16(ring->fw_ring_id); @@ -882,25 +1034,27 @@ int bnxt_hwrm_ring_free(struct bnxt *bp, if (rc || resp->error_code) { if (rc == 0 && resp->error_code) rc = rte_le_to_cpu_16(resp->error_code); + HWRM_UNLOCK(); switch (ring_type) { case HWRM_RING_FREE_INPUT_RING_TYPE_L2_CMPL: - RTE_LOG(ERR, PMD, "hwrm_ring_free cp failed. rc:%d\n", + PMD_DRV_LOG(ERR, "hwrm_ring_free cp failed. rc:%d\n", rc); return rc; case HWRM_RING_FREE_INPUT_RING_TYPE_RX: - RTE_LOG(ERR, PMD, "hwrm_ring_free rx failed. rc:%d\n", + PMD_DRV_LOG(ERR, "hwrm_ring_free rx failed. rc:%d\n", rc); return rc; case HWRM_RING_FREE_INPUT_RING_TYPE_TX: - RTE_LOG(ERR, PMD, "hwrm_ring_free tx failed. rc:%d\n", + PMD_DRV_LOG(ERR, "hwrm_ring_free tx failed. rc:%d\n", rc); return rc; default: - RTE_LOG(ERR, PMD, "Invalid ring, rc:%d\n", rc); + PMD_DRV_LOG(ERR, "Invalid ring, rc:%d\n", rc); return rc; } } + HWRM_UNLOCK(); return 0; } @@ -910,7 +1064,7 @@ int bnxt_hwrm_ring_grp_alloc(struct bnxt *bp, unsigned int idx) struct hwrm_ring_grp_alloc_input req = {.req_type = 0 }; struct hwrm_ring_grp_alloc_output *resp = bp->hwrm_cmd_resp_addr; - HWRM_PREP(req, RING_GRP_ALLOC, -1, resp); + HWRM_PREP(req, RING_GRP_ALLOC); req.cr = rte_cpu_to_le_16(bp->grp_info[idx].cp_fw_ring_id); req.rr = rte_cpu_to_le_16(bp->grp_info[idx].rx_fw_ring_id); @@ -919,11 +1073,13 @@ int bnxt_hwrm_ring_grp_alloc(struct bnxt *bp, unsigned int idx) rc = bnxt_hwrm_send_message(bp, &req, sizeof(req)); - HWRM_CHECK_RESULT; + HWRM_CHECK_RESULT(); bp->grp_info[idx].fw_grp_id = rte_le_to_cpu_16(resp->ring_group_id); + HWRM_UNLOCK(); + return rc; } @@ -933,13 +1089,14 @@ int bnxt_hwrm_ring_grp_free(struct bnxt *bp, unsigned int idx) struct hwrm_ring_grp_free_input req = {.req_type = 0 }; struct hwrm_ring_grp_free_output *resp = bp->hwrm_cmd_resp_addr; - HWRM_PREP(req, RING_GRP_FREE, -1, resp); + HWRM_PREP(req, RING_GRP_FREE); req.ring_group_id = rte_cpu_to_le_16(bp->grp_info[idx].fw_grp_id); rc = bnxt_hwrm_send_message(bp, &req, sizeof(req)); - HWRM_CHECK_RESULT; + HWRM_CHECK_RESULT(); + HWRM_UNLOCK(); bp->grp_info[idx].fw_grp_id = INVALID_HW_RING_ID; return rc; @@ -954,13 +1111,14 @@ int bnxt_hwrm_stat_clear(struct bnxt *bp, struct bnxt_cp_ring_info *cpr) if (cpr->hw_stats_ctx_id == (uint32_t)HWRM_NA_SIGNATURE) return rc; - HWRM_PREP(req, STAT_CTX_CLR_STATS, -1, resp); + HWRM_PREP(req, STAT_CTX_CLR_STATS); req.stat_ctx_id = rte_cpu_to_le_16(cpr->hw_stats_ctx_id); rc = bnxt_hwrm_send_message(bp, &req, sizeof(req)); - HWRM_CHECK_RESULT; + HWRM_CHECK_RESULT(); + HWRM_UNLOCK(); return rc; } @@ -972,7 +1130,7 @@ int 
bnxt_hwrm_stat_ctx_alloc(struct bnxt *bp, struct bnxt_cp_ring_info *cpr, struct hwrm_stat_ctx_alloc_input req = {.req_type = 0 }; struct hwrm_stat_ctx_alloc_output *resp = bp->hwrm_cmd_resp_addr; - HWRM_PREP(req, STAT_CTX_ALLOC, -1, resp); + HWRM_PREP(req, STAT_CTX_ALLOC); req.update_period_ms = rte_cpu_to_le_32(0); @@ -981,10 +1139,12 @@ int bnxt_hwrm_stat_ctx_alloc(struct bnxt *bp, struct bnxt_cp_ring_info *cpr, rc = bnxt_hwrm_send_message(bp, &req, sizeof(req)); - HWRM_CHECK_RESULT; + HWRM_CHECK_RESULT(); cpr->hw_stats_ctx_id = rte_le_to_cpu_16(resp->stat_ctx_id); + HWRM_UNLOCK(); + return rc; } @@ -995,13 +1155,14 @@ int bnxt_hwrm_stat_ctx_free(struct bnxt *bp, struct bnxt_cp_ring_info *cpr, struct hwrm_stat_ctx_free_input req = {.req_type = 0 }; struct hwrm_stat_ctx_free_output *resp = bp->hwrm_cmd_resp_addr; - HWRM_PREP(req, STAT_CTX_FREE, -1, resp); + HWRM_PREP(req, STAT_CTX_FREE); req.stat_ctx_id = rte_cpu_to_le_16(cpr->hw_stats_ctx_id); rc = bnxt_hwrm_send_message(bp, &req, sizeof(req)); - HWRM_CHECK_RESULT; + HWRM_CHECK_RESULT(); + HWRM_UNLOCK(); return rc; } @@ -1013,7 +1174,7 @@ int bnxt_hwrm_vnic_alloc(struct bnxt *bp, struct bnxt_vnic_info *vnic) struct hwrm_vnic_alloc_output *resp = bp->hwrm_cmd_resp_addr; /* map ring groups to this vnic */ - RTE_LOG(DEBUG, PMD, "Alloc VNIC. Start %x, End %x\n", + PMD_DRV_LOG(DEBUG, "Alloc VNIC. Start %x, End %x\n", vnic->start_grp_id, vnic->end_grp_id); for (i = vnic->start_grp_id, j = 0; i <= vnic->end_grp_id; i++, j++) vnic->fw_grp_ids[j] = bp->grp_info[i].fw_grp_id; @@ -1023,16 +1184,18 @@ int bnxt_hwrm_vnic_alloc(struct bnxt *bp, struct bnxt_vnic_info *vnic) vnic->lb_rule = (uint16_t)HWRM_NA_SIGNATURE; vnic->mru = bp->eth_dev->data->mtu + ETHER_HDR_LEN + ETHER_CRC_LEN + VLAN_TAG_SIZE; - HWRM_PREP(req, VNIC_ALLOC, -1, resp); + HWRM_PREP(req, VNIC_ALLOC); if (vnic->func_default) - req.flags = HWRM_VNIC_ALLOC_INPUT_FLAGS_DEFAULT; + req.flags = + rte_cpu_to_le_32(HWRM_VNIC_ALLOC_INPUT_FLAGS_DEFAULT); rc = bnxt_hwrm_send_message(bp, &req, sizeof(req)); - HWRM_CHECK_RESULT; + HWRM_CHECK_RESULT(); vnic->fw_vnic_id = rte_le_to_cpu_16(resp->vnic_id); - RTE_LOG(DEBUG, PMD, "VNIC ID %x\n", vnic->fw_vnic_id); + HWRM_UNLOCK(); + PMD_DRV_LOG(DEBUG, "VNIC ID %x\n", vnic->fw_vnic_id); return rc; } @@ -1044,13 +1207,13 @@ static int bnxt_hwrm_vnic_plcmodes_qcfg(struct bnxt *bp, struct hwrm_vnic_plcmodes_qcfg_input req = {.req_type = 0 }; struct hwrm_vnic_plcmodes_qcfg_output *resp = bp->hwrm_cmd_resp_addr; - HWRM_PREP(req, VNIC_PLCMODES_QCFG, -1, resp); + HWRM_PREP(req, VNIC_PLCMODES_QCFG); req.vnic_id = rte_cpu_to_le_32(vnic->fw_vnic_id); rc = bnxt_hwrm_send_message(bp, &req, sizeof(req)); - HWRM_CHECK_RESULT; + HWRM_CHECK_RESULT(); pmode->flags = rte_le_to_cpu_32(resp->flags); /* dflt_vnic bit doesn't exist in the _cfg command */ @@ -1059,6 +1222,8 @@ static int bnxt_hwrm_vnic_plcmodes_qcfg(struct bnxt *bp, pmode->hds_offset = rte_le_to_cpu_16(resp->hds_offset); pmode->hds_threshold = rte_le_to_cpu_16(resp->hds_threshold); + HWRM_UNLOCK(); + return rc; } @@ -1070,7 +1235,7 @@ static int bnxt_hwrm_vnic_plcmodes_cfg(struct bnxt *bp, struct hwrm_vnic_plcmodes_cfg_input req = {.req_type = 0 }; struct hwrm_vnic_plcmodes_cfg_output *resp = bp->hwrm_cmd_resp_addr; - HWRM_PREP(req, VNIC_PLCMODES_CFG, -1, resp); + HWRM_PREP(req, VNIC_PLCMODES_CFG); req.vnic_id = rte_cpu_to_le_32(vnic->fw_vnic_id); req.flags = rte_cpu_to_le_32(pmode->flags); @@ -1085,7 +1250,8 @@ static int bnxt_hwrm_vnic_plcmodes_cfg(struct bnxt *bp, rc = bnxt_hwrm_send_message(bp, &req, 
sizeof(req)); - HWRM_CHECK_RESULT; + HWRM_CHECK_RESULT(); + HWRM_UNLOCK(); return rc; } @@ -1095,11 +1261,11 @@ int bnxt_hwrm_vnic_cfg(struct bnxt *bp, struct bnxt_vnic_info *vnic) int rc = 0; struct hwrm_vnic_cfg_input req = {.req_type = 0 }; struct hwrm_vnic_cfg_output *resp = bp->hwrm_cmd_resp_addr; - uint32_t ctx_enable_flag = HWRM_VNIC_CFG_INPUT_ENABLES_RSS_RULE; + uint32_t ctx_enable_flag = 0; struct bnxt_plcmodes_cfg pmodes; if (vnic->fw_vnic_id == INVALID_HW_RING_ID) { - RTE_LOG(DEBUG, PMD, "VNIC ID %x\n", vnic->fw_vnic_id); + PMD_DRV_LOG(DEBUG, "VNIC ID %x\n", vnic->fw_vnic_id); return rc; } @@ -1107,18 +1273,19 @@ int bnxt_hwrm_vnic_cfg(struct bnxt *bp, struct bnxt_vnic_info *vnic) if (rc) return rc; - HWRM_PREP(req, VNIC_CFG, -1, resp); + HWRM_PREP(req, VNIC_CFG); /* Only RSS support for now TBD: COS & LB */ req.enables = - rte_cpu_to_le_32(HWRM_VNIC_CFG_INPUT_ENABLES_DFLT_RING_GRP | - HWRM_VNIC_CFG_INPUT_ENABLES_MRU); + rte_cpu_to_le_32(HWRM_VNIC_CFG_INPUT_ENABLES_DFLT_RING_GRP); if (vnic->lb_rule != 0xffff) - ctx_enable_flag = HWRM_VNIC_CFG_INPUT_ENABLES_LB_RULE; + ctx_enable_flag |= HWRM_VNIC_CFG_INPUT_ENABLES_LB_RULE; if (vnic->cos_rule != 0xffff) - ctx_enable_flag = HWRM_VNIC_CFG_INPUT_ENABLES_COS_RULE; - if (vnic->rss_rule != 0xffff) - ctx_enable_flag = HWRM_VNIC_CFG_INPUT_ENABLES_RSS_RULE; + ctx_enable_flag |= HWRM_VNIC_CFG_INPUT_ENABLES_COS_RULE; + if (vnic->rss_rule != 0xffff) { + ctx_enable_flag |= HWRM_VNIC_CFG_INPUT_ENABLES_MRU; + ctx_enable_flag |= HWRM_VNIC_CFG_INPUT_ENABLES_RSS_RULE; + } req.enables |= rte_cpu_to_le_32(ctx_enable_flag); req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id); req.dflt_ring_grp = rte_cpu_to_le_16(vnic->dflt_ring_grp); @@ -1147,7 +1314,8 @@ int bnxt_hwrm_vnic_cfg(struct bnxt *bp, struct bnxt_vnic_info *vnic) rc = bnxt_hwrm_send_message(bp, &req, sizeof(req)); - HWRM_CHECK_RESULT; + HWRM_CHECK_RESULT(); + HWRM_UNLOCK(); rc = bnxt_hwrm_vnic_plcmodes_cfg(bp, vnic, &pmodes); @@ -1162,10 +1330,10 @@ int bnxt_hwrm_vnic_qcfg(struct bnxt *bp, struct bnxt_vnic_info *vnic, struct hwrm_vnic_qcfg_output *resp = bp->hwrm_cmd_resp_addr; if (vnic->fw_vnic_id == INVALID_HW_RING_ID) { - RTE_LOG(DEBUG, PMD, "VNIC QCFG ID %d\n", vnic->fw_vnic_id); + PMD_DRV_LOG(DEBUG, "VNIC QCFG ID %d\n", vnic->fw_vnic_id); return rc; } - HWRM_PREP(req, VNIC_QCFG, -1, resp); + HWRM_PREP(req, VNIC_QCFG); req.enables = rte_cpu_to_le_32(HWRM_VNIC_QCFG_INPUT_ENABLES_VF_ID_VALID); @@ -1174,7 +1342,7 @@ int bnxt_hwrm_vnic_qcfg(struct bnxt *bp, struct bnxt_vnic_info *vnic, rc = bnxt_hwrm_send_message(bp, &req, sizeof(req)); - HWRM_CHECK_RESULT; + HWRM_CHECK_RESULT(); vnic->dflt_ring_grp = rte_le_to_cpu_16(resp->dflt_ring_grp); vnic->rss_rule = rte_le_to_cpu_16(resp->rss_rule); @@ -1194,6 +1362,8 @@ int bnxt_hwrm_vnic_qcfg(struct bnxt *bp, struct bnxt_vnic_info *vnic, vnic->rss_dflt_cr = rte_le_to_cpu_32(resp->flags) & HWRM_VNIC_QCFG_OUTPUT_FLAGS_RSS_DFLT_CR_MODE; + HWRM_UNLOCK(); + return rc; } @@ -1204,14 +1374,15 @@ int bnxt_hwrm_vnic_ctx_alloc(struct bnxt *bp, struct bnxt_vnic_info *vnic) struct hwrm_vnic_rss_cos_lb_ctx_alloc_output *resp = bp->hwrm_cmd_resp_addr; - HWRM_PREP(req, VNIC_RSS_COS_LB_CTX_ALLOC, -1, resp); + HWRM_PREP(req, VNIC_RSS_COS_LB_CTX_ALLOC); rc = bnxt_hwrm_send_message(bp, &req, sizeof(req)); - HWRM_CHECK_RESULT; + HWRM_CHECK_RESULT(); vnic->rss_rule = rte_le_to_cpu_16(resp->rss_cos_lb_ctx_id); - RTE_LOG(DEBUG, PMD, "VNIC RSS Rule %x\n", vnic->rss_rule); + HWRM_UNLOCK(); + PMD_DRV_LOG(DEBUG, "VNIC RSS Rule %x\n", vnic->rss_rule); return rc; } @@ -1224,16 
+1395,17 @@ int bnxt_hwrm_vnic_ctx_free(struct bnxt *bp, struct bnxt_vnic_info *vnic) bp->hwrm_cmd_resp_addr; if (vnic->rss_rule == 0xffff) { - RTE_LOG(DEBUG, PMD, "VNIC RSS Rule %x\n", vnic->rss_rule); + PMD_DRV_LOG(DEBUG, "VNIC RSS Rule %x\n", vnic->rss_rule); return rc; } - HWRM_PREP(req, VNIC_RSS_COS_LB_CTX_FREE, -1, resp); + HWRM_PREP(req, VNIC_RSS_COS_LB_CTX_FREE); req.rss_cos_lb_ctx_id = rte_cpu_to_le_16(vnic->rss_rule); rc = bnxt_hwrm_send_message(bp, &req, sizeof(req)); - HWRM_CHECK_RESULT; + HWRM_CHECK_RESULT(); + HWRM_UNLOCK(); vnic->rss_rule = INVALID_HW_RING_ID; @@ -1247,17 +1419,18 @@ int bnxt_hwrm_vnic_free(struct bnxt *bp, struct bnxt_vnic_info *vnic) struct hwrm_vnic_free_output *resp = bp->hwrm_cmd_resp_addr; if (vnic->fw_vnic_id == INVALID_HW_RING_ID) { - RTE_LOG(DEBUG, PMD, "VNIC FREE ID %x\n", vnic->fw_vnic_id); + PMD_DRV_LOG(DEBUG, "VNIC FREE ID %x\n", vnic->fw_vnic_id); return rc; } - HWRM_PREP(req, VNIC_FREE, -1, resp); + HWRM_PREP(req, VNIC_FREE); req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id); rc = bnxt_hwrm_send_message(bp, &req, sizeof(req)); - HWRM_CHECK_RESULT; + HWRM_CHECK_RESULT(); + HWRM_UNLOCK(); vnic->fw_vnic_id = INVALID_HW_RING_ID; return rc; @@ -1270,7 +1443,7 @@ int bnxt_hwrm_vnic_rss_cfg(struct bnxt *bp, struct hwrm_vnic_rss_cfg_input req = {.req_type = 0 }; struct hwrm_vnic_rss_cfg_output *resp = bp->hwrm_cmd_resp_addr; - HWRM_PREP(req, VNIC_RSS_CFG, -1, resp); + HWRM_PREP(req, VNIC_RSS_CFG); req.hash_type = rte_cpu_to_le_32(vnic->hash_type); @@ -1282,7 +1455,8 @@ int bnxt_hwrm_vnic_rss_cfg(struct bnxt *bp, rc = bnxt_hwrm_send_message(bp, &req, sizeof(req)); - HWRM_CHECK_RESULT; + HWRM_CHECK_RESULT(); + HWRM_UNLOCK(); return rc; } @@ -1295,7 +1469,7 @@ int bnxt_hwrm_vnic_plcmode_cfg(struct bnxt *bp, struct hwrm_vnic_plcmodes_cfg_output *resp = bp->hwrm_cmd_resp_addr; uint16_t size; - HWRM_PREP(req, VNIC_PLCMODES_CFG, -1, resp); + HWRM_PREP(req, VNIC_PLCMODES_CFG); req.flags = rte_cpu_to_le_32( HWRM_VNIC_PLCMODES_CFG_INPUT_FLAGS_JUMBO_PLACEMENT); @@ -1311,7 +1485,8 @@ int bnxt_hwrm_vnic_plcmode_cfg(struct bnxt *bp, rc = bnxt_hwrm_send_message(bp, &req, sizeof(req)); - HWRM_CHECK_RESULT; + HWRM_CHECK_RESULT(); + HWRM_UNLOCK(); return rc; } @@ -1323,7 +1498,7 @@ int bnxt_hwrm_vnic_tpa_cfg(struct bnxt *bp, struct hwrm_vnic_tpa_cfg_input req = {.req_type = 0 }; struct hwrm_vnic_tpa_cfg_output *resp = bp->hwrm_cmd_resp_addr; - HWRM_PREP(req, VNIC_TPA_CFG, -1, resp); + HWRM_PREP(req, VNIC_TPA_CFG); if (enable) { req.enables = rte_cpu_to_le_32( @@ -1337,16 +1512,17 @@ int bnxt_hwrm_vnic_tpa_cfg(struct bnxt *bp, HWRM_VNIC_TPA_CFG_INPUT_FLAGS_GRO | HWRM_VNIC_TPA_CFG_INPUT_FLAGS_AGG_WITH_ECN | HWRM_VNIC_TPA_CFG_INPUT_FLAGS_AGG_WITH_SAME_GRE_SEQ); - req.vnic_id = rte_cpu_to_le_32(vnic->fw_vnic_id); req.max_agg_segs = rte_cpu_to_le_16(5); req.max_aggs = rte_cpu_to_le_16(HWRM_VNIC_TPA_CFG_INPUT_MAX_AGGS_MAX); req.min_agg_len = rte_cpu_to_le_32(512); } + req.vnic_id = rte_cpu_to_le_32(vnic->fw_vnic_id); rc = bnxt_hwrm_send_message(bp, &req, sizeof(req)); - HWRM_CHECK_RESULT; + HWRM_CHECK_RESULT(); + HWRM_UNLOCK(); return rc; } @@ -1363,10 +1539,11 @@ int bnxt_hwrm_func_vf_mac(struct bnxt *bp, uint16_t vf, const uint8_t *mac_addr) memcpy(req.dflt_mac_addr, mac_addr, sizeof(req.dflt_mac_addr)); req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid); - HWRM_PREP(req, FUNC_CFG, -1, resp); + HWRM_PREP(req, FUNC_CFG); rc = bnxt_hwrm_send_message(bp, &req, sizeof(req)); - HWRM_CHECK_RESULT; + HWRM_CHECK_RESULT(); + HWRM_UNLOCK(); bp->pf.vf_info[vf].random_mac = false; @@ 
-1380,17 +1557,19 @@ int bnxt_hwrm_func_qstats_tx_drop(struct bnxt *bp, uint16_t fid, struct hwrm_func_qstats_input req = {.req_type = 0}; struct hwrm_func_qstats_output *resp = bp->hwrm_cmd_resp_addr; - HWRM_PREP(req, FUNC_QSTATS, -1, resp); + HWRM_PREP(req, FUNC_QSTATS); req.fid = rte_cpu_to_le_16(fid); rc = bnxt_hwrm_send_message(bp, &req, sizeof(req)); - HWRM_CHECK_RESULT; + HWRM_CHECK_RESULT(); if (dropped) *dropped = rte_le_to_cpu_64(resp->tx_drop_pkts); + HWRM_UNLOCK(); + return rc; } @@ -1401,13 +1580,13 @@ int bnxt_hwrm_func_qstats(struct bnxt *bp, uint16_t fid, struct hwrm_func_qstats_input req = {.req_type = 0}; struct hwrm_func_qstats_output *resp = bp->hwrm_cmd_resp_addr; - HWRM_PREP(req, FUNC_QSTATS, -1, resp); + HWRM_PREP(req, FUNC_QSTATS); req.fid = rte_cpu_to_le_16(fid); rc = bnxt_hwrm_send_message(bp, &req, sizeof(req)); - HWRM_CHECK_RESULT; + HWRM_CHECK_RESULT(); stats->ipackets = rte_le_to_cpu_64(resp->rx_ucast_pkts); stats->ipackets += rte_le_to_cpu_64(resp->rx_mcast_pkts); @@ -1428,6 +1607,8 @@ int bnxt_hwrm_func_qstats(struct bnxt *bp, uint16_t fid, stats->imissed = rte_le_to_cpu_64(resp->rx_drop_pkts); + HWRM_UNLOCK(); + return rc; } @@ -1437,13 +1618,14 @@ int bnxt_hwrm_func_clr_stats(struct bnxt *bp, uint16_t fid) struct hwrm_func_clr_stats_input req = {.req_type = 0}; struct hwrm_func_clr_stats_output *resp = bp->hwrm_cmd_resp_addr; - HWRM_PREP(req, FUNC_CLR_STATS, -1, resp); + HWRM_PREP(req, FUNC_CLR_STATS); req.fid = rte_cpu_to_le_16(fid); rc = bnxt_hwrm_send_message(bp, &req, sizeof(req)); - HWRM_CHECK_RESULT; + HWRM_CHECK_RESULT(); + HWRM_UNLOCK(); return rc; } @@ -1485,19 +1667,15 @@ int bnxt_free_all_hwrm_stat_ctxs(struct bnxt *bp) for (i = 0; i < bp->rx_cp_nr_rings + bp->tx_cp_nr_rings; i++) { - if (i >= bp->rx_cp_nr_rings) + if (i >= bp->rx_cp_nr_rings) { cpr = bp->tx_queues[i - bp->rx_cp_nr_rings]->cp_ring; - else + } else { cpr = bp->rx_queues[i]->cp_ring; + bp->grp_info[i].fw_stats_ctx = -1; + } if (cpr->hw_stats_ctx_id != HWRM_NA_SIGNATURE) { rc = bnxt_hwrm_stat_ctx_free(bp, cpr, i); cpr->hw_stats_ctx_id = HWRM_NA_SIGNATURE; - /* - * TODO. Need a better way to reset grp_info.stats_ctx - * for Rx rings only. stats_ctx is not saved for Tx - * in grp_info. 
- */ - bp->grp_info[i].fw_stats_ctx = cpr->hw_stats_ctx_id; if (rc) return rc; } @@ -1538,12 +1716,8 @@ int bnxt_free_all_hwrm_ring_grps(struct bnxt *bp) for (idx = 0; idx < bp->rx_cp_nr_rings; idx++) { - if (bp->grp_info[idx].fw_grp_id == INVALID_HW_RING_ID) { - RTE_LOG(ERR, PMD, - "Attempt to free invalid ring group %d\n", - idx); + if (bp->grp_info[idx].fw_grp_id == INVALID_HW_RING_ID) continue; - } rc = bnxt_hwrm_ring_grp_free(bp, idx); @@ -1561,7 +1735,6 @@ static void bnxt_free_cp_ring(struct bnxt *bp, struct bnxt_cp_ring_info *cpr, bnxt_hwrm_ring_free(bp, cp_ring, HWRM_RING_FREE_INPUT_RING_TYPE_L2_CMPL); cp_ring->fw_ring_id = INVALID_HW_RING_ID; - bp->grp_info[idx].cp_fw_ring_id = INVALID_HW_RING_ID; memset(cpr->cp_desc_ring, 0, cpr->cp_ring_struct->ring_size * sizeof(*cpr->cp_desc_ring)); cpr->cp_raw_cons = 0; @@ -1617,10 +1790,17 @@ int bnxt_free_all_hwrm_rings(struct bnxt *bp) rxr->rx_ring_struct->ring_size * sizeof(*rxr->rx_buf_ring)); rxr->rx_prod = 0; + } + ring = rxr->ag_ring_struct; + if (ring->fw_ring_id != INVALID_HW_RING_ID) { + bnxt_hwrm_ring_free(bp, ring, + HWRM_RING_FREE_INPUT_RING_TYPE_RX); + ring->fw_ring_id = INVALID_HW_RING_ID; memset(rxr->ag_buf_ring, 0, - rxr->ag_ring_struct->ring_size * - sizeof(*rxr->ag_buf_ring)); + rxr->ag_ring_struct->ring_size * + sizeof(*rxr->ag_buf_ring)); rxr->ag_prod = 0; + bp->grp_info[i].ag_fw_ring_id = INVALID_HW_RING_ID; } if (cpr->cp_ring_struct->fw_ring_id != INVALID_HW_RING_ID) { bnxt_free_cp_ring(bp, cpr, idx); @@ -1679,9 +1859,9 @@ int bnxt_alloc_hwrm_resources(struct bnxt *bp) if (bp->hwrm_cmd_resp_addr == NULL) return -ENOMEM; bp->hwrm_cmd_resp_dma_addr = - rte_mem_virt2phy(bp->hwrm_cmd_resp_addr); + rte_mem_virt2iova(bp->hwrm_cmd_resp_addr); if (bp->hwrm_cmd_resp_dma_addr == 0) { - RTE_LOG(ERR, PMD, + PMD_DRV_LOG(ERR, "unable to map response address to physical memory\n"); return -ENOMEM; } @@ -1696,9 +1876,39 @@ int bnxt_clear_hwrm_vnic_filters(struct bnxt *bp, struct bnxt_vnic_info *vnic) int rc = 0; STAILQ_FOREACH(filter, &vnic->filter, next) { - rc = bnxt_hwrm_clear_filter(bp, filter); - if (rc) - break; + if (filter->filter_type == HWRM_CFA_EM_FILTER) + rc = bnxt_hwrm_clear_em_filter(bp, filter); + else if (filter->filter_type == HWRM_CFA_NTUPLE_FILTER) + rc = bnxt_hwrm_clear_ntuple_filter(bp, filter); + else + rc = bnxt_hwrm_clear_l2_filter(bp, filter); + //if (rc) + //break; + } + return rc; +} + +static int +bnxt_clear_hwrm_vnic_flows(struct bnxt *bp, struct bnxt_vnic_info *vnic) +{ + struct bnxt_filter_info *filter; + struct rte_flow *flow; + int rc = 0; + + STAILQ_FOREACH(flow, &vnic->flow_list, next) { + filter = flow->filter; + PMD_DRV_LOG(ERR, "filter type %d\n", filter->filter_type); + if (filter->filter_type == HWRM_CFA_EM_FILTER) + rc = bnxt_hwrm_clear_em_filter(bp, filter); + else if (filter->filter_type == HWRM_CFA_NTUPLE_FILTER) + rc = bnxt_hwrm_clear_ntuple_filter(bp, filter); + else + rc = bnxt_hwrm_clear_l2_filter(bp, filter); + + STAILQ_REMOVE(&vnic->flow_list, flow, rte_flow, next); + rte_free(flow); + //if (rc) + //break; } return rc; } @@ -1709,7 +1919,15 @@ int bnxt_set_hwrm_vnic_filters(struct bnxt *bp, struct bnxt_vnic_info *vnic) int rc = 0; STAILQ_FOREACH(filter, &vnic->filter, next) { - rc = bnxt_hwrm_set_filter(bp, vnic->fw_vnic_id, filter); + if (filter->filter_type == HWRM_CFA_EM_FILTER) + rc = bnxt_hwrm_set_em_filter(bp, filter->dst_id, + filter); + else if (filter->filter_type == HWRM_CFA_NTUPLE_FILTER) + rc = bnxt_hwrm_set_ntuple_filter(bp, filter->dst_id, + filter); + else + rc = 
bnxt_hwrm_set_l2_filter(bp, vnic->fw_vnic_id, + filter); if (rc) break; } @@ -1730,20 +1948,20 @@ void bnxt_free_tunnel_ports(struct bnxt *bp) void bnxt_free_all_hwrm_resources(struct bnxt *bp) { - struct bnxt_vnic_info *vnic; - unsigned int i; + int i; if (bp->vnic_info == NULL) return; - vnic = &bp->vnic_info[0]; - if (BNXT_PF(bp)) - bnxt_hwrm_cfa_l2_clear_rx_mask(bp, vnic); - - /* VNIC resources */ - for (i = 0; i < bp->nr_vnics; i++) { + /* + * Cleanup VNICs in reverse order, to make sure the L2 filter + * from vnic0 is last to be cleaned up. + */ + for (i = bp->nr_vnics - 1; i >= 0; i--) { struct bnxt_vnic_info *vnic = &bp->vnic_info[i]; + bnxt_clear_hwrm_vnic_flows(bp, vnic); + bnxt_clear_hwrm_vnic_filters(bp, vnic); bnxt_hwrm_vnic_ctx_free(bp, vnic); @@ -1774,6 +1992,11 @@ static uint16_t bnxt_parse_eth_link_duplex(uint32_t conf_link_speed) return hw_link_duplex; } +static uint16_t bnxt_check_eth_link_autoneg(uint32_t conf_link) +{ + return (conf_link & ETH_LINK_SPEED_FIXED) ? 0 : 1; +} + static uint16_t bnxt_parse_eth_link_speed(uint32_t conf_link_speed) { uint16_t eth_link_speed = 0; @@ -1815,8 +2038,12 @@ static uint16_t bnxt_parse_eth_link_speed(uint32_t conf_link_speed) eth_link_speed = HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_50GB; break; + case ETH_LINK_SPEED_100G: + eth_link_speed = + HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_100GB; + break; default: - RTE_LOG(ERR, PMD, + PMD_DRV_LOG(ERR, "Unsupported link speed %d; default to AUTO\n", conf_link_speed); break; @@ -1827,9 +2054,9 @@ static uint16_t bnxt_parse_eth_link_speed(uint32_t conf_link_speed) #define BNXT_SUPPORTED_SPEEDS (ETH_LINK_SPEED_100M | ETH_LINK_SPEED_100M_HD | \ ETH_LINK_SPEED_1G | ETH_LINK_SPEED_2_5G | \ ETH_LINK_SPEED_10G | ETH_LINK_SPEED_20G | ETH_LINK_SPEED_25G | \ - ETH_LINK_SPEED_40G | ETH_LINK_SPEED_50G) + ETH_LINK_SPEED_40G | ETH_LINK_SPEED_50G | ETH_LINK_SPEED_100G) -static int bnxt_valid_link_speed(uint32_t link_speed, uint8_t port_id) +static int bnxt_valid_link_speed(uint32_t link_speed, uint16_t port_id) { uint32_t one_speed; @@ -1840,20 +2067,20 @@ static int bnxt_valid_link_speed(uint32_t link_speed, uint8_t port_id) one_speed = link_speed & ~ETH_LINK_SPEED_FIXED; if (one_speed & (one_speed - 1)) { - RTE_LOG(ERR, PMD, + PMD_DRV_LOG(ERR, "Invalid advertised speeds (%u) for port %u\n", link_speed, port_id); return -EINVAL; } if ((one_speed & BNXT_SUPPORTED_SPEEDS) != one_speed) { - RTE_LOG(ERR, PMD, + PMD_DRV_LOG(ERR, "Unsupported advertised speed (%u) for port %u\n", link_speed, port_id); return -EINVAL; } } else { if (!(link_speed & BNXT_SUPPORTED_SPEEDS)) { - RTE_LOG(ERR, PMD, + PMD_DRV_LOG(ERR, "Unsupported advertised speeds (%u) for port %u\n", link_speed, port_id); return -EINVAL; @@ -1891,6 +2118,8 @@ bnxt_parse_eth_link_speed_mask(struct bnxt *bp, uint32_t link_speed) ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_40GB; if (link_speed & ETH_LINK_SPEED_50G) ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_50GB; + if (link_speed & ETH_LINK_SPEED_100G) + ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_100GB; return ret; } @@ -1923,9 +2152,12 @@ static uint32_t bnxt_parse_hw_link_speed(uint16_t hw_link_speed) case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_50GB: eth_link_speed = ETH_SPEED_NUM_50G; break; + case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_100GB: + eth_link_speed = ETH_SPEED_NUM_100G; + break; case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_2GB: default: - RTE_LOG(ERR, PMD, "HWRM link speed %d not defined\n", + PMD_DRV_LOG(ERR, "HWRM link speed %d not defined\n", hw_link_speed); break; 
} @@ -1945,7 +2177,7 @@ static uint16_t bnxt_parse_hw_link_duplex(uint16_t hw_link_duplex) eth_link_duplex = ETH_LINK_HALF_DUPLEX; break; default: - RTE_LOG(ERR, PMD, "HWRM link duplex %d not defined\n", + PMD_DRV_LOG(ERR, "HWRM link duplex %d not defined\n", hw_link_duplex); break; } @@ -1959,7 +2191,7 @@ int bnxt_get_hwrm_link_config(struct bnxt *bp, struct rte_eth_link *link) rc = bnxt_hwrm_port_phy_qcfg(bp, link_info); if (rc) { - RTE_LOG(ERR, PMD, + PMD_DRV_LOG(ERR, "Get link config failed with rc %d\n", rc); goto exit; } @@ -1982,9 +2214,9 @@ int bnxt_set_hwrm_link_config(struct bnxt *bp, bool link_up) int rc = 0; struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf; struct bnxt_link_info link_req; - uint16_t speed; + uint16_t speed, autoneg; - if (BNXT_NPAR_PF(bp) || BNXT_VF(bp)) + if (!BNXT_SINGLE_PF(bp) || BNXT_VF(bp)) return 0; rc = bnxt_valid_link_speed(dev_conf->link_speeds, @@ -1997,20 +2229,36 @@ int bnxt_set_hwrm_link_config(struct bnxt *bp, bool link_up) if (!link_up) goto port_phy_cfg; + autoneg = bnxt_check_eth_link_autoneg(dev_conf->link_speeds); speed = bnxt_parse_eth_link_speed(dev_conf->link_speeds); link_req.phy_flags = HWRM_PORT_PHY_CFG_INPUT_FLAGS_RESET_PHY; - if (speed == 0) { + /* Autoneg can be done only when the FW allows */ + if (autoneg == 1 && !(bp->link_info.auto_link_speed || + bp->link_info.force_link_speed)) { link_req.phy_flags |= HWRM_PORT_PHY_CFG_INPUT_FLAGS_RESTART_AUTONEG; - link_req.auto_mode = - HWRM_PORT_PHY_CFG_INPUT_AUTO_MODE_SPEED_MASK; link_req.auto_link_speed_mask = bnxt_parse_eth_link_speed_mask(bp, dev_conf->link_speeds); } else { + if (bp->link_info.phy_type == + HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASET || + bp->link_info.phy_type == + HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASETE || + bp->link_info.media_type == + HWRM_PORT_PHY_QCFG_OUTPUT_MEDIA_TYPE_TP) { + PMD_DRV_LOG(ERR, "10GBase-T devices must autoneg\n"); + return -EINVAL; + } + link_req.phy_flags |= HWRM_PORT_PHY_CFG_INPUT_FLAGS_FORCE; - link_req.link_speed = speed; - RTE_LOG(INFO, PMD, "Set Link Speed %x\n", speed); + /* If user wants a particular speed try that first. */ + if (speed) + link_req.link_speed = speed; + else if (bp->link_info.force_link_speed) + link_req.link_speed = bp->link_info.force_link_speed; + else + link_req.link_speed = bp->link_info.auto_link_speed; } link_req.duplex = bnxt_parse_eth_link_duplex(dev_conf->link_speeds); link_req.auto_pause = bp->link_info.auto_pause; @@ -2019,7 +2267,7 @@ int bnxt_set_hwrm_link_config(struct bnxt *bp, bool link_up) port_phy_cfg: rc = bnxt_hwrm_port_phy_cfg(bp, &link_req); if (rc) { - RTE_LOG(ERR, PMD, + PMD_DRV_LOG(ERR, "Set link config failed with rc %d\n", rc); } @@ -2032,17 +2280,21 @@ int bnxt_hwrm_func_qcfg(struct bnxt *bp) { struct hwrm_func_qcfg_input req = {0}; struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr; + uint16_t flags; int rc = 0; - HWRM_PREP(req, FUNC_QCFG, -1, resp); + HWRM_PREP(req, FUNC_QCFG); req.fid = rte_cpu_to_le_16(0xffff); rc = bnxt_hwrm_send_message(bp, &req, sizeof(req)); - HWRM_CHECK_RESULT; + HWRM_CHECK_RESULT(); /* Hard Coded.. 
0xfff VLAN ID mask */ bp->vlan = rte_le_to_cpu_16(resp->vlan) & 0xfff; + flags = rte_le_to_cpu_16(resp->flags); + if (BNXT_PF(bp) && (flags & HWRM_FUNC_QCFG_OUTPUT_FLAGS_MULTI_HOST)) + bp->flags |= BNXT_FLAG_MULTI_HOST; switch (resp->port_partition_type) { case HWRM_FUNC_QCFG_OUTPUT_PORT_PARTITION_TYPE_NPAR1_0: @@ -2055,6 +2307,8 @@ int bnxt_hwrm_func_qcfg(struct bnxt *bp) break; } + HWRM_UNLOCK(); + return rc; } @@ -2114,10 +2368,12 @@ static int bnxt_hwrm_pf_func_cfg(struct bnxt *bp, int tx_rings) req.num_hw_ring_grps = rte_cpu_to_le_16(bp->max_ring_grps); req.fid = rte_cpu_to_le_16(0xffff); - HWRM_PREP(req, FUNC_CFG, -1, resp); + HWRM_PREP(req, FUNC_CFG); rc = bnxt_hwrm_send_message(bp, &req, sizeof(req)); - HWRM_CHECK_RESULT; + + HWRM_CHECK_RESULT(); + HWRM_UNLOCK(); return rc; } @@ -2183,16 +2439,16 @@ static void reserve_resources_from_vf(struct bnxt *bp, int rc; /* Get the actual allocated values now */ - HWRM_PREP(req, FUNC_QCAPS, -1, resp); + HWRM_PREP(req, FUNC_QCAPS); req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid); rc = bnxt_hwrm_send_message(bp, &req, sizeof(req)); if (rc) { - RTE_LOG(ERR, PMD, "hwrm_func_qcaps failed rc:%d\n", rc); + PMD_DRV_LOG(ERR, "hwrm_func_qcaps failed rc:%d\n", rc); copy_func_cfg_to_qcaps(cfg_req, resp); } else if (resp->error_code) { rc = rte_le_to_cpu_16(resp->error_code); - RTE_LOG(ERR, PMD, "hwrm_func_qcaps error %d\n", rc); + PMD_DRV_LOG(ERR, "hwrm_func_qcaps error %d\n", rc); copy_func_cfg_to_qcaps(cfg_req, resp); } @@ -2208,6 +2464,8 @@ static void reserve_resources_from_vf(struct bnxt *bp, */ //bp->max_vnics -= rte_le_to_cpu_16(esp->max_vnics); bp->max_ring_grps -= rte_le_to_cpu_16(resp->max_hw_ring_grps); + + HWRM_UNLOCK(); } int bnxt_hwrm_func_qcfg_current_vf_vlan(struct bnxt *bp, int vf) @@ -2217,18 +2475,22 @@ int bnxt_hwrm_func_qcfg_current_vf_vlan(struct bnxt *bp, int vf) int rc; /* Check for zero MAC address */ - HWRM_PREP(req, FUNC_QCFG, -1, resp); + HWRM_PREP(req, FUNC_QCFG); req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid); rc = bnxt_hwrm_send_message(bp, &req, sizeof(req)); if (rc) { - RTE_LOG(ERR, PMD, "hwrm_func_qcfg failed rc:%d\n", rc); + PMD_DRV_LOG(ERR, "hwrm_func_qcfg failed rc:%d\n", rc); return -1; } else if (resp->error_code) { rc = rte_le_to_cpu_16(resp->error_code); - RTE_LOG(ERR, PMD, "hwrm_func_qcfg error %d\n", rc); + PMD_DRV_LOG(ERR, "hwrm_func_qcfg error %d\n", rc); return -1; } - return rte_le_to_cpu_16(resp->vlan); + rc = rte_le_to_cpu_16(resp->vlan); + + HWRM_UNLOCK(); + + return rc; } static int update_pf_resource_max(struct bnxt *bp) @@ -2238,15 +2500,17 @@ static int update_pf_resource_max(struct bnxt *bp) int rc; /* And copy the allocated numbers into the pf struct */ - HWRM_PREP(req, FUNC_QCFG, -1, resp); + HWRM_PREP(req, FUNC_QCFG); req.fid = rte_cpu_to_le_16(0xffff); rc = bnxt_hwrm_send_message(bp, &req, sizeof(req)); - HWRM_CHECK_RESULT; + HWRM_CHECK_RESULT(); /* Only TX ring value reflects actual allocation? 
TODO */
 	bp->max_tx_rings = rte_le_to_cpu_16(resp->alloc_tx_rings);
 	bp->pf.evb_mode = resp->evb_mode;

+	HWRM_UNLOCK();
+
 	return rc;
 }

@@ -2255,7 +2519,7 @@ int bnxt_hwrm_allocate_pf_only(struct bnxt *bp)
 	int rc;

 	if (!BNXT_PF(bp)) {
-		RTE_LOG(ERR, PMD, "Attempt to allcoate VFs on a VF!\n");
+		PMD_DRV_LOG(ERR, "Attempt to allocate VFs on a VF!\n");
 		return -1;
 	}

@@ -2282,7 +2546,7 @@ int bnxt_hwrm_allocate_vfs(struct bnxt *bp, int num_vfs)
 	size_t req_buf_sz;

 	if (!BNXT_PF(bp)) {
-		RTE_LOG(ERR, PMD, "Attempt to allcoate VFs on a VF!\n");
+		PMD_DRV_LOG(ERR, "Attempt to allocate VFs on a VF!\n");
 		return -1;
 	}

@@ -2338,7 +2602,7 @@ int bnxt_hwrm_allocate_vfs(struct bnxt *bp, int num_vfs)

 	for (i = 0; i < num_vfs; i++) {
 		add_random_mac_if_needed(bp, &req, i);

-		HWRM_PREP(req, FUNC_CFG, -1, resp);
+		HWRM_PREP(req, FUNC_CFG);
 		req.flags = rte_cpu_to_le_32(bp->pf.vf_info[i].func_cfg_flags);
 		req.fid = rte_cpu_to_le_16(bp->pf.vf_info[i].fid);
 		rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
@@ -2348,14 +2612,17 @@ int bnxt_hwrm_allocate_vfs(struct bnxt *bp, int num_vfs)
 			HWRM_FUNC_CFG_INPUT_ENABLES_DFLT_MAC_ADDR);

 		if (rc || resp->error_code) {
-			RTE_LOG(ERR, PMD,
+			PMD_DRV_LOG(ERR,
 				"Failed to initialize VF %d\n", i);
-			RTE_LOG(ERR, PMD,
+			PMD_DRV_LOG(ERR,
 				"Not all VFs available. (%d, %d)\n",
 				rc, resp->error_code);
+			HWRM_UNLOCK();
 			break;
 		}

+		HWRM_UNLOCK();
+
 		reserve_resources_from_vf(bp, &req, i);
 		bp->pf.active_vfs++;
 		bnxt_hwrm_func_clr_stats(bp, bp->pf.vf_info[i].fid);
@@ -2388,14 +2655,15 @@ int bnxt_hwrm_pf_evb_mode(struct bnxt *bp)
 	struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
 	int rc;

-	HWRM_PREP(req, FUNC_CFG, -1, resp);
+	HWRM_PREP(req, FUNC_CFG);

 	req.fid = rte_cpu_to_le_16(0xffff);
 	req.enables = rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_EVB_MODE);
 	req.evb_mode = bp->pf.evb_mode;

 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
-	HWRM_CHECK_RESULT;
+	HWRM_CHECK_RESULT();
+	HWRM_UNLOCK();

 	return rc;
 }
@@ -2407,11 +2675,11 @@ int bnxt_hwrm_tunnel_dst_port_alloc(struct bnxt *bp, uint16_t port,
 	struct hwrm_tunnel_dst_port_alloc_output *resp = bp->hwrm_cmd_resp_addr;
 	int rc = 0;

-	HWRM_PREP(req, TUNNEL_DST_PORT_ALLOC, -1, resp);
+	HWRM_PREP(req, TUNNEL_DST_PORT_ALLOC);
 	req.tunnel_type = tunnel_type;
 	req.tunnel_dst_port_val = port;
 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
-	HWRM_CHECK_RESULT;
+	HWRM_CHECK_RESULT();

 	switch (tunnel_type) {
 	case HWRM_TUNNEL_DST_PORT_ALLOC_INPUT_TUNNEL_TYPE_VXLAN:
@@ -2425,6 +2693,9 @@ int bnxt_hwrm_tunnel_dst_port_alloc(struct bnxt *bp, uint16_t port,
 	default:
 		break;
 	}
+
+	HWRM_UNLOCK();
+
 	return rc;
 }

@@ -2435,11 +2706,14 @@ int bnxt_hwrm_tunnel_dst_port_free(struct bnxt *bp, uint16_t port,
 	struct hwrm_tunnel_dst_port_free_output *resp = bp->hwrm_cmd_resp_addr;
 	int rc = 0;

-	HWRM_PREP(req, TUNNEL_DST_PORT_FREE, -1, resp);
+	HWRM_PREP(req, TUNNEL_DST_PORT_FREE);
+
 	req.tunnel_type = tunnel_type;
 	req.tunnel_dst_port_id = rte_cpu_to_be_16(port);
 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
-	HWRM_CHECK_RESULT;
+
+	HWRM_CHECK_RESULT();
+	HWRM_UNLOCK();

 	return rc;
 }
@@ -2451,11 +2725,14 @@ int bnxt_hwrm_func_cfg_vf_set_flags(struct bnxt *bp, uint16_t vf,
 	struct hwrm_func_cfg_input req = {0};
 	int rc;

-	HWRM_PREP(req, FUNC_CFG, -1, resp);
+	HWRM_PREP(req, FUNC_CFG);
+
 	req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
 	req.flags = rte_cpu_to_le_32(flags);
 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
-	HWRM_CHECK_RESULT;
+
+	HWRM_CHECK_RESULT();
+	HWRM_UNLOCK();

 	return rc;
 }
@@ -2478,23 +2755,24 @@ int bnxt_hwrm_func_buf_rgtr(struct bnxt 
@@ -2478,23 +2755,24 @@ int bnxt_hwrm_func_buf_rgtr(struct bnxt *bp)
 	struct hwrm_func_buf_rgtr_input req = {.req_type = 0 };
 	struct hwrm_func_buf_rgtr_output *resp = bp->hwrm_cmd_resp_addr;
 
-	HWRM_PREP(req, FUNC_BUF_RGTR, -1, resp);
+	HWRM_PREP(req, FUNC_BUF_RGTR);
 
 	req.req_buf_num_pages = rte_cpu_to_le_16(1);
 	req.req_buf_page_size = rte_cpu_to_le_16(
 			page_getenum(bp->pf.active_vfs * HWRM_MAX_REQ_LEN));
 	req.req_buf_len = rte_cpu_to_le_16(HWRM_MAX_REQ_LEN);
 	req.req_buf_page_addr[0] =
-		rte_cpu_to_le_64(rte_mem_virt2phy(bp->pf.vf_req_buf));
+		rte_cpu_to_le_64(rte_mem_virt2iova(bp->pf.vf_req_buf));
 	if (req.req_buf_page_addr[0] == 0) {
-		RTE_LOG(ERR, PMD,
+		PMD_DRV_LOG(ERR,
 			"unable to map buffer address to physical memory\n");
 		return -ENOMEM;
 	}
 
 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
 
-	HWRM_CHECK_RESULT;
+	HWRM_CHECK_RESULT();
+	HWRM_UNLOCK();
 
 	return rc;
 }
@@ -2505,11 +2783,12 @@ int bnxt_hwrm_func_buf_unrgtr(struct bnxt *bp)
 	struct hwrm_func_buf_unrgtr_input req = {.req_type = 0 };
 	struct hwrm_func_buf_unrgtr_output *resp = bp->hwrm_cmd_resp_addr;
 
-	HWRM_PREP(req, FUNC_BUF_UNRGTR, -1, resp);
+	HWRM_PREP(req, FUNC_BUF_UNRGTR);
 
 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
 
-	HWRM_CHECK_RESULT;
+	HWRM_CHECK_RESULT();
+	HWRM_UNLOCK();
 
 	return rc;
 }
@@ -2520,7 +2799,8 @@ int bnxt_hwrm_func_cfg_def_cp(struct bnxt *bp)
 	struct hwrm_func_cfg_input req = {0};
 	int rc;
 
-	HWRM_PREP(req, FUNC_CFG, -1, resp);
+	HWRM_PREP(req, FUNC_CFG);
+
 	req.fid = rte_cpu_to_le_16(0xffff);
 	req.flags = rte_cpu_to_le_32(bp->pf.func_cfg_flags);
 	req.enables = rte_cpu_to_le_32(
@@ -2528,7 +2808,9 @@ int bnxt_hwrm_func_cfg_def_cp(struct bnxt *bp)
 	req.async_event_cr = rte_cpu_to_le_16(
 			bp->def_cp_ring->cp_ring_struct->fw_ring_id);
 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
-	HWRM_CHECK_RESULT;
+
+	HWRM_CHECK_RESULT();
+	HWRM_UNLOCK();
 
 	return rc;
 }
@@ -2539,13 +2821,16 @@ int bnxt_hwrm_vf_func_cfg_def_cp(struct bnxt *bp)
 	struct hwrm_func_vf_cfg_input req = {0};
 	int rc;
 
-	HWRM_PREP(req, FUNC_VF_CFG, -1, resp);
+	HWRM_PREP(req, FUNC_VF_CFG);
+
 	req.enables = rte_cpu_to_le_32(
 			HWRM_FUNC_CFG_INPUT_ENABLES_ASYNC_EVENT_CR);
 	req.async_event_cr = rte_cpu_to_le_16(
 			bp->def_cp_ring->cp_ring_struct->fw_ring_id);
 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
-	HWRM_CHECK_RESULT;
+
+	HWRM_CHECK_RESULT();
+	HWRM_UNLOCK();
 
 	return rc;
 }
@@ -2558,7 +2843,7 @@ int bnxt_hwrm_set_default_vlan(struct bnxt *bp, int vf, uint8_t is_vf)
 	uint32_t func_cfg_flags;
 	int rc = 0;
 
-	HWRM_PREP(req, FUNC_CFG, -1, resp);
+	HWRM_PREP(req, FUNC_CFG);
 
 	if (is_vf) {
 		dflt_vlan = bp->pf.vf_info[vf].dflt_vlan;
@@ -2576,7 +2861,9 @@ int bnxt_hwrm_set_default_vlan(struct bnxt *bp, int vf, uint8_t is_vf)
 	req.dflt_vlan = rte_cpu_to_le_16(dflt_vlan);
 
 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
-	HWRM_CHECK_RESULT;
+
+	HWRM_CHECK_RESULT();
+	HWRM_UNLOCK();
 
 	return rc;
 }
@@ -2588,13 +2875,16 @@ int bnxt_hwrm_func_bw_cfg(struct bnxt *bp, uint16_t vf,
 	struct hwrm_func_cfg_input req = {0};
 	int rc;
 
-	HWRM_PREP(req, FUNC_CFG, -1, resp);
+	HWRM_PREP(req, FUNC_CFG);
+
 	req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
 	req.enables |= rte_cpu_to_le_32(enables);
 	req.flags = rte_cpu_to_le_32(bp->pf.vf_info[vf].func_cfg_flags);
 	req.max_bw = rte_cpu_to_le_32(max_bw);
 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
-	HWRM_CHECK_RESULT;
+
+	HWRM_CHECK_RESULT();
+	HWRM_UNLOCK();
 
 	return rc;
 }
@@ -2605,14 +2895,17 @@ int bnxt_hwrm_set_vf_vlan(struct bnxt *bp, int vf)
 	struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
 	int rc = 0;
 
-	HWRM_PREP(req, FUNC_CFG, -1, resp);
+	HWRM_PREP(req, FUNC_CFG);
+
 	req.flags = rte_cpu_to_le_32(bp->pf.vf_info[vf].func_cfg_flags);
 	req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
 	req.enables |= rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_DFLT_VLAN);
 	req.dflt_vlan = rte_cpu_to_le_16(bp->pf.vf_info[vf].dflt_vlan);
 
 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
-	HWRM_CHECK_RESULT;
+
+	HWRM_CHECK_RESULT();
+	HWRM_UNLOCK();
 
 	return rc;
 }
@@ -2627,14 +2920,15 @@ int bnxt_hwrm_reject_fwd_resp(struct bnxt *bp, uint16_t target_id,
 	if (ec_size > sizeof(req.encap_request))
 		return -1;
 
-	HWRM_PREP(req, REJECT_FWD_RESP, -1, resp);
+	HWRM_PREP(req, REJECT_FWD_RESP);
 
 	req.encap_resp_target_id = rte_cpu_to_le_16(target_id);
 	memcpy(req.encap_request, encaped, ec_size);
 
 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
 
-	HWRM_CHECK_RESULT;
+	HWRM_CHECK_RESULT();
+	HWRM_UNLOCK();
 
 	return rc;
 }
@@ -2646,13 +2940,17 @@ int bnxt_hwrm_func_qcfg_vf_default_mac(struct bnxt *bp, uint16_t vf,
 	struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
 	int rc;
 
-	HWRM_PREP(req, FUNC_QCFG, -1, resp);
+	HWRM_PREP(req, FUNC_QCFG);
+
 	req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
 
-	HWRM_CHECK_RESULT;
+	HWRM_CHECK_RESULT();
 
 	memcpy(mac->addr_bytes, resp->mac_address, ETHER_ADDR_LEN);
+
+	HWRM_UNLOCK();
+
 	return rc;
 }
 
@@ -2666,50 +2964,55 @@ int bnxt_hwrm_exec_fwd_resp(struct bnxt *bp, uint16_t target_id,
 	if (ec_size > sizeof(req.encap_request))
 		return -1;
 
-	HWRM_PREP(req, EXEC_FWD_RESP, -1, resp);
+	HWRM_PREP(req, EXEC_FWD_RESP);
 
 	req.encap_resp_target_id = rte_cpu_to_le_16(target_id);
 	memcpy(req.encap_request, encaped, ec_size);
 
 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
 
-	HWRM_CHECK_RESULT;
+	HWRM_CHECK_RESULT();
+	HWRM_UNLOCK();
 
 	return rc;
 }
 
 int bnxt_hwrm_ctx_qstats(struct bnxt *bp, uint32_t cid, int idx,
-			 struct rte_eth_stats *stats)
+			 struct rte_eth_stats *stats, uint8_t rx)
 {
 	int rc = 0;
 	struct hwrm_stat_ctx_query_input req = {.req_type = 0};
 	struct hwrm_stat_ctx_query_output *resp = bp->hwrm_cmd_resp_addr;
 
-	HWRM_PREP(req, STAT_CTX_QUERY, -1, resp);
+	HWRM_PREP(req, STAT_CTX_QUERY);
 
 	req.stat_ctx_id = rte_cpu_to_le_32(cid);
 
 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
 
-	HWRM_CHECK_RESULT;
-
-	stats->q_ipackets[idx] = rte_le_to_cpu_64(resp->rx_ucast_pkts);
-	stats->q_ipackets[idx] += rte_le_to_cpu_64(resp->rx_mcast_pkts);
-	stats->q_ipackets[idx] += rte_le_to_cpu_64(resp->rx_bcast_pkts);
-	stats->q_ibytes[idx] = rte_le_to_cpu_64(resp->rx_ucast_bytes);
-	stats->q_ibytes[idx] += rte_le_to_cpu_64(resp->rx_mcast_bytes);
-	stats->q_ibytes[idx] += rte_le_to_cpu_64(resp->rx_bcast_bytes);
+	HWRM_CHECK_RESULT();
+
+	if (rx) {
+		stats->q_ipackets[idx] = rte_le_to_cpu_64(resp->rx_ucast_pkts);
+		stats->q_ipackets[idx] += rte_le_to_cpu_64(resp->rx_mcast_pkts);
+		stats->q_ipackets[idx] += rte_le_to_cpu_64(resp->rx_bcast_pkts);
+		stats->q_ibytes[idx] = rte_le_to_cpu_64(resp->rx_ucast_bytes);
+		stats->q_ibytes[idx] += rte_le_to_cpu_64(resp->rx_mcast_bytes);
+		stats->q_ibytes[idx] += rte_le_to_cpu_64(resp->rx_bcast_bytes);
+		stats->q_errors[idx] = rte_le_to_cpu_64(resp->rx_err_pkts);
+		stats->q_errors[idx] += rte_le_to_cpu_64(resp->rx_drop_pkts);
+	} else {
+		stats->q_opackets[idx] = rte_le_to_cpu_64(resp->tx_ucast_pkts);
+		stats->q_opackets[idx] += rte_le_to_cpu_64(resp->tx_mcast_pkts);
+		stats->q_opackets[idx] += rte_le_to_cpu_64(resp->tx_bcast_pkts);
+		stats->q_obytes[idx] = rte_le_to_cpu_64(resp->tx_ucast_bytes);
+		stats->q_obytes[idx] += rte_le_to_cpu_64(resp->tx_mcast_bytes);
+		stats->q_obytes[idx] += rte_le_to_cpu_64(resp->tx_bcast_bytes);
+		stats->q_errors[idx] += rte_le_to_cpu_64(resp->tx_err_pkts);
	}
 
-	stats->q_opackets[idx] = rte_le_to_cpu_64(resp->tx_ucast_pkts);
-	stats->q_opackets[idx] += rte_le_to_cpu_64(resp->tx_mcast_pkts);
-	stats->q_opackets[idx] += rte_le_to_cpu_64(resp->tx_bcast_pkts);
-	stats->q_obytes[idx] = rte_le_to_cpu_64(resp->tx_ucast_bytes);
-	stats->q_obytes[idx] += rte_le_to_cpu_64(resp->tx_mcast_bytes);
-	stats->q_obytes[idx] += rte_le_to_cpu_64(resp->tx_bcast_bytes);
-	stats->q_errors[idx] = rte_le_to_cpu_64(resp->rx_err_pkts);
-	stats->q_errors[idx] += rte_le_to_cpu_64(resp->tx_err_pkts);
-	stats->q_errors[idx] += rte_le_to_cpu_64(resp->rx_drop_pkts);
+	HWRM_UNLOCK();
 
 	return rc;
 }
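The bnxt_hwrm_ctx_qstats() change above splits the counters by direction: RX contexts fill q_ipackets/q_ibytes and initialize q_errors, while TX contexts fill q_opackets/q_obytes and accumulate into q_errors, so RX stats for a given index should be collected before TX. A hypothetical caller might look like the sketch below; the stat-context-id arrays are illustrative parameters, not real driver fields.

/* Hypothetical caller of the reworked bnxt_hwrm_ctx_qstats(); rx_cids
 * and tx_cids stand in for however the driver tracks stat context ids.
 */
static int bnxt_queue_stats_sketch(struct bnxt *bp,
				   struct rte_eth_stats *stats,
				   const uint32_t *rx_cids, unsigned int nr_rx,
				   const uint32_t *tx_cids, unsigned int nr_tx)
{
	unsigned int i;
	int rc;

	/* RX first: the TX branch accumulates into q_errors[]. */
	for (i = 0; i < nr_rx; i++) {
		rc = bnxt_hwrm_ctx_qstats(bp, rx_cids[i], i, stats, 1);
		if (rc)
			return rc;
	}
	for (i = 0; i < nr_tx; i++) {
		rc = bnxt_hwrm_ctx_qstats(bp, tx_cids[i], i, stats, 0);
		if (rc)
			return rc;
	}
	return 0;
}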
@@ -2724,12 +3027,16 @@ int bnxt_hwrm_port_qstats(struct bnxt *bp)
 	if (!(bp->flags & BNXT_FLAG_PORT_STATS))
 		return 0;
 
-	HWRM_PREP(req, PORT_QSTATS, -1, resp);
+	HWRM_PREP(req, PORT_QSTATS);
+
 	req.port_id = rte_cpu_to_le_16(pf->port_id);
 	req.tx_stat_host_addr = rte_cpu_to_le_64(bp->hw_tx_port_stats_map);
 	req.rx_stat_host_addr = rte_cpu_to_le_64(bp->hw_rx_port_stats_map);
 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
-	HWRM_CHECK_RESULT;
+
+	HWRM_CHECK_RESULT();
+	HWRM_UNLOCK();
+
 	return rc;
 }
 
@@ -2743,10 +3050,14 @@ int bnxt_hwrm_port_clr_stats(struct bnxt *bp)
 	if (!(bp->flags & BNXT_FLAG_PORT_STATS))
 		return 0;
 
-	HWRM_PREP(req, PORT_CLR_STATS, -1, resp);
+	HWRM_PREP(req, PORT_CLR_STATS);
+
 	req.port_id = rte_cpu_to_le_16(pf->port_id);
 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
-	HWRM_CHECK_RESULT;
+
+	HWRM_CHECK_RESULT();
+	HWRM_UNLOCK();
+
 	return rc;
 }
 
@@ -2759,10 +3070,11 @@ int bnxt_hwrm_port_led_qcaps(struct bnxt *bp)
 	if (BNXT_VF(bp))
 		return 0;
 
-	HWRM_PREP(req, PORT_LED_QCAPS, -1, resp);
+	HWRM_PREP(req, PORT_LED_QCAPS);
 	req.port_id = bp->pf.port_id;
 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
-	HWRM_CHECK_RESULT;
+
+	HWRM_CHECK_RESULT();
 
 	if (resp->num_leds > 0 && resp->num_leds < BNXT_MAX_LED) {
 		unsigned int i;
@@ -2782,6 +3094,9 @@ int bnxt_hwrm_port_led_qcaps(struct bnxt *bp)
 			}
 		}
 	}
+
+	HWRM_UNLOCK();
+
 	return rc;
 }
 
@@ -2797,7 +3112,8 @@ int bnxt_hwrm_port_led_cfg(struct bnxt *bp, bool led_on)
 	if (!bp->num_leds || BNXT_VF(bp))
 		return -EOPNOTSUPP;
 
-	HWRM_PREP(req, PORT_LED_CFG, -1, resp);
+	HWRM_PREP(req, PORT_LED_CFG);
+
 	if (led_on) {
 		led_state = HWRM_PORT_LED_CFG_INPUT_LED0_STATE_BLINKALT;
 		duration = rte_cpu_to_le_16(500);
@@ -2815,8 +3131,171 @@ int bnxt_hwrm_port_led_cfg(struct bnxt *bp, bool led_on)
 	}
 
 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
-	HWRM_CHECK_RESULT;
+	HWRM_CHECK_RESULT();
+	HWRM_UNLOCK();
+
+	return rc;
+}
+
+int bnxt_hwrm_nvm_get_dir_info(struct bnxt *bp, uint32_t *entries,
+			       uint32_t *length)
+{
+	int rc;
+	struct hwrm_nvm_get_dir_info_input req = {0};
+	struct hwrm_nvm_get_dir_info_output *resp = bp->hwrm_cmd_resp_addr;
+
+	HWRM_PREP(req, NVM_GET_DIR_INFO);
+
+	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+
+	HWRM_CHECK_RESULT();
+	HWRM_UNLOCK();
+
+	if (!rc) {
+		*entries = rte_le_to_cpu_32(resp->entries);
+		*length = rte_le_to_cpu_32(resp->entry_length);
+	}
+	return rc;
+}
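The NVM helpers that follow all share one host-buffer setup: allocate with rte_malloc(), lock the page, resolve the IO address with rte_mem_virt2iova(), and bail out (freeing the buffer) if mapping fails. A condensed sketch of just that pattern, with an illustrative helper name:

/* Illustrative DMA-buffer setup, mirroring the NVM helpers below. */
static int bnxt_alloc_dma_buf_sketch(size_t len, uint8_t **bufp,
				     rte_iova_t *iovap)
{
	uint8_t *buf = rte_malloc("nvm_buf", len, 0);

	if (buf == NULL)
		return -ENOMEM;
	rte_mem_lock_page(buf);

	*iovap = rte_mem_virt2iova(buf);
	if (*iovap == 0) {
		rte_free(buf);	/* don't leak on mapping failure */
		return -ENOMEM;
	}
	*bufp = buf;
	return 0;
}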
+
+int bnxt_get_nvram_directory(struct bnxt *bp, uint32_t len, uint8_t *data)
+{
+	int rc;
+	uint32_t dir_entries;
+	uint32_t entry_length;
+	uint8_t *buf;
+	size_t buflen;
+	rte_iova_t dma_handle;
+	struct hwrm_nvm_get_dir_entries_input req = {0};
+	struct hwrm_nvm_get_dir_entries_output *resp = bp->hwrm_cmd_resp_addr;
+
+	rc = bnxt_hwrm_nvm_get_dir_info(bp, &dir_entries, &entry_length);
+	if (rc != 0)
+		return rc;
+
+	*data++ = dir_entries;
+	*data++ = entry_length;
+	len -= 2;
+	memset(data, 0xff, len);
+
+	buflen = dir_entries * entry_length;
+	buf = rte_malloc("nvm_dir", buflen, 0);
+	if (buf == NULL)
+		return -ENOMEM;
+	rte_mem_lock_page(buf);
+	dma_handle = rte_mem_virt2iova(buf);
+	if (dma_handle == 0) {
+		PMD_DRV_LOG(ERR,
+			"unable to map response address to physical memory\n");
+		rte_free(buf);
+		return -ENOMEM;
+	}
+	HWRM_PREP(req, NVM_GET_DIR_ENTRIES);
+	req.host_dest_addr = rte_cpu_to_le_64(dma_handle);
+	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+
+	HWRM_CHECK_RESULT();
+	HWRM_UNLOCK();
+
+	if (rc == 0)
+		memcpy(data, buf, len > buflen ? buflen : len);
+
+	rte_free(buf);
+
+	return rc;
+}
+
+int bnxt_hwrm_get_nvram_item(struct bnxt *bp, uint32_t index,
+			     uint32_t offset, uint32_t length,
+			     uint8_t *data)
+{
+	int rc;
+	uint8_t *buf;
+	rte_iova_t dma_handle;
+	struct hwrm_nvm_read_input req = {0};
+	struct hwrm_nvm_read_output *resp = bp->hwrm_cmd_resp_addr;
+
+	buf = rte_malloc("nvm_item", length, 0);
+	if (!buf)
+		return -ENOMEM;
+	rte_mem_lock_page(buf);
+
+	dma_handle = rte_mem_virt2iova(buf);
+	if (dma_handle == 0) {
+		PMD_DRV_LOG(ERR,
+			"unable to map response address to physical memory\n");
+		rte_free(buf);
+		return -ENOMEM;
+	}
+	HWRM_PREP(req, NVM_READ);
+	req.host_dest_addr = rte_cpu_to_le_64(dma_handle);
+	req.dir_idx = rte_cpu_to_le_16(index);
+	req.offset = rte_cpu_to_le_32(offset);
+	req.len = rte_cpu_to_le_32(length);
+	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+	HWRM_CHECK_RESULT();
+	HWRM_UNLOCK();
+	if (rc == 0)
+		memcpy(data, buf, length);
+
+	rte_free(buf);
+	return rc;
+}
+
+int bnxt_hwrm_erase_nvram_directory(struct bnxt *bp, uint8_t index)
+{
+	int rc;
+	struct hwrm_nvm_erase_dir_entry_input req = {0};
+	struct hwrm_nvm_erase_dir_entry_output *resp = bp->hwrm_cmd_resp_addr;
+
+	HWRM_PREP(req, NVM_ERASE_DIR_ENTRY);
+	req.dir_idx = rte_cpu_to_le_16(index);
+	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+	HWRM_CHECK_RESULT();
+	HWRM_UNLOCK();
+
+	return rc;
+}
+
+
+int bnxt_hwrm_flash_nvram(struct bnxt *bp, uint16_t dir_type,
+			  uint16_t dir_ordinal, uint16_t dir_ext,
+			  uint16_t dir_attr, const uint8_t *data,
+			  size_t data_len)
+{
+	int rc;
+	struct hwrm_nvm_write_input req = {0};
+	struct hwrm_nvm_write_output *resp = bp->hwrm_cmd_resp_addr;
+	rte_iova_t dma_handle;
+	uint8_t *buf;
+
+	/* Map the source buffer before taking the HWRM lock so the
+	 * error paths below cannot return with the lock held.
+	 */
+	buf = rte_malloc("nvm_write", data_len, 0);
+	if (!buf)
+		return -ENOMEM;
+	rte_mem_lock_page(buf);
+
+	dma_handle = rte_mem_virt2iova(buf);
+	if (dma_handle == 0) {
+		PMD_DRV_LOG(ERR,
+			"unable to map response address to physical memory\n");
+		rte_free(buf);
+		return -ENOMEM;
+	}
+	memcpy(buf, data, data_len);
+
+	HWRM_PREP(req, NVM_WRITE);
+
+	req.dir_type = rte_cpu_to_le_16(dir_type);
+	req.dir_ordinal = rte_cpu_to_le_16(dir_ordinal);
+	req.dir_ext = rte_cpu_to_le_16(dir_ext);
+	req.dir_attr = rte_cpu_to_le_16(dir_attr);
+	req.dir_data_length = rte_cpu_to_le_32(data_len);
+	req.host_src_addr = rte_cpu_to_le_64(dma_handle);
+
+	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+
+	HWRM_CHECK_RESULT();
+	HWRM_UNLOCK();
+
+	rte_free(buf);
 
 	return rc;
 }
@@ -2853,28 +3332,34 @@ static int bnxt_hwrm_func_vf_vnic_query(struct bnxt *bp, uint16_t vf,
 	int rc;
 
 	/* First query all VNIC ids */
-	HWRM_PREP(req, FUNC_VF_VNIC_IDS_QUERY, -1, resp_vf_vnic_ids);
+	HWRM_PREP(req, FUNC_VF_VNIC_IDS_QUERY);
 
 	req.vf_id = rte_cpu_to_le_16(bp->pf.first_vf_id + vf);
 	req.max_vnic_id_cnt = rte_cpu_to_le_32(bp->pf.total_vnics);
-	req.vnic_id_tbl_addr = rte_cpu_to_le_64(rte_mem_virt2phy(vnic_ids));
+	req.vnic_id_tbl_addr = rte_cpu_to_le_64(rte_mem_virt2iova(vnic_ids));
 
 	if (req.vnic_id_tbl_addr == 0) {
-		RTE_LOG(ERR, PMD,
+		HWRM_UNLOCK();
+		PMD_DRV_LOG(ERR,
 		"unable to map VNIC ID table address to physical memory\n");
 		return -ENOMEM;
 	}
 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
 	if (rc) {
-		RTE_LOG(ERR, PMD, "hwrm_func_vf_vnic_query failed rc:%d\n", rc);
+		HWRM_UNLOCK();
+		PMD_DRV_LOG(ERR, "hwrm_func_vf_vnic_query failed rc:%d\n", rc);
 		return -1;
 	} else if (resp->error_code) {
 		rc = rte_le_to_cpu_16(resp->error_code);
-		RTE_LOG(ERR, PMD, "hwrm_func_vf_vnic_query error %d\n", rc);
+		HWRM_UNLOCK();
+		PMD_DRV_LOG(ERR, "hwrm_func_vf_vnic_query error %d\n", rc);
 		return -1;
 	}
+	rc = rte_le_to_cpu_32(resp->vnic_id_cnt);
+
+	HWRM_UNLOCK();
 
-	return rte_le_to_cpu_32(resp->vnic_id_cnt);
+	return rc;
 }
 
 /*
@@ -2939,7 +3424,8 @@ int bnxt_hwrm_func_cfg_vf_set_vlan_anti_spoof(struct bnxt *bp, uint16_t vf,
 	struct hwrm_func_cfg_input req = {0};
 	int rc;
 
-	HWRM_PREP(req, FUNC_CFG, -1, resp);
+	HWRM_PREP(req, FUNC_CFG);
+
 	req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
 	req.enables |= rte_cpu_to_le_32(
 			HWRM_FUNC_CFG_INPUT_ENABLES_VLAN_ANTISPOOF_MODE);
@@ -2947,7 +3433,9 @@ int bnxt_hwrm_func_cfg_vf_set_vlan_anti_spoof(struct bnxt *bp, uint16_t vf,
 			HWRM_FUNC_CFG_INPUT_VLAN_ANTISPOOF_MODE_VALIDATE_VLAN :
 			HWRM_FUNC_CFG_INPUT_VLAN_ANTISPOOF_MODE_NOCHECK;
 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
-	HWRM_CHECK_RESULT;
+
+	HWRM_CHECK_RESULT();
+	HWRM_UNLOCK();
 
 	return rc;
 }
@@ -2995,8 +3483,248 @@ int bnxt_hwrm_func_qcfg_vf_dflt_vnic_id(struct bnxt *bp, int vf)
 		}
 	}
 	/* Could not find a default VNIC. */
-	RTE_LOG(ERR, PMD, "No default VNIC\n");
+	PMD_DRV_LOG(ERR, "No default VNIC\n");
 exit:
 	rte_free(vnic_ids);
 	return -1;
 }
+
+int bnxt_hwrm_set_em_filter(struct bnxt *bp,
+			uint16_t dst_id,
+			struct bnxt_filter_info *filter)
+{
+	int rc = 0;
+	struct hwrm_cfa_em_flow_alloc_input req = {.req_type = 0 };
+	struct hwrm_cfa_em_flow_alloc_output *resp = bp->hwrm_cmd_resp_addr;
+	uint32_t enables = 0;
+
+	if (filter->fw_em_filter_id != UINT64_MAX)
+		bnxt_hwrm_clear_em_filter(bp, filter);
+
+	HWRM_PREP(req, CFA_EM_FLOW_ALLOC);
+
+	req.flags = rte_cpu_to_le_32(filter->flags);
+
+	enables = filter->enables |
+	      HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_DST_ID;
+	req.dst_id = rte_cpu_to_le_16(dst_id);
+
+	if (filter->ip_addr_type) {
+		req.ip_addr_type = filter->ip_addr_type;
+		enables |= HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_IPADDR_TYPE;
+	}
+	if (enables &
+	    HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_L2_FILTER_ID)
+		req.l2_filter_id = rte_cpu_to_le_64(filter->fw_l2_filter_id);
+	if (enables &
+	    HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_SRC_MACADDR)
+		memcpy(req.src_macaddr, filter->src_macaddr,
+		       ETHER_ADDR_LEN);
+	if (enables &
+	    HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_DST_MACADDR)
+		memcpy(req.dst_macaddr, filter->dst_macaddr,
+		       ETHER_ADDR_LEN);
+	if (enables &
+	    HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_OVLAN_VID)
+		req.ovlan_vid = filter->l2_ovlan;
+	if (enables &
+	    HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_IVLAN_VID)
+		req.ivlan_vid = filter->l2_ivlan;
+	if (enables &
+	    HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_ETHERTYPE)
+		req.ethertype = rte_cpu_to_be_16(filter->ethertype);
+	if (enables &
+	    HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_IP_PROTOCOL)
+		req.ip_protocol = filter->ip_protocol;
+	if (enables &
+	    HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_SRC_IPADDR)
+		req.src_ipaddr[0] = rte_cpu_to_be_32(filter->src_ipaddr[0]);
+	if (enables &
+	    HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_DST_IPADDR)
+		req.dst_ipaddr[0] = rte_cpu_to_be_32(filter->dst_ipaddr[0]);
+	if (enables &
+	    HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_SRC_PORT)
+		req.src_port = rte_cpu_to_be_16(filter->src_port);
+	if (enables &
+	    HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_DST_PORT)
+		req.dst_port = rte_cpu_to_be_16(filter->dst_port);
+	if (enables &
+	    HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_MIRROR_VNIC_ID)
+		req.mirror_vnic_id = filter->mirror_vnic_id;
+
+	req.enables = rte_cpu_to_le_32(enables);
+
+	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+
+	HWRM_CHECK_RESULT();
+
+	filter->fw_em_filter_id = rte_le_to_cpu_64(resp->em_filter_id);
+	HWRM_UNLOCK();
+
+	return rc;
+}
+
+int bnxt_hwrm_clear_em_filter(struct bnxt *bp, struct bnxt_filter_info *filter)
+{
+	int rc = 0;
+	struct hwrm_cfa_em_flow_free_input req = {.req_type = 0 };
+	struct hwrm_cfa_em_flow_free_output *resp = bp->hwrm_cmd_resp_addr;
+
+	if (filter->fw_em_filter_id == UINT64_MAX)
+		return 0;
+
+	PMD_DRV_LOG(DEBUG, "Clear EM filter\n");
+	HWRM_PREP(req, CFA_EM_FLOW_FREE);
+
+	req.em_filter_id = rte_cpu_to_le_64(filter->fw_em_filter_id);
+
+	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+
+	HWRM_CHECK_RESULT();
+	HWRM_UNLOCK();
+
+	filter->fw_em_filter_id = UINT64_MAX;
+	filter->fw_l2_filter_id = UINT64_MAX;
+
+	return 0;
+}
+
+int bnxt_hwrm_set_ntuple_filter(struct bnxt *bp,
+			 uint16_t dst_id,
+			 struct bnxt_filter_info *filter)
+{
+	int rc = 0;
+	struct hwrm_cfa_ntuple_filter_alloc_input req = {.req_type = 0 };
+	struct hwrm_cfa_ntuple_filter_alloc_output *resp =
+						bp->hwrm_cmd_resp_addr;
+	uint32_t enables = 0;
+
+	if (filter->fw_ntuple_filter_id != UINT64_MAX)
+		bnxt_hwrm_clear_ntuple_filter(bp, filter);
+
+	HWRM_PREP(req, CFA_NTUPLE_FILTER_ALLOC);
+
+	req.flags = rte_cpu_to_le_32(filter->flags);
+
+	enables = filter->enables |
+	      HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_DST_ID;
+	req.dst_id = rte_cpu_to_le_16(dst_id);
+
+	if (filter->ip_addr_type) {
+		req.ip_addr_type = filter->ip_addr_type;
+		enables |=
+			HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_IPADDR_TYPE;
+	}
+	if (enables &
+	    HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_L2_FILTER_ID)
+		req.l2_filter_id = rte_cpu_to_le_64(filter->fw_l2_filter_id);
+	if (enables &
+	    HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_SRC_MACADDR)
+		memcpy(req.src_macaddr, filter->src_macaddr,
+		       ETHER_ADDR_LEN);
+	/*
+	 * if (enables &
+	 *     HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_DST_MACADDR)
+	 *	memcpy(req.dst_macaddr, filter->dst_macaddr,
+	 *	       ETHER_ADDR_LEN);
+	 */
+	if (enables &
+	    HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_ETHERTYPE)
+		req.ethertype = rte_cpu_to_be_16(filter->ethertype);
+	if (enables &
+	    HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_IP_PROTOCOL)
+		req.ip_protocol = filter->ip_protocol;
+	if (enables &
+	    HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_SRC_IPADDR)
+		req.src_ipaddr[0] = rte_cpu_to_le_32(filter->src_ipaddr[0]);
+	if (enables &
+	    HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_SRC_IPADDR_MASK)
+		req.src_ipaddr_mask[0] =
+			rte_cpu_to_le_32(filter->src_ipaddr_mask[0]);
+	if (enables &
+	    HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_DST_IPADDR)
+		req.dst_ipaddr[0] = rte_cpu_to_le_32(filter->dst_ipaddr[0]);
+	if (enables &
+	    HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_DST_IPADDR_MASK)
+		req.dst_ipaddr_mask[0] =
+			rte_cpu_to_le_32(filter->dst_ipaddr_mask[0]);
+	if (enables &
+	    HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_SRC_PORT)
+		req.src_port = rte_cpu_to_le_16(filter->src_port);
+	if (enables &
+	    HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_SRC_PORT_MASK)
+		req.src_port_mask = rte_cpu_to_le_16(filter->src_port_mask);
+	if (enables &
+	    HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_DST_PORT)
+		req.dst_port = rte_cpu_to_le_16(filter->dst_port);
+	if (enables &
+	    HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_DST_PORT_MASK)
+		req.dst_port_mask = rte_cpu_to_le_16(filter->dst_port_mask);
+	if (enables &
+	    HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_MIRROR_VNIC_ID)
+		req.mirror_vnic_id = filter->mirror_vnic_id;
+
+	req.enables = rte_cpu_to_le_32(enables);
+
+	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+
+	HWRM_CHECK_RESULT();
+
+	filter->fw_ntuple_filter_id = rte_le_to_cpu_64(resp->ntuple_filter_id);
+	HWRM_UNLOCK();
+
+	return rc;
+}
+
+int bnxt_hwrm_clear_ntuple_filter(struct bnxt *bp,
+				struct bnxt_filter_info *filter)
+{
+	int rc = 0;
+	struct hwrm_cfa_ntuple_filter_free_input req = {.req_type = 0 };
+	struct hwrm_cfa_ntuple_filter_free_output *resp =
+						bp->hwrm_cmd_resp_addr;
+
+	if (filter->fw_ntuple_filter_id == UINT64_MAX)
+		return 0;
+
+	HWRM_PREP(req, CFA_NTUPLE_FILTER_FREE);
+
+	req.ntuple_filter_id = rte_cpu_to_le_64(filter->fw_ntuple_filter_id);
+
+	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+
+	HWRM_CHECK_RESULT();
+	HWRM_UNLOCK();
+
+	filter->fw_ntuple_filter_id = UINT64_MAX;
+	filter->fw_l2_filter_id = UINT64_MAX;
+
+	return 0;
+}
+
+int bnxt_vnic_rss_configure(struct bnxt *bp, struct bnxt_vnic_info *vnic)
+{
+	unsigned int rss_idx, fw_idx, i;
+
+	if (vnic->rss_table && vnic->hash_type) {
+		/*
+		 * Fill the RSS hash & redirection table with
+		 * ring group ids for all VNICs
+		 */
+		for (rss_idx = 0, fw_idx = 0; rss_idx < HW_HASH_INDEX_SIZE;
+			rss_idx++, fw_idx++) {
+			for (i = 0; i < bp->rx_cp_nr_rings; i++) {
+				fw_idx %= bp->rx_cp_nr_rings;
+				if (vnic->fw_grp_ids[fw_idx] !=
+				    INVALID_HW_RING_ID)
+					break;
+				fw_idx++;
+			}
+			if (i == bp->rx_cp_nr_rings)
+				return 0;
+			vnic->rss_table[rss_idx] =
+				vnic->fw_grp_ids[fw_idx];
+		}
+		return bnxt_hwrm_vnic_rss_cfg(bp, vnic);
+	}
+	return 0;
+}
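To make the redirection-table fill in bnxt_vnic_rss_configure() concrete: with, say, ring groups {5, 9, INVALID_HW_RING_ID, 12} (hypothetical values), the loop above writes the repeating pattern 5, 9, 12, 5, 9, 12, ... across the HW_HASH_INDEX_SIZE entries, skipping the invalid group. A standalone restatement of the same selection logic, with illustrative types:

/* Standalone sketch of the redirection-table fill above: round-robin
 * over ring groups, skipping invalid entries. Types and parameters are
 * illustrative, not the driver's own structures.
 */
static void fill_rss_table_sketch(uint16_t *rss_table,
				  unsigned int table_size,
				  const uint16_t *fw_grp_ids,
				  unsigned int nr_rings,
				  uint16_t invalid_id)
{
	unsigned int rss_idx, fw_idx, i;

	for (rss_idx = 0, fw_idx = 0; rss_idx < table_size;
	     rss_idx++, fw_idx++) {
		/* Advance fw_idx (with wraparound) to the next valid group. */
		for (i = 0; i < nr_rings; i++) {
			fw_idx %= nr_rings;
			if (fw_grp_ids[fw_idx] != invalid_id)
				break;
			fw_idx++;
		}
		if (i == nr_rings)
			return;	/* no valid ring group exists at all */
		rss_table[rss_idx] = fw_grp_ids[fw_idx];
	}
}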