diff --git a/drivers/net/bnxt/bnxt_hwrm.c b/drivers/net/bnxt/bnxt_hwrm.c
index ba8e44a9be..e5f8fda9a3 100644
--- a/drivers/net/bnxt/bnxt_hwrm.c
+++ b/drivers/net/bnxt/bnxt_hwrm.c
@@ -26,9 +26,11 @@
 #include <rte_io.h>
 
-#define HWRM_CMD_TIMEOUT		10000
+#define HWRM_CMD_TIMEOUT		6000000
+#define HWRM_SHORT_CMD_TIMEOUT		50000
 #define HWRM_SPEC_CODE_1_8_3		0x10803
 #define HWRM_VERSION_1_9_1		0x10901
+#define HWRM_VERSION_1_9_2		0x10903
 
 struct bnxt_plcmodes_cfg {
 	uint32_t	flags;
@@ -62,6 +64,18 @@ static int page_roundup(size_t size)
 	return 1 << page_getenum(size);
 }
 
+static void bnxt_hwrm_set_pg_attr(struct bnxt_ring_mem_info *rmem,
+				  uint8_t *pg_attr,
+				  uint64_t *pg_dir)
+{
+	if (rmem->nr_pages > 1) {
+		*pg_attr = 1;
+		*pg_dir = rte_cpu_to_le_64(rmem->pg_tbl_map);
+	} else {
+		*pg_dir = rte_cpu_to_le_64(rmem->dma_arr[0]);
+	}
+}
+
 /*
  * HWRM Functions (sent to HWRM)
  * These are named bnxt_hwrm_*() and return -1 if bnxt_hwrm_send_message()
  *
@@ -70,7 +84,7 @@ static int page_roundup(size_t size)
  */
 static int bnxt_hwrm_send_message(struct bnxt *bp, void *msg,
-				  uint32_t msg_len)
+				  uint32_t msg_len, bool use_kong_mb)
 {
 	unsigned int i;
 	struct input *req = msg;
@@ -80,11 +94,27 @@ static int bnxt_hwrm_send_message(struct bnxt *bp, void *msg,
 	uint8_t *valid;
 	uint16_t max_req_len = bp->max_req_len;
 	struct hwrm_short_input short_input = { 0 };
+	uint16_t bar_offset = use_kong_mb ?
+		GRCPF_REG_KONG_CHANNEL_OFFSET : GRCPF_REG_CHIMP_CHANNEL_OFFSET;
+	uint16_t mb_trigger_offset = use_kong_mb ?
+		GRCPF_REG_KONG_COMM_TRIGGER : GRCPF_REG_CHIMP_COMM_TRIGGER;
+	uint32_t timeout;
+
+	/* Do not send HWRM commands to firmware in error state */
+	if (bp->flags & BNXT_FLAG_FATAL_ERROR)
+		return 0;
+
+	/* For VER_GET command, set timeout as 50ms */
+	if (rte_cpu_to_le_16(req->req_type) == HWRM_VER_GET)
+		timeout = HWRM_SHORT_CMD_TIMEOUT;
+	else
+		timeout = HWRM_CMD_TIMEOUT;
 
-	if (bp->flags & BNXT_FLAG_SHORT_CMD) {
+	if (bp->flags & BNXT_FLAG_SHORT_CMD ||
+	    msg_len > bp->max_req_len) {
 		void *short_cmd_req = bp->hwrm_short_cmd_req_addr;
 
-		memset(short_cmd_req, 0, bp->max_req_len);
+		memset(short_cmd_req, 0, bp->hwrm_max_ext_req_len);
 		memcpy(short_cmd_req, req, msg_len);
 
 		short_input.req_type = rte_cpu_to_le_16(req->req_type);
@@ -97,52 +127,56 @@ static int bnxt_hwrm_send_message(struct bnxt *bp, void *msg,
 		data = (uint32_t *)&short_input;
 		msg_len = sizeof(short_input);
 
-		/* Sync memory write before updating doorbell */
-		rte_wmb();
-
 		max_req_len = BNXT_HWRM_SHORT_REQ_LEN;
 	}
 
 	/* Write request msg to hwrm channel */
 	for (i = 0; i < msg_len; i += 4) {
-		bar = (uint8_t *)bp->bar0 + i;
+		bar = (uint8_t *)bp->bar0 + bar_offset + i;
 		rte_write32(*data, bar);
 		data++;
 	}
 
 	/* Zero the rest of the request space */
 	for (; i < max_req_len; i += 4) {
-		bar = (uint8_t *)bp->bar0 + i;
+		bar = (uint8_t *)bp->bar0 + bar_offset + i;
 		rte_write32(0, bar);
 	}
 
 	/* Ring channel doorbell */
-	bar = (uint8_t *)bp->bar0 + 0x100;
+	bar = (uint8_t *)bp->bar0 + mb_trigger_offset;
 	rte_write32(1, bar);
+	/*
+	 * Make sure the channel doorbell ring command completes before
+	 * reading the response to avoid getting stale or invalid
+	 * responses.
+	 */
+	rte_io_mb();
 
 	/* Poll for the valid bit */
-	for (i = 0; i < HWRM_CMD_TIMEOUT; i++) {
+	for (i = 0; i < timeout; i++) {
 		/* Sanity check on the resp->resp_len */
-		rte_rmb();
-		if (resp->resp_len && resp->resp_len <=
-				bp->max_resp_len) {
+		rte_cio_rmb();
+		if (resp->resp_len && resp->resp_len <= bp->max_resp_len) {
 			/* Last byte of resp contains the valid key */
 			valid = (uint8_t *)resp + resp->resp_len - 1;
 			if (*valid == HWRM_RESP_VALID_KEY)
 				break;
 		}
-		rte_delay_us(600);
+		rte_delay_us(1);
 	}
 
-	if (i >= HWRM_CMD_TIMEOUT) {
-		PMD_DRV_LOG(ERR, "Error sending msg 0x%04x\n",
-			req->req_type);
-		goto err_ret;
+	if (i >= timeout) {
+		/* Suppress VER_GET timeout messages during reset recovery */
+		if (bp->flags & BNXT_FLAG_FW_RESET &&
+		    rte_cpu_to_le_16(req->req_type) == HWRM_VER_GET)
+			return -ETIMEDOUT;
+
+		PMD_DRV_LOG(ERR, "Error(timeout) sending msg 0x%04x\n",
+			    req->req_type);
+		return -ETIMEDOUT;
 	}
 	return 0;
-
-err_ret:
-	return -1;
 }
 
 /*
@@ -156,20 +190,43 @@ err_ret:
 *
 * HWRM_UNLOCK() must be called after all response processing is completed.
 */
-#define HWRM_PREP(req, type) do { \
+#define HWRM_PREP(req, type, kong) do {	\
 	rte_spinlock_lock(&bp->hwrm_lock); \
 	memset(bp->hwrm_cmd_resp_addr, 0, bp->max_resp_len); \
 	req.req_type = rte_cpu_to_le_16(HWRM_##type); \
 	req.cmpl_ring = rte_cpu_to_le_16(-1); \
-	req.seq_id = rte_cpu_to_le_16(bp->hwrm_cmd_seq++); \
+	req.seq_id = kong ? rte_cpu_to_le_16(bp->kong_cmd_seq++) :\
+		rte_cpu_to_le_16(bp->hwrm_cmd_seq++); \
	req.target_id = rte_cpu_to_le_16(0xffff); \
 	req.resp_addr = rte_cpu_to_le_64(bp->hwrm_cmd_resp_dma_addr); \
 } while (0)
 
+#define HWRM_CHECK_RESULT_SILENT() do {\
+	if (rc) { \
+		rte_spinlock_unlock(&bp->hwrm_lock); \
+		return rc; \
+	} \
+	if (resp->error_code) { \
+		rc = rte_le_to_cpu_16(resp->error_code); \
+		rte_spinlock_unlock(&bp->hwrm_lock); \
+		return rc; \
+	} \
+} while (0)
+
 #define HWRM_CHECK_RESULT() do {\
 	if (rc) { \
 		PMD_DRV_LOG(ERR, "failed rc:%d\n", rc); \
 		rte_spinlock_unlock(&bp->hwrm_lock); \
+		if (rc == HWRM_ERR_CODE_RESOURCE_ACCESS_DENIED) \
+			rc = -EACCES; \
+		else if (rc == HWRM_ERR_CODE_RESOURCE_ALLOC_ERROR) \
+			rc = -ENOSPC; \
+		else if (rc == HWRM_ERR_CODE_INVALID_PARAMS) \
+			rc = -EINVAL; \
+		else if (rc == HWRM_ERR_CODE_CMD_NOT_SUPPORTED) \
+			rc = -ENOTSUP; \
+		else if (rc > 0) \
+			rc = -EIO; \
 		return rc; \
 	} \
 	if (resp->error_code) { \
@@ -188,6 +245,16 @@ err_ret:
 			PMD_DRV_LOG(ERR, "error %d\n", rc); \
 		} \
 		rte_spinlock_unlock(&bp->hwrm_lock); \
+		if (rc == HWRM_ERR_CODE_RESOURCE_ACCESS_DENIED) \
+			rc = -EACCES; \
+		else if (rc == HWRM_ERR_CODE_RESOURCE_ALLOC_ERROR) \
+			rc = -ENOSPC; \
+		else if (rc == HWRM_ERR_CODE_INVALID_PARAMS) \
+			rc = -EINVAL; \
+		else if (rc == HWRM_ERR_CODE_CMD_NOT_SUPPORTED) \
+			rc = -ENOTSUP; \
+		else if (rc > 0) \
+			rc = -EIO; \
 		return rc; \
 	} \
 } while (0)
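With the poll interval reduced to rte_delay_us(1), the timeout constants read directly as wall-clock budgets: 6,000,000 iterations of roughly 1 us gives the ~6 s HWRM_CMD_TIMEOUT, and 50,000 gives the 50 ms VER_GET budget noted in the comment above. The macros also fix the calling convention for every wrapper in this file; a minimal sketch of that convention follows, using a hypothetical wrapper name (the macros expand references to bp, rc and resp, so those locals are the real contract):

    /* Sketch: every HWRM wrapper in this file follows this shape. */
    static int bnxt_hwrm_example_qcfg(struct bnxt *bp)
    {
            struct hwrm_func_qcfg_input req = {0};
            struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
            int rc;

            HWRM_PREP(req, FUNC_QCFG, BNXT_USE_CHIMP_MB); /* takes hwrm_lock */
            req.fid = rte_cpu_to_le_16(0xffff);           /* this function  */

            rc = bnxt_hwrm_send_message(bp, &req, sizeof(req),
                                        BNXT_USE_CHIMP_MB);

            /* Unlocks and returns a negative errno on any failure. */
            HWRM_CHECK_RESULT();

            /* ... read resp fields here, lock still held ... */

            HWRM_UNLOCK();
            return rc;
    }

A side effect of the new mapping table is that callers can test for -EACCES, -ENOSPC, -EINVAL or -ENOTSUP instead of raw HWRM error codes; any other positive firmware code collapses to -EIO.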
@@ -200,11 +267,11 @@ int bnxt_hwrm_cfa_l2_clear_rx_mask(struct bnxt *bp, struct bnxt_vnic_info *vnic)
 	struct hwrm_cfa_l2_set_rx_mask_input req = {.req_type = 0 };
 	struct hwrm_cfa_l2_set_rx_mask_output *resp = bp->hwrm_cmd_resp_addr;
 
-	HWRM_PREP(req, CFA_L2_SET_RX_MASK);
+	HWRM_PREP(req, CFA_L2_SET_RX_MASK, BNXT_USE_CHIMP_MB);
 	req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
 	req.mask = 0;
 
-	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
 
 	HWRM_CHECK_RESULT();
 	HWRM_UNLOCK();
@@ -225,7 +292,7 @@ int bnxt_hwrm_cfa_l2_set_rx_mask(struct bnxt *bp,
 	if (vnic->fw_vnic_id == INVALID_HW_RING_ID)
 		return rc;
 
-	HWRM_PREP(req, CFA_L2_SET_RX_MASK);
+	HWRM_PREP(req, CFA_L2_SET_RX_MASK, BNXT_USE_CHIMP_MB);
 	req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
 
 	/* FIXME add multicast flag, when multicast adding options is supported
@@ -255,7 +322,7 @@ int bnxt_hwrm_cfa_l2_set_rx_mask(struct bnxt *bp,
 	}
 	req.mask = rte_cpu_to_le_32(mask);
 
-	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
 
 	HWRM_CHECK_RESULT();
 	HWRM_UNLOCK();
@@ -287,14 +354,14 @@ int bnxt_hwrm_cfa_vlan_antispoof_cfg(struct bnxt *bp, uint16_t fid,
 			return 0;
 		}
 	}
-	HWRM_PREP(req, CFA_VLAN_ANTISPOOF_CFG);
+	HWRM_PREP(req, CFA_VLAN_ANTISPOOF_CFG, BNXT_USE_CHIMP_MB);
 	req.fid = rte_cpu_to_le_16(fid);
 	req.vlan_tag_mask_tbl_addr =
 		rte_cpu_to_le_64(rte_mem_virt2iova(vlan_table));
 	req.num_vlan_entries = rte_cpu_to_le_32((uint32_t)vlan_count);
 
-	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
 
 	HWRM_CHECK_RESULT();
 	HWRM_UNLOCK();
@@ -306,17 +373,30 @@ int bnxt_hwrm_clear_l2_filter(struct bnxt *bp,
 			   struct bnxt_filter_info *filter)
 {
 	int rc = 0;
+	struct bnxt_filter_info *l2_filter = filter;
 	struct hwrm_cfa_l2_filter_free_input req = {.req_type = 0 };
 	struct hwrm_cfa_l2_filter_free_output *resp = bp->hwrm_cmd_resp_addr;
 
 	if (filter->fw_l2_filter_id == UINT64_MAX)
 		return 0;
 
-	HWRM_PREP(req, CFA_L2_FILTER_FREE);
+	if (filter->matching_l2_fltr_ptr)
+		l2_filter = filter->matching_l2_fltr_ptr;
+
+	PMD_DRV_LOG(DEBUG, "filter: %p l2_filter: %p ref_cnt: %d\n",
+		    filter, l2_filter, l2_filter->l2_ref_cnt);
+
+	if (l2_filter->l2_ref_cnt > 0)
+		l2_filter->l2_ref_cnt--;
+
+	if (l2_filter->l2_ref_cnt > 0)
+		return 0;
+
+	HWRM_PREP(req, CFA_L2_FILTER_FREE, BNXT_USE_CHIMP_MB);
 
 	req.l2_filter_id = rte_cpu_to_le_64(filter->fw_l2_filter_id);
 
-	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
 
 	HWRM_CHECK_RESULT();
 	HWRM_UNLOCK();
@@ -355,7 +435,7 @@ int bnxt_hwrm_set_l2_filter(struct bnxt *bp,
 	if (filter->fw_l2_filter_id != UINT64_MAX)
 		bnxt_hwrm_clear_l2_filter(bp, filter);
 
-	HWRM_PREP(req, CFA_L2_FILTER_ALLOC);
+	HWRM_PREP(req, CFA_L2_FILTER_ALLOC, BNXT_USE_CHIMP_MB);
 
 	req.flags = rte_cpu_to_le_32(filter->flags);
 
@@ -366,31 +446,36 @@ int bnxt_hwrm_set_l2_filter(struct bnxt *bp,
 
 	if (enables & HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR)
 		memcpy(req.l2_addr, filter->l2_addr,
-		       ETHER_ADDR_LEN);
+		       RTE_ETHER_ADDR_LEN);
 	if (enables & HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR_MASK)
 		memcpy(req.l2_addr_mask, filter->l2_addr_mask,
-		       ETHER_ADDR_LEN);
+		       RTE_ETHER_ADDR_LEN);
 	if (enables & HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_OVLAN)
 		req.l2_ovlan = filter->l2_ovlan;
 	if (enables & HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_IVLAN)
-		req.l2_ovlan = filter->l2_ivlan;
+		req.l2_ivlan = filter->l2_ivlan;
 	if (enables & HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_OVLAN_MASK)
 		req.l2_ovlan_mask = filter->l2_ovlan_mask;
 	if (enables & HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_IVLAN_MASK)
-		req.l2_ovlan_mask = filter->l2_ivlan_mask;
+		req.l2_ivlan_mask = filter->l2_ivlan_mask;
 	if (enables & HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_SRC_ID)
 		req.src_id = rte_cpu_to_le_32(filter->src_id);
 	if (enables & HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_SRC_TYPE)
 		req.src_type = filter->src_type;
+	if (filter->pri_hint) {
+		req.pri_hint = filter->pri_hint;
+		req.l2_filter_id_hint =
+			rte_cpu_to_le_64(filter->l2_filter_id_hint);
+	}
 
 	req.enables = rte_cpu_to_le_32(enables);
 
-	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
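The new l2_ref_cnt bookkeeping makes bnxt_hwrm_clear_l2_filter() safe to call once per logical user of a shared L2 filter: only the last drop issues CFA_L2_FILTER_FREE. A sketch of the resulting behavior, assuming two flows share one hardware filter through matching_l2_fltr_ptr (the setup shorthand is illustrative; where the count is incremented lies outside this hunk):

    flow2->filter->matching_l2_fltr_ptr = flow1->filter;
    flow1->filter->l2_ref_cnt = 2;                /* assumed bookkeeping  */

    bnxt_hwrm_clear_l2_filter(bp, flow2->filter); /* 2 -> 1, no HWRM cmd  */
    bnxt_hwrm_clear_l2_filter(bp, flow1->filter); /* 1 -> 0, frees in FW  */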
+	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
 
 	HWRM_CHECK_RESULT();
 
@@ -410,7 +495,7 @@ int bnxt_hwrm_ptp_cfg(struct bnxt *bp)
 	if (!ptp)
 		return 0;
 
-	HWRM_PREP(req, PORT_MAC_CFG);
+	HWRM_PREP(req, PORT_MAC_CFG, BNXT_USE_CHIMP_MB);
 
 	if (ptp->rx_filter)
 		flags |= HWRM_PORT_MAC_CFG_INPUT_FLAGS_PTP_RX_TS_CAPTURE_ENABLE;
@@ -427,7 +512,7 @@ int bnxt_hwrm_ptp_cfg(struct bnxt *bp)
 		(HWRM_PORT_MAC_CFG_INPUT_ENABLES_RX_TS_CAPTURE_PTP_MSG_TYPE);
 	req.rx_ts_capture_ptp_msg_type = rte_cpu_to_le_16(ptp->rxctl);
 
-	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
 	HWRM_UNLOCK();
 
 	return rc;
@@ -444,39 +529,45 @@ static int bnxt_hwrm_ptp_qcfg(struct bnxt *bp)
 	if (ptp)
 		return 0;
 
-	HWRM_PREP(req, PORT_MAC_PTP_QCFG);
+	HWRM_PREP(req, PORT_MAC_PTP_QCFG, BNXT_USE_CHIMP_MB);
 
 	req.port_id = rte_cpu_to_le_16(bp->pf.port_id);
 
-	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
 
 	HWRM_CHECK_RESULT();
 
-	if (!(resp->flags & HWRM_PORT_MAC_PTP_QCFG_OUTPUT_FLAGS_DIRECT_ACCESS))
+	if (!BNXT_CHIP_THOR(bp) &&
+	    !(resp->flags & HWRM_PORT_MAC_PTP_QCFG_OUTPUT_FLAGS_DIRECT_ACCESS))
 		return 0;
 
+	if (resp->flags & HWRM_PORT_MAC_PTP_QCFG_OUTPUT_FLAGS_ONE_STEP_TX_TS)
+		bp->flags |= BNXT_FLAG_FW_CAP_ONE_STEP_TX_TS;
+
 	ptp = rte_zmalloc("ptp_cfg", sizeof(*ptp), 0);
 	if (!ptp)
 		return -ENOMEM;
 
-	ptp->rx_regs[BNXT_PTP_RX_TS_L] =
-		rte_le_to_cpu_32(resp->rx_ts_reg_off_lower);
-	ptp->rx_regs[BNXT_PTP_RX_TS_H] =
-		rte_le_to_cpu_32(resp->rx_ts_reg_off_upper);
-	ptp->rx_regs[BNXT_PTP_RX_SEQ] =
-		rte_le_to_cpu_32(resp->rx_ts_reg_off_seq_id);
-	ptp->rx_regs[BNXT_PTP_RX_FIFO] =
-		rte_le_to_cpu_32(resp->rx_ts_reg_off_fifo);
-	ptp->rx_regs[BNXT_PTP_RX_FIFO_ADV] =
-		rte_le_to_cpu_32(resp->rx_ts_reg_off_fifo_adv);
-	ptp->tx_regs[BNXT_PTP_TX_TS_L] =
-		rte_le_to_cpu_32(resp->tx_ts_reg_off_lower);
-	ptp->tx_regs[BNXT_PTP_TX_TS_H] =
-		rte_le_to_cpu_32(resp->tx_ts_reg_off_upper);
-	ptp->tx_regs[BNXT_PTP_TX_SEQ] =
-		rte_le_to_cpu_32(resp->tx_ts_reg_off_seq_id);
-	ptp->tx_regs[BNXT_PTP_TX_FIFO] =
-		rte_le_to_cpu_32(resp->tx_ts_reg_off_fifo);
+	if (!BNXT_CHIP_THOR(bp)) {
+		ptp->rx_regs[BNXT_PTP_RX_TS_L] =
+			rte_le_to_cpu_32(resp->rx_ts_reg_off_lower);
+		ptp->rx_regs[BNXT_PTP_RX_TS_H] =
+			rte_le_to_cpu_32(resp->rx_ts_reg_off_upper);
+		ptp->rx_regs[BNXT_PTP_RX_SEQ] =
+			rte_le_to_cpu_32(resp->rx_ts_reg_off_seq_id);
+		ptp->rx_regs[BNXT_PTP_RX_FIFO] =
+			rte_le_to_cpu_32(resp->rx_ts_reg_off_fifo);
+		ptp->rx_regs[BNXT_PTP_RX_FIFO_ADV] =
+			rte_le_to_cpu_32(resp->rx_ts_reg_off_fifo_adv);
+		ptp->tx_regs[BNXT_PTP_TX_TS_L] =
+			rte_le_to_cpu_32(resp->tx_ts_reg_off_lower);
+		ptp->tx_regs[BNXT_PTP_TX_TS_H] =
+			rte_le_to_cpu_32(resp->tx_ts_reg_off_upper);
+		ptp->tx_regs[BNXT_PTP_TX_SEQ] =
+			rte_le_to_cpu_32(resp->tx_ts_reg_off_seq_id);
+		ptp->tx_regs[BNXT_PTP_TX_FIFO] =
+			rte_le_to_cpu_32(resp->tx_ts_reg_off_fifo);
+	}
 
 	ptp->bp = bp;
 	bp->ptp_cfg = ptp;
@@ -493,11 +584,11 @@ static int __bnxt_hwrm_func_qcaps(struct bnxt *bp)
 	uint32_t flags;
 	int i;
 
-	HWRM_PREP(req, FUNC_QCAPS);
+	HWRM_PREP(req, FUNC_QCAPS, BNXT_USE_CHIMP_MB);
 
 	req.fid = rte_cpu_to_le_16(0xffff);
 
-	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
 
 	HWRM_CHECK_RESULT();
 
@@ -544,12 +635,16 @@ static int __bnxt_hwrm_func_qcaps(struct bnxt *bp)
 	}
 
 	bp->fw_fid = rte_le_to_cpu_32(resp->fid);
-	memcpy(bp->dflt_mac_addr, &resp->mac_address, ETHER_ADDR_LEN);
+	memcpy(bp->dflt_mac_addr, &resp->mac_address, RTE_ETHER_ADDR_LEN);
 	bp->max_rsscos_ctx = rte_le_to_cpu_16(resp->max_rsscos_ctx);
 	bp->max_cp_rings = rte_le_to_cpu_16(resp->max_cmpl_rings);
 	bp->max_tx_rings = rte_le_to_cpu_16(resp->max_tx_rings);
 	bp->max_rx_rings = rte_le_to_cpu_16(resp->max_rx_rings);
+	bp->first_vf_id = rte_le_to_cpu_16(resp->first_vf_id);
+	bp->max_rx_em_flows = rte_le_to_cpu_16(resp->max_rx_em_flows);
 	bp->max_l2_ctx = rte_le_to_cpu_16(resp->max_l2_ctxs);
+	if (!BNXT_CHIP_THOR(bp))
+		bp->max_l2_ctx += bp->max_rx_em_flows;
 	/* TODO: For now, do not support VMDq/RFS on VFs. */
 	if (BNXT_PF(bp)) {
 		if (bp->pf.max_vfs)
@@ -564,12 +659,27 @@ static int __bnxt_hwrm_func_qcaps(struct bnxt *bp)
 		bp->pf.total_vnics = rte_le_to_cpu_16(resp->max_vnics);
 		if (flags & HWRM_FUNC_QCAPS_OUTPUT_FLAGS_PTP_SUPPORTED) {
 			bp->flags |= BNXT_FLAG_PTP_SUPPORTED;
-			PMD_DRV_LOG(INFO, "PTP SUPPORTED\n");
+			PMD_DRV_LOG(DEBUG, "PTP SUPPORTED\n");
 			HWRM_UNLOCK();
 			bnxt_hwrm_ptp_qcfg(bp);
 		}
 	}
 
+	if (flags & HWRM_FUNC_QCAPS_OUTPUT_FLAGS_EXT_STATS_SUPPORTED)
+		bp->flags |= BNXT_FLAG_EXT_STATS_SUPPORTED;
+
+	if (flags & HWRM_FUNC_QCAPS_OUTPUT_FLAGS_ERROR_RECOVERY_CAPABLE) {
+		bp->flags |= BNXT_FLAG_FW_CAP_ERROR_RECOVERY;
+		PMD_DRV_LOG(DEBUG, "Adapter Error recovery SUPPORTED\n");
+	} else {
+		bp->flags &= ~BNXT_FLAG_FW_CAP_ERROR_RECOVERY;
+	}
+
+	if (flags & HWRM_FUNC_QCAPS_OUTPUT_FLAGS_ERR_RECOVER_RELOAD)
+		bp->flags |= BNXT_FLAG_FW_CAP_ERR_RECOVER_RELOAD;
+	else
+		bp->flags &= ~BNXT_FLAG_FW_CAP_ERR_RECOVER_RELOAD;
+
 	HWRM_UNLOCK();
 
 	return rc;
@@ -581,6 +691,10 @@ int bnxt_hwrm_func_qcaps(struct bnxt *bp)
 	rc = __bnxt_hwrm_func_qcaps(bp);
 	if (!rc && bp->hwrm_spec_code >= HWRM_SPEC_CODE_1_8_3) {
+		rc = bnxt_alloc_ctx_mem(bp);
+		if (rc)
+			return rc;
+
 		rc = bnxt_hwrm_func_resc_qcaps(bp);
 		if (!rc)
 			bp->flags |= BNXT_FLAG_NEW_RM;
@@ -595,11 +709,11 @@ int bnxt_hwrm_func_reset(struct bnxt *bp)
 	struct hwrm_func_reset_input req = {.req_type = 0 };
 	struct hwrm_func_reset_output *resp = bp->hwrm_cmd_resp_addr;
 
-	HWRM_PREP(req, FUNC_RESET);
+	HWRM_PREP(req, FUNC_RESET, BNXT_USE_CHIMP_MB);
 
 	req.enables = rte_cpu_to_le_32(0);
 
-	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
 
 	HWRM_CHECK_RESULT();
 	HWRM_UNLOCK();
@@ -610,13 +724,24 @@ int bnxt_hwrm_func_reset(struct bnxt *bp)
 int bnxt_hwrm_func_driver_register(struct bnxt *bp)
 {
 	int rc;
+	uint32_t flags = 0;
 	struct hwrm_func_drv_rgtr_input req = {.req_type = 0 };
 	struct hwrm_func_drv_rgtr_output *resp = bp->hwrm_cmd_resp_addr;
 
 	if (bp->flags & BNXT_FLAG_REGISTERED)
 		return 0;
 
-	HWRM_PREP(req, FUNC_DRV_RGTR);
+	flags = HWRM_FUNC_DRV_RGTR_INPUT_FLAGS_HOT_RESET_SUPPORT;
+	if (bp->flags & BNXT_FLAG_FW_CAP_ERROR_RECOVERY)
+		flags |= HWRM_FUNC_DRV_RGTR_INPUT_FLAGS_ERROR_RECOVERY_SUPPORT;
+
+	/* PFs and trusted VFs should indicate the support of the
+	 * Master capability on non-Stingray platforms.
+	 */
+	if ((BNXT_PF(bp) || BNXT_VF_IS_TRUSTED(bp)) && !BNXT_STINGRAY(bp))
+		flags |= HWRM_FUNC_DRV_RGTR_INPUT_FLAGS_MASTER_SUPPORT;
+
+	HWRM_PREP(req, FUNC_DRV_RGTR, BNXT_USE_CHIMP_MB);
 	req.enables = rte_cpu_to_le_32(HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_VER |
			HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_ASYNC_EVENT_FWD);
 	req.ver_maj = RTE_VER_YEAR;
@@ -636,21 +761,32 @@ int bnxt_hwrm_func_driver_register(struct bnxt *bp)
 		 * this HWRM sniffer list in FW because DPDK PF driver does
 		 * not support this.
 		 */
-		req.flags =
-			rte_cpu_to_le_32(HWRM_FUNC_DRV_RGTR_INPUT_FLAGS_FWD_NONE_MODE);
+		flags |= HWRM_FUNC_DRV_RGTR_INPUT_FLAGS_FWD_NONE_MODE;
 	}
 
+	req.flags = rte_cpu_to_le_32(flags);
+
 	req.async_event_fwd[0] |=
 		rte_cpu_to_le_32(ASYNC_CMPL_EVENT_ID_LINK_STATUS_CHANGE |
 				 ASYNC_CMPL_EVENT_ID_PORT_CONN_NOT_ALLOWED |
-				 ASYNC_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE);
+				 ASYNC_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE |
+				 ASYNC_CMPL_EVENT_ID_LINK_SPEED_CHANGE |
+				 ASYNC_CMPL_EVENT_ID_RESET_NOTIFY);
+	if (bp->flags & BNXT_FLAG_FW_CAP_ERROR_RECOVERY)
+		req.async_event_fwd[0] |=
+			rte_cpu_to_le_32(ASYNC_CMPL_EVENT_ID_ERROR_RECOVERY);
 	req.async_event_fwd[1] |=
 		rte_cpu_to_le_32(ASYNC_CMPL_EVENT_ID_PF_DRVR_UNLOAD |
 				 ASYNC_CMPL_EVENT_ID_VF_CFG_CHANGE);
 
-	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
 
 	HWRM_CHECK_RESULT();
+
+	flags = rte_le_to_cpu_32(resp->flags);
+	if (flags & HWRM_FUNC_DRV_RGTR_OUTPUT_FLAGS_IF_CHANGE_SUPPORTED)
+		bp->flags |= BNXT_FLAG_FW_CAP_IF_CHANGE;
+
 	HWRM_UNLOCK();
 
 	bp->flags |= BNXT_FLAG_REGISTERED;
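The async_event_fwd[] words form a firmware-defined bitmap of async completion event IDs the PMD wants forwarded; judging by the ASYNC_CMPL_EVENT_ID_* definitions used here, each 32-bit word appears to cover 32 consecutive event IDs. A sketch of that presumed encoding (the helper is illustrative, not part of the driver):

    /* Presumed layout: event ID n lands in bit (n % 32) of word (n / 32). */
    static void async_event_fwd_set(uint32_t *fwd, unsigned int event_id)
    {
            fwd[event_id / 32] |= rte_cpu_to_le_32(1U << (event_id % 32));
    }

That would explain why PF_DRVR_UNLOAD and VF_CFG_CHANGE (IDs above 31) are OR'd into async_event_fwd[1] while the link and reset events sit in word 0.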
@@ -658,32 +794,76 @@ int bnxt_hwrm_func_driver_register(struct bnxt *bp)
 	return rc;
 }
 
-int bnxt_hwrm_func_reserve_vf_resc(struct bnxt *bp)
+int bnxt_hwrm_check_vf_rings(struct bnxt *bp)
+{
+	if (!(BNXT_VF(bp) && (bp->flags & BNXT_FLAG_NEW_RM)))
+		return 0;
+
+	return bnxt_hwrm_func_reserve_vf_resc(bp, true);
+}
+
+int bnxt_hwrm_func_reserve_vf_resc(struct bnxt *bp, bool test)
 {
 	int rc;
+	uint32_t flags = 0;
+	uint32_t enables;
 	struct hwrm_func_vf_cfg_output *resp = bp->hwrm_cmd_resp_addr;
 	struct hwrm_func_vf_cfg_input req = {0};
 
-	HWRM_PREP(req, FUNC_VF_CFG);
+	HWRM_PREP(req, FUNC_VF_CFG, BNXT_USE_CHIMP_MB);
 
-	req.enables = rte_cpu_to_le_32
-			(HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_RX_RINGS |
-			 HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_TX_RINGS |
-			 HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_STAT_CTXS |
-			 HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_CMPL_RINGS |
-			 HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_HW_RING_GRPS);
+	enables = HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_RX_RINGS |
+		  HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_TX_RINGS |
+		  HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_STAT_CTXS |
+		  HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_CMPL_RINGS |
+		  HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_VNICS;
+
+	if (BNXT_HAS_RING_GRPS(bp)) {
+		enables |= HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_HW_RING_GRPS;
+		req.num_hw_ring_grps = rte_cpu_to_le_16(bp->rx_nr_rings);
+	}
 
 	req.num_tx_rings = rte_cpu_to_le_16(bp->tx_nr_rings);
 	req.num_rx_rings = rte_cpu_to_le_16(bp->rx_nr_rings *
 					    AGG_RING_MULTIPLIER);
-	req.num_stat_ctxs = rte_cpu_to_le_16(bp->rx_nr_rings + bp->tx_nr_rings);
+	req.num_stat_ctxs = rte_cpu_to_le_16(bp->rx_nr_rings +
+					     bp->tx_nr_rings +
+					     BNXT_NUM_ASYNC_CPR(bp));
 	req.num_cmpl_rings = rte_cpu_to_le_16(bp->rx_nr_rings +
-					      bp->tx_nr_rings);
-	req.num_hw_ring_grps = rte_cpu_to_le_16(bp->rx_nr_rings);
+					      bp->tx_nr_rings +
+					      BNXT_NUM_ASYNC_CPR(bp));
+	req.num_vnics = rte_cpu_to_le_16(bp->rx_nr_rings);
+	if (bp->vf_resv_strategy ==
+	    HWRM_FUNC_RESOURCE_QCAPS_OUTPUT_VF_RESV_STRATEGY_MINIMAL_STATIC) {
+		enables |= HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_VNICS |
+			   HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_L2_CTXS |
+			   HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_RSSCOS_CTXS;
+		req.num_rsscos_ctxs = rte_cpu_to_le_16(BNXT_VF_RSV_NUM_RSS_CTX);
+		req.num_l2_ctxs = rte_cpu_to_le_16(BNXT_VF_RSV_NUM_L2_CTX);
+		req.num_vnics = rte_cpu_to_le_16(BNXT_VF_RSV_NUM_VNIC);
+	}
 
-	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+	if (test)
+		flags = HWRM_FUNC_VF_CFG_INPUT_FLAGS_TX_ASSETS_TEST |
+			HWRM_FUNC_VF_CFG_INPUT_FLAGS_RX_ASSETS_TEST |
+			HWRM_FUNC_VF_CFG_INPUT_FLAGS_CMPL_ASSETS_TEST |
+			HWRM_FUNC_VF_CFG_INPUT_FLAGS_RING_GRP_ASSETS_TEST |
+			HWRM_FUNC_VF_CFG_INPUT_FLAGS_STAT_CTX_ASSETS_TEST |
+			HWRM_FUNC_VF_CFG_INPUT_FLAGS_VNIC_ASSETS_TEST;
+
+	if (test && BNXT_HAS_RING_GRPS(bp))
+		flags |= HWRM_FUNC_VF_CFG_INPUT_FLAGS_RING_GRP_ASSETS_TEST;
+
+	req.flags = rte_cpu_to_le_32(flags);
+	req.enables |= rte_cpu_to_le_32(enables);
+
+	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
+
+	if (test)
+		HWRM_CHECK_RESULT_SILENT();
+	else
+		HWRM_CHECK_RESULT();
 
-	HWRM_CHECK_RESULT();
 	HWRM_UNLOCK();
 	return rc;
 }
@@ -694,10 +874,10 @@ int bnxt_hwrm_func_resc_qcaps(struct bnxt *bp)
 	struct hwrm_func_resource_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
 	struct hwrm_func_resource_qcaps_input req = {0};
 
-	HWRM_PREP(req, FUNC_RESOURCE_QCAPS);
+	HWRM_PREP(req, FUNC_RESOURCE_QCAPS, BNXT_USE_CHIMP_MB);
 	req.fid = rte_cpu_to_le_16(0xffff);
 
-	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
 
 	HWRM_CHECK_RESULT();
 
@@ -707,10 +887,21 @@ int bnxt_hwrm_func_resc_qcaps(struct bnxt *bp)
 		bp->max_tx_rings = rte_le_to_cpu_16(resp->max_tx_rings);
 		bp->max_rx_rings = rte_le_to_cpu_16(resp->max_rx_rings);
 		bp->max_ring_grps = rte_le_to_cpu_32(resp->max_hw_ring_grps);
+		/* func_resource_qcaps does not return max_rx_em_flows.
+		 * So use the value provided by func_qcaps.
+		 */
 		bp->max_l2_ctx = rte_le_to_cpu_16(resp->max_l2_ctxs);
+		if (!BNXT_CHIP_THOR(bp))
+			bp->max_l2_ctx += bp->max_rx_em_flows;
 		bp->max_vnics = rte_le_to_cpu_16(resp->max_vnics);
 		bp->max_stat_ctx = rte_le_to_cpu_16(resp->max_stat_ctx);
 	}
+	bp->max_nq_rings = rte_le_to_cpu_16(resp->max_msix);
+	bp->vf_resv_strategy = rte_le_to_cpu_16(resp->vf_reservation_strategy);
+	if (bp->vf_resv_strategy >
+	    HWRM_FUNC_RESOURCE_QCAPS_OUTPUT_VF_RESV_STRATEGY_MINIMAL_STATIC)
+		bp->vf_resv_strategy =
+		HWRM_FUNC_RESOURCE_QCAPS_OUTPUT_VF_RESERVATION_STRATEGY_MAXIMAL;
 	HWRM_UNLOCK();
 
 	return rc;
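bnxt_hwrm_check_vf_rings() turns FUNC_VF_CFG into a dry run: with test=true the *_ASSETS_TEST flags ask firmware to validate the requested counts without committing them, and HWRM_CHECK_RESULT_SILENT() keeps the expected failure out of the log. A minimal sketch of a configure-time probe (the caller shown is illustrative):

    /* Probe whether the requested VF ring counts are grantable before
     * committing to a queue layout; bail out early if firmware says no.
     */
    if (bnxt_hwrm_check_vf_rings(bp)) {
            PMD_DRV_LOG(ERR, "HWRM insufficient resources\n");
            return -ENOSPC;
    }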
@@ -721,22 +912,24 @@ int bnxt_hwrm_ver_get(struct bnxt *bp)
 	int rc = 0;
 	struct hwrm_ver_get_input req = {.req_type = 0 };
 	struct hwrm_ver_get_output *resp = bp->hwrm_cmd_resp_addr;
-	uint32_t my_version;
 	uint32_t fw_version;
 	uint16_t max_resp_len;
 	char type[RTE_MEMZONE_NAMESIZE];
 	uint32_t dev_caps_cfg;
 
 	bp->max_req_len = HWRM_MAX_REQ_LEN;
-	HWRM_PREP(req, VER_GET);
+	HWRM_PREP(req, VER_GET, BNXT_USE_CHIMP_MB);
 
 	req.hwrm_intf_maj = HWRM_VERSION_MAJOR;
 	req.hwrm_intf_min = HWRM_VERSION_MINOR;
 	req.hwrm_intf_upd = HWRM_VERSION_UPDATE;
 
-	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
 
-	HWRM_CHECK_RESULT();
+	if (bp->flags & BNXT_FLAG_FW_RESET)
+		HWRM_CHECK_RESULT_SILENT();
+	else
+		HWRM_CHECK_RESULT();
 
 	PMD_DRV_LOG(INFO, "%d.%d.%d:%d.%d.%d\n",
 		resp->hwrm_intf_maj_8b, resp->hwrm_intf_min_8b,
@@ -749,10 +942,6 @@ int bnxt_hwrm_ver_get(struct bnxt *bp)
 	PMD_DRV_LOG(INFO, "Driver HWRM version: %d.%d.%d\n",
 		HWRM_VERSION_MAJOR, HWRM_VERSION_MINOR, HWRM_VERSION_UPDATE);
 
-	my_version = HWRM_VERSION_MAJOR << 16;
-	my_version |= HWRM_VERSION_MINOR << 8;
-	my_version |= HWRM_VERSION_UPDATE;
-
 	fw_version = resp->hwrm_intf_maj_8b << 16;
 	fw_version |= resp->hwrm_intf_min_8b << 8;
 	fw_version |= resp->hwrm_intf_upd_8b;
@@ -764,27 +953,16 @@ int bnxt_hwrm_ver_get(struct bnxt *bp)
 		goto error;
 	}
 
-	if (my_version != fw_version) {
-		PMD_DRV_LOG(INFO, "BNXT Driver/HWRM API mismatch.\n");
-		if (my_version < fw_version) {
-			PMD_DRV_LOG(INFO,
-				"Firmware API version is newer than driver.\n");
-			PMD_DRV_LOG(INFO,
-				"The driver may be missing features.\n");
-		} else {
-			PMD_DRV_LOG(INFO,
-				"Firmware API version is older than driver.\n");
-			PMD_DRV_LOG(INFO,
-				"Not all driver features may be functional.\n");
-		}
-	}
-
 	if (bp->max_req_len > resp->max_req_win_len) {
 		PMD_DRV_LOG(ERR, "Unsupported request length\n");
 		rc = -EINVAL;
 	}
 	bp->max_req_len = rte_le_to_cpu_16(resp->max_req_win_len);
-	max_resp_len = resp->max_resp_len;
+	bp->hwrm_max_ext_req_len = rte_le_to_cpu_16(resp->max_ext_req_len);
+	if (bp->hwrm_max_ext_req_len < HWRM_MAX_REQ_LEN)
+		bp->hwrm_max_ext_req_len = HWRM_MAX_REQ_LEN;
+
+	max_resp_len = rte_le_to_cpu_16(resp->max_resp_len);
 	dev_caps_cfg = rte_le_to_cpu_32(resp->dev_caps_cfg);
 
 	if (bp->max_resp_len != max_resp_len) {
@@ -802,7 +980,7 @@ int bnxt_hwrm_ver_get(struct bnxt *bp)
 		rte_mem_lock_page(bp->hwrm_cmd_resp_addr);
 		bp->hwrm_cmd_resp_dma_addr =
 			rte_mem_virt2iova(bp->hwrm_cmd_resp_addr);
-		if (bp->hwrm_cmd_resp_dma_addr == 0) {
+		if (bp->hwrm_cmd_resp_dma_addr == RTE_BAD_IOVA) {
 			PMD_DRV_LOG(ERR,
 			"Unable to map response buffer to physical memory.\n");
 			rc = -ENOMEM;
@@ -816,11 +994,22 @@ int bnxt_hwrm_ver_get(struct bnxt *bp)
 	    (dev_caps_cfg &
 	     HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_SHORT_CMD_REQUIRED)) {
 		PMD_DRV_LOG(DEBUG, "Short command supported\n");
+		bp->flags |= BNXT_FLAG_SHORT_CMD;
+	}
+
+	if (((dev_caps_cfg &
+	      HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_SHORT_CMD_SUPPORTED) &&
+	     (dev_caps_cfg &
+	      HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_SHORT_CMD_REQUIRED)) ||
+	    bp->hwrm_max_ext_req_len > HWRM_MAX_REQ_LEN) {
+		sprintf(type, "bnxt_hwrm_short_%04x:%02x:%02x:%02x",
+			bp->pdev->addr.domain, bp->pdev->addr.bus,
+			bp->pdev->addr.devid, bp->pdev->addr.function);
 
 		rte_free(bp->hwrm_short_cmd_req_addr);
 
-		bp->hwrm_short_cmd_req_addr = rte_malloc(type,
-							bp->max_req_len, 0);
+		bp->hwrm_short_cmd_req_addr =
+				rte_malloc(type, bp->hwrm_max_ext_req_len, 0);
 		if (bp->hwrm_short_cmd_req_addr == NULL) {
 			rc = -ENOMEM;
 			goto error;
@@ -828,15 +1017,26 @@ int bnxt_hwrm_ver_get(struct bnxt *bp)
 		rte_mem_lock_page(bp->hwrm_short_cmd_req_addr);
 		bp->hwrm_short_cmd_req_dma_addr =
 			rte_mem_virt2iova(bp->hwrm_short_cmd_req_addr);
-		if (bp->hwrm_short_cmd_req_dma_addr == 0) {
+		if (bp->hwrm_short_cmd_req_dma_addr == RTE_BAD_IOVA) {
 			rte_free(bp->hwrm_short_cmd_req_addr);
 			PMD_DRV_LOG(ERR,
 				"Unable to map buffer to physical memory.\n");
 			rc = -ENOMEM;
 			goto error;
 		}
-
-		bp->flags |= BNXT_FLAG_SHORT_CMD;
+	}
+	if (dev_caps_cfg &
+	    HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_KONG_MB_CHNL_SUPPORTED) {
+		bp->flags |= BNXT_FLAG_KONG_MB_EN;
+		PMD_DRV_LOG(DEBUG, "Kong mailbox channel enabled\n");
+	}
+	if (dev_caps_cfg &
+	    HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_TRUSTED_VF_SUPPORTED)
+		PMD_DRV_LOG(DEBUG, "FW supports Trusted VFs\n");
+	if (dev_caps_cfg &
+	    HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_CFA_ADV_FLOW_MGNT_SUPPORTED) {
+		bp->flags |= BNXT_FLAG_ADV_FLOW_MGMT;
+		PMD_DRV_LOG(DEBUG, "FW supports advanced flow management\n");
 	}
 
 error:
@@ -853,16 +1053,14 @@ int bnxt_hwrm_func_driver_unregister(struct bnxt *bp, uint32_t flags)
 	if (!(bp->flags & BNXT_FLAG_REGISTERED))
 		return 0;
 
-	HWRM_PREP(req, FUNC_DRV_UNRGTR);
+	HWRM_PREP(req, FUNC_DRV_UNRGTR, BNXT_USE_CHIMP_MB);
 	req.flags = flags;
-	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
 
 	HWRM_CHECK_RESULT();
 	HWRM_UNLOCK();
 
-	bp->flags &= ~BNXT_FLAG_REGISTERED;
-
 	return rc;
 }
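With BNXT_FLAG_SHORT_CMD set (or a request larger than max_req_len), bnxt_hwrm_send_message() stops streaming the full request through the BAR: it copies the request into the DMA buffer allocated above and pushes only a fixed-size hwrm_short_input descriptor through the mailbox. A sketch of the descriptor it builds, based on the fields visible earlier in this patch (the signature constant's exact name is an assumption):

    struct hwrm_short_input short_input = { 0 };

    short_input.req_type = rte_cpu_to_le_16(req->req_type);
    short_input.signature =                       /* assumed constant name */
            rte_cpu_to_le_16(HWRM_SHORT_INPUT_SIGNATURE_SHORT_CMD);
    short_input.size = rte_cpu_to_le_16(msg_len);
    short_input.req_addr = rte_cpu_to_le_64(bp->hwrm_short_cmd_req_dma_addr);

Sizing the buffer by hwrm_max_ext_req_len rather than max_req_len is what lets extended, larger-than-BAR-window requests ride the same path.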
@@ -873,7 +1071,7 @@ static int bnxt_hwrm_port_phy_cfg(struct bnxt *bp, struct bnxt_link_info *conf)
 	struct hwrm_port_phy_cfg_output *resp = bp->hwrm_cmd_resp_addr;
 	uint32_t enables = 0;
 
-	HWRM_PREP(req, PORT_PHY_CFG);
+	HWRM_PREP(req, PORT_PHY_CFG, BNXT_USE_CHIMP_MB);
 
 	if (conf->link_up) {
 		/* Setting Fixed Speed. But AutoNeg is ON, So disable it */
@@ -922,7 +1120,7 @@ static int bnxt_hwrm_port_phy_cfg(struct bnxt *bp, struct bnxt_link_info *conf)
 		PMD_DRV_LOG(INFO, "Force Link Down\n");
 	}
 
-	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
 
 	HWRM_CHECK_RESULT();
 	HWRM_UNLOCK();
@@ -937,9 +1135,9 @@ static int bnxt_hwrm_port_phy_qcfg(struct bnxt *bp,
 	struct hwrm_port_phy_qcfg_input req = {0};
 	struct hwrm_port_phy_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
 
-	HWRM_PREP(req, PORT_PHY_QCFG);
+	HWRM_PREP(req, PORT_PHY_QCFG, BNXT_USE_CHIMP_MB);
 
-	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
 
 	HWRM_CHECK_RESULT();
 
@@ -985,14 +1183,14 @@ int bnxt_hwrm_queue_qportcfg(struct bnxt *bp)
 	struct hwrm_queue_qportcfg_output *resp = bp->hwrm_cmd_resp_addr;
 	int i;
 
-	HWRM_PREP(req, QUEUE_QPORTCFG);
+	HWRM_PREP(req, QUEUE_QPORTCFG, BNXT_USE_CHIMP_MB);
 
 	req.flags = HWRM_QUEUE_QPORTCFG_INPUT_FLAGS_PATH_TX;
 	/* HWRM Version >= 1.9.1 */
 	if (bp->hwrm_spec_code >= HWRM_VERSION_1_9_1)
 		req.drv_qmap_cap =
 			HWRM_QUEUE_QPORTCFG_INPUT_DRV_QMAP_CAP_ENABLED;
-	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
 
 	HWRM_CHECK_RESULT();
 
@@ -1023,6 +1221,13 @@ int bnxt_hwrm_queue_qportcfg(struct bnxt *bp)
 			}
 		}
 	}
+
+	bp->max_tc = resp->max_configurable_queues;
+	bp->max_lltc = resp->max_configurable_lossless_queues;
+	if (bp->max_tc > BNXT_MAX_QUEUE)
+		bp->max_tc = BNXT_MAX_QUEUE;
+	bp->max_q = bp->max_tc;
+
 	PMD_DRV_LOG(DEBUG, "Tx Cos Queue to use: %d\n", bp->tx_cosq_id);
 
 	return rc;
@@ -1037,8 +1242,10 @@ int bnxt_hwrm_ring_alloc(struct bnxt *bp,
 	uint32_t enables = 0;
 	struct hwrm_ring_alloc_input req = {.req_type = 0 };
 	struct hwrm_ring_alloc_output *resp = bp->hwrm_cmd_resp_addr;
+	struct rte_mempool *mb_pool;
+	uint16_t rx_buf_size;
 
-	HWRM_PREP(req, RING_ALLOC);
+	HWRM_PREP(req, RING_ALLOC, BNXT_USE_CHIMP_MB);
 
 	req.page_tbl_addr = rte_cpu_to_le_64(ring->bd_dma);
 	req.fbo = rte_cpu_to_le_32(0);
@@ -1048,51 +1255,99 @@ int bnxt_hwrm_ring_alloc(struct bnxt *bp,
 
 	switch (ring_type) {
 	case HWRM_RING_ALLOC_INPUT_RING_TYPE_TX:
+		req.ring_type = ring_type;
+		req.cmpl_ring_id = rte_cpu_to_le_16(cmpl_ring_id);
+		req.stat_ctx_id = rte_cpu_to_le_32(stats_ctx_id);
 		req.queue_id = rte_cpu_to_le_16(bp->tx_cosq_id);
-		/* FALLTHROUGH */
+		if (stats_ctx_id != INVALID_STATS_CTX_ID)
+			enables |=
+			HWRM_RING_ALLOC_INPUT_ENABLES_STAT_CTX_ID_VALID;
+		break;
 	case HWRM_RING_ALLOC_INPUT_RING_TYPE_RX:
 		req.ring_type = ring_type;
 		req.cmpl_ring_id = rte_cpu_to_le_16(cmpl_ring_id);
-		req.stat_ctx_id = rte_cpu_to_le_16(stats_ctx_id);
+		req.stat_ctx_id = rte_cpu_to_le_32(stats_ctx_id);
+		if (BNXT_CHIP_THOR(bp)) {
+			mb_pool = bp->rx_queues[0]->mb_pool;
+			rx_buf_size = rte_pktmbuf_data_room_size(mb_pool) -
+				      RTE_PKTMBUF_HEADROOM;
+			rx_buf_size = RTE_MIN(BNXT_MAX_PKT_LEN, rx_buf_size);
+			req.rx_buf_size = rte_cpu_to_le_16(rx_buf_size);
+			enables |=
+				HWRM_RING_ALLOC_INPUT_ENABLES_RX_BUF_SIZE_VALID;
+		}
 		if (stats_ctx_id != INVALID_STATS_CTX_ID)
 			enables |=
-				HWRM_RING_ALLOC_INPUT_ENABLES_STAT_CTX_ID_VALID;
+			HWRM_RING_ALLOC_INPUT_ENABLES_STAT_CTX_ID_VALID;
 		break;
 	case HWRM_RING_ALLOC_INPUT_RING_TYPE_L2_CMPL:
 		req.ring_type = ring_type;
-		/*
-		 * TODO: Some HWRM versions crash with
-		 * HWRM_RING_ALLOC_INPUT_INT_MODE_POLL
-		 */
+		if (BNXT_HAS_NQ(bp)) {
+			/* Association of cp ring with nq */
+			req.nq_ring_id = rte_cpu_to_le_16(cmpl_ring_id);
+			enables |=
+				HWRM_RING_ALLOC_INPUT_ENABLES_NQ_RING_ID_VALID;
+		}
 		req.int_mode = HWRM_RING_ALLOC_INPUT_INT_MODE_MSIX;
 		break;
+	case HWRM_RING_ALLOC_INPUT_RING_TYPE_NQ:
+		req.ring_type = ring_type;
+		req.page_size = BNXT_PAGE_SHFT;
+		req.int_mode = HWRM_RING_ALLOC_INPUT_INT_MODE_MSIX;
+		break;
+	case HWRM_RING_ALLOC_INPUT_RING_TYPE_RX_AGG:
+		req.ring_type = ring_type;
+		req.rx_ring_id = rte_cpu_to_le_16(ring->fw_rx_ring_id);
+
+		mb_pool = bp->rx_queues[0]->mb_pool;
+		rx_buf_size = rte_pktmbuf_data_room_size(mb_pool) -
+			      RTE_PKTMBUF_HEADROOM;
+		rx_buf_size = RTE_MIN(BNXT_MAX_PKT_LEN, rx_buf_size);
+		req.rx_buf_size = rte_cpu_to_le_16(rx_buf_size);
+
+		req.stat_ctx_id = rte_cpu_to_le_32(stats_ctx_id);
+		enables |= HWRM_RING_ALLOC_INPUT_ENABLES_RX_RING_ID_VALID |
+			   HWRM_RING_ALLOC_INPUT_ENABLES_RX_BUF_SIZE_VALID |
+			   HWRM_RING_ALLOC_INPUT_ENABLES_STAT_CTX_ID_VALID;
+		break;
 	default:
 		PMD_DRV_LOG(ERR, "hwrm alloc invalid ring type %d\n",
 			ring_type);
 		HWRM_UNLOCK();
-		return -1;
+		return -EINVAL;
 	}
 	req.enables = rte_cpu_to_le_32(enables);
 
-	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
 
 	if (rc || resp->error_code) {
 		if (rc == 0 && resp->error_code)
 			rc = rte_le_to_cpu_16(resp->error_code);
 		switch (ring_type) {
-		case HWRM_RING_FREE_INPUT_RING_TYPE_L2_CMPL:
+		case HWRM_RING_ALLOC_INPUT_RING_TYPE_L2_CMPL:
 			PMD_DRV_LOG(ERR,
 				"hwrm_ring_alloc cp failed. rc:%d\n", rc);
 			HWRM_UNLOCK();
 			return rc;
-		case HWRM_RING_FREE_INPUT_RING_TYPE_RX:
+		case HWRM_RING_ALLOC_INPUT_RING_TYPE_RX:
 			PMD_DRV_LOG(ERR,
-				"hwrm_ring_alloc rx failed. rc:%d\n", rc);
+				    "hwrm_ring_alloc rx failed. rc:%d\n", rc);
 			HWRM_UNLOCK();
 			return rc;
-		case HWRM_RING_FREE_INPUT_RING_TYPE_TX:
+		case HWRM_RING_ALLOC_INPUT_RING_TYPE_RX_AGG:
+			PMD_DRV_LOG(ERR,
+				    "hwrm_ring_alloc rx agg failed. rc:%d\n",
+				    rc);
+			HWRM_UNLOCK();
+			return rc;
+		case HWRM_RING_ALLOC_INPUT_RING_TYPE_TX:
 			PMD_DRV_LOG(ERR,
-				"hwrm_ring_alloc tx failed. rc:%d\n", rc);
+				    "hwrm_ring_alloc tx failed. rc:%d\n", rc);
+			HWRM_UNLOCK();
+			return rc;
+		case HWRM_RING_ALLOC_INPUT_RING_TYPE_NQ:
+			PMD_DRV_LOG(ERR,
+				    "hwrm_ring_alloc nq failed. rc:%d\n", rc);
			HWRM_UNLOCK();
 			return rc;
 		default:
@@ -1114,12 +1369,12 @@ int bnxt_hwrm_ring_free(struct bnxt *bp,
 	struct hwrm_ring_free_input req = {.req_type = 0 };
 	struct hwrm_ring_free_output *resp = bp->hwrm_cmd_resp_addr;
 
-	HWRM_PREP(req, RING_FREE);
+	HWRM_PREP(req, RING_FREE, BNXT_USE_CHIMP_MB);
 
 	req.ring_type = ring_type;
 	req.ring_id = rte_cpu_to_le_16(ring->fw_ring_id);
 
-	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
 
 	if (rc || resp->error_code) {
 		if (rc == 0 && resp->error_code)
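On Thor (BNXT_CHIP_THOR) the RX and aggregation ring allocations must declare the usable buffer size up front: mempool data room minus headroom, clamped to the chip's maximum frame length. A worked sketch with common defaults (a 2048-byte data room is illustrative; RTE_PKTMBUF_HEADROOM defaults to 128):

    /* rx_buf_size = data_room - headroom, clamped to BNXT_MAX_PKT_LEN */
    uint16_t rx_buf_size = 2048 - RTE_PKTMBUF_HEADROOM;      /* 1920 */
    rx_buf_size = RTE_MIN(BNXT_MAX_PKT_LEN, rx_buf_size);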
@@ -1139,6 +1394,14 @@ int bnxt_hwrm_ring_free(struct bnxt *bp,
 			PMD_DRV_LOG(ERR, "hwrm_ring_free tx failed. rc:%d\n",
 				rc);
 			return rc;
+		case HWRM_RING_FREE_INPUT_RING_TYPE_NQ:
+			PMD_DRV_LOG(ERR,
+				    "hwrm_ring_free nq failed. rc:%d\n", rc);
+			return rc;
+		case HWRM_RING_FREE_INPUT_RING_TYPE_RX_AGG:
+			PMD_DRV_LOG(ERR,
+				    "hwrm_ring_free agg failed. rc:%d\n", rc);
+			return rc;
 		default:
 			PMD_DRV_LOG(ERR, "Invalid ring, rc:%d\n", rc);
 			return rc;
@@ -1154,14 +1417,14 @@ int bnxt_hwrm_ring_grp_alloc(struct bnxt *bp, unsigned int idx)
 	struct hwrm_ring_grp_alloc_input req = {.req_type = 0 };
 	struct hwrm_ring_grp_alloc_output *resp = bp->hwrm_cmd_resp_addr;
 
-	HWRM_PREP(req, RING_GRP_ALLOC);
+	HWRM_PREP(req, RING_GRP_ALLOC, BNXT_USE_CHIMP_MB);
 
 	req.cr = rte_cpu_to_le_16(bp->grp_info[idx].cp_fw_ring_id);
 	req.rr = rte_cpu_to_le_16(bp->grp_info[idx].rx_fw_ring_id);
 	req.ar = rte_cpu_to_le_16(bp->grp_info[idx].ag_fw_ring_id);
 	req.sc = rte_cpu_to_le_16(bp->grp_info[idx].fw_stats_ctx);
 
-	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
 
 	HWRM_CHECK_RESULT();
 
@@ -1179,11 +1442,11 @@ int bnxt_hwrm_ring_grp_free(struct bnxt *bp, unsigned int idx)
 	struct hwrm_ring_grp_free_input req = {.req_type = 0 };
 	struct hwrm_ring_grp_free_output *resp = bp->hwrm_cmd_resp_addr;
 
-	HWRM_PREP(req, RING_GRP_FREE);
+	HWRM_PREP(req, RING_GRP_FREE, BNXT_USE_CHIMP_MB);
 
 	req.ring_group_id = rte_cpu_to_le_16(bp->grp_info[idx].fw_grp_id);
 
-	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
 
 	HWRM_CHECK_RESULT();
 	HWRM_UNLOCK();
@@ -1201,11 +1464,11 @@ int bnxt_hwrm_stat_clear(struct bnxt *bp, struct bnxt_cp_ring_info *cpr)
 	if (cpr->hw_stats_ctx_id == (uint32_t)HWRM_NA_SIGNATURE)
 		return rc;
 
-	HWRM_PREP(req, STAT_CTX_CLR_STATS);
+	HWRM_PREP(req, STAT_CTX_CLR_STATS, BNXT_USE_CHIMP_MB);
 
-	req.stat_ctx_id = rte_cpu_to_le_16(cpr->hw_stats_ctx_id);
+	req.stat_ctx_id = rte_cpu_to_le_32(cpr->hw_stats_ctx_id);
 
-	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
 
 	HWRM_CHECK_RESULT();
 	HWRM_UNLOCK();
@@ -1220,18 +1483,18 @@ int bnxt_hwrm_stat_ctx_alloc(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
 	struct hwrm_stat_ctx_alloc_input req = {.req_type = 0 };
 	struct hwrm_stat_ctx_alloc_output *resp = bp->hwrm_cmd_resp_addr;
 
-	HWRM_PREP(req, STAT_CTX_ALLOC);
+	HWRM_PREP(req, STAT_CTX_ALLOC, BNXT_USE_CHIMP_MB);
 
 	req.update_period_ms = rte_cpu_to_le_32(0);
 
 	req.stats_dma_addr =
 	    rte_cpu_to_le_64(cpr->hw_stats_map);
 
-	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
 
 	HWRM_CHECK_RESULT();
 
-	cpr->hw_stats_ctx_id = rte_le_to_cpu_16(resp->stat_ctx_id);
+	cpr->hw_stats_ctx_id = rte_le_to_cpu_32(resp->stat_ctx_id);
 
 	HWRM_UNLOCK();
 
@@ -1245,11 +1508,11 @@ int bnxt_hwrm_stat_ctx_free(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
 	struct hwrm_stat_ctx_free_input req = {.req_type = 0 };
 	struct hwrm_stat_ctx_free_output *resp = bp->hwrm_cmd_resp_addr;
 
-	HWRM_PREP(req, STAT_CTX_FREE);
+	HWRM_PREP(req, STAT_CTX_FREE, BNXT_USE_CHIMP_MB);
 
-	req.stat_ctx_id = rte_cpu_to_le_16(cpr->hw_stats_ctx_id);
+	req.stat_ctx_id = rte_cpu_to_le_32(cpr->hw_stats_ctx_id);
 
-	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
 
 	HWRM_CHECK_RESULT();
 	HWRM_UNLOCK();
@@ -1263,23 +1526,29 @@ int bnxt_hwrm_vnic_alloc(struct bnxt *bp, struct bnxt_vnic_info *vnic)
 	struct hwrm_vnic_alloc_input req = { 0 };
 	struct hwrm_vnic_alloc_output *resp = bp->hwrm_cmd_resp_addr;
 
+	if (!BNXT_HAS_RING_GRPS(bp))
+		goto skip_ring_grps;
+
 	/* map ring groups to this vnic */
 	PMD_DRV_LOG(DEBUG, "Alloc VNIC. Start %x, End %x\n",
 		vnic->start_grp_id, vnic->end_grp_id);
-	for (i = vnic->start_grp_id, j = 0; i <= vnic->end_grp_id; i++, j++)
+	for (i = vnic->start_grp_id, j = 0; i < vnic->end_grp_id; i++, j++)
 		vnic->fw_grp_ids[j] = bp->grp_info[i].fw_grp_id;
+	vnic->dflt_ring_grp = bp->grp_info[vnic->start_grp_id].fw_grp_id;
 
 	vnic->rss_rule = (uint16_t)HWRM_NA_SIGNATURE;
 	vnic->cos_rule = (uint16_t)HWRM_NA_SIGNATURE;
 	vnic->lb_rule = (uint16_t)HWRM_NA_SIGNATURE;
-	vnic->mru = bp->eth_dev->data->mtu + ETHER_HDR_LEN +
-				ETHER_CRC_LEN + VLAN_TAG_SIZE;
-	HWRM_PREP(req, VNIC_ALLOC);
+
+skip_ring_grps:
+	vnic->mru = bp->eth_dev->data->mtu + RTE_ETHER_HDR_LEN +
+				RTE_ETHER_CRC_LEN + VLAN_TAG_SIZE;
+	HWRM_PREP(req, VNIC_ALLOC, BNXT_USE_CHIMP_MB);
 
 	if (vnic->func_default)
 		req.flags =
 			rte_cpu_to_le_32(HWRM_VNIC_ALLOC_INPUT_FLAGS_DEFAULT);
-	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
 
 	HWRM_CHECK_RESULT();
 
@@ -1297,11 +1566,11 @@ static int bnxt_hwrm_vnic_plcmodes_qcfg(struct bnxt *bp,
 	struct hwrm_vnic_plcmodes_qcfg_input req = {.req_type = 0 };
 	struct hwrm_vnic_plcmodes_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
 
-	HWRM_PREP(req, VNIC_PLCMODES_QCFG);
+	HWRM_PREP(req, VNIC_PLCMODES_QCFG, BNXT_USE_CHIMP_MB);
 
 	req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
 
-	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
 
 	HWRM_CHECK_RESULT();
 
@@ -1325,7 +1594,12 @@ static int bnxt_hwrm_vnic_plcmodes_cfg(struct bnxt *bp,
 	struct hwrm_vnic_plcmodes_cfg_input req = {.req_type = 0 };
 	struct hwrm_vnic_plcmodes_cfg_output *resp = bp->hwrm_cmd_resp_addr;
 
-	HWRM_PREP(req, VNIC_PLCMODES_CFG);
+	if (vnic->fw_vnic_id == INVALID_HW_RING_ID) {
+		PMD_DRV_LOG(DEBUG, "VNIC ID %x\n", vnic->fw_vnic_id);
+		return rc;
+	}
+
+	HWRM_PREP(req, VNIC_PLCMODES_CFG, BNXT_USE_CHIMP_MB);
 
 	req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
 	req.flags = rte_cpu_to_le_32(pmode->flags);
@@ -1338,7 +1612,7 @@ static int bnxt_hwrm_vnic_plcmodes_cfg(struct bnxt *bp,
 		HWRM_VNIC_PLCMODES_CFG_INPUT_ENABLES_JUMBO_THRESH_VALID
 	);
 
-	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
 
 	HWRM_CHECK_RESULT();
 	HWRM_UNLOCK();
@@ -1351,8 +1625,9 @@ int bnxt_hwrm_vnic_cfg(struct bnxt *bp, struct bnxt_vnic_info *vnic)
 	int rc = 0;
 	struct hwrm_vnic_cfg_input req = {.req_type = 0 };
 	struct hwrm_vnic_cfg_output *resp = bp->hwrm_cmd_resp_addr;
+	struct bnxt_plcmodes_cfg pmodes = { 0 };
 	uint32_t ctx_enable_flag = 0;
-	struct bnxt_plcmodes_cfg pmodes;
+	uint32_t enables = 0;
 
 	if (vnic->fw_vnic_id == INVALID_HW_RING_ID) {
 		PMD_DRV_LOG(DEBUG, "VNIC ID %x\n", vnic->fw_vnic_id);
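The VNIC MRU is the configured MTU plus fixed L2 overhead. With RTE_ETHER_HDR_LEN = 14, RTE_ETHER_CRC_LEN = 4 and VLAN_TAG_SIZE assumed to be 4, a standard 1500-byte MTU works out as:

    /* mru = mtu + Ethernet header + CRC + one VLAN tag */
    uint16_t mru = 1500 + 14 + 4 + 4;     /* = 1522 bytes */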
@@ -1363,29 +1638,48 @@ int bnxt_hwrm_vnic_cfg(struct bnxt *bp, struct bnxt_vnic_info *vnic)
 	if (rc)
 		return rc;
 
-	HWRM_PREP(req, VNIC_CFG);
+	HWRM_PREP(req, VNIC_CFG, BNXT_USE_CHIMP_MB);
+
+	if (BNXT_CHIP_THOR(bp)) {
+		struct bnxt_rx_queue *rxq = bp->eth_dev->data->rx_queues[0];
+		struct bnxt_rx_ring_info *rxr = rxq->rx_ring;
+		struct bnxt_cp_ring_info *cpr = rxq->cp_ring;
+
+		req.default_rx_ring_id =
+			rte_cpu_to_le_16(rxr->rx_ring_struct->fw_ring_id);
+		req.default_cmpl_ring_id =
+			rte_cpu_to_le_16(cpr->cp_ring_struct->fw_ring_id);
+		enables = HWRM_VNIC_CFG_INPUT_ENABLES_DEFAULT_RX_RING_ID |
+			  HWRM_VNIC_CFG_INPUT_ENABLES_DEFAULT_CMPL_RING_ID;
+		goto config_mru;
+	}
 
 	/* Only RSS support for now TBD: COS & LB */
-	req.enables =
-		rte_cpu_to_le_32(HWRM_VNIC_CFG_INPUT_ENABLES_DFLT_RING_GRP);
+	enables = HWRM_VNIC_CFG_INPUT_ENABLES_DFLT_RING_GRP;
 	if (vnic->lb_rule != 0xffff)
 		ctx_enable_flag |= HWRM_VNIC_CFG_INPUT_ENABLES_LB_RULE;
 	if (vnic->cos_rule != 0xffff)
 		ctx_enable_flag |= HWRM_VNIC_CFG_INPUT_ENABLES_COS_RULE;
-	if (vnic->rss_rule != 0xffff) {
+	if (vnic->rss_rule != (uint16_t)HWRM_NA_SIGNATURE) {
 		ctx_enable_flag |= HWRM_VNIC_CFG_INPUT_ENABLES_MRU;
 		ctx_enable_flag |= HWRM_VNIC_CFG_INPUT_ENABLES_RSS_RULE;
 	}
-	req.enables |= rte_cpu_to_le_32(ctx_enable_flag);
-	req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
+	enables |= ctx_enable_flag;
 	req.dflt_ring_grp = rte_cpu_to_le_16(vnic->dflt_ring_grp);
 	req.rss_rule = rte_cpu_to_le_16(vnic->rss_rule);
 	req.cos_rule = rte_cpu_to_le_16(vnic->cos_rule);
 	req.lb_rule = rte_cpu_to_le_16(vnic->lb_rule);
+
+config_mru:
+	req.enables = rte_cpu_to_le_32(enables);
+	req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
 	req.mru = rte_cpu_to_le_16(vnic->mru);
-	if (vnic->func_default)
+	/* Configure default VNIC only once. */
+	if (vnic->func_default && !(bp->flags & BNXT_FLAG_DFLT_VNIC_SET)) {
 		req.flags |=
 			rte_cpu_to_le_32(HWRM_VNIC_CFG_INPUT_FLAGS_DEFAULT);
+		bp->flags |= BNXT_FLAG_DFLT_VNIC_SET;
+	}
 	if (vnic->vlan_strip)
 		req.flags |=
 			rte_cpu_to_le_32(HWRM_VNIC_CFG_INPUT_FLAGS_VLAN_STRIP_MODE);
@@ -1402,7 +1696,7 @@ int bnxt_hwrm_vnic_cfg(struct bnxt *bp, struct bnxt_vnic_info *vnic)
 		req.flags |= rte_cpu_to_le_32(
 				HWRM_VNIC_QCFG_OUTPUT_FLAGS_RSS_DFLT_CR_MODE);
 
-	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
 
 	HWRM_CHECK_RESULT();
 	HWRM_UNLOCK();
@@ -1423,14 +1717,14 @@ int bnxt_hwrm_vnic_qcfg(struct bnxt *bp, struct bnxt_vnic_info *vnic,
 		PMD_DRV_LOG(DEBUG, "VNIC QCFG ID %d\n", vnic->fw_vnic_id);
 		return rc;
 	}
-	HWRM_PREP(req, VNIC_QCFG);
+	HWRM_PREP(req, VNIC_QCFG, BNXT_USE_CHIMP_MB);
 
 	req.enables =
 		rte_cpu_to_le_32(HWRM_VNIC_QCFG_INPUT_ENABLES_VF_ID_VALID);
 	req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
 	req.vf_id = rte_cpu_to_le_16(fw_vf_id);
 
-	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
 
 	HWRM_CHECK_RESULT();
 
@@ -1457,47 +1751,74 @@ int bnxt_hwrm_vnic_qcfg(struct bnxt *bp, struct bnxt_vnic_info *vnic,
 	return rc;
 }
 
-int bnxt_hwrm_vnic_ctx_alloc(struct bnxt *bp, struct bnxt_vnic_info *vnic)
+int bnxt_hwrm_vnic_ctx_alloc(struct bnxt *bp,
+			     struct bnxt_vnic_info *vnic, uint16_t ctx_idx)
 {
 	int rc = 0;
+	uint16_t ctx_id;
 	struct hwrm_vnic_rss_cos_lb_ctx_alloc_input req = {.req_type = 0 };
 	struct hwrm_vnic_rss_cos_lb_ctx_alloc_output *resp =
						bp->hwrm_cmd_resp_addr;
 
-	HWRM_PREP(req, VNIC_RSS_COS_LB_CTX_ALLOC);
-
-	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+	HWRM_PREP(req, VNIC_RSS_COS_LB_CTX_ALLOC, BNXT_USE_CHIMP_MB);
+	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
 
 	HWRM_CHECK_RESULT();
 
-	vnic->rss_rule = rte_le_to_cpu_16(resp->rss_cos_lb_ctx_id);
+	ctx_id = rte_le_to_cpu_16(resp->rss_cos_lb_ctx_id);
+	if (!BNXT_HAS_RING_GRPS(bp))
+		vnic->fw_grp_ids[ctx_idx] = ctx_id;
+	else if (ctx_idx == 0)
+		vnic->rss_rule = ctx_id;
+
 	HWRM_UNLOCK();
-	PMD_DRV_LOG(DEBUG, "VNIC RSS Rule %x\n", vnic->rss_rule);
 
 	return rc;
 }
 
-int bnxt_hwrm_vnic_ctx_free(struct bnxt *bp, struct bnxt_vnic_info *vnic)
+static
+int _bnxt_hwrm_vnic_ctx_free(struct bnxt *bp,
+			     struct bnxt_vnic_info *vnic, uint16_t ctx_idx)
 {
 	int rc = 0;
 	struct hwrm_vnic_rss_cos_lb_ctx_free_input req = {.req_type = 0 };
 	struct hwrm_vnic_rss_cos_lb_ctx_free_output *resp =
						bp->hwrm_cmd_resp_addr;
 
-	if (vnic->rss_rule == 0xffff) {
+	if (ctx_idx == (uint16_t)HWRM_NA_SIGNATURE) {
 		PMD_DRV_LOG(DEBUG, "VNIC RSS Rule %x\n", vnic->rss_rule);
 		return rc;
 	}
 
-	HWRM_PREP(req, VNIC_RSS_COS_LB_CTX_FREE);
+	HWRM_PREP(req, VNIC_RSS_COS_LB_CTX_FREE, BNXT_USE_CHIMP_MB);
 
-	req.rss_cos_lb_ctx_id = rte_cpu_to_le_16(vnic->rss_rule);
+	req.rss_cos_lb_ctx_id = rte_cpu_to_le_16(ctx_idx);
 
-	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
 
 	HWRM_CHECK_RESULT();
 	HWRM_UNLOCK();
 
-	vnic->rss_rule = INVALID_HW_RING_ID;
+	return rc;
+}
+
+int bnxt_hwrm_vnic_ctx_free(struct bnxt *bp, struct bnxt_vnic_info *vnic)
+{
+	int rc = 0;
+
+	if (BNXT_CHIP_THOR(bp)) {
+		int j;
+
+		for (j = 0; j < vnic->num_lb_ctxts; j++) {
+			rc = _bnxt_hwrm_vnic_ctx_free(bp,
+						      vnic,
+						      vnic->fw_grp_ids[j]);
+			vnic->fw_grp_ids[j] = INVALID_HW_RING_ID;
+		}
+		vnic->num_lb_ctxts = 0;
+	} else {
+		rc = _bnxt_hwrm_vnic_ctx_free(bp, vnic, vnic->rss_rule);
+		vnic->rss_rule = INVALID_HW_RING_ID;
+	}
 
 	return rc;
 }
@@ -1513,16 +1834,55 @@ int bnxt_hwrm_vnic_free(struct bnxt *bp, struct bnxt_vnic_info *vnic)
 		return rc;
 	}
 
-	HWRM_PREP(req, VNIC_FREE);
+	HWRM_PREP(req, VNIC_FREE, BNXT_USE_CHIMP_MB);
 
 	req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
 
-	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
 
 	HWRM_CHECK_RESULT();
 	HWRM_UNLOCK();
 
 	vnic->fw_vnic_id = INVALID_HW_RING_ID;
+	/* Configure default VNIC again if necessary. */
+	if (vnic->func_default && (bp->flags & BNXT_FLAG_DFLT_VNIC_SET))
+		bp->flags &= ~BNXT_FLAG_DFLT_VNIC_SET;
+
+	return rc;
+}
+
+static int
+bnxt_hwrm_vnic_rss_cfg_thor(struct bnxt *bp, struct bnxt_vnic_info *vnic)
+{
+	int i;
+	int rc = 0;
+	int nr_ctxs = vnic->num_lb_ctxts;
+	struct hwrm_vnic_rss_cfg_input req = {.req_type = 0 };
+	struct hwrm_vnic_rss_cfg_output *resp = bp->hwrm_cmd_resp_addr;
+
+	for (i = 0; i < nr_ctxs; i++) {
+		HWRM_PREP(req, VNIC_RSS_CFG, BNXT_USE_CHIMP_MB);
+
+		req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
+		req.hash_type = rte_cpu_to_le_32(vnic->hash_type);
+		req.hash_mode_flags = vnic->hash_mode;
+
+		req.hash_key_tbl_addr =
+			rte_cpu_to_le_64(vnic->rss_hash_key_dma_addr);
+
+		req.ring_grp_tbl_addr =
+			rte_cpu_to_le_64(vnic->rss_table_dma_addr +
+					 i * HW_HASH_INDEX_SIZE);
+		req.ring_table_pair_index = i;
+		req.rss_ctx_idx = rte_cpu_to_le_16(vnic->fw_grp_ids[i]);
+
+		rc = bnxt_hwrm_send_message(bp, &req, sizeof(req),
+					    BNXT_USE_CHIMP_MB);
+
+		HWRM_CHECK_RESULT();
+		HWRM_UNLOCK();
+	}
 
 	return rc;
 }
@@ -1533,7 +1893,13 @@ int bnxt_hwrm_vnic_rss_cfg(struct bnxt *bp,
 	struct hwrm_vnic_rss_cfg_input req = {.req_type = 0 };
 	struct hwrm_vnic_rss_cfg_output *resp = bp->hwrm_cmd_resp_addr;
 
-	HWRM_PREP(req, VNIC_RSS_CFG);
+	if (!vnic->rss_table)
+		return 0;
+
+	if (BNXT_CHIP_THOR(bp))
+		return bnxt_hwrm_vnic_rss_cfg_thor(bp, vnic);
+
+	HWRM_PREP(req, VNIC_RSS_CFG, BNXT_USE_CHIMP_MB);
 
 	req.hash_type = rte_cpu_to_le_32(vnic->hash_type);
 	req.hash_mode_flags = vnic->hash_mode;
@@ -1543,8 +1909,9 @@ int bnxt_hwrm_vnic_rss_cfg(struct bnxt *bp,
 	req.hash_key_tbl_addr =
 	    rte_cpu_to_le_64(vnic->rss_hash_key_dma_addr);
 	req.rss_ctx_idx = rte_cpu_to_le_16(vnic->rss_rule);
+	req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
 
-	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
 
 	HWRM_CHECK_RESULT();
 	HWRM_UNLOCK();
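On Thor the indirection table is programmed in slices: bnxt_hwrm_vnic_rss_cfg_thor() issues one VNIC_RSS_CFG per context, advancing the ring group table address by HW_HASH_INDEX_SIZE and binding each slice to the context allocated for it. A sketch of the allocation side that pairs with it (the loop bounds are illustrative):

    /* One RSS/COS/LB context per ring-table pair on Thor. */
    for (i = 0; i < nr_ctxs; i++) {
            rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic, i); /* fills fw_grp_ids[i] */
            if (rc)
                    return rc;
    }
    vnic->num_lb_ctxts = nr_ctxs;          /* consumed by cfg_thor/ctx_free */
    rc = bnxt_hwrm_vnic_rss_cfg(bp, vnic); /* dispatches to the Thor loop   */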
@@ -1565,7 +1932,7 @@ int bnxt_hwrm_vnic_plcmode_cfg(struct bnxt *bp,
 		return rc;
 	}
 
-	HWRM_PREP(req, VNIC_PLCMODES_CFG);
+	HWRM_PREP(req, VNIC_PLCMODES_CFG, BNXT_USE_CHIMP_MB);
 
 	req.flags = rte_cpu_to_le_32(
 			HWRM_VNIC_PLCMODES_CFG_INPUT_FLAGS_JUMBO_PLACEMENT);
@@ -1575,11 +1942,12 @@ int bnxt_hwrm_vnic_plcmode_cfg(struct bnxt *bp,
 
 	size = rte_pktmbuf_data_room_size(bp->rx_queues[0]->mb_pool);
 	size -= RTE_PKTMBUF_HEADROOM;
+	size = RTE_MIN(BNXT_MAX_PKT_LEN, size);
 
 	req.jumbo_thresh = rte_cpu_to_le_16(size);
 	req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
 
-	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
 
 	HWRM_CHECK_RESULT();
 	HWRM_UNLOCK();
@@ -1594,7 +1962,15 @@ int bnxt_hwrm_vnic_tpa_cfg(struct bnxt *bp,
 	struct hwrm_vnic_tpa_cfg_input req = {.req_type = 0 };
 	struct hwrm_vnic_tpa_cfg_output *resp = bp->hwrm_cmd_resp_addr;
 
-	HWRM_PREP(req, VNIC_TPA_CFG);
+	if (BNXT_CHIP_THOR(bp))
+		return 0;
+
+	if (vnic->fw_vnic_id == INVALID_HW_RING_ID) {
+		PMD_DRV_LOG(DEBUG, "Invalid vNIC ID\n");
+		return 0;
+	}
+
+	HWRM_PREP(req, VNIC_TPA_CFG, BNXT_USE_CHIMP_MB);
 
 	if (enable) {
 		req.enables = rte_cpu_to_le_32(
@@ -1615,7 +1991,7 @@ int bnxt_hwrm_vnic_tpa_cfg(struct bnxt *bp,
 	}
 	req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
 
-	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
 
 	HWRM_CHECK_RESULT();
 	HWRM_UNLOCK();
@@ -1635,9 +2011,9 @@ int bnxt_hwrm_func_vf_mac(struct bnxt *bp, uint16_t vf, const uint8_t *mac_addr)
 	memcpy(req.dflt_mac_addr, mac_addr, sizeof(req.dflt_mac_addr));
 	req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
 
-	HWRM_PREP(req, FUNC_CFG);
+	HWRM_PREP(req, FUNC_CFG, BNXT_USE_CHIMP_MB);
 
-	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
 
 	HWRM_CHECK_RESULT();
 	HWRM_UNLOCK();
@@ -1653,11 +2029,11 @@ int bnxt_hwrm_func_qstats_tx_drop(struct bnxt *bp, uint16_t fid,
 	struct hwrm_func_qstats_input req = {.req_type = 0};
 	struct hwrm_func_qstats_output *resp = bp->hwrm_cmd_resp_addr;
 
-	HWRM_PREP(req, FUNC_QSTATS);
+	HWRM_PREP(req, FUNC_QSTATS, BNXT_USE_CHIMP_MB);
 
 	req.fid = rte_cpu_to_le_16(fid);
 
-	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
 
 	HWRM_CHECK_RESULT();
 
@@ -1676,11 +2052,11 @@ int bnxt_hwrm_func_qstats(struct bnxt *bp, uint16_t fid,
 	struct hwrm_func_qstats_input req = {.req_type = 0};
 	struct hwrm_func_qstats_output *resp = bp->hwrm_cmd_resp_addr;
 
-	HWRM_PREP(req, FUNC_QSTATS);
+	HWRM_PREP(req, FUNC_QSTATS, BNXT_USE_CHIMP_MB);
 
 	req.fid = rte_cpu_to_le_16(fid);
 
-	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
 
 	HWRM_CHECK_RESULT();
 
@@ -1713,11 +2089,11 @@ int bnxt_hwrm_func_clr_stats(struct bnxt *bp, uint16_t fid)
 	struct hwrm_func_clr_stats_input req = {.req_type = 0};
 	struct hwrm_func_clr_stats_output *resp = bp->hwrm_cmd_resp_addr;
 
-	HWRM_PREP(req, FUNC_CLR_STATS);
+	HWRM_PREP(req, FUNC_CLR_STATS, BNXT_USE_CHIMP_MB);
 
 	req.fid = rte_cpu_to_le_16(fid);
 
-	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
 
 	HWRM_CHECK_RESULT();
 	HWRM_UNLOCK();
@@ -1766,7 +2142,8 @@ int bnxt_free_all_hwrm_stat_ctxs(struct bnxt *bp)
 			cpr = bp->tx_queues[i - bp->rx_cp_nr_rings]->cp_ring;
 		} else {
 			cpr = bp->rx_queues[i]->cp_ring;
-			bp->grp_info[i].fw_stats_ctx = -1;
+			if (BNXT_HAS_RING_GRPS(bp))
+				bp->grp_info[i].fw_stats_ctx = -1;
 		}
 		if (cpr->hw_stats_ctx_id != HWRM_NA_SIGNATURE) {
 			rc = bnxt_hwrm_stat_ctx_free(bp, cpr, i);
@@ -1809,6 +2186,9 @@ int bnxt_free_all_hwrm_ring_grps(struct bnxt *bp)
 	uint16_t idx;
 	uint32_t rc = 0;
 
+	if (!BNXT_HAS_RING_GRPS(bp))
+		return 0;
+
 	for (idx = 0; idx < bp->rx_cp_nr_rings; idx++) {
 
 		if (bp->grp_info[idx].fw_grp_id == INVALID_HW_RING_ID)
@@ -1822,7 +2202,20 @@ int bnxt_free_all_hwrm_ring_grps(struct bnxt *bp)
 	return rc;
 }
 
-static void bnxt_free_cp_ring(struct bnxt *bp, struct bnxt_cp_ring_info *cpr)
+void bnxt_free_nq_ring(struct bnxt *bp, struct bnxt_cp_ring_info *cpr)
+{
+	struct bnxt_ring *cp_ring = cpr->cp_ring_struct;
+
+	bnxt_hwrm_ring_free(bp, cp_ring,
+			    HWRM_RING_FREE_INPUT_RING_TYPE_NQ);
+	cp_ring->fw_ring_id = INVALID_HW_RING_ID;
+	memset(cpr->cp_desc_ring, 0, cpr->cp_ring_struct->ring_size *
+				     sizeof(*cpr->cp_desc_ring));
+	cpr->cp_raw_cons = 0;
+	cpr->valid = 0;
+}
+
+void bnxt_free_cp_ring(struct bnxt *bp, struct bnxt_cp_ring_info *cpr)
 {
 	struct bnxt_ring *cp_ring = cpr->cp_ring_struct;
 
@@ -1832,6 +2225,7 @@ static void bnxt_free_cp_ring(struct bnxt *bp, struct bnxt_cp_ring_info *cpr)
 	memset(cpr->cp_desc_ring, 0, cpr->cp_ring_struct->ring_size *
 			sizeof(*cpr->cp_desc_ring));
 	cpr->cp_raw_cons = 0;
+	cpr->valid = 0;
 }
 
 void bnxt_free_hwrm_rx_ring(struct bnxt *bp, int queue_index)
@@ -1845,7 +2239,9 @@ void bnxt_free_hwrm_rx_ring(struct bnxt *bp, int queue_index)
 		bnxt_hwrm_ring_free(bp, ring,
 				    HWRM_RING_FREE_INPUT_RING_TYPE_RX);
 		ring->fw_ring_id = INVALID_HW_RING_ID;
-		bp->grp_info[queue_index].rx_fw_ring_id = INVALID_HW_RING_ID;
+		if (BNXT_HAS_RING_GRPS(bp))
+			bp->grp_info[queue_index].rx_fw_ring_id =
+					INVALID_HW_RING_ID;
 		memset(rxr->rx_desc_ring, 0,
 		       rxr->rx_ring_struct->ring_size *
 		       sizeof(*rxr->rx_desc_ring));
@@ -1857,18 +2253,26 @@ void bnxt_free_hwrm_rx_ring(struct bnxt *bp, int queue_index)
 	ring = rxr->ag_ring_struct;
 	if (ring->fw_ring_id != INVALID_HW_RING_ID) {
 		bnxt_hwrm_ring_free(bp, ring,
+				    BNXT_CHIP_THOR(bp) ?
+				    HWRM_RING_FREE_INPUT_RING_TYPE_RX_AGG :
 				    HWRM_RING_FREE_INPUT_RING_TYPE_RX);
 		ring->fw_ring_id = INVALID_HW_RING_ID;
 		memset(rxr->ag_buf_ring, 0,
 		       rxr->ag_ring_struct->ring_size *
 		       sizeof(*rxr->ag_buf_ring));
 		rxr->ag_prod = 0;
-		bp->grp_info[queue_index].ag_fw_ring_id = INVALID_HW_RING_ID;
+		if (BNXT_HAS_RING_GRPS(bp))
+			bp->grp_info[queue_index].ag_fw_ring_id =
+					INVALID_HW_RING_ID;
 	}
-	if (cpr->cp_ring_struct->fw_ring_id != INVALID_HW_RING_ID)
+	if (cpr->cp_ring_struct->fw_ring_id != INVALID_HW_RING_ID) {
 		bnxt_free_cp_ring(bp, cpr);
+		if (rxq->nq_ring)
+			bnxt_free_nq_ring(bp, rxq->nq_ring);
+	}
 
-	bp->grp_info[queue_index].cp_fw_ring_id = INVALID_HW_RING_ID;
+	if (BNXT_HAS_RING_GRPS(bp))
+		bp->grp_info[queue_index].cp_fw_ring_id = INVALID_HW_RING_ID;
 }
 
 int bnxt_free_all_hwrm_rings(struct bnxt *bp)
@@ -1897,6 +2301,8 @@ int bnxt_free_all_hwrm_rings(struct bnxt *bp)
 		if (cpr->cp_ring_struct->fw_ring_id != INVALID_HW_RING_ID) {
 			bnxt_free_cp_ring(bp, cpr);
 			cpr->cp_ring_struct->fw_ring_id = INVALID_HW_RING_ID;
+			if (txq->nq_ring)
+				bnxt_free_nq_ring(bp, txq->nq_ring);
 		}
 	}
 
@@ -1911,6 +2317,9 @@ int bnxt_alloc_all_hwrm_ring_grps(struct bnxt *bp)
 	uint16_t i;
 	uint32_t rc = 0;
 
+	if (!BNXT_HAS_RING_GRPS(bp))
+		return 0;
+
 	for (i = 0; i < bp->rx_cp_nr_rings; i++) {
 		rc = bnxt_hwrm_ring_grp_alloc(bp, i);
 		if (rc)
@@ -1944,7 +2353,7 @@ int bnxt_alloc_hwrm_resources(struct bnxt *bp)
 		return -ENOMEM;
 	bp->hwrm_cmd_resp_dma_addr =
 		rte_mem_virt2iova(bp->hwrm_cmd_resp_addr);
-	if (bp->hwrm_cmd_resp_dma_addr == 0) {
+	if (bp->hwrm_cmd_resp_dma_addr == RTE_BAD_IOVA) {
 		PMD_DRV_LOG(ERR,
 			"unable to map response address to physical memory\n");
 		return -ENOMEM;
@@ -1966,6 +2375,8 @@ int bnxt_clear_hwrm_vnic_filters(struct bnxt *bp, struct bnxt_vnic_info *vnic)
 			rc = bnxt_hwrm_clear_ntuple_filter(bp, filter);
 		else
 			rc = bnxt_hwrm_clear_l2_filter(bp, filter);
+		STAILQ_REMOVE(&vnic->filter, filter, bnxt_filter_info, next);
+		bnxt_free_filter(bp, filter);
 		//if (rc)
 			//break;
 	}
@@ -1979,9 +2390,10 @@ bnxt_clear_hwrm_vnic_flows(struct bnxt *bp, struct bnxt_vnic_info *vnic)
 	struct rte_flow *flow;
 	int rc = 0;
 
-	STAILQ_FOREACH(flow, &vnic->flow_list, next) {
+	while (!STAILQ_EMPTY(&vnic->flow_list)) {
+		flow = STAILQ_FIRST(&vnic->flow_list);
 		filter = flow->filter;
-		PMD_DRV_LOG(ERR, "filter type %d\n", filter->filter_type);
+		PMD_DRV_LOG(DEBUG, "filter type %d\n", filter->filter_type);
 		if (filter->filter_type == HWRM_CFA_EM_FILTER)
 			rc = bnxt_hwrm_clear_em_filter(bp, filter);
 		else if (filter->filter_type == HWRM_CFA_NTUPLE_FILTER)
@@ -2041,9 +2453,13 @@ void bnxt_free_all_hwrm_resources(struct bnxt *bp)
 	 * Cleanup VNICs in reverse order, to make sure the L2 filter
 	 * from vnic0 is last to be cleaned up.
*/ - for (i = bp->nr_vnics - 1; i >= 0; i--) { + for (i = bp->max_vnics - 1; i >= 0; i--) { struct bnxt_vnic_info *vnic = &bp->vnic_info[i]; + // If the VNIC ID is invalid we are not currently using the VNIC + if (vnic->fw_vnic_id == INVALID_HW_RING_ID) + continue; + bnxt_clear_hwrm_vnic_flows(bp, vnic); bnxt_clear_hwrm_vnic_filters(bp, vnic); @@ -2053,6 +2469,8 @@ void bnxt_free_all_hwrm_resources(struct bnxt *bp) bnxt_hwrm_vnic_tpa_cfg(bp, vnic, false); bnxt_hwrm_vnic_free(bp, vnic); + + rte_free(vnic->fw_grp_ids); } /* Ring resources */ bnxt_free_all_hwrm_rings(bp); @@ -2317,11 +2735,27 @@ int bnxt_set_hwrm_link_config(struct bnxt *bp, bool link_up) goto port_phy_cfg; autoneg = bnxt_check_eth_link_autoneg(dev_conf->link_speeds); + if (BNXT_CHIP_THOR(bp) && + dev_conf->link_speeds == ETH_LINK_SPEED_40G) { + /* 40G is not supported as part of media auto detect. + * The speed should be forced and autoneg disabled + * to configure 40G speed. + */ + PMD_DRV_LOG(INFO, "Disabling autoneg for 40G\n"); + autoneg = 0; + } + speed = bnxt_parse_eth_link_speed(dev_conf->link_speeds); link_req.phy_flags = HWRM_PORT_PHY_CFG_INPUT_FLAGS_RESET_PHY; - /* Autoneg can be done only when the FW allows */ - if (autoneg == 1 && !(bp->link_info.auto_link_speed || - bp->link_info.force_link_speed)) { + /* Autoneg can be done only when the FW allows. + * When user configures fixed speed of 40G and later changes to + * any other speed, auto_link_speed/force_link_speed is still set + * to 40G until link comes up at new speed. + */ + if (autoneg == 1 && + !(!BNXT_CHIP_THOR(bp) && + (bp->link_info.auto_link_speed || + bp->link_info.force_link_speed))) { link_req.phy_flags |= HWRM_PORT_PHY_CFG_INPUT_FLAGS_RESTART_AUTONEG; link_req.auto_link_speed_mask = @@ -2363,17 +2797,17 @@ error: } /* JIRA 22088 */ -int bnxt_hwrm_func_qcfg(struct bnxt *bp) +int bnxt_hwrm_func_qcfg(struct bnxt *bp, uint16_t *mtu) { struct hwrm_func_qcfg_input req = {0}; struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr; uint16_t flags; int rc = 0; - HWRM_PREP(req, FUNC_QCFG); + HWRM_PREP(req, FUNC_QCFG, BNXT_USE_CHIMP_MB); req.fid = rte_cpu_to_le_16(0xffff); - rc = bnxt_hwrm_send_message(bp, &req, sizeof(req)); + rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB); HWRM_CHECK_RESULT(); @@ -2383,6 +2817,18 @@ int bnxt_hwrm_func_qcfg(struct bnxt *bp) if (BNXT_PF(bp) && (flags & HWRM_FUNC_QCFG_OUTPUT_FLAGS_MULTI_HOST)) bp->flags |= BNXT_FLAG_MULTI_HOST; + if (BNXT_VF(bp) && (flags & HWRM_FUNC_QCFG_OUTPUT_FLAGS_TRUSTED_VF)) { + bp->flags |= BNXT_FLAG_TRUSTED_VF_EN; + PMD_DRV_LOG(INFO, "Trusted VF cap enabled\n"); + } else if (BNXT_VF(bp) && + !(flags & HWRM_FUNC_QCFG_OUTPUT_FLAGS_TRUSTED_VF)) { + bp->flags &= ~BNXT_FLAG_TRUSTED_VF_EN; + PMD_DRV_LOG(INFO, "Trusted VF cap disabled\n"); + } + + if (mtu) + *mtu = resp->mtu; + switch (resp->port_partition_type) { case HWRM_FUNC_QCFG_OUTPUT_PORT_PARTITION_TYPE_NPAR1_0: case HWRM_FUNC_QCFG_OUTPUT_PORT_PARTITION_TYPE_NPAR1_5: @@ -2430,22 +2876,31 @@ static int bnxt_hwrm_pf_func_cfg(struct bnxt *bp, int tx_rings) { struct hwrm_func_cfg_input req = {0}; struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr; + uint32_t enables; int rc; - req.enables = rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_MTU | - HWRM_FUNC_CFG_INPUT_ENABLES_MRU | - HWRM_FUNC_CFG_INPUT_ENABLES_NUM_RSSCOS_CTXS | - HWRM_FUNC_CFG_INPUT_ENABLES_NUM_STAT_CTXS | - HWRM_FUNC_CFG_INPUT_ENABLES_NUM_CMPL_RINGS | - HWRM_FUNC_CFG_INPUT_ENABLES_NUM_TX_RINGS | - HWRM_FUNC_CFG_INPUT_ENABLES_NUM_RX_RINGS | - 
HWRM_FUNC_CFG_INPUT_ENABLES_NUM_L2_CTXS | - HWRM_FUNC_CFG_INPUT_ENABLES_NUM_VNICS | - HWRM_FUNC_CFG_INPUT_ENABLES_NUM_HW_RING_GRPS); + enables = HWRM_FUNC_CFG_INPUT_ENABLES_MTU | + HWRM_FUNC_CFG_INPUT_ENABLES_MRU | + HWRM_FUNC_CFG_INPUT_ENABLES_NUM_RSSCOS_CTXS | + HWRM_FUNC_CFG_INPUT_ENABLES_NUM_STAT_CTXS | + HWRM_FUNC_CFG_INPUT_ENABLES_NUM_CMPL_RINGS | + HWRM_FUNC_CFG_INPUT_ENABLES_NUM_TX_RINGS | + HWRM_FUNC_CFG_INPUT_ENABLES_NUM_RX_RINGS | + HWRM_FUNC_CFG_INPUT_ENABLES_NUM_L2_CTXS | + HWRM_FUNC_CFG_INPUT_ENABLES_NUM_VNICS; + + if (BNXT_HAS_RING_GRPS(bp)) { + enables |= HWRM_FUNC_CFG_INPUT_ENABLES_NUM_HW_RING_GRPS; + req.num_hw_ring_grps = rte_cpu_to_le_16(bp->max_ring_grps); + } else if (BNXT_HAS_NQ(bp)) { + enables |= HWRM_FUNC_CFG_INPUT_ENABLES_NUM_MSIX; + req.num_msix = rte_cpu_to_le_16(bp->max_nq_rings); + } + req.flags = rte_cpu_to_le_32(bp->pf.func_cfg_flags); req.mtu = rte_cpu_to_le_16(BNXT_MAX_MTU); - req.mru = rte_cpu_to_le_16(bp->eth_dev->data->mtu + ETHER_HDR_LEN + - ETHER_CRC_LEN + VLAN_TAG_SIZE * + req.mru = rte_cpu_to_le_16(bp->eth_dev->data->mtu + RTE_ETHER_HDR_LEN + + RTE_ETHER_CRC_LEN + VLAN_TAG_SIZE * BNXT_NUM_VLANS); req.num_rsscos_ctxs = rte_cpu_to_le_16(bp->max_rsscos_ctx); req.num_stat_ctxs = rte_cpu_to_le_16(bp->max_stat_ctx); @@ -2454,12 +2909,12 @@ static int bnxt_hwrm_pf_func_cfg(struct bnxt *bp, int tx_rings) req.num_rx_rings = rte_cpu_to_le_16(bp->max_rx_rings); req.num_l2_ctxs = rte_cpu_to_le_16(bp->max_l2_ctx); req.num_vnics = rte_cpu_to_le_16(bp->max_vnics); - req.num_hw_ring_grps = rte_cpu_to_le_16(bp->max_ring_grps); req.fid = rte_cpu_to_le_16(0xffff); + req.enables = rte_cpu_to_le_32(enables); - HWRM_PREP(req, FUNC_CFG); + HWRM_PREP(req, FUNC_CFG, BNXT_USE_CHIMP_MB); - rc = bnxt_hwrm_send_message(bp, &req, sizeof(req)); + rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB); HWRM_CHECK_RESULT(); HWRM_UNLOCK(); @@ -2482,11 +2937,11 @@ static void populate_vf_func_cfg_req(struct bnxt *bp, HWRM_FUNC_CFG_INPUT_ENABLES_NUM_VNICS | HWRM_FUNC_CFG_INPUT_ENABLES_NUM_HW_RING_GRPS); - req->mtu = rte_cpu_to_le_16(bp->eth_dev->data->mtu + ETHER_HDR_LEN + - ETHER_CRC_LEN + VLAN_TAG_SIZE * + req->mtu = rte_cpu_to_le_16(bp->eth_dev->data->mtu + RTE_ETHER_HDR_LEN + + RTE_ETHER_CRC_LEN + VLAN_TAG_SIZE * BNXT_NUM_VLANS); - req->mru = rte_cpu_to_le_16(bp->eth_dev->data->mtu + ETHER_HDR_LEN + - ETHER_CRC_LEN + VLAN_TAG_SIZE * + req->mru = rte_cpu_to_le_16(bp->eth_dev->data->mtu + RTE_ETHER_HDR_LEN + + RTE_ETHER_CRC_LEN + VLAN_TAG_SIZE * BNXT_NUM_VLANS); req->num_rsscos_ctxs = rte_cpu_to_le_16(bp->max_rsscos_ctx / (num_vfs + 1)); @@ -2506,7 +2961,7 @@ static void add_random_mac_if_needed(struct bnxt *bp, struct hwrm_func_cfg_input *cfg_req, int vf) { - struct ether_addr mac; + struct rte_ether_addr mac; if (bnxt_hwrm_func_qcfg_vf_default_mac(bp, vf, &mac)) return; @@ -2514,10 +2969,11 @@ static void add_random_mac_if_needed(struct bnxt *bp, if (memcmp(mac.addr_bytes, "\x00\x00\x00\x00\x00", 6) == 0) { cfg_req->enables |= rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_DFLT_MAC_ADDR); - eth_random_addr(cfg_req->dflt_mac_addr); + rte_eth_random_addr(cfg_req->dflt_mac_addr); bp->pf.vf_info[vf].random_mac = true; } else { - memcpy(cfg_req->dflt_mac_addr, mac.addr_bytes, ETHER_ADDR_LEN); + memcpy(cfg_req->dflt_mac_addr, mac.addr_bytes, + RTE_ETHER_ADDR_LEN); } } @@ -2530,9 +2986,9 @@ static void reserve_resources_from_vf(struct bnxt *bp, int rc; /* Get the actual allocated values now */ - HWRM_PREP(req, FUNC_QCAPS); + HWRM_PREP(req, FUNC_QCAPS, BNXT_USE_CHIMP_MB); 
req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid); - rc = bnxt_hwrm_send_message(bp, &req, sizeof(req)); + rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB); if (rc) { PMD_DRV_LOG(ERR, "hwrm_func_qcaps failed rc:%d\n", rc); @@ -2566,17 +3022,10 @@ int bnxt_hwrm_func_qcfg_current_vf_vlan(struct bnxt *bp, int vf) int rc; /* Check for zero MAC address */ - HWRM_PREP(req, FUNC_QCFG); + HWRM_PREP(req, FUNC_QCFG, BNXT_USE_CHIMP_MB); req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid); - rc = bnxt_hwrm_send_message(bp, &req, sizeof(req)); - if (rc) { - PMD_DRV_LOG(ERR, "hwrm_func_qcfg failed rc:%d\n", rc); - return -1; - } else if (resp->error_code) { - rc = rte_le_to_cpu_16(resp->error_code); - PMD_DRV_LOG(ERR, "hwrm_func_qcfg error %d\n", rc); - return -1; - } + rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB); + HWRM_CHECK_RESULT(); rc = rte_le_to_cpu_16(resp->vlan); HWRM_UNLOCK(); @@ -2591,9 +3040,9 @@ static int update_pf_resource_max(struct bnxt *bp) int rc; /* And copy the allocated numbers into the pf struct */ - HWRM_PREP(req, FUNC_QCFG); + HWRM_PREP(req, FUNC_QCFG, BNXT_USE_CHIMP_MB); req.fid = rte_cpu_to_le_16(0xffff); - rc = bnxt_hwrm_send_message(bp, &req, sizeof(req)); + rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB); HWRM_CHECK_RESULT(); /* Only TX ring value reflects actual allocation? TODO */ @@ -2611,7 +3060,7 @@ int bnxt_hwrm_allocate_pf_only(struct bnxt *bp) if (!BNXT_PF(bp)) { PMD_DRV_LOG(ERR, "Attempt to allocate VFs on a VF!\n"); - return -1; + return -EINVAL; } rc = bnxt_hwrm_func_qcaps(bp); @@ -2624,6 +3073,7 @@ bp->pf.func_cfg_flags |= HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_DISABLE; rc = bnxt_hwrm_pf_func_cfg(bp, bp->max_tx_rings); + rc = __bnxt_hwrm_func_qcaps(bp); return rc; } @@ -2638,7 +3088,7 @@ int bnxt_hwrm_allocate_vfs(struct bnxt *bp, int num_vfs) if (!BNXT_PF(bp)) { PMD_DRV_LOG(ERR, "Attempt to allocate VFs on a VF!\n"); - return -1; + return -EINVAL; } rc = bnxt_hwrm_func_qcaps(bp); @@ -2693,10 +3143,13 @@ for (i = 0; i < num_vfs; i++) { add_random_mac_if_needed(bp, &req, i); - HWRM_PREP(req, FUNC_CFG); + HWRM_PREP(req, FUNC_CFG, BNXT_USE_CHIMP_MB); req.flags = rte_cpu_to_le_32(bp->pf.vf_info[i].func_cfg_flags); req.fid = rte_cpu_to_le_16(bp->pf.vf_info[i].fid); - rc = bnxt_hwrm_send_message(bp, &req, sizeof(req)); + rc = bnxt_hwrm_send_message(bp, + &req, + sizeof(req), + BNXT_USE_CHIMP_MB); /* Clear enable flag for next pass */ req.enables &= ~rte_cpu_to_le_32( @@ -2746,13 +3199,13 @@ int bnxt_hwrm_pf_evb_mode(struct bnxt *bp) struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr; int rc; - HWRM_PREP(req, FUNC_CFG); + HWRM_PREP(req, FUNC_CFG, BNXT_USE_CHIMP_MB); req.fid = rte_cpu_to_le_16(0xffff); req.enables = rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_EVB_MODE); req.evb_mode = bp->pf.evb_mode; - rc = bnxt_hwrm_send_message(bp, &req, sizeof(req)); + rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB); HWRM_CHECK_RESULT(); HWRM_UNLOCK(); @@ -2766,10 +3219,10 @@ int bnxt_hwrm_tunnel_dst_port_alloc(struct bnxt *bp, uint16_t port, struct hwrm_tunnel_dst_port_alloc_output *resp = bp->hwrm_cmd_resp_addr; int rc = 0; - HWRM_PREP(req, TUNNEL_DST_PORT_ALLOC); + HWRM_PREP(req, TUNNEL_DST_PORT_ALLOC, BNXT_USE_CHIMP_MB); req.tunnel_type = tunnel_type; req.tunnel_dst_port_val = port; - rc = bnxt_hwrm_send_message(bp, &req, sizeof(req)); + rc = bnxt_hwrm_send_message(bp, &req,
sizeof(req), BNXT_USE_CHIMP_MB); HWRM_CHECK_RESULT(); switch (tunnel_type) { @@ -2797,11 +3250,11 @@ int bnxt_hwrm_tunnel_dst_port_free(struct bnxt *bp, uint16_t port, struct hwrm_tunnel_dst_port_free_output *resp = bp->hwrm_cmd_resp_addr; int rc = 0; - HWRM_PREP(req, TUNNEL_DST_PORT_FREE); + HWRM_PREP(req, TUNNEL_DST_PORT_FREE, BNXT_USE_CHIMP_MB); req.tunnel_type = tunnel_type; req.tunnel_dst_port_id = rte_cpu_to_be_16(port); - rc = bnxt_hwrm_send_message(bp, &req, sizeof(req)); + rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB); HWRM_CHECK_RESULT(); HWRM_UNLOCK(); @@ -2816,11 +3269,11 @@ int bnxt_hwrm_func_cfg_vf_set_flags(struct bnxt *bp, uint16_t vf, struct hwrm_func_cfg_input req = {0}; int rc; - HWRM_PREP(req, FUNC_CFG); + HWRM_PREP(req, FUNC_CFG, BNXT_USE_CHIMP_MB); req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid); req.flags = rte_cpu_to_le_32(flags); - rc = bnxt_hwrm_send_message(bp, &req, sizeof(req)); + rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB); HWRM_CHECK_RESULT(); HWRM_UNLOCK(); @@ -2846,7 +3299,7 @@ int bnxt_hwrm_func_buf_rgtr(struct bnxt *bp) struct hwrm_func_buf_rgtr_input req = {.req_type = 0 }; struct hwrm_func_buf_rgtr_output *resp = bp->hwrm_cmd_resp_addr; - HWRM_PREP(req, FUNC_BUF_RGTR); + HWRM_PREP(req, FUNC_BUF_RGTR, BNXT_USE_CHIMP_MB); req.req_buf_num_pages = rte_cpu_to_le_16(1); req.req_buf_page_size = rte_cpu_to_le_16( @@ -2854,13 +3307,13 @@ int bnxt_hwrm_func_buf_rgtr(struct bnxt *bp) req.req_buf_len = rte_cpu_to_le_16(HWRM_MAX_REQ_LEN); req.req_buf_page_addr0 = rte_cpu_to_le_64(rte_mem_virt2iova(bp->pf.vf_req_buf)); - if (req.req_buf_page_addr0 == 0) { + if (req.req_buf_page_addr0 == RTE_BAD_IOVA) { PMD_DRV_LOG(ERR, "unable to map buffer address to physical memory\n"); return -ENOMEM; } - rc = bnxt_hwrm_send_message(bp, &req, sizeof(req)); + rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB); HWRM_CHECK_RESULT(); HWRM_UNLOCK(); @@ -2874,9 +3327,12 @@ int bnxt_hwrm_func_buf_unrgtr(struct bnxt *bp) struct hwrm_func_buf_unrgtr_input req = {.req_type = 0 }; struct hwrm_func_buf_unrgtr_output *resp = bp->hwrm_cmd_resp_addr; - HWRM_PREP(req, FUNC_BUF_UNRGTR); + if (!(BNXT_PF(bp) && bp->pdev->max_vfs)) + return 0; + + HWRM_PREP(req, FUNC_BUF_UNRGTR, BNXT_USE_CHIMP_MB); - rc = bnxt_hwrm_send_message(bp, &req, sizeof(req)); + rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB); HWRM_CHECK_RESULT(); HWRM_UNLOCK(); @@ -2890,15 +3346,15 @@ int bnxt_hwrm_func_cfg_def_cp(struct bnxt *bp) struct hwrm_func_cfg_input req = {0}; int rc; - HWRM_PREP(req, FUNC_CFG); + HWRM_PREP(req, FUNC_CFG, BNXT_USE_CHIMP_MB); req.fid = rte_cpu_to_le_16(0xffff); req.flags = rte_cpu_to_le_32(bp->pf.func_cfg_flags); req.enables = rte_cpu_to_le_32( HWRM_FUNC_CFG_INPUT_ENABLES_ASYNC_EVENT_CR); req.async_event_cr = rte_cpu_to_le_16( - bp->def_cp_ring->cp_ring_struct->fw_ring_id); - rc = bnxt_hwrm_send_message(bp, &req, sizeof(req)); + bp->async_cp_ring->cp_ring_struct->fw_ring_id); + rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB); HWRM_CHECK_RESULT(); HWRM_UNLOCK(); @@ -2912,13 +3368,13 @@ int bnxt_hwrm_vf_func_cfg_def_cp(struct bnxt *bp) struct hwrm_func_vf_cfg_input req = {0}; int rc; - HWRM_PREP(req, FUNC_VF_CFG); + HWRM_PREP(req, FUNC_VF_CFG, BNXT_USE_CHIMP_MB); req.enables = rte_cpu_to_le_32( - HWRM_FUNC_CFG_INPUT_ENABLES_ASYNC_EVENT_CR); + HWRM_FUNC_VF_CFG_INPUT_ENABLES_ASYNC_EVENT_CR); req.async_event_cr = rte_cpu_to_le_16( - bp->def_cp_ring->cp_ring_struct->fw_ring_id); - rc = 
bnxt_hwrm_send_message(bp, &req, sizeof(req)); + bp->async_cp_ring->cp_ring_struct->fw_ring_id); + rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB); HWRM_CHECK_RESULT(); HWRM_UNLOCK(); @@ -2934,7 +3390,7 @@ int bnxt_hwrm_set_default_vlan(struct bnxt *bp, int vf, uint8_t is_vf) uint32_t func_cfg_flags; int rc = 0; - HWRM_PREP(req, FUNC_CFG); + HWRM_PREP(req, FUNC_CFG, BNXT_USE_CHIMP_MB); if (is_vf) { dflt_vlan = bp->pf.vf_info[vf].dflt_vlan; @@ -2951,7 +3407,7 @@ int bnxt_hwrm_set_default_vlan(struct bnxt *bp, int vf, uint8_t is_vf) req.enables |= rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_DFLT_VLAN); req.dflt_vlan = rte_cpu_to_le_16(dflt_vlan); - rc = bnxt_hwrm_send_message(bp, &req, sizeof(req)); + rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB); HWRM_CHECK_RESULT(); HWRM_UNLOCK(); @@ -2966,13 +3422,13 @@ int bnxt_hwrm_func_bw_cfg(struct bnxt *bp, uint16_t vf, struct hwrm_func_cfg_input req = {0}; int rc; - HWRM_PREP(req, FUNC_CFG); + HWRM_PREP(req, FUNC_CFG, BNXT_USE_CHIMP_MB); req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid); req.enables |= rte_cpu_to_le_32(enables); req.flags = rte_cpu_to_le_32(bp->pf.vf_info[vf].func_cfg_flags); req.max_bw = rte_cpu_to_le_32(max_bw); - rc = bnxt_hwrm_send_message(bp, &req, sizeof(req)); + rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB); HWRM_CHECK_RESULT(); HWRM_UNLOCK(); @@ -2986,14 +3442,14 @@ int bnxt_hwrm_set_vf_vlan(struct bnxt *bp, int vf) struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr; int rc = 0; - HWRM_PREP(req, FUNC_CFG); + HWRM_PREP(req, FUNC_CFG, BNXT_USE_CHIMP_MB); req.flags = rte_cpu_to_le_32(bp->pf.vf_info[vf].func_cfg_flags); req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid); req.enables |= rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_DFLT_VLAN); req.dflt_vlan = rte_cpu_to_le_16(bp->pf.vf_info[vf].dflt_vlan); - rc = bnxt_hwrm_send_message(bp, &req, sizeof(req)); + rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB); HWRM_CHECK_RESULT(); HWRM_UNLOCK(); @@ -3023,12 +3479,12 @@ int bnxt_hwrm_reject_fwd_resp(struct bnxt *bp, uint16_t target_id, if (ec_size > sizeof(req.encap_request)) return -1; - HWRM_PREP(req, REJECT_FWD_RESP); + HWRM_PREP(req, REJECT_FWD_RESP, BNXT_USE_CHIMP_MB); req.encap_resp_target_id = rte_cpu_to_le_16(target_id); memcpy(req.encap_request, encaped, ec_size); - rc = bnxt_hwrm_send_message(bp, &req, sizeof(req)); + rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB); HWRM_CHECK_RESULT(); HWRM_UNLOCK(); @@ -3037,20 +3493,20 @@ int bnxt_hwrm_reject_fwd_resp(struct bnxt *bp, uint16_t target_id, } int bnxt_hwrm_func_qcfg_vf_default_mac(struct bnxt *bp, uint16_t vf, - struct ether_addr *mac) + struct rte_ether_addr *mac) { struct hwrm_func_qcfg_input req = {0}; struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr; int rc; - HWRM_PREP(req, FUNC_QCFG); + HWRM_PREP(req, FUNC_QCFG, BNXT_USE_CHIMP_MB); req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid); - rc = bnxt_hwrm_send_message(bp, &req, sizeof(req)); + rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB); HWRM_CHECK_RESULT(); - memcpy(mac->addr_bytes, resp->mac_address, ETHER_ADDR_LEN); + memcpy(mac->addr_bytes, resp->mac_address, RTE_ETHER_ADDR_LEN); HWRM_UNLOCK(); @@ -3067,12 +3523,12 @@ int bnxt_hwrm_exec_fwd_resp(struct bnxt *bp, uint16_t target_id, if (ec_size > sizeof(req.encap_request)) return -1; - HWRM_PREP(req, EXEC_FWD_RESP); + HWRM_PREP(req, EXEC_FWD_RESP, BNXT_USE_CHIMP_MB); req.encap_resp_target_id = 
rte_cpu_to_le_16(target_id); memcpy(req.encap_request, encaped, ec_size); - rc = bnxt_hwrm_send_message(bp, &req, sizeof(req)); + rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB); HWRM_CHECK_RESULT(); HWRM_UNLOCK(); @@ -3087,11 +3543,11 @@ int bnxt_hwrm_ctx_qstats(struct bnxt *bp, uint32_t cid, int idx, struct hwrm_stat_ctx_query_input req = {.req_type = 0}; struct hwrm_stat_ctx_query_output *resp = bp->hwrm_cmd_resp_addr; - HWRM_PREP(req, STAT_CTX_QUERY); + HWRM_PREP(req, STAT_CTX_QUERY, BNXT_USE_CHIMP_MB); req.stat_ctx_id = rte_cpu_to_le_32(cid); - rc = bnxt_hwrm_send_message(bp, &req, sizeof(req)); + rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB); HWRM_CHECK_RESULT(); @@ -3111,7 +3567,6 @@ int bnxt_hwrm_ctx_qstats(struct bnxt *bp, uint32_t cid, int idx, stats->q_obytes[idx] = rte_le_to_cpu_64(resp->tx_ucast_bytes); stats->q_obytes[idx] += rte_le_to_cpu_64(resp->tx_mcast_bytes); stats->q_obytes[idx] += rte_le_to_cpu_64(resp->tx_bcast_bytes); - stats->q_errors[idx] += rte_le_to_cpu_64(resp->tx_err_pkts); } @@ -3127,12 +3582,12 @@ int bnxt_hwrm_port_qstats(struct bnxt *bp) struct bnxt_pf_info *pf = &bp->pf; int rc; - HWRM_PREP(req, PORT_QSTATS); + HWRM_PREP(req, PORT_QSTATS, BNXT_USE_CHIMP_MB); req.port_id = rte_cpu_to_le_16(pf->port_id); req.tx_stat_host_addr = rte_cpu_to_le_64(bp->hw_tx_port_stats_map); req.rx_stat_host_addr = rte_cpu_to_le_64(bp->hw_rx_port_stats_map); - rc = bnxt_hwrm_send_message(bp, &req, sizeof(req)); + rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB); HWRM_CHECK_RESULT(); HWRM_UNLOCK(); @@ -3152,10 +3607,10 @@ int bnxt_hwrm_port_clr_stats(struct bnxt *bp) BNXT_NPAR(bp) || BNXT_MH(bp) || BNXT_TOTAL_VFS(bp)) return 0; - HWRM_PREP(req, PORT_CLR_STATS); + HWRM_PREP(req, PORT_CLR_STATS, BNXT_USE_CHIMP_MB); req.port_id = rte_cpu_to_le_16(pf->port_id); - rc = bnxt_hwrm_send_message(bp, &req, sizeof(req)); + rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB); HWRM_CHECK_RESULT(); HWRM_UNLOCK(); @@ -3172,9 +3627,9 @@ int bnxt_hwrm_port_led_qcaps(struct bnxt *bp) if (BNXT_VF(bp)) return 0; - HWRM_PREP(req, PORT_LED_QCAPS); + HWRM_PREP(req, PORT_LED_QCAPS, BNXT_USE_CHIMP_MB); req.port_id = bp->pf.port_id; - rc = bnxt_hwrm_send_message(bp, &req, sizeof(req)); + rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB); HWRM_CHECK_RESULT(); @@ -3214,7 +3669,7 @@ int bnxt_hwrm_port_led_cfg(struct bnxt *bp, bool led_on) if (!bp->num_leds || BNXT_VF(bp)) return -EOPNOTSUPP; - HWRM_PREP(req, PORT_LED_CFG); + HWRM_PREP(req, PORT_LED_CFG, BNXT_USE_CHIMP_MB); if (led_on) { led_state = HWRM_PORT_LED_CFG_INPUT_LED0_STATE_BLINKALT; @@ -3232,7 +3687,7 @@ int bnxt_hwrm_port_led_cfg(struct bnxt *bp, bool led_on) led_cfg->led_group_id = bp->leds[i].led_group_id; } - rc = bnxt_hwrm_send_message(bp, &req, sizeof(req)); + rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB); HWRM_CHECK_RESULT(); HWRM_UNLOCK(); @@ -3247,17 +3702,16 @@ int bnxt_hwrm_nvm_get_dir_info(struct bnxt *bp, uint32_t *entries, struct hwrm_nvm_get_dir_info_input req = {0}; struct hwrm_nvm_get_dir_info_output *resp = bp->hwrm_cmd_resp_addr; - HWRM_PREP(req, NVM_GET_DIR_INFO); + HWRM_PREP(req, NVM_GET_DIR_INFO, BNXT_USE_CHIMP_MB); - rc = bnxt_hwrm_send_message(bp, &req, sizeof(req)); + rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB); HWRM_CHECK_RESULT(); - HWRM_UNLOCK(); - if (!rc) { - *entries = rte_le_to_cpu_32(resp->entries); - *length = rte_le_to_cpu_32(resp->entry_length); - } + *entries 
= rte_le_to_cpu_32(resp->entries); + *length = rte_le_to_cpu_32(resp->entry_length); + + HWRM_UNLOCK(); return rc; } @@ -3287,22 +3741,21 @@ int bnxt_get_nvram_directory(struct bnxt *bp, uint32_t len, uint8_t *data) if (buf == NULL) return -ENOMEM; dma_handle = rte_mem_virt2iova(buf); - if (dma_handle == 0) { + if (dma_handle == RTE_BAD_IOVA) { PMD_DRV_LOG(ERR, "unable to map response address to physical memory\n"); return -ENOMEM; } - HWRM_PREP(req, NVM_GET_DIR_ENTRIES); + HWRM_PREP(req, NVM_GET_DIR_ENTRIES, BNXT_USE_CHIMP_MB); req.host_dest_addr = rte_cpu_to_le_64(dma_handle); - rc = bnxt_hwrm_send_message(bp, &req, sizeof(req)); - - HWRM_CHECK_RESULT(); - HWRM_UNLOCK(); + rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB); if (rc == 0) memcpy(data, buf, len > buflen ? buflen : len); rte_free(buf); + HWRM_CHECK_RESULT(); + HWRM_UNLOCK(); return rc; } @@ -3323,23 +3776,24 @@ int bnxt_hwrm_get_nvram_item(struct bnxt *bp, uint32_t index, return -ENOMEM; dma_handle = rte_mem_virt2iova(buf); - if (dma_handle == 0) { + if (dma_handle == RTE_BAD_IOVA) { PMD_DRV_LOG(ERR, "unable to map response address to physical memory\n"); return -ENOMEM; } - HWRM_PREP(req, NVM_READ); + HWRM_PREP(req, NVM_READ, BNXT_USE_CHIMP_MB); req.host_dest_addr = rte_cpu_to_le_64(dma_handle); req.dir_idx = rte_cpu_to_le_16(index); req.offset = rte_cpu_to_le_32(offset); req.len = rte_cpu_to_le_32(length); - rc = bnxt_hwrm_send_message(bp, &req, sizeof(req)); - HWRM_CHECK_RESULT(); - HWRM_UNLOCK(); + rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB); if (rc == 0) memcpy(data, buf, length); rte_free(buf); + HWRM_CHECK_RESULT(); + HWRM_UNLOCK(); + return rc; } @@ -3349,9 +3803,9 @@ int bnxt_hwrm_erase_nvram_directory(struct bnxt *bp, uint8_t index) struct hwrm_nvm_erase_dir_entry_input req = {0}; struct hwrm_nvm_erase_dir_entry_output *resp = bp->hwrm_cmd_resp_addr; - HWRM_PREP(req, NVM_ERASE_DIR_ENTRY); + HWRM_PREP(req, NVM_ERASE_DIR_ENTRY, BNXT_USE_CHIMP_MB); req.dir_idx = rte_cpu_to_le_16(index); - rc = bnxt_hwrm_send_message(bp, &req, sizeof(req)); + rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB); HWRM_CHECK_RESULT(); HWRM_UNLOCK(); @@ -3370,34 +3824,34 @@ int bnxt_hwrm_flash_nvram(struct bnxt *bp, uint16_t dir_type, rte_iova_t dma_handle; uint8_t *buf; - HWRM_PREP(req, NVM_WRITE); - - req.dir_type = rte_cpu_to_le_16(dir_type); - req.dir_ordinal = rte_cpu_to_le_16(dir_ordinal); - req.dir_ext = rte_cpu_to_le_16(dir_ext); - req.dir_attr = rte_cpu_to_le_16(dir_attr); - req.dir_data_length = rte_cpu_to_le_32(data_len); - buf = rte_malloc("nvm_write", data_len, 0); rte_mem_lock_page(buf); if (!buf) return -ENOMEM; dma_handle = rte_mem_virt2iova(buf); - if (dma_handle == 0) { + if (dma_handle == RTE_BAD_IOVA) { PMD_DRV_LOG(ERR, "unable to map response address to physical memory\n"); return -ENOMEM; } memcpy(buf, data, data_len); - req.host_src_addr = rte_cpu_to_le_64(dma_handle); - rc = bnxt_hwrm_send_message(bp, &req, sizeof(req)); + HWRM_PREP(req, NVM_WRITE, BNXT_USE_CHIMP_MB); + req.dir_type = rte_cpu_to_le_16(dir_type); + req.dir_ordinal = rte_cpu_to_le_16(dir_ordinal); + req.dir_ext = rte_cpu_to_le_16(dir_ext); + req.dir_attr = rte_cpu_to_le_16(dir_attr); + req.dir_data_length = rte_cpu_to_le_32(data_len); + req.host_src_addr = rte_cpu_to_le_64(dma_handle); + + rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB); + + rte_free(buf); HWRM_CHECK_RESULT(); HWRM_UNLOCK(); - rte_free(buf); return rc; } @@ -3434,29 +3888,20 @@ static int 
bnxt_hwrm_func_vf_vnic_query(struct bnxt *bp, uint16_t vf, int rc; /* First query all VNIC ids */ - HWRM_PREP(req, FUNC_VF_VNIC_IDS_QUERY); + HWRM_PREP(req, FUNC_VF_VNIC_IDS_QUERY, BNXT_USE_CHIMP_MB); req.vf_id = rte_cpu_to_le_16(bp->pf.first_vf_id + vf); req.max_vnic_id_cnt = rte_cpu_to_le_32(bp->pf.total_vnics); req.vnic_id_tbl_addr = rte_cpu_to_le_64(rte_mem_virt2iova(vnic_ids)); - if (req.vnic_id_tbl_addr == 0) { + if (req.vnic_id_tbl_addr == RTE_BAD_IOVA) { HWRM_UNLOCK(); PMD_DRV_LOG(ERR, "unable to map VNIC ID table address to physical memory\n"); return -ENOMEM; } - rc = bnxt_hwrm_send_message(bp, &req, sizeof(req)); - if (rc) { - HWRM_UNLOCK(); - PMD_DRV_LOG(ERR, "hwrm_func_vf_vnic_query failed rc:%d\n", rc); - return -1; - } else if (resp->error_code) { - rc = rte_le_to_cpu_16(resp->error_code); - HWRM_UNLOCK(); - PMD_DRV_LOG(ERR, "hwrm_func_vf_vnic_query error %d\n", rc); - return -1; - } + rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB); + HWRM_CHECK_RESULT(); rc = rte_le_to_cpu_32(resp->vnic_id_cnt); HWRM_UNLOCK(); @@ -3484,10 +3929,9 @@ int bnxt_hwrm_func_vf_vnic_query_and_config(struct bnxt *bp, uint16_t vf, vnic_id_sz = bp->pf.total_vnics * sizeof(*vnic_ids); vnic_ids = rte_malloc("bnxt_hwrm_vf_vnic_ids_query", vnic_id_sz, RTE_CACHE_LINE_SIZE); - if (vnic_ids == NULL) { - rc = -ENOMEM; - return rc; - } + if (vnic_ids == NULL) + return -ENOMEM; + for (sz = 0; sz < vnic_id_sz; sz += getpagesize()) rte_mem_lock_page(((char *)vnic_ids) + sz); @@ -3526,7 +3970,7 @@ int bnxt_hwrm_func_cfg_vf_set_vlan_anti_spoof(struct bnxt *bp, uint16_t vf, struct hwrm_func_cfg_input req = {0}; int rc; - HWRM_PREP(req, FUNC_CFG); + HWRM_PREP(req, FUNC_CFG, BNXT_USE_CHIMP_MB); req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid); req.enables |= rte_cpu_to_le_32( @@ -3534,7 +3978,7 @@ int bnxt_hwrm_func_cfg_vf_set_vlan_anti_spoof(struct bnxt *bp, uint16_t vf, req.vlan_antispoof_mode = on ? 
HWRM_FUNC_CFG_INPUT_VLAN_ANTISPOOF_MODE_VALIDATE_VLAN : HWRM_FUNC_CFG_INPUT_VLAN_ANTISPOOF_MODE_NOCHECK; - rc = bnxt_hwrm_send_message(bp, &req, sizeof(req)); + rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB); HWRM_CHECK_RESULT(); HWRM_UNLOCK(); @@ -3554,10 +3998,8 @@ int bnxt_hwrm_func_qcfg_vf_dflt_vnic_id(struct bnxt *bp, int vf) vnic_id_sz = bp->pf.total_vnics * sizeof(*vnic_ids); vnic_ids = rte_malloc("bnxt_hwrm_vf_vnic_ids_query", vnic_id_sz, RTE_CACHE_LINE_SIZE); - if (vnic_ids == NULL) { - rc = -ENOMEM; - return rc; - } + if (vnic_ids == NULL) + return -ENOMEM; for (sz = 0; sz < vnic_id_sz; sz += getpagesize()) rte_mem_lock_page(((char *)vnic_ids) + sz); @@ -3588,7 +4030,7 @@ int bnxt_hwrm_func_qcfg_vf_dflt_vnic_id(struct bnxt *bp, int vf) PMD_DRV_LOG(ERR, "No default VNIC\n"); exit: rte_free(vnic_ids); - return -1; + return rc; } int bnxt_hwrm_set_em_filter(struct bnxt *bp, @@ -3603,7 +4045,7 @@ int bnxt_hwrm_set_em_filter(struct bnxt *bp, if (filter->fw_em_filter_id != UINT64_MAX) bnxt_hwrm_clear_em_filter(bp, filter); - HWRM_PREP(req, CFA_EM_FLOW_ALLOC); + HWRM_PREP(req, CFA_EM_FLOW_ALLOC, BNXT_USE_KONG(bp)); req.flags = rte_cpu_to_le_32(filter->flags); @@ -3621,11 +4063,11 @@ int bnxt_hwrm_set_em_filter(struct bnxt *bp, if (enables & HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_SRC_MACADDR) memcpy(req.src_macaddr, filter->src_macaddr, - ETHER_ADDR_LEN); + RTE_ETHER_ADDR_LEN); if (enables & HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_DST_MACADDR) memcpy(req.dst_macaddr, filter->dst_macaddr, - ETHER_ADDR_LEN); + RTE_ETHER_ADDR_LEN); if (enables & HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_OVLAN_VID) req.ovlan_vid = filter->l2_ovlan; @@ -3656,7 +4098,7 @@ int bnxt_hwrm_set_em_filter(struct bnxt *bp, req.enables = rte_cpu_to_le_32(enables); - rc = bnxt_hwrm_send_message(bp, &req, sizeof(req)); + rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_KONG(bp)); HWRM_CHECK_RESULT(); @@ -3676,11 +4118,11 @@ int bnxt_hwrm_clear_em_filter(struct bnxt *bp, struct bnxt_filter_info *filter) return 0; PMD_DRV_LOG(ERR, "Clear EM filter\n"); - HWRM_PREP(req, CFA_EM_FLOW_FREE); + HWRM_PREP(req, CFA_EM_FLOW_FREE, BNXT_USE_KONG(bp)); req.em_filter_id = rte_cpu_to_le_64(filter->fw_em_filter_id); - rc = bnxt_hwrm_send_message(bp, &req, sizeof(req)); + rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_KONG(bp)); HWRM_CHECK_RESULT(); HWRM_UNLOCK(); @@ -3704,7 +4146,7 @@ int bnxt_hwrm_set_ntuple_filter(struct bnxt *bp, if (filter->fw_ntuple_filter_id != UINT64_MAX) bnxt_hwrm_clear_ntuple_filter(bp, filter); - HWRM_PREP(req, CFA_NTUPLE_FILTER_ALLOC); + HWRM_PREP(req, CFA_NTUPLE_FILTER_ALLOC, BNXT_USE_CHIMP_MB); req.flags = rte_cpu_to_le_32(filter->flags); @@ -3724,11 +4166,11 @@ int bnxt_hwrm_set_ntuple_filter(struct bnxt *bp, if (enables & HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_SRC_MACADDR) memcpy(req.src_macaddr, filter->src_macaddr, - ETHER_ADDR_LEN); + RTE_ETHER_ADDR_LEN); //if (enables & //HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_DST_MACADDR) //memcpy(req.dst_macaddr, filter->dst_macaddr, - //ETHER_ADDR_LEN); + //RTE_ETHER_ADDR_LEN); if (enables & HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_ETHERTYPE) req.ethertype = rte_cpu_to_be_16(filter->ethertype); @@ -3767,7 +4209,7 @@ int bnxt_hwrm_set_ntuple_filter(struct bnxt *bp, req.enables = rte_cpu_to_le_32(enables); - rc = bnxt_hwrm_send_message(bp, &req, sizeof(req)); + rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB); HWRM_CHECK_RESULT(); @@ -3788,11 +4230,11 @@ int bnxt_hwrm_clear_ntuple_filter(struct bnxt 
*bp, if (filter->fw_ntuple_filter_id == UINT64_MAX) return 0; - HWRM_PREP(req, CFA_NTUPLE_FILTER_FREE); + HWRM_PREP(req, CFA_NTUPLE_FILTER_FREE, BNXT_USE_CHIMP_MB); req.ntuple_filter_id = rte_cpu_to_le_64(filter->fw_ntuple_filter_id); - rc = bnxt_hwrm_send_message(bp, &req, sizeof(req)); + rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB); HWRM_CHECK_RESULT(); HWRM_UNLOCK(); @@ -3802,10 +4244,90 @@ int bnxt_hwrm_clear_ntuple_filter(struct bnxt *bp, return 0; } +static int +bnxt_vnic_rss_configure_thor(struct bnxt *bp, struct bnxt_vnic_info *vnic) +{ + struct hwrm_vnic_rss_cfg_output *resp = bp->hwrm_cmd_resp_addr; + uint8_t *rx_queue_state = bp->eth_dev->data->rx_queue_state; + struct hwrm_vnic_rss_cfg_input req = {.req_type = 0 }; + struct bnxt_rx_queue **rxqs = bp->rx_queues; + uint16_t *ring_tbl = vnic->rss_table; + int nr_ctxs = vnic->num_lb_ctxts; + int max_rings = bp->rx_nr_rings; + int i, j, k, cnt; + int rc = 0; + + for (i = 0, k = 0; i < nr_ctxs; i++) { + struct bnxt_rx_ring_info *rxr; + struct bnxt_cp_ring_info *cpr; + + HWRM_PREP(req, VNIC_RSS_CFG, BNXT_USE_CHIMP_MB); + + req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id); + req.hash_type = rte_cpu_to_le_32(vnic->hash_type); + req.hash_mode_flags = vnic->hash_mode; + + req.ring_grp_tbl_addr = + rte_cpu_to_le_64(vnic->rss_table_dma_addr + + i * BNXT_RSS_ENTRIES_PER_CTX_THOR * + 2 * sizeof(*ring_tbl)); + req.hash_key_tbl_addr = + rte_cpu_to_le_64(vnic->rss_hash_key_dma_addr); + + req.ring_table_pair_index = i; + req.rss_ctx_idx = rte_cpu_to_le_16(vnic->fw_grp_ids[i]); + + for (j = 0; j < 64; j++) { + uint16_t ring_id; + + /* Find next active ring. */ + for (cnt = 0; cnt < max_rings; cnt++) { + if (rx_queue_state[k] != + RTE_ETH_QUEUE_STATE_STOPPED) + break; + if (++k == max_rings) + k = 0; + } + + /* Return if no rings are active. */ + if (cnt == max_rings) + return 0; + + /* Add rx/cp ring pair to RSS table. 
*/ + rxr = rxqs[k]->rx_ring; + cpr = rxqs[k]->cp_ring; + + ring_id = rxr->rx_ring_struct->fw_ring_id; + *ring_tbl++ = rte_cpu_to_le_16(ring_id); + ring_id = cpr->cp_ring_struct->fw_ring_id; + *ring_tbl++ = rte_cpu_to_le_16(ring_id); + + if (++k == max_rings) + k = 0; + } + rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), + BNXT_USE_CHIMP_MB); + + HWRM_CHECK_RESULT(); + HWRM_UNLOCK(); + } + + return rc; +} + int bnxt_vnic_rss_configure(struct bnxt *bp, struct bnxt_vnic_info *vnic) { unsigned int rss_idx, fw_idx, i; + if (!(vnic->rss_table && vnic->hash_type)) + return 0; + + if (BNXT_CHIP_THOR(bp)) + return bnxt_vnic_rss_configure_thor(bp, vnic); + + if (vnic->fw_vnic_id == INVALID_HW_RING_ID) + return 0; + if (vnic->rss_table && vnic->hash_type) { /* * Fill the RSS hash & redirection table with @@ -3822,11 +4344,11 @@ int bnxt_vnic_rss_configure(struct bnxt *bp, struct bnxt_vnic_info *vnic) } if (i == bp->rx_cp_nr_rings) return 0; - vnic->rss_table[rss_idx] = - vnic->fw_grp_ids[fw_idx]; + vnic->rss_table[rss_idx] = vnic->fw_grp_ids[fw_idx]; } return bnxt_hwrm_vnic_rss_cfg(bp, vnic); } + return 0; } @@ -3860,6 +4382,34 @@ static void bnxt_hwrm_set_coal_params(struct bnxt_coal *hw_coal, req->flags = rte_cpu_to_le_16(flags); } +static int bnxt_hwrm_set_coal_params_thor(struct bnxt *bp, + struct hwrm_ring_cmpl_ring_cfg_aggint_params_input *agg_req) +{ + struct hwrm_ring_aggint_qcaps_input req = {0}; + struct hwrm_ring_aggint_qcaps_output *resp = bp->hwrm_cmd_resp_addr; + uint32_t enables; + uint16_t flags; + int rc; + + HWRM_PREP(req, RING_AGGINT_QCAPS, BNXT_USE_CHIMP_MB); + rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB); + HWRM_CHECK_RESULT(); + + agg_req->num_cmpl_dma_aggr = resp->num_cmpl_dma_aggr_max; + agg_req->cmpl_aggr_dma_tmr = resp->cmpl_aggr_dma_tmr_min; + + flags = HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS_INPUT_FLAGS_TIMER_RESET | + HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS_INPUT_FLAGS_RING_IDLE; + agg_req->flags = rte_cpu_to_le_16(flags); + enables = + HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS_INPUT_ENABLES_CMPL_AGGR_DMA_TMR | + HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS_INPUT_ENABLES_NUM_CMPL_DMA_AGGR; + agg_req->enables = rte_cpu_to_le_32(enables); + + HWRM_UNLOCK(); + return rc; +} + int bnxt_hwrm_set_ring_coal(struct bnxt *bp, struct bnxt_coal *coal, uint16_t ring_id) { @@ -3868,15 +4418,576 @@ int bnxt_hwrm_set_ring_coal(struct bnxt *bp, bp->hwrm_cmd_resp_addr; int rc; - /* Set ring coalesce parameters only for Stratus 100G NIC */ - if (!bnxt_stratus_device(bp)) + /* Set ring coalesce parameters only for 100G NICs */ + if (BNXT_CHIP_THOR(bp)) { + if (bnxt_hwrm_set_coal_params_thor(bp, &req)) + return -1; + } else if (bnxt_stratus_device(bp)) { + bnxt_hwrm_set_coal_params(coal, &req); + } else { return 0; + } - HWRM_PREP(req, RING_CMPL_RING_CFG_AGGINT_PARAMS); - bnxt_hwrm_set_coal_params(coal, &req); + HWRM_PREP(req, RING_CMPL_RING_CFG_AGGINT_PARAMS, BNXT_USE_CHIMP_MB); req.ring_id = rte_cpu_to_le_16(ring_id); - rc = bnxt_hwrm_send_message(bp, &req, sizeof(req)); + rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB); + HWRM_CHECK_RESULT(); + HWRM_UNLOCK(); + return 0; +} + +#define BNXT_RTE_MEMZONE_FLAG (RTE_MEMZONE_1GB | RTE_MEMZONE_IOVA_CONTIG) +int bnxt_hwrm_func_backing_store_qcaps(struct bnxt *bp) +{ + struct hwrm_func_backing_store_qcaps_input req = {0}; + struct hwrm_func_backing_store_qcaps_output *resp = + bp->hwrm_cmd_resp_addr; + struct bnxt_ctx_pg_info *ctx_pg; + struct bnxt_ctx_mem_info *ctx; + int total_alloc_len; + int rc, i; + + if 
(!BNXT_CHIP_THOR(bp) || + bp->hwrm_spec_code < HWRM_VERSION_1_9_2 || + BNXT_VF(bp) || + bp->ctx) + return 0; + + HWRM_PREP(req, FUNC_BACKING_STORE_QCAPS, BNXT_USE_CHIMP_MB); + rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB); + HWRM_CHECK_RESULT_SILENT(); + + total_alloc_len = sizeof(*ctx); + ctx = rte_zmalloc("bnxt_ctx_mem", total_alloc_len, + RTE_CACHE_LINE_SIZE); + if (!ctx) { + rc = -ENOMEM; + goto ctx_err; + } + + ctx_pg = rte_malloc("bnxt_ctx_pg_mem", + sizeof(*ctx_pg) * BNXT_MAX_Q, + RTE_CACHE_LINE_SIZE); + if (!ctx_pg) { + rc = -ENOMEM; + goto ctx_err; + } + for (i = 0; i < BNXT_MAX_Q; i++, ctx_pg++) + ctx->tqm_mem[i] = ctx_pg; + + bp->ctx = ctx; + ctx->qp_max_entries = rte_le_to_cpu_32(resp->qp_max_entries); + ctx->qp_min_qp1_entries = + rte_le_to_cpu_16(resp->qp_min_qp1_entries); + ctx->qp_max_l2_entries = + rte_le_to_cpu_16(resp->qp_max_l2_entries); + ctx->qp_entry_size = rte_le_to_cpu_16(resp->qp_entry_size); + ctx->srq_max_l2_entries = + rte_le_to_cpu_16(resp->srq_max_l2_entries); + ctx->srq_max_entries = rte_le_to_cpu_32(resp->srq_max_entries); + ctx->srq_entry_size = rte_le_to_cpu_16(resp->srq_entry_size); + ctx->cq_max_l2_entries = + rte_le_to_cpu_16(resp->cq_max_l2_entries); + ctx->cq_max_entries = rte_le_to_cpu_32(resp->cq_max_entries); + ctx->cq_entry_size = rte_le_to_cpu_16(resp->cq_entry_size); + ctx->vnic_max_vnic_entries = + rte_le_to_cpu_16(resp->vnic_max_vnic_entries); + ctx->vnic_max_ring_table_entries = + rte_le_to_cpu_16(resp->vnic_max_ring_table_entries); + ctx->vnic_entry_size = rte_le_to_cpu_16(resp->vnic_entry_size); + ctx->stat_max_entries = + rte_le_to_cpu_32(resp->stat_max_entries); + ctx->stat_entry_size = rte_le_to_cpu_16(resp->stat_entry_size); + ctx->tqm_entry_size = rte_le_to_cpu_16(resp->tqm_entry_size); + ctx->tqm_min_entries_per_ring = + rte_le_to_cpu_32(resp->tqm_min_entries_per_ring); + ctx->tqm_max_entries_per_ring = + rte_le_to_cpu_32(resp->tqm_max_entries_per_ring); + ctx->tqm_entries_multiple = resp->tqm_entries_multiple; + if (!ctx->tqm_entries_multiple) + ctx->tqm_entries_multiple = 1; + ctx->mrav_max_entries = + rte_le_to_cpu_32(resp->mrav_max_entries); + ctx->mrav_entry_size = rte_le_to_cpu_16(resp->mrav_entry_size); + ctx->tim_entry_size = rte_le_to_cpu_16(resp->tim_entry_size); + ctx->tim_max_entries = rte_le_to_cpu_32(resp->tim_max_entries); +ctx_err: + HWRM_UNLOCK(); + return rc; +} + +int bnxt_hwrm_func_backing_store_cfg(struct bnxt *bp, uint32_t enables) +{ + struct hwrm_func_backing_store_cfg_input req = {0}; + struct hwrm_func_backing_store_cfg_output *resp = + bp->hwrm_cmd_resp_addr; + struct bnxt_ctx_mem_info *ctx = bp->ctx; + struct bnxt_ctx_pg_info *ctx_pg; + uint32_t *num_entries; + uint64_t *pg_dir; + uint8_t *pg_attr; + uint32_t ena; + int i, rc; + + if (!ctx) + return 0; + + HWRM_PREP(req, FUNC_BACKING_STORE_CFG, BNXT_USE_CHIMP_MB); + req.enables = rte_cpu_to_le_32(enables); + + if (enables & HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_QP) { + ctx_pg = &ctx->qp_mem; + req.qp_num_entries = rte_cpu_to_le_32(ctx_pg->entries); + req.qp_num_qp1_entries = + rte_cpu_to_le_16(ctx->qp_min_qp1_entries); + req.qp_num_l2_entries = + rte_cpu_to_le_16(ctx->qp_max_l2_entries); + req.qp_entry_size = rte_cpu_to_le_16(ctx->qp_entry_size); + bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem, + &req.qpc_pg_size_qpc_lvl, + &req.qpc_page_dir); + } + + if (enables & HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_SRQ) { + ctx_pg = &ctx->srq_mem; + req.srq_num_entries = rte_cpu_to_le_32(ctx_pg->entries); + req.srq_num_l2_entries = + 
rte_cpu_to_le_16(ctx->srq_max_l2_entries); + req.srq_entry_size = rte_cpu_to_le_16(ctx->srq_entry_size); + bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem, + &req.srq_pg_size_srq_lvl, + &req.srq_page_dir); + } + + if (enables & HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_CQ) { + ctx_pg = &ctx->cq_mem; + req.cq_num_entries = rte_cpu_to_le_32(ctx_pg->entries); + req.cq_num_l2_entries = + rte_cpu_to_le_16(ctx->cq_max_l2_entries); + req.cq_entry_size = rte_cpu_to_le_16(ctx->cq_entry_size); + bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem, + &req.cq_pg_size_cq_lvl, + &req.cq_page_dir); + } + + if (enables & HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_VNIC) { + ctx_pg = &ctx->vnic_mem; + req.vnic_num_vnic_entries = + rte_cpu_to_le_16(ctx->vnic_max_vnic_entries); + req.vnic_num_ring_table_entries = + rte_cpu_to_le_16(ctx->vnic_max_ring_table_entries); + req.vnic_entry_size = rte_cpu_to_le_16(ctx->vnic_entry_size); + bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem, + &req.vnic_pg_size_vnic_lvl, + &req.vnic_page_dir); + } + + if (enables & HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_STAT) { + ctx_pg = &ctx->stat_mem; + req.stat_num_entries = rte_cpu_to_le_16(ctx->stat_max_entries); + req.stat_entry_size = rte_cpu_to_le_16(ctx->stat_entry_size); + bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem, + &req.stat_pg_size_stat_lvl, + &req.stat_page_dir); + } + + req.tqm_entry_size = rte_cpu_to_le_16(ctx->tqm_entry_size); + num_entries = &req.tqm_sp_num_entries; + pg_attr = &req.tqm_sp_pg_size_tqm_sp_lvl; + pg_dir = &req.tqm_sp_page_dir; + ena = HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_TQM_SP; + for (i = 0; i < 9; i++, num_entries++, pg_attr++, pg_dir++, ena <<= 1) { + if (!(enables & ena)) + continue; + + req.tqm_entry_size = rte_cpu_to_le_16(ctx->tqm_entry_size); + + ctx_pg = ctx->tqm_mem[i]; + *num_entries = rte_cpu_to_le_16(ctx_pg->entries); + bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem, pg_attr, pg_dir); + } + + rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB); + HWRM_CHECK_RESULT(); + HWRM_UNLOCK(); + + return rc; +} + +int bnxt_hwrm_ext_port_qstats(struct bnxt *bp) +{ + struct hwrm_port_qstats_ext_input req = {0}; + struct hwrm_port_qstats_ext_output *resp = bp->hwrm_cmd_resp_addr; + struct bnxt_pf_info *pf = &bp->pf; + int rc; + + if (!(bp->flags & BNXT_FLAG_EXT_RX_PORT_STATS || + bp->flags & BNXT_FLAG_EXT_TX_PORT_STATS)) + return 0; + + HWRM_PREP(req, PORT_QSTATS_EXT, BNXT_USE_CHIMP_MB); + + req.port_id = rte_cpu_to_le_16(pf->port_id); + if (bp->flags & BNXT_FLAG_EXT_TX_PORT_STATS) { + req.tx_stat_host_addr = + rte_cpu_to_le_64(bp->hw_tx_port_stats_ext_map); + req.tx_stat_size = + rte_cpu_to_le_16(sizeof(struct tx_port_stats_ext)); + } + if (bp->flags & BNXT_FLAG_EXT_RX_PORT_STATS) { + req.rx_stat_host_addr = + rte_cpu_to_le_64(bp->hw_rx_port_stats_ext_map); + req.rx_stat_size = + rte_cpu_to_le_16(sizeof(struct rx_port_stats_ext)); + } + rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB); + + if (rc) { + bp->fw_rx_port_stats_ext_size = 0; + bp->fw_tx_port_stats_ext_size = 0; + } else { + bp->fw_rx_port_stats_ext_size = + rte_le_to_cpu_16(resp->rx_stat_size); + bp->fw_tx_port_stats_ext_size = + rte_le_to_cpu_16(resp->tx_stat_size); + } + + HWRM_CHECK_RESULT(); + HWRM_UNLOCK(); + + return rc; +} + +int +bnxt_hwrm_tunnel_redirect(struct bnxt *bp, uint8_t type) +{ + struct hwrm_cfa_redirect_tunnel_type_alloc_input req = {0}; + struct hwrm_cfa_redirect_tunnel_type_alloc_output *resp = + bp->hwrm_cmd_resp_addr; + int rc = 0; + + HWRM_PREP(req, CFA_REDIRECT_TUNNEL_TYPE_ALLOC, BNXT_USE_CHIMP_MB); + 
req.tunnel_type = type; + req.dest_fid = bp->fw_fid; + rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB); + HWRM_CHECK_RESULT(); + + HWRM_UNLOCK(); + + return rc; +} + +int +bnxt_hwrm_tunnel_redirect_free(struct bnxt *bp, uint8_t type) +{ + struct hwrm_cfa_redirect_tunnel_type_free_input req = {0}; + struct hwrm_cfa_redirect_tunnel_type_free_output *resp = + bp->hwrm_cmd_resp_addr; + int rc = 0; + + HWRM_PREP(req, CFA_REDIRECT_TUNNEL_TYPE_FREE, BNXT_USE_CHIMP_MB); + req.tunnel_type = type; + req.dest_fid = bp->fw_fid; + rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB); + HWRM_CHECK_RESULT(); + + HWRM_UNLOCK(); + + return rc; +} + +int bnxt_hwrm_tunnel_redirect_query(struct bnxt *bp, uint32_t *type) +{ + struct hwrm_cfa_redirect_query_tunnel_type_input req = {0}; + struct hwrm_cfa_redirect_query_tunnel_type_output *resp = + bp->hwrm_cmd_resp_addr; + int rc = 0; + + HWRM_PREP(req, CFA_REDIRECT_QUERY_TUNNEL_TYPE, BNXT_USE_CHIMP_MB); + req.src_fid = bp->fw_fid; + rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB); + HWRM_CHECK_RESULT(); + + if (type) + *type = rte_le_to_cpu_32(resp->tunnel_mask); + + HWRM_UNLOCK(); + + return rc; +} + +int bnxt_hwrm_tunnel_redirect_info(struct bnxt *bp, uint8_t tun_type, + uint16_t *dst_fid) +{ + struct hwrm_cfa_redirect_tunnel_type_info_input req = {0}; + struct hwrm_cfa_redirect_tunnel_type_info_output *resp = + bp->hwrm_cmd_resp_addr; + int rc = 0; + + HWRM_PREP(req, CFA_REDIRECT_TUNNEL_TYPE_INFO, BNXT_USE_CHIMP_MB); + req.src_fid = bp->fw_fid; + req.tunnel_type = tun_type; + rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB); HWRM_CHECK_RESULT(); + + if (dst_fid) + *dst_fid = rte_le_to_cpu_16(resp->dest_fid); + + PMD_DRV_LOG(DEBUG, "dst_fid: %x\n", resp->dest_fid); + HWRM_UNLOCK(); + + return rc; +} + +int bnxt_hwrm_set_mac(struct bnxt *bp) +{ + struct hwrm_func_vf_cfg_output *resp = bp->hwrm_cmd_resp_addr; + struct hwrm_func_vf_cfg_input req = {0}; + int rc = 0; + + if (!BNXT_VF(bp)) + return 0; + + HWRM_PREP(req, FUNC_VF_CFG, BNXT_USE_CHIMP_MB); + + req.enables = + rte_cpu_to_le_32(HWRM_FUNC_VF_CFG_INPUT_ENABLES_DFLT_MAC_ADDR); + memcpy(req.dflt_mac_addr, bp->mac_addr, RTE_ETHER_ADDR_LEN); + + rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB); + + HWRM_CHECK_RESULT(); + + memcpy(bp->dflt_mac_addr, bp->mac_addr, RTE_ETHER_ADDR_LEN); + HWRM_UNLOCK(); + + return rc; +} + +int bnxt_hwrm_if_change(struct bnxt *bp, bool up) +{ + struct hwrm_func_drv_if_change_output *resp = bp->hwrm_cmd_resp_addr; + struct hwrm_func_drv_if_change_input req = {0}; + uint32_t flags; + int rc; + + if (!(bp->flags & BNXT_FLAG_FW_CAP_IF_CHANGE)) + return 0; + + /* Do not issue FUNC_DRV_IF_CHANGE during reset recovery. 
+ * If we issue FUNC_DRV_IF_CHANGE with flags down before + * FUNC_DRV_UNRGTR, FW resets before FUNC_DRV_UNRGTR + */ + if (!up && (bp->flags & BNXT_FLAG_FW_RESET)) + return 0; + + HWRM_PREP(req, FUNC_DRV_IF_CHANGE, BNXT_USE_CHIMP_MB); + + if (up) + req.flags = + rte_cpu_to_le_32(HWRM_FUNC_DRV_IF_CHANGE_INPUT_FLAGS_UP); + + rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB); + + HWRM_CHECK_RESULT(); + flags = rte_le_to_cpu_32(resp->flags); + HWRM_UNLOCK(); + + if (flags & HWRM_FUNC_DRV_IF_CHANGE_OUTPUT_FLAGS_HOT_FW_RESET_DONE) { + PMD_DRV_LOG(INFO, "FW reset happened while port was down\n"); + bp->flags |= BNXT_FLAG_IF_CHANGE_HOT_FW_RESET_DONE; + } + return 0; } + +int bnxt_hwrm_error_recovery_qcfg(struct bnxt *bp) +{ + struct hwrm_error_recovery_qcfg_output *resp = bp->hwrm_cmd_resp_addr; + struct bnxt_error_recovery_info *info = bp->recovery_info; + struct hwrm_error_recovery_qcfg_input req = {0}; + uint32_t flags = 0; + unsigned int i; + int rc; + + /* Older FW does not have error recovery support */ + if (!(bp->flags & BNXT_FLAG_FW_CAP_ERROR_RECOVERY)) + return 0; + + if (!info) { + info = rte_zmalloc("bnxt_hwrm_error_recovery_qcfg", + sizeof(*info), 0); + bp->recovery_info = info; + if (info == NULL) + return -ENOMEM; + } else { + memset(info, 0, sizeof(*info)); + } + + HWRM_PREP(req, ERROR_RECOVERY_QCFG, BNXT_USE_CHIMP_MB); + + rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB); + + HWRM_CHECK_RESULT(); + + flags = rte_le_to_cpu_32(resp->flags); + if (flags & HWRM_ERROR_RECOVERY_QCFG_OUTPUT_FLAGS_HOST) + info->flags |= BNXT_FLAG_ERROR_RECOVERY_HOST; + else if (flags & HWRM_ERROR_RECOVERY_QCFG_OUTPUT_FLAGS_CO_CPU) + info->flags |= BNXT_FLAG_ERROR_RECOVERY_CO_CPU; + + if ((info->flags & BNXT_FLAG_ERROR_RECOVERY_CO_CPU) && + !(bp->flags & BNXT_FLAG_KONG_MB_EN)) { + rc = -EINVAL; + goto err; + } + + /* FW returned values are in units of 100msec */ + info->driver_polling_freq = + rte_le_to_cpu_32(resp->driver_polling_freq) * 100; + info->master_func_wait_period = + rte_le_to_cpu_32(resp->master_func_wait_period) * 100; + info->normal_func_wait_period = + rte_le_to_cpu_32(resp->normal_func_wait_period) * 100; + info->master_func_wait_period_after_reset = + rte_le_to_cpu_32(resp->master_func_wait_period_after_reset) * 100; + info->max_bailout_time_after_reset = + rte_le_to_cpu_32(resp->max_bailout_time_after_reset) * 100; + info->status_regs[BNXT_FW_STATUS_REG] = + rte_le_to_cpu_32(resp->fw_health_status_reg); + info->status_regs[BNXT_FW_HEARTBEAT_CNT_REG] = + rte_le_to_cpu_32(resp->fw_heartbeat_reg); + info->status_regs[BNXT_FW_RECOVERY_CNT_REG] = + rte_le_to_cpu_32(resp->fw_reset_cnt_reg); + info->status_regs[BNXT_FW_RESET_INPROG_REG] = + rte_le_to_cpu_32(resp->reset_inprogress_reg); + info->reg_array_cnt = + rte_le_to_cpu_32(resp->reg_array_cnt); + + if (info->reg_array_cnt >= BNXT_NUM_RESET_REG) { + rc = -EINVAL; + goto err; + } + + for (i = 0; i < info->reg_array_cnt; i++) { + info->reset_reg[i] = + rte_le_to_cpu_32(resp->reset_reg[i]); + info->reset_reg_val[i] = + rte_le_to_cpu_32(resp->reset_reg_val[i]); + info->delay_after_reset[i] = + resp->delay_after_reset[i]; + } +err: + HWRM_UNLOCK(); + + /* Map the FW status registers */ + if (!rc) + rc = bnxt_map_fw_health_status_regs(bp); + + if (rc) { + rte_free(bp->recovery_info); + bp->recovery_info = NULL; + } + return rc; +} + +int bnxt_hwrm_fw_reset(struct bnxt *bp) +{ + struct hwrm_fw_reset_output *resp = bp->hwrm_cmd_resp_addr; + struct hwrm_fw_reset_input req = {0}; + int rc; + + if 
(!BNXT_PF(bp)) + return -EOPNOTSUPP; + + HWRM_PREP(req, FW_RESET, BNXT_USE_KONG(bp)); + + req.embedded_proc_type = + HWRM_FW_RESET_INPUT_EMBEDDED_PROC_TYPE_CHIP; + req.selfrst_status = + HWRM_FW_RESET_INPUT_SELFRST_STATUS_SELFRSTASAP; + req.flags = HWRM_FW_RESET_INPUT_FLAGS_RESET_GRACEFUL; + + rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), + BNXT_USE_KONG(bp)); + + HWRM_CHECK_RESULT(); + HWRM_UNLOCK(); + + return rc; +} + +int bnxt_hwrm_port_ts_query(struct bnxt *bp, uint8_t path, uint64_t *timestamp) +{ + struct hwrm_port_ts_query_output *resp = bp->hwrm_cmd_resp_addr; + struct hwrm_port_ts_query_input req = {0}; + struct bnxt_ptp_cfg *ptp = bp->ptp_cfg; + uint32_t flags = 0; + int rc; + + if (!ptp) + return 0; + + HWRM_PREP(req, PORT_TS_QUERY, BNXT_USE_CHIMP_MB); + + switch (path) { + case BNXT_PTP_FLAGS_PATH_TX: + flags |= HWRM_PORT_TS_QUERY_INPUT_FLAGS_PATH_TX; + break; + case BNXT_PTP_FLAGS_PATH_RX: + flags |= HWRM_PORT_TS_QUERY_INPUT_FLAGS_PATH_RX; + break; + case BNXT_PTP_FLAGS_CURRENT_TIME: + flags |= HWRM_PORT_TS_QUERY_INPUT_FLAGS_CURRENT_TIME; + break; + } + + req.flags = rte_cpu_to_le_32(flags); + req.port_id = rte_cpu_to_le_16(bp->pf.port_id); + + rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB); + + HWRM_CHECK_RESULT(); + + if (timestamp) { + *timestamp = rte_le_to_cpu_32(resp->ptp_msg_ts[0]); + *timestamp |= + (uint64_t)(rte_le_to_cpu_32(resp->ptp_msg_ts[1])) << 32; + } + HWRM_UNLOCK(); + + return rc; +} + +int bnxt_hwrm_cfa_adv_flow_mgmt_qcaps(struct bnxt *bp) +{ + struct hwrm_cfa_adv_flow_mgnt_qcaps_output *resp = + bp->hwrm_cmd_resp_addr; + struct hwrm_cfa_adv_flow_mgnt_qcaps_input req = {0}; + uint32_t flags = 0; + int rc = 0; + + if (!(bp->flags & BNXT_FLAG_ADV_FLOW_MGMT)) + return rc; + + if (!(BNXT_PF(bp) || BNXT_VF_IS_TRUSTED(bp))) { + PMD_DRV_LOG(DEBUG, + "Not a PF or trusted VF. Command not supported\n"); + return 0; + } + + HWRM_PREP(req, CFA_ADV_FLOW_MGNT_QCAPS, BNXT_USE_KONG(bp)); + rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_KONG(bp)); + + HWRM_CHECK_RESULT(); + flags = rte_le_to_cpu_32(resp->flags); + HWRM_UNLOCK(); + + if (flags & HWRM_CFA_ADV_FLOW_MGNT_QCAPS_L2_HDR_SRC_FILTER_EN) { + bp->flow_flags |= BNXT_FLOW_FLAG_L2_HDR_SRC_FILTER_EN; + PMD_DRV_LOG(INFO, "Source L2 header filtering enabled\n"); + } + + return rc; +}
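
The bnxt_clear_hwrm_vnic_flows() hunk replaces STAILQ_FOREACH() with a head-first drain: the old loop kept following a node's next pointer after the body had torn the node down, while the new form re-reads STAILQ_FIRST() on every iteration, so a freed element is never dereferenced. A minimal standalone sketch of the same pattern, with a hypothetical struct flow standing in for the driver's flow objects:

#include <stdio.h>
#include <stdlib.h>
#include <sys/queue.h>

/* Hypothetical stand-in for the driver's flow objects. */
struct flow {
	int id;
	STAILQ_ENTRY(flow) next;
};
STAILQ_HEAD(flow_list, flow);

/* Drain head-first: unlink each element before freeing it, so the
 * list head is never left pointing at released memory. */
static void drain_flows(struct flow_list *list)
{
	while (!STAILQ_EMPTY(list)) {
		struct flow *f = STAILQ_FIRST(list);

		STAILQ_REMOVE_HEAD(list, next);
		printf("freeing flow %d\n", f->id);
		free(f);
	}
}

int main(void)
{
	struct flow_list list = STAILQ_HEAD_INITIALIZER(list);
	int i;

	for (i = 0; i < 3; i++) {
		struct flow *f = malloc(sizeof(*f));

		f->id = i;
		STAILQ_INSERT_TAIL(&list, f, next);
	}
	drain_flows(&list);
	return 0;
}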
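In bnxt_vnic_rss_configure_thor() the redirection table is filled with pairs of (receive ring ID, completion ring ID), and the k/cnt bookkeeping skips rx queues whose state is RTE_ETH_QUEUE_STATE_STOPPED, wrapping around the queue set; a full scan that finds nothing active ends the configuration early. The same skip-and-wrap fill, reduced to plain arrays (all names and sizes below are illustrative, not the driver's):

#include <stdint.h>
#include <stdio.h>

#define NUM_QUEUES 4
#define TBL_SLOTS  8	/* pairs per context; the HW tables are larger */

/* Fill tbl with rx/cp ring-ID pairs from started queues only.
 * Returns the number of uint16_t entries written. */
static int fill_rss_table(uint16_t *tbl, const uint8_t *started,
			  const uint16_t *rx_id, const uint16_t *cp_id)
{
	int i, k = 0, cnt, n = 0;

	for (i = 0; i < TBL_SLOTS; i++) {
		/* Find the next active queue, scanning at most one lap. */
		for (cnt = 0; cnt < NUM_QUEUES; cnt++) {
			if (started[k])
				break;
			if (++k == NUM_QUEUES)
				k = 0;
		}
		if (cnt == NUM_QUEUES)
			return n;	/* no queue is active */

		tbl[n++] = rx_id[k];	/* rx ring of queue k */
		tbl[n++] = cp_id[k];	/* its completion ring */

		if (++k == NUM_QUEUES)
			k = 0;
	}
	return n;
}

int main(void)
{
	const uint8_t started[NUM_QUEUES] = { 1, 0, 1, 1 };
	const uint16_t rx_id[NUM_QUEUES] = { 10, 11, 12, 13 };
	const uint16_t cp_id[NUM_QUEUES] = { 20, 21, 22, 23 };
	uint16_t tbl[TBL_SLOTS * 2];
	int i, n = fill_rss_table(tbl, started, rx_id, cp_id);

	for (i = 0; i < n; i += 2)
		printf("rx %u -> cp %u\n", tbl[i], tbl[i + 1]);
	return 0;
}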
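Several hunks also switch rte_mem_virt2iova() failure checks from == 0 to == RTE_BAD_IOVA, which is the value the API actually returns on error, so 0 is not a reliable failure indicator. A sketch of that allocation pattern using DPDK's allocator; it assumes an initialized EAL and is not a helper from this driver:

#include <rte_malloc.h>
#include <rte_memory.h>

/* Allocate a zeroed buffer for device DMA and validate its IOVA.
 * rte_mem_virt2iova() reports failure as RTE_BAD_IOVA, not 0. */
static void *alloc_dma_buf(size_t len, rte_iova_t *iova)
{
	void *buf = rte_zmalloc("dma_buf", len, 0);

	if (buf == NULL)
		return NULL;

	*iova = rte_mem_virt2iova(buf);
	if (*iova == RTE_BAD_IOVA) {
		rte_free(buf);
		return NULL;
	}
	return buf;
}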
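Finally, bnxt_hwrm_port_ts_query() assembles a 64-bit timestamp from the two 32-bit words in resp->ptp_msg_ts; the (uint64_t) cast before the 32-bit shift is what keeps the high word from being lost. The arithmetic in isolation, with plain host-endian values standing in for the rte_le_to_cpu_32() conversions:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* Combine low/high halves; without the cast, hi << 32 would be
 * undefined behavior on a 32-bit operand. */
static uint64_t combine_ts(uint32_t lo, uint32_t hi)
{
	return (uint64_t)lo | ((uint64_t)hi << 32);
}

int main(void)
{
	/* hi = 0x1, lo = 0x23456789 -> 0x123456789 */
	printf("ts = 0x%" PRIx64 "\n", combine_ts(0x23456789u, 0x1u));
	return 0;
}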