#include <rte_malloc.h>
#include <rte_memzone.h>
#include <rte_version.h>
+#include <rte_io.h>
#include "bnxt.h"
-#include "bnxt_cpr.h"
#include "bnxt_filter.h"
#include "bnxt_hwrm.h"
#include "bnxt_rxq.h"
#include "bnxt_vnic.h"
#include "hsi_struct_def_dpdk.h"
-#include <rte_io.h>
-
-#define HWRM_CMD_TIMEOUT 6000000
#define HWRM_SPEC_CODE_1_8_3 0x10803
#define HWRM_VERSION_1_9_1 0x10901
+#define HWRM_VERSION_1_9_2 0x10903
struct bnxt_plcmodes_cfg {
uint32_t flags;
return 1 << page_getenum(size);
}
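+/*
+ * Fill the page-size/level attribute and page directory pointer for a
+ * backing-store ring: multi-page rings are referenced indirectly
+ * through their page table, while single-page rings point directly at
+ * the lone data page.
+ */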
+static void bnxt_hwrm_set_pg_attr(struct bnxt_ring_mem_info *rmem,
+ uint8_t *pg_attr,
+ uint64_t *pg_dir)
+{
+ if (rmem->nr_pages > 1) {
+ *pg_attr = 1;
+ *pg_dir = rte_cpu_to_le_64(rmem->pg_tbl_map);
+ } else {
+ *pg_dir = rte_cpu_to_le_64(rmem->dma_arr[0]);
+ }
+}
+
/*
* HWRM Functions (sent to HWRM)
- * These are named bnxt_hwrm_*() and return -1 if bnxt_hwrm_send_message()
- * fails (ie: a timeout), and a positive non-zero HWRM error code if the HWRM
- * command was failed by the ChiMP.
+ * These are named bnxt_hwrm_*() and return 0 on success, -ETIMEDOUT if
+ * the HWRM command times out, or a negative error code if the HWRM
+ * command is failed by the FW.
*/
static int bnxt_hwrm_send_message(struct bnxt *bp, void *msg,
GRCPF_REG_KONG_CHANNEL_OFFSET : GRCPF_REG_CHIMP_CHANNEL_OFFSET;
uint16_t mb_trigger_offset = use_kong_mb ?
GRCPF_REG_KONG_COMM_TRIGGER : GRCPF_REG_CHIMP_COMM_TRIGGER;
+ uint32_t timeout;
- if (bp->flags & BNXT_FLAG_SHORT_CMD) {
+ /* Do not send HWRM commands to firmware in error state */
+ if (bp->flags & BNXT_FLAG_FATAL_ERROR)
+ return 0;
+
+ /* For VER_GET, the FW-provided timeout is not yet known; use the default */
+ if (rte_cpu_to_le_16(req->req_type) == HWRM_VER_GET)
+ timeout = HWRM_CMD_TIMEOUT;
+ else
+ timeout = bp->hwrm_cmd_timeout;
+
+ if (bp->flags & BNXT_FLAG_SHORT_CMD ||
+ msg_len > bp->max_req_len) {
void *short_cmd_req = bp->hwrm_short_cmd_req_addr;
- memset(short_cmd_req, 0, bp->max_req_len);
+ memset(short_cmd_req, 0, bp->hwrm_max_ext_req_len);
memcpy(short_cmd_req, req, msg_len);
short_input.req_type = rte_cpu_to_le_16(req->req_type);
data = (uint32_t *)&short_input;
msg_len = sizeof(short_input);
- /* Sync memory write before updating doorbell */
- rte_wmb();
-
max_req_len = BNXT_HWRM_SHORT_REQ_LEN;
}
/* Ring channel doorbell */
bar = (uint8_t *)bp->bar0 + mb_trigger_offset;
rte_write32(1, bar);
+ /*
+ * Make sure the channel doorbell write completes before
+ * reading the response, to avoid getting stale or invalid
+ * responses.
+ */
+ rte_io_mb();
/* Poll for the valid bit */
- for (i = 0; i < HWRM_CMD_TIMEOUT; i++) {
+ for (i = 0; i < timeout; i++) {
/* Sanity check on the resp->resp_len */
- rte_rmb();
- if (resp->resp_len && resp->resp_len <=
- bp->max_resp_len) {
+ rte_cio_rmb();
+ if (resp->resp_len && resp->resp_len <= bp->max_resp_len) {
/* Last byte of resp contains the valid key */
valid = (uint8_t *)resp + resp->resp_len - 1;
if (*valid == HWRM_RESP_VALID_KEY)
rte_delay_us(1);
}
- if (i >= HWRM_CMD_TIMEOUT) {
- PMD_DRV_LOG(ERR, "Error sending msg 0x%04x\n",
- req->req_type);
- goto err_ret;
+ if (i >= timeout) {
+ /* Suppress VER_GET timeout messages during reset recovery */
+ if (bp->flags & BNXT_FLAG_FW_RESET &&
+ rte_cpu_to_le_16(req->req_type) == HWRM_VER_GET)
+ return -ETIMEDOUT;
+
+ PMD_DRV_LOG(ERR, "Error(timeout) sending msg 0x%04x\n",
+ req->req_type);
+ return -ETIMEDOUT;
}
return 0;
-
-err_ret:
- return -1;
}
/*
- * HWRM_PREP() should be used to prepare *ALL* HWRM commands. It grabs the
+ * HWRM_PREP() should be used to prepare *ALL* HWRM commands. It grabs the
* spinlock, and does initial processing.
*
* HWRM_CHECK_RESULT() returns errors on failure and may not be used. It
- * releases the spinlock only if it returns. If the regular int return codes
+ * releases the spinlock only if it returns. If the regular int return codes
* are not used by the function, HWRM_CHECK_RESULT() should not be used
* directly, rather it should be copied and modified to suit the function.
*
rte_spinlock_unlock(&bp->hwrm_lock); \
if (rc == HWRM_ERR_CODE_RESOURCE_ACCESS_DENIED) \
rc = -EACCES; \
- else if (rc > 0) \
+ else if (rc == HWRM_ERR_CODE_RESOURCE_ALLOC_ERROR) \
+ rc = -ENOSPC; \
+ else if (rc == HWRM_ERR_CODE_INVALID_PARAMS) \
rc = -EINVAL; \
+ else if (rc == HWRM_ERR_CODE_CMD_NOT_SUPPORTED) \
+ rc = -ENOTSUP; \
+ else if (rc > 0) \
+ rc = -EIO; \
return rc; \
} \
if (resp->error_code) { \
rte_spinlock_unlock(&bp->hwrm_lock); \
if (rc == HWRM_ERR_CODE_RESOURCE_ACCESS_DENIED) \
rc = -EACCES; \
- else if (rc > 0) \
+ else if (rc == HWRM_ERR_CODE_RESOURCE_ALLOC_ERROR) \
+ rc = -ENOSPC; \
+ else if (rc == HWRM_ERR_CODE_INVALID_PARAMS) \
rc = -EINVAL; \
+ else if (rc == HWRM_ERR_CODE_CMD_NOT_SUPPORTED) \
+ rc = -ENOTSUP; \
+ else if (rc > 0) \
+ rc = -EIO; \
return rc; \
} \
} while (0)
HWRM_PREP(req, CFA_L2_SET_RX_MASK, BNXT_USE_CHIMP_MB);
req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
- /* FIXME add multicast flag, when multicast adding options is supported
- * by ethtool.
- */
if (vnic->flags & BNXT_VNIC_INFO_BCAST)
mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_BCAST;
if (vnic->flags & BNXT_VNIC_INFO_UNTAGGED)
mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_VLAN_NONVLAN;
+
if (vnic->flags & BNXT_VNIC_INFO_PROMISC)
mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_PROMISCUOUS;
- if (vnic->flags & BNXT_VNIC_INFO_ALLMULTI)
+
+ if (vnic->flags & BNXT_VNIC_INFO_ALLMULTI) {
mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_ALL_MCAST;
- if (vnic->flags & BNXT_VNIC_INFO_MCAST)
- mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_MCAST;
- if (vnic->mc_addr_cnt) {
+ } else if (vnic->flags & BNXT_VNIC_INFO_MCAST) {
mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_MCAST;
req.num_mc_entries = rte_cpu_to_le_32(vnic->mc_addr_cnt);
req.mc_tbl_addr = rte_cpu_to_le_64(vnic->mc_list_dma_addr);
struct bnxt_filter_info *filter)
{
int rc = 0;
+ struct bnxt_filter_info *l2_filter = filter;
struct hwrm_cfa_l2_filter_free_input req = {.req_type = 0 };
struct hwrm_cfa_l2_filter_free_output *resp = bp->hwrm_cmd_resp_addr;
if (filter->fw_l2_filter_id == UINT64_MAX)
return 0;
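+
+ /* An ntuple filter may share its L2 filter; drop a reference and
+ * issue the free only when no references remain.
+ */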
+ if (filter->matching_l2_fltr_ptr)
+ l2_filter = filter->matching_l2_fltr_ptr;
+
+ PMD_DRV_LOG(DEBUG, "filter: %p l2_filter: %p ref_cnt: %d\n",
+ filter, l2_filter, l2_filter->l2_ref_cnt);
+
+ if (l2_filter->l2_ref_cnt > 0)
+ l2_filter->l2_ref_cnt--;
+
+ if (l2_filter->l2_ref_cnt > 0)
+ return 0;
+
HWRM_PREP(req, CFA_L2_FILTER_FREE, BNXT_USE_CHIMP_MB);
req.l2_filter_id = rte_cpu_to_le_64(filter->fw_l2_filter_id);
HWRM_PREP(req, CFA_L2_FILTER_ALLOC, BNXT_USE_CHIMP_MB);
req.flags = rte_cpu_to_le_32(filter->flags);
- req.flags |=
- rte_cpu_to_le_32(HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_OUTERMOST);
enables = filter->enables |
HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_DST_ID;
if (enables &
HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR)
memcpy(req.l2_addr, filter->l2_addr,
- ETHER_ADDR_LEN);
+ RTE_ETHER_ADDR_LEN);
if (enables &
HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR_MASK)
memcpy(req.l2_addr_mask, filter->l2_addr_mask,
- ETHER_ADDR_LEN);
+ RTE_ETHER_ADDR_LEN);
if (enables &
HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_OVLAN)
req.l2_ovlan = filter->l2_ovlan;
req.src_id = rte_cpu_to_le_32(filter->src_id);
if (enables & HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_SRC_TYPE)
req.src_type = filter->src_type;
+ if (filter->pri_hint) {
+ req.pri_hint = filter->pri_hint;
+ req.l2_filter_id_hint =
+ rte_cpu_to_le_64(filter->l2_filter_id_hint);
+ }
req.enables = rte_cpu_to_le_32(enables);
struct hwrm_port_mac_ptp_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
-/* if (bp->hwrm_spec_code < 0x10801 || ptp) TBD */
if (ptp)
return 0;
HWRM_CHECK_RESULT();
- if (!(resp->flags & HWRM_PORT_MAC_PTP_QCFG_OUTPUT_FLAGS_DIRECT_ACCESS))
+ if (!BNXT_CHIP_THOR(bp) &&
+ !(resp->flags & HWRM_PORT_MAC_PTP_QCFG_OUTPUT_FLAGS_DIRECT_ACCESS))
return 0;
+ if (resp->flags & HWRM_PORT_MAC_PTP_QCFG_OUTPUT_FLAGS_ONE_STEP_TX_TS)
+ bp->flags |= BNXT_FLAG_FW_CAP_ONE_STEP_TX_TS;
+
ptp = rte_zmalloc("ptp_cfg", sizeof(*ptp), 0);
if (!ptp)
return -ENOMEM;
- ptp->rx_regs[BNXT_PTP_RX_TS_L] =
- rte_le_to_cpu_32(resp->rx_ts_reg_off_lower);
- ptp->rx_regs[BNXT_PTP_RX_TS_H] =
- rte_le_to_cpu_32(resp->rx_ts_reg_off_upper);
- ptp->rx_regs[BNXT_PTP_RX_SEQ] =
- rte_le_to_cpu_32(resp->rx_ts_reg_off_seq_id);
- ptp->rx_regs[BNXT_PTP_RX_FIFO] =
- rte_le_to_cpu_32(resp->rx_ts_reg_off_fifo);
- ptp->rx_regs[BNXT_PTP_RX_FIFO_ADV] =
- rte_le_to_cpu_32(resp->rx_ts_reg_off_fifo_adv);
- ptp->tx_regs[BNXT_PTP_TX_TS_L] =
- rte_le_to_cpu_32(resp->tx_ts_reg_off_lower);
- ptp->tx_regs[BNXT_PTP_TX_TS_H] =
- rte_le_to_cpu_32(resp->tx_ts_reg_off_upper);
- ptp->tx_regs[BNXT_PTP_TX_SEQ] =
- rte_le_to_cpu_32(resp->tx_ts_reg_off_seq_id);
- ptp->tx_regs[BNXT_PTP_TX_FIFO] =
- rte_le_to_cpu_32(resp->tx_ts_reg_off_fifo);
+ if (!BNXT_CHIP_THOR(bp)) {
+ ptp->rx_regs[BNXT_PTP_RX_TS_L] =
+ rte_le_to_cpu_32(resp->rx_ts_reg_off_lower);
+ ptp->rx_regs[BNXT_PTP_RX_TS_H] =
+ rte_le_to_cpu_32(resp->rx_ts_reg_off_upper);
+ ptp->rx_regs[BNXT_PTP_RX_SEQ] =
+ rte_le_to_cpu_32(resp->rx_ts_reg_off_seq_id);
+ ptp->rx_regs[BNXT_PTP_RX_FIFO] =
+ rte_le_to_cpu_32(resp->rx_ts_reg_off_fifo);
+ ptp->rx_regs[BNXT_PTP_RX_FIFO_ADV] =
+ rte_le_to_cpu_32(resp->rx_ts_reg_off_fifo_adv);
+ ptp->tx_regs[BNXT_PTP_TX_TS_L] =
+ rte_le_to_cpu_32(resp->tx_ts_reg_off_lower);
+ ptp->tx_regs[BNXT_PTP_TX_TS_H] =
+ rte_le_to_cpu_32(resp->tx_ts_reg_off_upper);
+ ptp->tx_regs[BNXT_PTP_TX_SEQ] =
+ rte_le_to_cpu_32(resp->tx_ts_reg_off_seq_id);
+ ptp->tx_regs[BNXT_PTP_TX_FIFO] =
+ rte_le_to_cpu_32(resp->tx_ts_reg_off_fifo);
+ }
ptp->bp = bp;
bp->ptp_cfg = ptp;
}
bp->fw_fid = rte_le_to_cpu_32(resp->fid);
- memcpy(bp->dflt_mac_addr, &resp->mac_address, ETHER_ADDR_LEN);
+ memcpy(bp->dflt_mac_addr, &resp->mac_address, RTE_ETHER_ADDR_LEN);
bp->max_rsscos_ctx = rte_le_to_cpu_16(resp->max_rsscos_ctx);
bp->max_cp_rings = rte_le_to_cpu_16(resp->max_cmpl_rings);
bp->max_tx_rings = rte_le_to_cpu_16(resp->max_tx_rings);
bp->max_rx_rings = rte_le_to_cpu_16(resp->max_rx_rings);
+ bp->first_vf_id = rte_le_to_cpu_16(resp->first_vf_id);
+ bp->max_rx_em_flows = rte_le_to_cpu_16(resp->max_rx_em_flows);
bp->max_l2_ctx = rte_le_to_cpu_16(resp->max_l2_ctxs);
+ if (!BNXT_CHIP_THOR(bp))
+ bp->max_l2_ctx += bp->max_rx_em_flows;
/* TODO: For now, do not support VMDq/RFS on VFs. */
if (BNXT_PF(bp)) {
if (bp->pf.max_vfs)
}
}
+ if (flags & HWRM_FUNC_QCAPS_OUTPUT_FLAGS_EXT_STATS_SUPPORTED)
+ bp->flags |= BNXT_FLAG_EXT_STATS_SUPPORTED;
+
+ if (flags & HWRM_FUNC_QCAPS_OUTPUT_FLAGS_ERROR_RECOVERY_CAPABLE) {
+ bp->flags |= BNXT_FLAG_FW_CAP_ERROR_RECOVERY;
+ PMD_DRV_LOG(DEBUG, "Adapter Error recovery SUPPORTED\n");
+ } else {
+ bp->flags &= ~BNXT_FLAG_FW_CAP_ERROR_RECOVERY;
+ }
+
+ if (flags & HWRM_FUNC_QCAPS_OUTPUT_FLAGS_ERR_RECOVER_RELOAD)
+ bp->flags |= BNXT_FLAG_FW_CAP_ERR_RECOVER_RELOAD;
+ else
+ bp->flags &= ~BNXT_FLAG_FW_CAP_ERR_RECOVER_RELOAD;
+
HWRM_UNLOCK();
return rc;
rc = __bnxt_hwrm_func_qcaps(bp);
if (!rc && bp->hwrm_spec_code >= HWRM_SPEC_CODE_1_8_3) {
+ rc = bnxt_alloc_ctx_mem(bp);
+ if (rc)
+ return rc;
+
rc = bnxt_hwrm_func_resc_qcaps(bp);
if (!rc)
bp->flags |= BNXT_FLAG_NEW_RM;
return rc;
}
+/* VNIC_QCAPS reports capabilities common to all VNICs, so no vnic_id is needed */
+int bnxt_hwrm_vnic_qcaps(struct bnxt *bp)
+{
+ int rc = 0;
+ struct hwrm_vnic_qcaps_input req = {.req_type = 0 };
+ struct hwrm_vnic_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
+
+ HWRM_PREP(req, VNIC_QCAPS, BNXT_USE_CHIMP_MB);
+
+ req.target_id = rte_cpu_to_le_16(0xffff);
+
+ rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
+
+ HWRM_CHECK_RESULT();
+
+ if (rte_le_to_cpu_32(resp->flags) &
+ HWRM_VNIC_QCAPS_OUTPUT_FLAGS_COS_ASSIGNMENT_CAP) {
+ bp->vnic_cap_flags |= BNXT_VNIC_CAP_COS_CLASSIFY;
+ PMD_DRV_LOG(INFO, "CoS assignment capability enabled\n");
+ }
+
+ bp->max_tpa_v2 = rte_le_to_cpu_16(resp->max_aggs_supported);
+
+ HWRM_UNLOCK();
+
+ return rc;
+}
+
int bnxt_hwrm_func_reset(struct bnxt *bp)
{
int rc = 0;
int bnxt_hwrm_func_driver_register(struct bnxt *bp)
{
int rc;
+ uint32_t flags = 0;
struct hwrm_func_drv_rgtr_input req = {.req_type = 0 };
struct hwrm_func_drv_rgtr_output *resp = bp->hwrm_cmd_resp_addr;
if (bp->flags & BNXT_FLAG_REGISTERED)
return 0;
+ flags = HWRM_FUNC_DRV_RGTR_INPUT_FLAGS_HOT_RESET_SUPPORT;
+ if (bp->flags & BNXT_FLAG_FW_CAP_ERROR_RECOVERY)
+ flags |= HWRM_FUNC_DRV_RGTR_INPUT_FLAGS_ERROR_RECOVERY_SUPPORT;
+
+ /* PFs and trusted VFs should advertise support for the
+ * Master capability on non-Stingray platforms.
+ */
+ if ((BNXT_PF(bp) || BNXT_VF_IS_TRUSTED(bp)) && !BNXT_STINGRAY(bp))
+ flags |= HWRM_FUNC_DRV_RGTR_INPUT_FLAGS_MASTER_SUPPORT;
+
HWRM_PREP(req, FUNC_DRV_RGTR, BNXT_USE_CHIMP_MB);
req.enables = rte_cpu_to_le_32(HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_VER |
HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_ASYNC_EVENT_FWD);
* this HWRM sniffer list in FW because DPDK PF driver does
* not support this.
*/
- req.flags =
- rte_cpu_to_le_32(HWRM_FUNC_DRV_RGTR_INPUT_FLAGS_FWD_NONE_MODE);
+ flags |= HWRM_FUNC_DRV_RGTR_INPUT_FLAGS_FWD_NONE_MODE;
}
+ req.flags = rte_cpu_to_le_32(flags);
+
req.async_event_fwd[0] |=
rte_cpu_to_le_32(ASYNC_CMPL_EVENT_ID_LINK_STATUS_CHANGE |
ASYNC_CMPL_EVENT_ID_PORT_CONN_NOT_ALLOWED |
- ASYNC_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE);
+ ASYNC_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE |
+ ASYNC_CMPL_EVENT_ID_LINK_SPEED_CHANGE |
+ ASYNC_CMPL_EVENT_ID_RESET_NOTIFY);
+ if (bp->flags & BNXT_FLAG_FW_CAP_ERROR_RECOVERY)
+ req.async_event_fwd[0] |=
+ rte_cpu_to_le_32(ASYNC_CMPL_EVENT_ID_ERROR_RECOVERY);
req.async_event_fwd[1] |=
rte_cpu_to_le_32(ASYNC_CMPL_EVENT_ID_PF_DRVR_UNLOAD |
ASYNC_CMPL_EVENT_ID_VF_CFG_CHANGE);
rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
HWRM_CHECK_RESULT();
+
+ flags = rte_le_to_cpu_32(resp->flags);
+ if (flags & HWRM_FUNC_DRV_RGTR_OUTPUT_FLAGS_IF_CHANGE_SUPPORTED)
+ bp->flags |= BNXT_FLAG_FW_CAP_IF_CHANGE;
+
HWRM_UNLOCK();
bp->flags |= BNXT_FLAG_REGISTERED;
HWRM_PREP(req, FUNC_VF_CFG, BNXT_USE_CHIMP_MB);
- req.enables = rte_cpu_to_le_32
- (HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_RX_RINGS |
- HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_TX_RINGS |
- HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_STAT_CTXS |
- HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_CMPL_RINGS |
- HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_HW_RING_GRPS |
- HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_VNICS);
+ enables = HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_RX_RINGS |
+ HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_TX_RINGS |
+ HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_STAT_CTXS |
+ HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_CMPL_RINGS |
+ HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_VNICS;
+
+ if (BNXT_HAS_RING_GRPS(bp)) {
+ enables |= HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_HW_RING_GRPS;
+ req.num_hw_ring_grps = rte_cpu_to_le_16(bp->rx_nr_rings);
+ }
req.num_tx_rings = rte_cpu_to_le_16(bp->tx_nr_rings);
req.num_rx_rings = rte_cpu_to_le_16(bp->rx_nr_rings *
AGG_RING_MULTIPLIER);
req.num_stat_ctxs = rte_cpu_to_le_16(bp->rx_nr_rings + bp->tx_nr_rings);
req.num_cmpl_rings = rte_cpu_to_le_16(bp->rx_nr_rings +
- bp->tx_nr_rings);
- req.num_hw_ring_grps = rte_cpu_to_le_16(bp->rx_nr_rings);
+ bp->tx_nr_rings +
+ BNXT_NUM_ASYNC_CPR(bp));
req.num_vnics = rte_cpu_to_le_16(bp->rx_nr_rings);
if (bp->vf_resv_strategy ==
HWRM_FUNC_RESOURCE_QCAPS_OUTPUT_VF_RESV_STRATEGY_MINIMAL_STATIC) {
- enables = HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_VNICS |
- HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_L2_CTXS |
- HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_RSSCOS_CTXS;
- req.enables |= rte_cpu_to_le_32(enables);
+ enables |= HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_VNICS |
+ HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_L2_CTXS |
+ HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_RSSCOS_CTXS;
req.num_rsscos_ctxs = rte_cpu_to_le_16(BNXT_VF_RSV_NUM_RSS_CTX);
req.num_l2_ctxs = rte_cpu_to_le_16(BNXT_VF_RSV_NUM_L2_CTX);
req.num_vnics = rte_cpu_to_le_16(BNXT_VF_RSV_NUM_VNIC);
+ } else if (bp->vf_resv_strategy ==
+ HWRM_FUNC_RESOURCE_QCAPS_OUTPUT_VF_RESV_STRATEGY_MAXIMAL) {
+ enables |= HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_RSSCOS_CTXS;
+ req.num_rsscos_ctxs = rte_cpu_to_le_16(bp->max_rsscos_ctx);
}
if (test)
HWRM_FUNC_VF_CFG_INPUT_FLAGS_STAT_CTX_ASSETS_TEST |
HWRM_FUNC_VF_CFG_INPUT_FLAGS_VNIC_ASSETS_TEST;
+ if (test && BNXT_HAS_RING_GRPS(bp))
+ flags |= HWRM_FUNC_VF_CFG_INPUT_FLAGS_RING_GRP_ASSETS_TEST;
+
req.flags = rte_cpu_to_le_32(flags);
+ req.enables |= rte_cpu_to_le_32(enables);
rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
bp->max_tx_rings = rte_le_to_cpu_16(resp->max_tx_rings);
bp->max_rx_rings = rte_le_to_cpu_16(resp->max_rx_rings);
bp->max_ring_grps = rte_le_to_cpu_32(resp->max_hw_ring_grps);
+ /* func_resource_qcaps does not return max_rx_em_flows.
+ * So use the value provided by func_qcaps.
+ */
bp->max_l2_ctx = rte_le_to_cpu_16(resp->max_l2_ctxs);
+ if (!BNXT_CHIP_THOR(bp))
+ bp->max_l2_ctx += bp->max_rx_em_flows;
bp->max_vnics = rte_le_to_cpu_16(resp->max_vnics);
bp->max_stat_ctx = rte_le_to_cpu_16(resp->max_stat_ctx);
}
+ bp->max_nq_rings = rte_le_to_cpu_16(resp->max_msix);
bp->vf_resv_strategy = rte_le_to_cpu_16(resp->vf_reservation_strategy);
if (bp->vf_resv_strategy >
HWRM_FUNC_RESOURCE_QCAPS_OUTPUT_VF_RESV_STRATEGY_MINIMAL_STATIC)
rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
- HWRM_CHECK_RESULT();
+ if (bp->flags & BNXT_FLAG_FW_RESET)
+ HWRM_CHECK_RESULT_SILENT();
+ else
+ HWRM_CHECK_RESULT();
PMD_DRV_LOG(INFO, "%d.%d.%d:%d.%d.%d\n",
resp->hwrm_intf_maj_8b, resp->hwrm_intf_min_8b,
fw_version |= resp->hwrm_intf_upd_8b;
bp->hwrm_spec_code = fw_version;
+ /* def_req_timeout value is in milliseconds */
+ bp->hwrm_cmd_timeout = rte_le_to_cpu_16(resp->def_req_timeout);
+ /* convert timeout to usec */
+ bp->hwrm_cmd_timeout *= 1000;
+ if (!bp->hwrm_cmd_timeout)
+ bp->hwrm_cmd_timeout = HWRM_CMD_TIMEOUT;
+
if (resp->hwrm_intf_maj_8b != HWRM_VERSION_MAJOR) {
PMD_DRV_LOG(ERR, "Unsupported firmware API version\n");
rc = -EINVAL;
rc = -EINVAL;
}
bp->max_req_len = rte_le_to_cpu_16(resp->max_req_win_len);
- max_resp_len = resp->max_resp_len;
+ bp->hwrm_max_ext_req_len = rte_le_to_cpu_16(resp->max_ext_req_len);
+ if (bp->hwrm_max_ext_req_len < HWRM_MAX_REQ_LEN)
+ bp->hwrm_max_ext_req_len = HWRM_MAX_REQ_LEN;
+
+ max_resp_len = rte_le_to_cpu_16(resp->max_resp_len);
dev_caps_cfg = rte_le_to_cpu_32(resp->dev_caps_cfg);
if (bp->max_resp_len != max_resp_len) {
rte_mem_lock_page(bp->hwrm_cmd_resp_addr);
bp->hwrm_cmd_resp_dma_addr =
rte_mem_virt2iova(bp->hwrm_cmd_resp_addr);
- if (bp->hwrm_cmd_resp_dma_addr == 0) {
+ if (bp->hwrm_cmd_resp_dma_addr == RTE_BAD_IOVA) {
PMD_DRV_LOG(ERR,
"Unable to map response buffer to physical memory.\n");
rc = -ENOMEM;
(dev_caps_cfg &
HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_SHORT_CMD_REQUIRED)) {
PMD_DRV_LOG(DEBUG, "Short command supported\n");
+ bp->flags |= BNXT_FLAG_SHORT_CMD;
+ }
+
+ if (((dev_caps_cfg &
+ HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_SHORT_CMD_SUPPORTED) &&
+ (dev_caps_cfg &
+ HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_SHORT_CMD_REQUIRED)) ||
+ bp->hwrm_max_ext_req_len > HWRM_MAX_REQ_LEN) {
+ sprintf(type, "bnxt_hwrm_short_%04x:%02x:%02x:%02x",
+ bp->pdev->addr.domain, bp->pdev->addr.bus,
+ bp->pdev->addr.devid, bp->pdev->addr.function);
rte_free(bp->hwrm_short_cmd_req_addr);
- bp->hwrm_short_cmd_req_addr = rte_malloc(type,
- bp->max_req_len, 0);
+ bp->hwrm_short_cmd_req_addr =
+ rte_malloc(type, bp->hwrm_max_ext_req_len, 0);
if (bp->hwrm_short_cmd_req_addr == NULL) {
rc = -ENOMEM;
goto error;
rte_mem_lock_page(bp->hwrm_short_cmd_req_addr);
bp->hwrm_short_cmd_req_dma_addr =
rte_mem_virt2iova(bp->hwrm_short_cmd_req_addr);
- if (bp->hwrm_short_cmd_req_dma_addr == 0) {
+ if (bp->hwrm_short_cmd_req_dma_addr == RTE_BAD_IOVA) {
rte_free(bp->hwrm_short_cmd_req_addr);
PMD_DRV_LOG(ERR,
"Unable to map buffer to physical memory.\n");
rc = -ENOMEM;
goto error;
}
-
- bp->flags |= BNXT_FLAG_SHORT_CMD;
}
if (dev_caps_cfg &
HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_KONG_MB_CHNL_SUPPORTED) {
if (dev_caps_cfg &
HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_TRUSTED_VF_SUPPORTED)
PMD_DRV_LOG(DEBUG, "FW supports Trusted VFs\n");
+ if (dev_caps_cfg &
+ HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_CFA_ADV_FLOW_MGNT_SUPPORTED) {
+ bp->flags |= BNXT_FLAG_ADV_FLOW_MGMT;
+ PMD_DRV_LOG(DEBUG, "FW supports advanced flow management\n");
+ }
error:
HWRM_UNLOCK();
HWRM_CHECK_RESULT();
HWRM_UNLOCK();
- bp->flags &= ~BNXT_FLAG_REGISTERED;
-
return rc;
}
int rc = 0;
struct hwrm_queue_qportcfg_input req = {.req_type = 0 };
struct hwrm_queue_qportcfg_output *resp = bp->hwrm_cmd_resp_addr;
+ uint32_t dir = HWRM_QUEUE_QPORTCFG_INPUT_FLAGS_PATH_TX;
int i;
+get_rx_info:
HWRM_PREP(req, QUEUE_QPORTCFG, BNXT_USE_CHIMP_MB);
- req.flags = HWRM_QUEUE_QPORTCFG_INPUT_FLAGS_PATH_TX;
+ req.flags = rte_cpu_to_le_32(dir);
/* HWRM Version >= 1.9.1 */
if (bp->hwrm_spec_code >= HWRM_VERSION_1_9_1)
req.drv_qmap_cap =
HWRM_CHECK_RESULT();
-#define GET_QUEUE_INFO(x) \
- bp->cos_queue[x].id = resp->queue_id##x; \
- bp->cos_queue[x].profile = resp->queue_id##x##_service_profile
-
- GET_QUEUE_INFO(0);
- GET_QUEUE_INFO(1);
- GET_QUEUE_INFO(2);
- GET_QUEUE_INFO(3);
- GET_QUEUE_INFO(4);
- GET_QUEUE_INFO(5);
- GET_QUEUE_INFO(6);
- GET_QUEUE_INFO(7);
+ if (dir == HWRM_QUEUE_QPORTCFG_INPUT_FLAGS_PATH_TX) {
+ GET_TX_QUEUE_INFO(0);
+ GET_TX_QUEUE_INFO(1);
+ GET_TX_QUEUE_INFO(2);
+ GET_TX_QUEUE_INFO(3);
+ GET_TX_QUEUE_INFO(4);
+ GET_TX_QUEUE_INFO(5);
+ GET_TX_QUEUE_INFO(6);
+ GET_TX_QUEUE_INFO(7);
+ } else {
+ GET_RX_QUEUE_INFO(0);
+ GET_RX_QUEUE_INFO(1);
+ GET_RX_QUEUE_INFO(2);
+ GET_RX_QUEUE_INFO(3);
+ GET_RX_QUEUE_INFO(4);
+ GET_RX_QUEUE_INFO(5);
+ GET_RX_QUEUE_INFO(6);
+ GET_RX_QUEUE_INFO(7);
+ }
HWRM_UNLOCK();
+ if (dir == HWRM_QUEUE_QPORTCFG_INPUT_FLAGS_PATH_RX)
+ goto done;
+
if (bp->hwrm_spec_code < HWRM_VERSION_1_9_1) {
- bp->tx_cosq_id = bp->cos_queue[0].id;
+ bp->tx_cosq_id[0] = bp->tx_cos_queue[0].id;
} else {
+ int j;
+
/* iterate and find the COSq profile to use for Tx */
- for (i = 0; i < BNXT_COS_QUEUE_COUNT; i++) {
- if (bp->cos_queue[i].profile ==
- HWRM_QUEUE_SERVICE_PROFILE_LOSSY) {
- bp->tx_cosq_id = bp->cos_queue[i].id;
- break;
+ if (bp->vnic_cap_flags & BNXT_VNIC_CAP_COS_CLASSIFY) {
+ for (j = 0, i = 0; i < BNXT_COS_QUEUE_COUNT; i++) {
+ if (bp->tx_cos_queue[i].id != 0xff)
+ bp->tx_cosq_id[j++] =
+ bp->tx_cos_queue[i].id;
+ }
+ } else {
+ for (i = BNXT_COS_QUEUE_COUNT - 1; i >= 0; i--) {
+ if (bp->tx_cos_queue[i].profile ==
+ HWRM_QUEUE_SERVICE_PROFILE_LOSSY) {
+ bp->tx_cosq_id[0] =
+ bp->tx_cos_queue[i].id;
+ break;
+ }
}
}
}
- PMD_DRV_LOG(DEBUG, "Tx Cos Queue to use: %d\n", bp->tx_cosq_id);
+ bp->max_tc = resp->max_configurable_queues;
+ bp->max_lltc = resp->max_configurable_lossless_queues;
+ if (bp->max_tc > BNXT_MAX_QUEUE)
+ bp->max_tc = BNXT_MAX_QUEUE;
+ bp->max_q = bp->max_tc;
+
+ if (dir == HWRM_QUEUE_QPORTCFG_INPUT_FLAGS_PATH_TX) {
+ dir = HWRM_QUEUE_QPORTCFG_INPUT_FLAGS_PATH_RX;
+ goto get_rx_info;
+ }
+
+done:
return rc;
}
int bnxt_hwrm_ring_alloc(struct bnxt *bp,
struct bnxt_ring *ring,
uint32_t ring_type, uint32_t map_index,
- uint32_t stats_ctx_id, uint32_t cmpl_ring_id)
+ uint32_t stats_ctx_id, uint32_t cmpl_ring_id,
+ uint16_t tx_cosq_id)
{
int rc = 0;
uint32_t enables = 0;
struct hwrm_ring_alloc_input req = {.req_type = 0 };
struct hwrm_ring_alloc_output *resp = bp->hwrm_cmd_resp_addr;
+ struct rte_mempool *mb_pool;
+ uint16_t rx_buf_size;
HWRM_PREP(req, RING_ALLOC, BNXT_USE_CHIMP_MB);
switch (ring_type) {
case HWRM_RING_ALLOC_INPUT_RING_TYPE_TX:
- req.queue_id = rte_cpu_to_le_16(bp->tx_cosq_id);
- /* FALLTHROUGH */
- case HWRM_RING_ALLOC_INPUT_RING_TYPE_RX:
req.ring_type = ring_type;
req.cmpl_ring_id = rte_cpu_to_le_16(cmpl_ring_id);
- req.stat_ctx_id = rte_cpu_to_le_16(stats_ctx_id);
+ req.stat_ctx_id = rte_cpu_to_le_32(stats_ctx_id);
+ req.queue_id = rte_cpu_to_le_16(tx_cosq_id);
if (stats_ctx_id != INVALID_STATS_CTX_ID)
enables |=
HWRM_RING_ALLOC_INPUT_ENABLES_STAT_CTX_ID_VALID;
break;
+ case HWRM_RING_ALLOC_INPUT_RING_TYPE_RX:
+ req.ring_type = ring_type;
+ req.cmpl_ring_id = rte_cpu_to_le_16(cmpl_ring_id);
+ req.stat_ctx_id = rte_cpu_to_le_32(stats_ctx_id);
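+ /* Thor-based adapters need the Rx buffer size in the ring
+ * allocation request; derive it from the first Rx mempool.
+ */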
+ if (BNXT_CHIP_THOR(bp)) {
+ mb_pool = bp->rx_queues[0]->mb_pool;
+ rx_buf_size = rte_pktmbuf_data_room_size(mb_pool) -
+ RTE_PKTMBUF_HEADROOM;
+ rx_buf_size = RTE_MIN(BNXT_MAX_PKT_LEN, rx_buf_size);
+ req.rx_buf_size = rte_cpu_to_le_16(rx_buf_size);
+ enables |=
+ HWRM_RING_ALLOC_INPUT_ENABLES_RX_BUF_SIZE_VALID;
+ }
+ if (stats_ctx_id != INVALID_STATS_CTX_ID)
+ enables |=
+ HWRM_RING_ALLOC_INPUT_ENABLES_STAT_CTX_ID_VALID;
+ break;
case HWRM_RING_ALLOC_INPUT_RING_TYPE_L2_CMPL:
req.ring_type = ring_type;
- /*
- * TODO: Some HWRM versions crash with
- * HWRM_RING_ALLOC_INPUT_INT_MODE_POLL
- */
+ if (BNXT_HAS_NQ(bp)) {
+ /* Association of cp ring with nq */
+ req.nq_ring_id = rte_cpu_to_le_16(cmpl_ring_id);
+ enables |=
+ HWRM_RING_ALLOC_INPUT_ENABLES_NQ_RING_ID_VALID;
+ }
req.int_mode = HWRM_RING_ALLOC_INPUT_INT_MODE_MSIX;
break;
+ case HWRM_RING_ALLOC_INPUT_RING_TYPE_NQ:
+ req.ring_type = ring_type;
+ req.page_size = BNXT_PAGE_SHFT;
+ req.int_mode = HWRM_RING_ALLOC_INPUT_INT_MODE_MSIX;
+ break;
+ case HWRM_RING_ALLOC_INPUT_RING_TYPE_RX_AGG:
+ req.ring_type = ring_type;
+ req.rx_ring_id = rte_cpu_to_le_16(ring->fw_rx_ring_id);
+
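+ /* Aggregation buffer size: mempool data room less headroom,
+ * capped at the maximum supported frame length.
+ */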
+ mb_pool = bp->rx_queues[0]->mb_pool;
+ rx_buf_size = rte_pktmbuf_data_room_size(mb_pool) -
+ RTE_PKTMBUF_HEADROOM;
+ rx_buf_size = RTE_MIN(BNXT_MAX_PKT_LEN, rx_buf_size);
+ req.rx_buf_size = rte_cpu_to_le_16(rx_buf_size);
+
+ req.stat_ctx_id = rte_cpu_to_le_32(stats_ctx_id);
+ enables |= HWRM_RING_ALLOC_INPUT_ENABLES_RX_RING_ID_VALID |
+ HWRM_RING_ALLOC_INPUT_ENABLES_RX_BUF_SIZE_VALID |
+ HWRM_RING_ALLOC_INPUT_ENABLES_STAT_CTX_ID_VALID;
+ break;
default:
PMD_DRV_LOG(ERR, "hwrm alloc invalid ring type %d\n",
ring_type);
HWRM_UNLOCK();
- return -1;
+ return -EINVAL;
}
req.enables = rte_cpu_to_le_32(enables);
if (rc == 0 && resp->error_code)
rc = rte_le_to_cpu_16(resp->error_code);
switch (ring_type) {
- case HWRM_RING_FREE_INPUT_RING_TYPE_L2_CMPL:
+ case HWRM_RING_ALLOC_INPUT_RING_TYPE_L2_CMPL:
PMD_DRV_LOG(ERR,
"hwrm_ring_alloc cp failed. rc:%d\n", rc);
HWRM_UNLOCK();
return rc;
- case HWRM_RING_FREE_INPUT_RING_TYPE_RX:
+ case HWRM_RING_ALLOC_INPUT_RING_TYPE_RX:
PMD_DRV_LOG(ERR,
- "hwrm_ring_alloc rx failed. rc:%d\n", rc);
+ "hwrm_ring_alloc rx failed. rc:%d\n", rc);
HWRM_UNLOCK();
return rc;
- case HWRM_RING_FREE_INPUT_RING_TYPE_TX:
+ case HWRM_RING_ALLOC_INPUT_RING_TYPE_RX_AGG:
+ PMD_DRV_LOG(ERR,
+ "hwrm_ring_alloc rx agg failed. rc:%d\n",
+ rc);
+ HWRM_UNLOCK();
+ return rc;
+ case HWRM_RING_ALLOC_INPUT_RING_TYPE_TX:
PMD_DRV_LOG(ERR,
- "hwrm_ring_alloc tx failed. rc:%d\n", rc);
+ "hwrm_ring_alloc tx failed. rc:%d\n", rc);
+ HWRM_UNLOCK();
+ return rc;
+ case HWRM_RING_ALLOC_INPUT_RING_TYPE_NQ:
+ PMD_DRV_LOG(ERR,
+ "hwrm_ring_alloc nq failed. rc:%d\n", rc);
HWRM_UNLOCK();
return rc;
default:
PMD_DRV_LOG(ERR, "hwrm_ring_free tx failed. rc:%d\n",
rc);
return rc;
+ case HWRM_RING_FREE_INPUT_RING_TYPE_NQ:
+ PMD_DRV_LOG(ERR,
+ "hwrm_ring_free nq failed. rc:%d\n", rc);
+ return rc;
+ case HWRM_RING_FREE_INPUT_RING_TYPE_RX_AGG:
+ PMD_DRV_LOG(ERR,
+ "hwrm_ring_free agg failed. rc:%d\n", rc);
+ return rc;
default:
PMD_DRV_LOG(ERR, "Invalid ring, rc:%d\n", rc);
return rc;
HWRM_CHECK_RESULT();
- bp->grp_info[idx].fw_grp_id =
- rte_le_to_cpu_16(resp->ring_group_id);
+ bp->grp_info[idx].fw_grp_id = rte_le_to_cpu_16(resp->ring_group_id);
HWRM_UNLOCK();
HWRM_PREP(req, STAT_CTX_CLR_STATS, BNXT_USE_CHIMP_MB);
- req.stat_ctx_id = rte_cpu_to_le_16(cpr->hw_stats_ctx_id);
+ req.stat_ctx_id = rte_cpu_to_le_32(cpr->hw_stats_ctx_id);
rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
req.update_period_ms = rte_cpu_to_le_32(0);
- req.stats_dma_addr =
- rte_cpu_to_le_64(cpr->hw_stats_map);
+ req.stats_dma_addr = rte_cpu_to_le_64(cpr->hw_stats_map);
rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
HWRM_CHECK_RESULT();
- cpr->hw_stats_ctx_id = rte_le_to_cpu_16(resp->stat_ctx_id);
+ cpr->hw_stats_ctx_id = rte_le_to_cpu_32(resp->stat_ctx_id);
HWRM_UNLOCK();
HWRM_PREP(req, STAT_CTX_FREE, BNXT_USE_CHIMP_MB);
- req.stat_ctx_id = rte_cpu_to_le_16(cpr->hw_stats_ctx_id);
+ req.stat_ctx_id = rte_cpu_to_le_32(cpr->hw_stats_ctx_id);
rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
struct hwrm_vnic_alloc_input req = { 0 };
struct hwrm_vnic_alloc_output *resp = bp->hwrm_cmd_resp_addr;
+ if (!BNXT_HAS_RING_GRPS(bp))
+ goto skip_ring_grps;
+
/* map ring groups to this vnic */
PMD_DRV_LOG(DEBUG, "Alloc VNIC. Start %x, End %x\n",
vnic->start_grp_id, vnic->end_grp_id);
vnic->rss_rule = (uint16_t)HWRM_NA_SIGNATURE;
vnic->cos_rule = (uint16_t)HWRM_NA_SIGNATURE;
vnic->lb_rule = (uint16_t)HWRM_NA_SIGNATURE;
- vnic->mru = bp->eth_dev->data->mtu + ETHER_HDR_LEN +
- ETHER_CRC_LEN + VLAN_TAG_SIZE;
+
+skip_ring_grps:
+ vnic->mru = BNXT_VNIC_MRU(bp->eth_dev->data->mtu);
HWRM_PREP(req, VNIC_ALLOC, BNXT_USE_CHIMP_MB);
if (vnic->func_default)
struct hwrm_vnic_plcmodes_cfg_input req = {.req_type = 0 };
struct hwrm_vnic_plcmodes_cfg_output *resp = bp->hwrm_cmd_resp_addr;
+ if (vnic->fw_vnic_id == INVALID_HW_RING_ID) {
+ PMD_DRV_LOG(DEBUG, "VNIC ID %x\n", vnic->fw_vnic_id);
+ return rc;
+ }
+
HWRM_PREP(req, VNIC_PLCMODES_CFG, BNXT_USE_CHIMP_MB);
req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
int rc = 0;
struct hwrm_vnic_cfg_input req = {.req_type = 0 };
struct hwrm_vnic_cfg_output *resp = bp->hwrm_cmd_resp_addr;
+ struct bnxt_plcmodes_cfg pmodes = { 0 };
uint32_t ctx_enable_flag = 0;
- struct bnxt_plcmodes_cfg pmodes;
+ uint32_t enables = 0;
if (vnic->fw_vnic_id == INVALID_HW_RING_ID) {
PMD_DRV_LOG(DEBUG, "VNIC ID %x\n", vnic->fw_vnic_id);
HWRM_PREP(req, VNIC_CFG, BNXT_USE_CHIMP_MB);
+ if (BNXT_CHIP_THOR(bp)) {
+ struct bnxt_rx_queue *rxq =
+ bp->eth_dev->data->rx_queues[vnic->start_grp_id];
+ struct bnxt_rx_ring_info *rxr = rxq->rx_ring;
+ struct bnxt_cp_ring_info *cpr = rxq->cp_ring;
+
+ req.default_rx_ring_id =
+ rte_cpu_to_le_16(rxr->rx_ring_struct->fw_ring_id);
+ req.default_cmpl_ring_id =
+ rte_cpu_to_le_16(cpr->cp_ring_struct->fw_ring_id);
+ enables = HWRM_VNIC_CFG_INPUT_ENABLES_DEFAULT_RX_RING_ID |
+ HWRM_VNIC_CFG_INPUT_ENABLES_DEFAULT_CMPL_RING_ID;
+ goto config_mru;
+ }
+
/* Only RSS support for now TBD: COS & LB */
- req.enables =
- rte_cpu_to_le_32(HWRM_VNIC_CFG_INPUT_ENABLES_DFLT_RING_GRP);
+ enables = HWRM_VNIC_CFG_INPUT_ENABLES_DFLT_RING_GRP;
if (vnic->lb_rule != 0xffff)
ctx_enable_flag |= HWRM_VNIC_CFG_INPUT_ENABLES_LB_RULE;
if (vnic->cos_rule != 0xffff)
ctx_enable_flag |= HWRM_VNIC_CFG_INPUT_ENABLES_COS_RULE;
- if (vnic->rss_rule != 0xffff) {
+ if (vnic->rss_rule != (uint16_t)HWRM_NA_SIGNATURE) {
ctx_enable_flag |= HWRM_VNIC_CFG_INPUT_ENABLES_MRU;
ctx_enable_flag |= HWRM_VNIC_CFG_INPUT_ENABLES_RSS_RULE;
}
- req.enables |= rte_cpu_to_le_32(ctx_enable_flag);
- req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
+ if (bp->vnic_cap_flags & BNXT_VNIC_CAP_COS_CLASSIFY) {
+ ctx_enable_flag |= HWRM_VNIC_CFG_INPUT_ENABLES_QUEUE_ID;
+ req.queue_id = rte_cpu_to_le_16(vnic->cos_queue_id);
+ }
+
+ enables |= ctx_enable_flag;
req.dflt_ring_grp = rte_cpu_to_le_16(vnic->dflt_ring_grp);
req.rss_rule = rte_cpu_to_le_16(vnic->rss_rule);
req.cos_rule = rte_cpu_to_le_16(vnic->cos_rule);
req.lb_rule = rte_cpu_to_le_16(vnic->lb_rule);
+
+config_mru:
+ req.enables = rte_cpu_to_le_32(enables);
+ req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
req.mru = rte_cpu_to_le_16(vnic->mru);
/* Configure default VNIC only once. */
if (vnic->func_default && !(bp->flags & BNXT_FLAG_DFLT_VNIC_SET)) {
return rc;
}
-int bnxt_hwrm_vnic_ctx_alloc(struct bnxt *bp, struct bnxt_vnic_info *vnic)
+int bnxt_hwrm_vnic_ctx_alloc(struct bnxt *bp,
+ struct bnxt_vnic_info *vnic, uint16_t ctx_idx)
{
int rc = 0;
+ uint16_t ctx_id;
struct hwrm_vnic_rss_cos_lb_ctx_alloc_input req = {.req_type = 0 };
struct hwrm_vnic_rss_cos_lb_ctx_alloc_output *resp =
bp->hwrm_cmd_resp_addr;
HWRM_PREP(req, VNIC_RSS_COS_LB_CTX_ALLOC, BNXT_USE_CHIMP_MB);
rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
-
HWRM_CHECK_RESULT();
- vnic->rss_rule = rte_le_to_cpu_16(resp->rss_cos_lb_ctx_id);
+ ctx_id = rte_le_to_cpu_16(resp->rss_cos_lb_ctx_id);
+ if (!BNXT_HAS_RING_GRPS(bp))
+ vnic->fw_grp_ids[ctx_idx] = ctx_id;
+ else if (ctx_idx == 0)
+ vnic->rss_rule = ctx_id;
+
HWRM_UNLOCK();
- PMD_DRV_LOG(DEBUG, "VNIC RSS Rule %x\n", vnic->rss_rule);
return rc;
}
-int bnxt_hwrm_vnic_ctx_free(struct bnxt *bp, struct bnxt_vnic_info *vnic)
+static
+int _bnxt_hwrm_vnic_ctx_free(struct bnxt *bp,
+ struct bnxt_vnic_info *vnic, uint16_t ctx_idx)
{
int rc = 0;
struct hwrm_vnic_rss_cos_lb_ctx_free_input req = {.req_type = 0 };
struct hwrm_vnic_rss_cos_lb_ctx_free_output *resp =
bp->hwrm_cmd_resp_addr;
- if (vnic->rss_rule == 0xffff) {
+ if (ctx_idx == (uint16_t)HWRM_NA_SIGNATURE) {
PMD_DRV_LOG(DEBUG, "VNIC RSS Rule %x\n", vnic->rss_rule);
return rc;
}
HWRM_PREP(req, VNIC_RSS_COS_LB_CTX_FREE, BNXT_USE_CHIMP_MB);
- req.rss_cos_lb_ctx_id = rte_cpu_to_le_16(vnic->rss_rule);
+ req.rss_cos_lb_ctx_id = rte_cpu_to_le_16(ctx_idx);
rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
HWRM_CHECK_RESULT();
HWRM_UNLOCK();
- vnic->rss_rule = INVALID_HW_RING_ID;
+ return rc;
+}
+
+int bnxt_hwrm_vnic_ctx_free(struct bnxt *bp, struct bnxt_vnic_info *vnic)
+{
+ int rc = 0;
+
+ if (BNXT_CHIP_THOR(bp)) {
+ int j;
+
+ for (j = 0; j < vnic->num_lb_ctxts; j++) {
+ rc = _bnxt_hwrm_vnic_ctx_free(bp,
+ vnic,
+ vnic->fw_grp_ids[j]);
+ vnic->fw_grp_ids[j] = INVALID_HW_RING_ID;
+ }
+ vnic->num_lb_ctxts = 0;
+ } else {
+ rc = _bnxt_hwrm_vnic_ctx_free(bp, vnic, vnic->rss_rule);
+ vnic->rss_rule = INVALID_HW_RING_ID;
+ }
return rc;
}
return rc;
}
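+/*
+ * Thor-based adapters take one VNIC_RSS_CFG request per RSS context;
+ * each request programs one ring-table pair index and one
+ * HW_HASH_INDEX_SIZE slice of the RSS indirection table.
+ */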
+static int
+bnxt_hwrm_vnic_rss_cfg_thor(struct bnxt *bp, struct bnxt_vnic_info *vnic)
+{
+ int i;
+ int rc = 0;
+ int nr_ctxs = vnic->num_lb_ctxts;
+ struct hwrm_vnic_rss_cfg_input req = {.req_type = 0 };
+ struct hwrm_vnic_rss_cfg_output *resp = bp->hwrm_cmd_resp_addr;
+
+ for (i = 0; i < nr_ctxs; i++) {
+ HWRM_PREP(req, VNIC_RSS_CFG, BNXT_USE_CHIMP_MB);
+
+ req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
+ req.hash_type = rte_cpu_to_le_32(vnic->hash_type);
+ req.hash_mode_flags = vnic->hash_mode;
+
+ req.hash_key_tbl_addr =
+ rte_cpu_to_le_64(vnic->rss_hash_key_dma_addr);
+
+ req.ring_grp_tbl_addr =
+ rte_cpu_to_le_64(vnic->rss_table_dma_addr +
+ i * HW_HASH_INDEX_SIZE);
+ req.ring_table_pair_index = i;
+ req.rss_ctx_idx = rte_cpu_to_le_16(vnic->fw_grp_ids[i]);
+
+ rc = bnxt_hwrm_send_message(bp, &req, sizeof(req),
+ BNXT_USE_CHIMP_MB);
+
+ HWRM_CHECK_RESULT();
+ HWRM_UNLOCK();
+ }
+
+ return rc;
+}
+
int bnxt_hwrm_vnic_rss_cfg(struct bnxt *bp,
struct bnxt_vnic_info *vnic)
{
struct hwrm_vnic_rss_cfg_input req = {.req_type = 0 };
struct hwrm_vnic_rss_cfg_output *resp = bp->hwrm_cmd_resp_addr;
+ if (!vnic->rss_table)
+ return 0;
+
+ if (BNXT_CHIP_THOR(bp))
+ return bnxt_hwrm_vnic_rss_cfg_thor(bp, vnic);
+
HWRM_PREP(req, VNIC_RSS_CFG, BNXT_USE_CHIMP_MB);
req.hash_type = rte_cpu_to_le_32(vnic->hash_type);
req.hash_key_tbl_addr =
rte_cpu_to_le_64(vnic->rss_hash_key_dma_addr);
req.rss_ctx_idx = rte_cpu_to_le_16(vnic->rss_rule);
+ req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
size = rte_pktmbuf_data_room_size(bp->rx_queues[0]->mb_pool);
size -= RTE_PKTMBUF_HEADROOM;
+ size = RTE_MIN(BNXT_MAX_PKT_LEN, size);
req.jumbo_thresh = rte_cpu_to_le_16(size);
req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
struct hwrm_vnic_tpa_cfg_input req = {.req_type = 0 };
struct hwrm_vnic_tpa_cfg_output *resp = bp->hwrm_cmd_resp_addr;
+ if (BNXT_CHIP_THOR(bp) && !bp->max_tpa_v2) {
+ if (enable)
+ PMD_DRV_LOG(ERR, "No HW support for LRO\n");
+ return -ENOTSUP;
+ }
+
+ if (vnic->fw_vnic_id == INVALID_HW_RING_ID) {
+ PMD_DRV_LOG(DEBUG, "Invalid vNIC ID\n");
+ return 0;
+ }
+
HWRM_PREP(req, VNIC_TPA_CFG, BNXT_USE_CHIMP_MB);
if (enable) {
HWRM_VNIC_TPA_CFG_INPUT_FLAGS_GRO |
HWRM_VNIC_TPA_CFG_INPUT_FLAGS_AGG_WITH_ECN |
HWRM_VNIC_TPA_CFG_INPUT_FLAGS_AGG_WITH_SAME_GRE_SEQ);
- req.max_agg_segs = rte_cpu_to_le_16(5);
- req.max_aggs =
- rte_cpu_to_le_16(HWRM_VNIC_TPA_CFG_INPUT_MAX_AGGS_MAX);
+ req.max_agg_segs = rte_cpu_to_le_16(BNXT_TPA_MAX_SEGS(bp));
+ req.max_aggs = rte_cpu_to_le_16(BNXT_TPA_MAX_AGGS(bp));
req.min_agg_len = rte_cpu_to_le_32(512);
}
req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
return rc;
}
-/*
- * HWRM utility functions
- */
-
int bnxt_clear_all_hwrm_stat_ctxs(struct bnxt *bp)
{
unsigned int i;
cpr = bp->tx_queues[i - bp->rx_cp_nr_rings]->cp_ring;
} else {
cpr = bp->rx_queues[i]->cp_ring;
- bp->grp_info[i].fw_stats_ctx = -1;
+ if (BNXT_HAS_RING_GRPS(bp))
+ bp->grp_info[i].fw_stats_ctx = -1;
}
if (cpr->hw_stats_ctx_id != HWRM_NA_SIGNATURE) {
rc = bnxt_hwrm_stat_ctx_free(bp, cpr, i);
uint16_t idx;
uint32_t rc = 0;
+ if (!BNXT_HAS_RING_GRPS(bp))
+ return 0;
+
for (idx = 0; idx < bp->rx_cp_nr_rings; idx++) {
if (bp->grp_info[idx].fw_grp_id == INVALID_HW_RING_ID)
return rc;
}
-static void bnxt_free_cp_ring(struct bnxt *bp, struct bnxt_cp_ring_info *cpr)
+void bnxt_free_nq_ring(struct bnxt *bp, struct bnxt_cp_ring_info *cpr)
+{
+ struct bnxt_ring *cp_ring = cpr->cp_ring_struct;
+
+ bnxt_hwrm_ring_free(bp, cp_ring,
+ HWRM_RING_FREE_INPUT_RING_TYPE_NQ);
+ cp_ring->fw_ring_id = INVALID_HW_RING_ID;
+ memset(cpr->cp_desc_ring, 0, cpr->cp_ring_struct->ring_size *
+ sizeof(*cpr->cp_desc_ring));
+ cpr->cp_raw_cons = 0;
+ cpr->valid = 0;
+}
+
+void bnxt_free_cp_ring(struct bnxt *bp, struct bnxt_cp_ring_info *cpr)
{
struct bnxt_ring *cp_ring = cpr->cp_ring_struct;
memset(cpr->cp_desc_ring, 0, cpr->cp_ring_struct->ring_size *
sizeof(*cpr->cp_desc_ring));
cpr->cp_raw_cons = 0;
+ cpr->valid = 0;
}
void bnxt_free_hwrm_rx_ring(struct bnxt *bp, int queue_index)
bnxt_hwrm_ring_free(bp, ring,
HWRM_RING_FREE_INPUT_RING_TYPE_RX);
ring->fw_ring_id = INVALID_HW_RING_ID;
- bp->grp_info[queue_index].rx_fw_ring_id = INVALID_HW_RING_ID;
+ if (BNXT_HAS_RING_GRPS(bp))
+ bp->grp_info[queue_index].rx_fw_ring_id =
+ INVALID_HW_RING_ID;
memset(rxr->rx_desc_ring, 0,
rxr->rx_ring_struct->ring_size *
sizeof(*rxr->rx_desc_ring));
ring = rxr->ag_ring_struct;
if (ring->fw_ring_id != INVALID_HW_RING_ID) {
bnxt_hwrm_ring_free(bp, ring,
+ BNXT_CHIP_THOR(bp) ?
+ HWRM_RING_FREE_INPUT_RING_TYPE_RX_AGG :
HWRM_RING_FREE_INPUT_RING_TYPE_RX);
ring->fw_ring_id = INVALID_HW_RING_ID;
memset(rxr->ag_buf_ring, 0,
rxr->ag_ring_struct->ring_size *
sizeof(*rxr->ag_buf_ring));
rxr->ag_prod = 0;
- bp->grp_info[queue_index].ag_fw_ring_id = INVALID_HW_RING_ID;
+ if (BNXT_HAS_RING_GRPS(bp))
+ bp->grp_info[queue_index].ag_fw_ring_id =
+ INVALID_HW_RING_ID;
}
if (cpr->cp_ring_struct->fw_ring_id != INVALID_HW_RING_ID)
bnxt_free_cp_ring(bp, cpr);
- bp->grp_info[queue_index].cp_fw_ring_id = INVALID_HW_RING_ID;
+ if (BNXT_HAS_RING_GRPS(bp))
+ bp->grp_info[queue_index].cp_fw_ring_id = INVALID_HW_RING_ID;
}
int bnxt_free_all_hwrm_rings(struct bnxt *bp)
uint16_t i;
uint32_t rc = 0;
+ if (!BNXT_HAS_RING_GRPS(bp))
+ return 0;
+
for (i = 0; i < bp->rx_cp_nr_rings; i++) {
rc = bnxt_hwrm_ring_grp_alloc(bp, i);
if (rc)
return rc;
}
+/*
+ * HWRM utility functions
+ */
+
void bnxt_free_hwrm_resources(struct bnxt *bp)
{
/* Release memzone */
return -ENOMEM;
bp->hwrm_cmd_resp_dma_addr =
rte_mem_virt2iova(bp->hwrm_cmd_resp_addr);
- if (bp->hwrm_cmd_resp_dma_addr == 0) {
+ if (bp->hwrm_cmd_resp_dma_addr == RTE_BAD_IOVA) {
PMD_DRV_LOG(ERR,
"unable to map response address to physical memory\n");
return -ENOMEM;
else
rc = bnxt_hwrm_clear_l2_filter(bp, filter);
STAILQ_REMOVE(&vnic->filter, filter, bnxt_filter_info, next);
- //if (rc)
- //break;
+ bnxt_free_filter(bp, filter);
}
return rc;
}
struct rte_flow *flow;
int rc = 0;
- STAILQ_FOREACH(flow, &vnic->flow_list, next) {
+ while (!STAILQ_EMPTY(&vnic->flow_list)) {
+ flow = STAILQ_FIRST(&vnic->flow_list);
filter = flow->filter;
- PMD_DRV_LOG(ERR, "filter type %d\n", filter->filter_type);
+ PMD_DRV_LOG(DEBUG, "filter type %d\n", filter->filter_type);
if (filter->filter_type == HWRM_CFA_EM_FILTER)
rc = bnxt_hwrm_clear_em_filter(bp, filter);
else if (filter->filter_type == HWRM_CFA_NTUPLE_FILTER)
STAILQ_REMOVE(&vnic->flow_list, flow, rte_flow, next);
rte_free(flow);
- //if (rc)
- //break;
}
return rc;
}
int rc = 0;
STAILQ_FOREACH(filter, &vnic->filter, next) {
- if (filter->filter_type == HWRM_CFA_EM_FILTER)
+ if (filter->filter_type == HWRM_CFA_EM_FILTER) {
rc = bnxt_hwrm_set_em_filter(bp, filter->dst_id,
filter);
- else if (filter->filter_type == HWRM_CFA_NTUPLE_FILTER)
+ } else if (filter->filter_type == HWRM_CFA_NTUPLE_FILTER) {
rc = bnxt_hwrm_set_ntuple_filter(bp, filter->dst_id,
filter);
- else
+ } else {
rc = bnxt_hwrm_set_l2_filter(bp, vnic->fw_vnic_id,
filter);
+ if (!rc)
+ filter->dflt = 1;
+ }
if (rc)
break;
}
* Cleanup VNICs in reverse order, to make sure the L2 filter
* from vnic0 is last to be cleaned up.
*/
- for (i = bp->nr_vnics - 1; i >= 0; i--) {
+ for (i = bp->max_vnics - 1; i >= 0; i--) {
struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
+ if (vnic->fw_vnic_id == INVALID_HW_RING_ID)
+ continue;
+
bnxt_clear_hwrm_vnic_flows(bp, vnic);
bnxt_clear_hwrm_vnic_filters(bp, vnic);
goto port_phy_cfg;
autoneg = bnxt_check_eth_link_autoneg(dev_conf->link_speeds);
+ if (BNXT_CHIP_THOR(bp) &&
+ dev_conf->link_speeds == ETH_LINK_SPEED_40G) {
+ /* 40G is not supported as part of media auto detect.
+ * The speed should be forced and autoneg disabled
+ * to configure 40G speed.
+ */
+ PMD_DRV_LOG(INFO, "Disabling autoneg for 40G\n");
+ autoneg = 0;
+ }
+
speed = bnxt_parse_eth_link_speed(dev_conf->link_speeds);
link_req.phy_flags = HWRM_PORT_PHY_CFG_INPUT_FLAGS_RESET_PHY;
- /* Autoneg can be done only when the FW allows */
- if (autoneg == 1 && !(bp->link_info.auto_link_speed ||
- bp->link_info.force_link_speed)) {
+ /* Autoneg can be done only when the FW allows.
+ * When user configures fixed speed of 40G and later changes to
+ * any other speed, auto_link_speed/force_link_speed is still set
+ * to 40G until link comes up at new speed.
+ */
+ if (autoneg == 1 &&
+ !(!BNXT_CHIP_THOR(bp) &&
+ (bp->link_info.auto_link_speed ||
+ bp->link_info.force_link_speed))) {
link_req.phy_flags |=
HWRM_PORT_PHY_CFG_INPUT_FLAGS_RESTART_AUTONEG;
link_req.auto_link_speed_mask =
}
/* JIRA 22088 */
-int bnxt_hwrm_func_qcfg(struct bnxt *bp)
+int bnxt_hwrm_func_qcfg(struct bnxt *bp, uint16_t *mtu)
{
struct hwrm_func_qcfg_input req = {0};
struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
if (BNXT_PF(bp) && (flags & HWRM_FUNC_QCFG_OUTPUT_FLAGS_MULTI_HOST))
bp->flags |= BNXT_FLAG_MULTI_HOST;
- if (BNXT_VF(bp) && (flags & HWRM_FUNC_QCFG_OUTPUT_FLAGS_TRUSTED_VF)) {
+ if (BNXT_VF(bp) &&
+ !BNXT_VF_IS_TRUSTED(bp) &&
+ (flags & HWRM_FUNC_QCFG_OUTPUT_FLAGS_TRUSTED_VF)) {
bp->flags |= BNXT_FLAG_TRUSTED_VF_EN;
PMD_DRV_LOG(INFO, "Trusted VF cap enabled\n");
+ } else if (BNXT_VF(bp) &&
+ BNXT_VF_IS_TRUSTED(bp) &&
+ !(flags & HWRM_FUNC_QCFG_OUTPUT_FLAGS_TRUSTED_VF)) {
+ bp->flags &= ~BNXT_FLAG_TRUSTED_VF_EN;
+ PMD_DRV_LOG(INFO, "Trusted VF cap disabled\n");
}
+ if (mtu)
+ *mtu = rte_le_to_cpu_16(resp->mtu);
+
switch (resp->port_partition_type) {
case HWRM_FUNC_QCFG_OUTPUT_PORT_PARTITION_TYPE_NPAR1_0:
case HWRM_FUNC_QCFG_OUTPUT_PORT_PARTITION_TYPE_NPAR1_5:
{
struct hwrm_func_cfg_input req = {0};
struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
+ uint32_t enables;
int rc;
- req.enables = rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_MTU |
- HWRM_FUNC_CFG_INPUT_ENABLES_MRU |
- HWRM_FUNC_CFG_INPUT_ENABLES_NUM_RSSCOS_CTXS |
- HWRM_FUNC_CFG_INPUT_ENABLES_NUM_STAT_CTXS |
- HWRM_FUNC_CFG_INPUT_ENABLES_NUM_CMPL_RINGS |
- HWRM_FUNC_CFG_INPUT_ENABLES_NUM_TX_RINGS |
- HWRM_FUNC_CFG_INPUT_ENABLES_NUM_RX_RINGS |
- HWRM_FUNC_CFG_INPUT_ENABLES_NUM_L2_CTXS |
- HWRM_FUNC_CFG_INPUT_ENABLES_NUM_VNICS |
- HWRM_FUNC_CFG_INPUT_ENABLES_NUM_HW_RING_GRPS);
+ enables = HWRM_FUNC_CFG_INPUT_ENABLES_MTU |
+ HWRM_FUNC_CFG_INPUT_ENABLES_MRU |
+ HWRM_FUNC_CFG_INPUT_ENABLES_NUM_RSSCOS_CTXS |
+ HWRM_FUNC_CFG_INPUT_ENABLES_NUM_STAT_CTXS |
+ HWRM_FUNC_CFG_INPUT_ENABLES_NUM_CMPL_RINGS |
+ HWRM_FUNC_CFG_INPUT_ENABLES_NUM_TX_RINGS |
+ HWRM_FUNC_CFG_INPUT_ENABLES_NUM_RX_RINGS |
+ HWRM_FUNC_CFG_INPUT_ENABLES_NUM_L2_CTXS |
+ HWRM_FUNC_CFG_INPUT_ENABLES_NUM_VNICS;
+
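+ /* Thor-based adapters have no HW ring groups; reserve NQs
+ * (MSI-X vectors) for them instead.
+ */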
+ if (BNXT_HAS_RING_GRPS(bp)) {
+ enables |= HWRM_FUNC_CFG_INPUT_ENABLES_NUM_HW_RING_GRPS;
+ req.num_hw_ring_grps = rte_cpu_to_le_16(bp->max_ring_grps);
+ } else if (BNXT_HAS_NQ(bp)) {
+ enables |= HWRM_FUNC_CFG_INPUT_ENABLES_NUM_MSIX;
+ req.num_msix = rte_cpu_to_le_16(bp->max_nq_rings);
+ }
+
req.flags = rte_cpu_to_le_32(bp->pf.func_cfg_flags);
- req.mtu = rte_cpu_to_le_16(BNXT_MAX_MTU);
- req.mru = rte_cpu_to_le_16(bp->eth_dev->data->mtu + ETHER_HDR_LEN +
- ETHER_CRC_LEN + VLAN_TAG_SIZE *
- BNXT_NUM_VLANS);
+ req.mtu = rte_cpu_to_le_16(RTE_MIN(bp->eth_dev->data->mtu,
+ BNXT_MAX_MTU)); /* FW adds hdr sizes */
+ req.mru = rte_cpu_to_le_16(BNXT_VNIC_MRU(bp->eth_dev->data->mtu));
req.num_rsscos_ctxs = rte_cpu_to_le_16(bp->max_rsscos_ctx);
req.num_stat_ctxs = rte_cpu_to_le_16(bp->max_stat_ctx);
req.num_cmpl_rings = rte_cpu_to_le_16(bp->max_cp_rings);
req.num_rx_rings = rte_cpu_to_le_16(bp->max_rx_rings);
req.num_l2_ctxs = rte_cpu_to_le_16(bp->max_l2_ctx);
req.num_vnics = rte_cpu_to_le_16(bp->max_vnics);
- req.num_hw_ring_grps = rte_cpu_to_le_16(bp->max_ring_grps);
req.fid = rte_cpu_to_le_16(0xffff);
+ req.enables = rte_cpu_to_le_32(enables);
HWRM_PREP(req, FUNC_CFG, BNXT_USE_CHIMP_MB);
HWRM_FUNC_CFG_INPUT_ENABLES_NUM_VNICS |
HWRM_FUNC_CFG_INPUT_ENABLES_NUM_HW_RING_GRPS);
- req->mtu = rte_cpu_to_le_16(bp->eth_dev->data->mtu + ETHER_HDR_LEN +
- ETHER_CRC_LEN + VLAN_TAG_SIZE *
- BNXT_NUM_VLANS);
- req->mru = rte_cpu_to_le_16(bp->eth_dev->data->mtu + ETHER_HDR_LEN +
- ETHER_CRC_LEN + VLAN_TAG_SIZE *
+ req->mtu = rte_cpu_to_le_16(bp->eth_dev->data->mtu + RTE_ETHER_HDR_LEN +
+ RTE_ETHER_CRC_LEN + VLAN_TAG_SIZE *
BNXT_NUM_VLANS);
+ req->mru = rte_cpu_to_le_16(BNXT_VNIC_MRU(bp->eth_dev->data->mtu));
req->num_rsscos_ctxs = rte_cpu_to_le_16(bp->max_rsscos_ctx /
(num_vfs + 1));
req->num_stat_ctxs = rte_cpu_to_le_16(bp->max_stat_ctx / (num_vfs + 1));
rte_eth_random_addr(cfg_req->dflt_mac_addr);
bp->pf.vf_info[vf].random_mac = true;
} else {
- memcpy(cfg_req->dflt_mac_addr, mac.addr_bytes, ETHER_ADDR_LEN);
+ memcpy(cfg_req->dflt_mac_addr, mac.addr_bytes,
+ RTE_ETHER_ADDR_LEN);
}
}
HWRM_PREP(req, FUNC_QCFG, BNXT_USE_CHIMP_MB);
req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
- if (rc) {
- PMD_DRV_LOG(ERR, "hwrm_func_qcfg failed rc:%d\n", rc);
- return -1;
- } else if (resp->error_code) {
- rc = rte_le_to_cpu_16(resp->error_code);
- PMD_DRV_LOG(ERR, "hwrm_func_qcfg error %d\n", rc);
- return -1;
- }
+ HWRM_CHECK_RESULT();
rc = rte_le_to_cpu_16(resp->vlan);
HWRM_UNLOCK();
if (!BNXT_PF(bp)) {
PMD_DRV_LOG(ERR, "Attempt to allcoate VFs on a VF!\n");
- return -1;
+ return -EINVAL;
}
rc = bnxt_hwrm_func_qcaps(bp);
bp->pf.func_cfg_flags |=
HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_DISABLE;
rc = bnxt_hwrm_pf_func_cfg(bp, bp->max_tx_rings);
+ rc = __bnxt_hwrm_func_qcaps(bp);
return rc;
}
if (!BNXT_PF(bp)) {
PMD_DRV_LOG(ERR, "Attempt to allcoate VFs on a VF!\n");
- return -1;
+ return -EINVAL;
}
rc = bnxt_hwrm_func_qcaps(bp);
req.req_buf_len = rte_cpu_to_le_16(HWRM_MAX_REQ_LEN);
req.req_buf_page_addr0 =
rte_cpu_to_le_64(rte_mem_virt2iova(bp->pf.vf_req_buf));
- if (req.req_buf_page_addr0 == 0) {
+ if (req.req_buf_page_addr0 == RTE_BAD_IOVA) {
PMD_DRV_LOG(ERR,
"unable to map buffer address to physical memory\n");
return -ENOMEM;
struct hwrm_func_buf_unrgtr_input req = {.req_type = 0 };
struct hwrm_func_buf_unrgtr_output *resp = bp->hwrm_cmd_resp_addr;
+ if (!(BNXT_PF(bp) && bp->pdev->max_vfs))
+ return 0;
+
HWRM_PREP(req, FUNC_BUF_UNRGTR, BNXT_USE_CHIMP_MB);
rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
req.enables = rte_cpu_to_le_32(
HWRM_FUNC_CFG_INPUT_ENABLES_ASYNC_EVENT_CR);
req.async_event_cr = rte_cpu_to_le_16(
- bp->def_cp_ring->cp_ring_struct->fw_ring_id);
+ bp->async_cp_ring->cp_ring_struct->fw_ring_id);
rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
HWRM_CHECK_RESULT();
req.enables = rte_cpu_to_le_32(
HWRM_FUNC_VF_CFG_INPUT_ENABLES_ASYNC_EVENT_CR);
req.async_event_cr = rte_cpu_to_le_16(
- bp->def_cp_ring->cp_ring_struct->fw_ring_id);
+ bp->async_cp_ring->cp_ring_struct->fw_ring_id);
rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
HWRM_CHECK_RESULT();
HWRM_CHECK_RESULT();
- memcpy(mac->addr_bytes, resp->mac_address, ETHER_ADDR_LEN);
+ memcpy(mac->addr_bytes, resp->mac_address, RTE_ETHER_ADDR_LEN);
HWRM_UNLOCK();
stats->q_obytes[idx] = rte_le_to_cpu_64(resp->tx_ucast_bytes);
stats->q_obytes[idx] += rte_le_to_cpu_64(resp->tx_mcast_bytes);
stats->q_obytes[idx] += rte_le_to_cpu_64(resp->tx_bcast_bytes);
- stats->q_errors[idx] += rte_le_to_cpu_64(resp->tx_err_pkts);
}
-
HWRM_UNLOCK();
return rc;
rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
HWRM_CHECK_RESULT();
- HWRM_UNLOCK();
- if (!rc) {
- *entries = rte_le_to_cpu_32(resp->entries);
- *length = rte_le_to_cpu_32(resp->entry_length);
- }
+ *entries = rte_le_to_cpu_32(resp->entries);
+ *length = rte_le_to_cpu_32(resp->entry_length);
+
+ HWRM_UNLOCK();
return rc;
}
if (buf == NULL)
return -ENOMEM;
dma_handle = rte_mem_virt2iova(buf);
- if (dma_handle == 0) {
+ if (dma_handle == RTE_BAD_IOVA) {
PMD_DRV_LOG(ERR,
"unable to map response address to physical memory\n");
return -ENOMEM;
return -ENOMEM;
dma_handle = rte_mem_virt2iova(buf);
- if (dma_handle == 0) {
+ if (dma_handle == RTE_BAD_IOVA) {
PMD_DRV_LOG(ERR,
"unable to map response address to physical memory\n");
return -ENOMEM;
return -ENOMEM;
dma_handle = rte_mem_virt2iova(buf);
- if (dma_handle == 0) {
+ if (dma_handle == RTE_BAD_IOVA) {
PMD_DRV_LOG(ERR,
"unable to map response address to physical memory\n");
return -ENOMEM;
req.max_vnic_id_cnt = rte_cpu_to_le_32(bp->pf.total_vnics);
req.vnic_id_tbl_addr = rte_cpu_to_le_64(rte_mem_virt2iova(vnic_ids));
- if (req.vnic_id_tbl_addr == 0) {
+ if (req.vnic_id_tbl_addr == RTE_BAD_IOVA) {
HWRM_UNLOCK();
PMD_DRV_LOG(ERR,
"unable to map VNIC ID table address to physical memory\n");
return -ENOMEM;
}
rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
- if (rc) {
- HWRM_UNLOCK();
- PMD_DRV_LOG(ERR, "hwrm_func_vf_vnic_query failed rc:%d\n", rc);
- return -1;
- } else if (resp->error_code) {
- rc = rte_le_to_cpu_16(resp->error_code);
- HWRM_UNLOCK();
- PMD_DRV_LOG(ERR, "hwrm_func_vf_vnic_query error %d\n", rc);
- return -1;
- }
+ HWRM_CHECK_RESULT();
rc = rte_le_to_cpu_32(resp->vnic_id_cnt);
HWRM_UNLOCK();
vnic_id_sz = bp->pf.total_vnics * sizeof(*vnic_ids);
vnic_ids = rte_malloc("bnxt_hwrm_vf_vnic_ids_query", vnic_id_sz,
RTE_CACHE_LINE_SIZE);
- if (vnic_ids == NULL) {
- rc = -ENOMEM;
- return rc;
- }
+ if (vnic_ids == NULL)
+ return -ENOMEM;
+
for (sz = 0; sz < vnic_id_sz; sz += getpagesize())
rte_mem_lock_page(((char *)vnic_ids) + sz);
vnic_id_sz = bp->pf.total_vnics * sizeof(*vnic_ids);
vnic_ids = rte_malloc("bnxt_hwrm_vf_vnic_ids_query", vnic_id_sz,
RTE_CACHE_LINE_SIZE);
- if (vnic_ids == NULL) {
- rc = -ENOMEM;
- return rc;
- }
+ if (vnic_ids == NULL)
+ return -ENOMEM;
for (sz = 0; sz < vnic_id_sz; sz += getpagesize())
rte_mem_lock_page(((char *)vnic_ids) + sz);
PMD_DRV_LOG(ERR, "No default VNIC\n");
exit:
rte_free(vnic_ids);
- return -1;
+ return rc;
}
int bnxt_hwrm_set_em_filter(struct bnxt *bp,
if (enables &
HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_SRC_MACADDR)
memcpy(req.src_macaddr, filter->src_macaddr,
- ETHER_ADDR_LEN);
+ RTE_ETHER_ADDR_LEN);
if (enables &
HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_DST_MACADDR)
memcpy(req.dst_macaddr, filter->dst_macaddr,
- ETHER_ADDR_LEN);
+ RTE_ETHER_ADDR_LEN);
if (enables &
HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_OVLAN_VID)
req.ovlan_vid = filter->l2_ovlan;
HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_DST_ID;
req.dst_id = rte_cpu_to_le_16(dst_id);
-
if (filter->ip_addr_type) {
req.ip_addr_type = filter->ip_addr_type;
enables |=
if (enables &
HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_SRC_MACADDR)
memcpy(req.src_macaddr, filter->src_macaddr,
- ETHER_ADDR_LEN);
- //if (enables &
- //HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_DST_MACADDR)
- //memcpy(req.dst_macaddr, filter->dst_macaddr,
- //ETHER_ADDR_LEN);
+ RTE_ETHER_ADDR_LEN);
if (enables &
HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_ETHERTYPE)
req.ethertype = rte_cpu_to_be_16(filter->ethertype);
return 0;
}
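+/*
+ * Program the Thor RSS indirection table as Rx ring/completion ring
+ * ID pairs, skipping stopped queues so only active rings receive
+ * traffic.
+ */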
+static int
+bnxt_vnic_rss_configure_thor(struct bnxt *bp, struct bnxt_vnic_info *vnic)
+{
+ struct hwrm_vnic_rss_cfg_output *resp = bp->hwrm_cmd_resp_addr;
+ uint8_t *rx_queue_state = bp->eth_dev->data->rx_queue_state;
+ struct hwrm_vnic_rss_cfg_input req = {.req_type = 0 };
+ struct bnxt_rx_queue **rxqs = bp->rx_queues;
+ uint16_t *ring_tbl = vnic->rss_table;
+ int nr_ctxs = vnic->num_lb_ctxts;
+ int max_rings = bp->rx_nr_rings;
+ int i, j, k, cnt;
+ int rc = 0;
+
+ for (i = 0, k = 0; i < nr_ctxs; i++) {
+ struct bnxt_rx_ring_info *rxr;
+ struct bnxt_cp_ring_info *cpr;
+
+ HWRM_PREP(req, VNIC_RSS_CFG, BNXT_USE_CHIMP_MB);
+
+ req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
+ req.hash_type = rte_cpu_to_le_32(vnic->hash_type);
+ req.hash_mode_flags = vnic->hash_mode;
+
+ req.ring_grp_tbl_addr =
+ rte_cpu_to_le_64(vnic->rss_table_dma_addr +
+ i * BNXT_RSS_ENTRIES_PER_CTX_THOR *
+ 2 * sizeof(*ring_tbl));
+ req.hash_key_tbl_addr =
+ rte_cpu_to_le_64(vnic->rss_hash_key_dma_addr);
+
+ req.ring_table_pair_index = i;
+ req.rss_ctx_idx = rte_cpu_to_le_16(vnic->fw_grp_ids[i]);
+
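+ /* Each Thor RSS context maps 64 ring-table pair entries. */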
+ for (j = 0; j < 64; j++) {
+ uint16_t ring_id;
+
+ /* Find next active ring. */
+ for (cnt = 0; cnt < max_rings; cnt++) {
+ if (rx_queue_state[k] !=
+ RTE_ETH_QUEUE_STATE_STOPPED)
+ break;
+ if (++k == max_rings)
+ k = 0;
+ }
+
+ /* Return if no rings are active; drop the HWRM lock first. */
+ if (cnt == max_rings) {
+ HWRM_UNLOCK();
+ return 0;
+ }
+
+ /* Add rx/cp ring pair to RSS table. */
+ rxr = rxqs[k]->rx_ring;
+ cpr = rxqs[k]->cp_ring;
+
+ ring_id = rxr->rx_ring_struct->fw_ring_id;
+ *ring_tbl++ = rte_cpu_to_le_16(ring_id);
+ ring_id = cpr->cp_ring_struct->fw_ring_id;
+ *ring_tbl++ = rte_cpu_to_le_16(ring_id);
+
+ if (++k == max_rings)
+ k = 0;
+ }
+ rc = bnxt_hwrm_send_message(bp, &req, sizeof(req),
+ BNXT_USE_CHIMP_MB);
+
+ HWRM_CHECK_RESULT();
+ HWRM_UNLOCK();
+ }
+
+ return rc;
+}
+
int bnxt_vnic_rss_configure(struct bnxt *bp, struct bnxt_vnic_info *vnic)
{
unsigned int rss_idx, fw_idx, i;
+ if (!(vnic->rss_table && vnic->hash_type))
+ return 0;
+
+ if (BNXT_CHIP_THOR(bp))
+ return bnxt_vnic_rss_configure_thor(bp, vnic);
+
+ if (vnic->fw_vnic_id == INVALID_HW_RING_ID)
+ return 0;
+
if (vnic->rss_table && vnic->hash_type) {
/*
* Fill the RSS hash & redirection table with
}
if (i == bp->rx_cp_nr_rings)
return 0;
- vnic->rss_table[rss_idx] =
- vnic->fw_grp_ids[fw_idx];
+ vnic->rss_table[rss_idx] = vnic->fw_grp_ids[fw_idx];
}
return bnxt_hwrm_vnic_rss_cfg(bp, vnic);
}
+
return 0;
}
req->flags = rte_cpu_to_le_16(flags);
}
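+/*
+ * Query RING_AGGINT_QCAPS and program Thor interrupt coalescing from
+ * the reported limits: maximum completion DMA aggregation with the
+ * minimum aggregation DMA timer.
+ */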
+static int bnxt_hwrm_set_coal_params_thor(struct bnxt *bp,
+ struct hwrm_ring_cmpl_ring_cfg_aggint_params_input *agg_req)
+{
+ struct hwrm_ring_aggint_qcaps_input req = {0};
+ struct hwrm_ring_aggint_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
+ uint32_t enables;
+ uint16_t flags;
+ int rc;
+
+ HWRM_PREP(req, RING_AGGINT_QCAPS, BNXT_USE_CHIMP_MB);
+ rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
+ HWRM_CHECK_RESULT();
+
+ agg_req->num_cmpl_dma_aggr = resp->num_cmpl_dma_aggr_max;
+ agg_req->cmpl_aggr_dma_tmr = resp->cmpl_aggr_dma_tmr_min;
+
+ flags = HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS_INPUT_FLAGS_TIMER_RESET |
+ HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS_INPUT_FLAGS_RING_IDLE;
+ agg_req->flags = rte_cpu_to_le_16(flags);
+ enables =
+ HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS_INPUT_ENABLES_CMPL_AGGR_DMA_TMR |
+ HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS_INPUT_ENABLES_NUM_CMPL_DMA_AGGR;
+ agg_req->enables = rte_cpu_to_le_32(enables);
+
+ HWRM_UNLOCK();
+ return rc;
+}
+
int bnxt_hwrm_set_ring_coal(struct bnxt *bp,
struct bnxt_coal *coal, uint16_t ring_id)
{
bp->hwrm_cmd_resp_addr;
int rc;
- /* Set ring coalesce parameters only for Stratus 100G NIC */
- if (!bnxt_stratus_device(bp))
+ /* Set ring coalesce parameters only for Thor-based and Stratus NICs */
+ if (BNXT_CHIP_THOR(bp)) {
+ rc = bnxt_hwrm_set_coal_params_thor(bp, &req);
+ if (rc)
+ return rc;
+ } else if (bnxt_stratus_device(bp)) {
+ bnxt_hwrm_set_coal_params(coal, &req);
+ } else {
return 0;
+ }
HWRM_PREP(req, RING_CMPL_RING_CFG_AGGINT_PARAMS, BNXT_USE_CHIMP_MB);
- bnxt_hwrm_set_coal_params(coal, &req);
req.ring_id = rte_cpu_to_le_16(ring_id);
rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
HWRM_CHECK_RESULT();
return 0;
}
+#define BNXT_RTE_MEMZONE_FLAG (RTE_MEMZONE_1GB | RTE_MEMZONE_IOVA_CONTIG)
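+/*
+ * Thor-based adapters keep context structures in host memory. Query
+ * the FW for the required entry counts and sizes; skipped for older
+ * HWRM specs, VFs, and when the context memory is already allocated.
+ */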
+int bnxt_hwrm_func_backing_store_qcaps(struct bnxt *bp)
+{
+ struct hwrm_func_backing_store_qcaps_input req = {0};
+ struct hwrm_func_backing_store_qcaps_output *resp =
+ bp->hwrm_cmd_resp_addr;
+ struct bnxt_ctx_pg_info *ctx_pg;
+ struct bnxt_ctx_mem_info *ctx;
+ int total_alloc_len;
+ int rc, i;
+
+ if (!BNXT_CHIP_THOR(bp) ||
+ bp->hwrm_spec_code < HWRM_VERSION_1_9_2 ||
+ BNXT_VF(bp) ||
+ bp->ctx)
+ return 0;
+
+ HWRM_PREP(req, FUNC_BACKING_STORE_QCAPS, BNXT_USE_CHIMP_MB);
+ rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
+ HWRM_CHECK_RESULT_SILENT();
+
+ total_alloc_len = sizeof(*ctx);
+ ctx = rte_zmalloc("bnxt_ctx_mem", total_alloc_len,
+ RTE_CACHE_LINE_SIZE);
+ if (!ctx) {
+ rc = -ENOMEM;
+ goto ctx_err;
+ }
+
+ ctx_pg = rte_malloc("bnxt_ctx_pg_mem",
+ sizeof(*ctx_pg) * BNXT_MAX_Q,
+ RTE_CACHE_LINE_SIZE);
+ if (!ctx_pg) {
+ rte_free(ctx); /* ctx has not been published via bp->ctx yet */
+ rc = -ENOMEM;
+ goto ctx_err;
+ }
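+ /* Assign one bnxt_ctx_pg_info slot from the array to each TQM ring. */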
+ for (i = 0; i < BNXT_MAX_Q; i++, ctx_pg++)
+ ctx->tqm_mem[i] = ctx_pg;
+
+ bp->ctx = ctx;
+ ctx->qp_max_entries = rte_le_to_cpu_32(resp->qp_max_entries);
+ ctx->qp_min_qp1_entries =
+ rte_le_to_cpu_16(resp->qp_min_qp1_entries);
+ ctx->qp_max_l2_entries =
+ rte_le_to_cpu_16(resp->qp_max_l2_entries);
+ ctx->qp_entry_size = rte_le_to_cpu_16(resp->qp_entry_size);
+ ctx->srq_max_l2_entries =
+ rte_le_to_cpu_16(resp->srq_max_l2_entries);
+ ctx->srq_max_entries = rte_le_to_cpu_32(resp->srq_max_entries);
+ ctx->srq_entry_size = rte_le_to_cpu_16(resp->srq_entry_size);
+ ctx->cq_max_l2_entries =
+ rte_le_to_cpu_16(resp->cq_max_l2_entries);
+ ctx->cq_max_entries = rte_le_to_cpu_32(resp->cq_max_entries);
+ ctx->cq_entry_size = rte_le_to_cpu_16(resp->cq_entry_size);
+ ctx->vnic_max_vnic_entries =
+ rte_le_to_cpu_16(resp->vnic_max_vnic_entries);
+ ctx->vnic_max_ring_table_entries =
+ rte_le_to_cpu_16(resp->vnic_max_ring_table_entries);
+ ctx->vnic_entry_size = rte_le_to_cpu_16(resp->vnic_entry_size);
+ ctx->stat_max_entries =
+ rte_le_to_cpu_32(resp->stat_max_entries);
+ ctx->stat_entry_size = rte_le_to_cpu_16(resp->stat_entry_size);
+ ctx->tqm_entry_size = rte_le_to_cpu_16(resp->tqm_entry_size);
+ ctx->tqm_min_entries_per_ring =
+ rte_le_to_cpu_32(resp->tqm_min_entries_per_ring);
+ ctx->tqm_max_entries_per_ring =
+ rte_le_to_cpu_32(resp->tqm_max_entries_per_ring);
+ ctx->tqm_entries_multiple = resp->tqm_entries_multiple;
+ if (!ctx->tqm_entries_multiple)
+ ctx->tqm_entries_multiple = 1;
+ ctx->mrav_max_entries =
+ rte_le_to_cpu_32(resp->mrav_max_entries);
+ ctx->mrav_entry_size = rte_le_to_cpu_16(resp->mrav_entry_size);
+ ctx->tim_entry_size = rte_le_to_cpu_16(resp->tim_entry_size);
+ ctx->tim_max_entries = rte_le_to_cpu_32(resp->tim_max_entries);
+ctx_err:
+ HWRM_UNLOCK();
+ return rc;
+}
+
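+/*
+ * Pass the geometry and DMA addresses of the allocated backing store
+ * regions to the FW; 'enables' selects which context types this call
+ * configures.
+ */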
+int bnxt_hwrm_func_backing_store_cfg(struct bnxt *bp, uint32_t enables)
+{
+ struct hwrm_func_backing_store_cfg_input req = {0};
+ struct hwrm_func_backing_store_cfg_output *resp =
+ bp->hwrm_cmd_resp_addr;
+ struct bnxt_ctx_mem_info *ctx = bp->ctx;
+ struct bnxt_ctx_pg_info *ctx_pg;
+ uint32_t *num_entries;
+ uint64_t *pg_dir;
+ uint8_t *pg_attr;
+ uint32_t ena;
+ int i, rc;
+
+ if (!ctx)
+ return 0;
+
+ HWRM_PREP(req, FUNC_BACKING_STORE_CFG, BNXT_USE_CHIMP_MB);
+ req.enables = rte_cpu_to_le_32(enables);
+
+ if (enables & HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_QP) {
+ ctx_pg = &ctx->qp_mem;
+ req.qp_num_entries = rte_cpu_to_le_32(ctx_pg->entries);
+ req.qp_num_qp1_entries =
+ rte_cpu_to_le_16(ctx->qp_min_qp1_entries);
+ req.qp_num_l2_entries =
+ rte_cpu_to_le_16(ctx->qp_max_l2_entries);
+ req.qp_entry_size = rte_cpu_to_le_16(ctx->qp_entry_size);
+ bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
+ &req.qpc_pg_size_qpc_lvl,
+ &req.qpc_page_dir);
+ }
+
+ if (enables & HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_SRQ) {
+ ctx_pg = &ctx->srq_mem;
+ req.srq_num_entries = rte_cpu_to_le_32(ctx_pg->entries);
+ req.srq_num_l2_entries =
+ rte_cpu_to_le_16(ctx->srq_max_l2_entries);
+ req.srq_entry_size = rte_cpu_to_le_16(ctx->srq_entry_size);
+ bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
+ &req.srq_pg_size_srq_lvl,
+ &req.srq_page_dir);
+ }
+
+ if (enables & HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_CQ) {
+ ctx_pg = &ctx->cq_mem;
+ req.cq_num_entries = rte_cpu_to_le_32(ctx_pg->entries);
+ req.cq_num_l2_entries =
+ rte_cpu_to_le_16(ctx->cq_max_l2_entries);
+ req.cq_entry_size = rte_cpu_to_le_16(ctx->cq_entry_size);
+ bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
+ &req.cq_pg_size_cq_lvl,
+ &req.cq_page_dir);
+ }
+
+ if (enables & HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_VNIC) {
+ ctx_pg = &ctx->vnic_mem;
+ req.vnic_num_vnic_entries =
+ rte_cpu_to_le_16(ctx->vnic_max_vnic_entries);
+ req.vnic_num_ring_table_entries =
+ rte_cpu_to_le_16(ctx->vnic_max_ring_table_entries);
+ req.vnic_entry_size = rte_cpu_to_le_16(ctx->vnic_entry_size);
+ bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
+ &req.vnic_pg_size_vnic_lvl,
+ &req.vnic_page_dir);
+ }
+
+ if (enables & HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_STAT) {
+ ctx_pg = &ctx->stat_mem;
+ req.stat_num_entries = rte_cpu_to_le_32(ctx->stat_max_entries);
+ req.stat_entry_size = rte_cpu_to_le_16(ctx->stat_entry_size);
+ bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
+ &req.stat_pg_size_stat_lvl,
+ &req.stat_page_dir);
+ }
+
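+ /*
+ * The TQM slow-path ring and the eight TQM rings have consecutive
+ * enable bits and consecutive request fields, so step through all
+ * nine of them with incrementing pointers.
+ */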
+ req.tqm_entry_size = rte_cpu_to_le_16(ctx->tqm_entry_size);
+ num_entries = &req.tqm_sp_num_entries;
+ pg_attr = &req.tqm_sp_pg_size_tqm_sp_lvl;
+ pg_dir = &req.tqm_sp_page_dir;
+ ena = HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_TQM_SP;
+ for (i = 0; i < 9; i++, num_entries++, pg_attr++, pg_dir++, ena <<= 1) {
+ if (!(enables & ena))
+ continue;
+
+ ctx_pg = ctx->tqm_mem[i];
+ *num_entries = rte_cpu_to_le_32(ctx_pg->entries);
+ bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem, pg_attr, pg_dir);
+ }
+
+ rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
+ HWRM_CHECK_RESULT();
+ HWRM_UNLOCK();
+
+ return rc;
+}
+
int bnxt_hwrm_ext_port_qstats(struct bnxt *bp)
{
struct hwrm_port_qstats_ext_input req = {0};
req.port_id = rte_cpu_to_le_16(pf->port_id);
if (bp->flags & BNXT_FLAG_EXT_TX_PORT_STATS) {
req.tx_stat_host_addr =
- rte_cpu_to_le_64(bp->hw_tx_port_stats_map);
+ rte_cpu_to_le_64(bp->hw_tx_port_stats_ext_map);
req.tx_stat_size =
rte_cpu_to_le_16(sizeof(struct tx_port_stats_ext));
}
if (bp->flags & BNXT_FLAG_EXT_RX_PORT_STATS) {
req.rx_stat_host_addr =
- rte_cpu_to_le_64(bp->hw_rx_port_stats_map);
+ rte_cpu_to_le_64(bp->hw_rx_port_stats_ext_map);
req.rx_stat_size =
rte_cpu_to_le_16(sizeof(struct rx_port_stats_ext));
}
return rc;
}
+
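+/*
+ * Redirect tunnel traffic of the given type to this function by
+ * allocating a CFA tunnel-type redirect entry with our fw_fid as
+ * the destination.
+ */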
+int
+bnxt_hwrm_tunnel_redirect(struct bnxt *bp, uint8_t type)
+{
+ struct hwrm_cfa_redirect_tunnel_type_alloc_input req = {0};
+ struct hwrm_cfa_redirect_tunnel_type_alloc_output *resp =
+ bp->hwrm_cmd_resp_addr;
+ int rc = 0;
+
+ HWRM_PREP(req, CFA_REDIRECT_TUNNEL_TYPE_ALLOC, BNXT_USE_CHIMP_MB);
+ req.tunnel_type = type;
+ req.dest_fid = rte_cpu_to_le_16(bp->fw_fid);
+ rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
+ HWRM_CHECK_RESULT();
+
+ HWRM_UNLOCK();
+
+ return rc;
+}
+
+int
+bnxt_hwrm_tunnel_redirect_free(struct bnxt *bp, uint8_t type)
+{
+ struct hwrm_cfa_redirect_tunnel_type_free_input req = {0};
+ struct hwrm_cfa_redirect_tunnel_type_free_output *resp =
+ bp->hwrm_cmd_resp_addr;
+ int rc = 0;
+
+ HWRM_PREP(req, CFA_REDIRECT_TUNNEL_TYPE_FREE, BNXT_USE_CHIMP_MB);
+ req.tunnel_type = type;
+ req.dest_fid = rte_cpu_to_le_16(bp->fw_fid);
+ rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
+ HWRM_CHECK_RESULT();
+
+ HWRM_UNLOCK();
+
+ return rc;
+}
+
+int bnxt_hwrm_tunnel_redirect_query(struct bnxt *bp, uint32_t *type)
+{
+ struct hwrm_cfa_redirect_query_tunnel_type_input req = {0};
+ struct hwrm_cfa_redirect_query_tunnel_type_output *resp =
+ bp->hwrm_cmd_resp_addr;
+ int rc = 0;
+
+ HWRM_PREP(req, CFA_REDIRECT_QUERY_TUNNEL_TYPE, BNXT_USE_CHIMP_MB);
+ req.src_fid = rte_cpu_to_le_16(bp->fw_fid);
+ rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
+ HWRM_CHECK_RESULT();
+
+ if (type)
+ *type = rte_le_to_cpu_32(resp->tunnel_mask);
+
+ HWRM_UNLOCK();
+
+ return rc;
+}
+
+int bnxt_hwrm_tunnel_redirect_info(struct bnxt *bp, uint8_t tun_type,
+ uint16_t *dst_fid)
+{
+ struct hwrm_cfa_redirect_tunnel_type_info_input req = {0};
+ struct hwrm_cfa_redirect_tunnel_type_info_output *resp =
+ bp->hwrm_cmd_resp_addr;
+ int rc = 0;
+
+ HWRM_PREP(req, CFA_REDIRECT_TUNNEL_TYPE_INFO, BNXT_USE_CHIMP_MB);
+ req.src_fid = rte_cpu_to_le_16(bp->fw_fid);
+ req.tunnel_type = tun_type;
+ rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
+ HWRM_CHECK_RESULT();
+
+ if (dst_fid)
+ *dst_fid = rte_le_to_cpu_16(resp->dest_fid);
+
+ PMD_DRV_LOG(DEBUG, "dst_fid: %x\n", rte_le_to_cpu_16(resp->dest_fid));
+
+ HWRM_UNLOCK();
+
+ return rc;
+}
+
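+/* Program the VF's default MAC address into the FW via FUNC_VF_CFG. */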
+int bnxt_hwrm_set_mac(struct bnxt *bp)
+{
+ struct hwrm_func_vf_cfg_output *resp = bp->hwrm_cmd_resp_addr;
+ struct hwrm_func_vf_cfg_input req = {0};
+ int rc = 0;
+
+ if (!BNXT_VF(bp))
+ return 0;
+
+ HWRM_PREP(req, FUNC_VF_CFG, BNXT_USE_CHIMP_MB);
+
+ req.enables =
+ rte_cpu_to_le_32(HWRM_FUNC_VF_CFG_INPUT_ENABLES_DFLT_MAC_ADDR);
+ memcpy(req.dflt_mac_addr, bp->mac_addr, RTE_ETHER_ADDR_LEN);
+
+ rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
+
+ HWRM_CHECK_RESULT();
+
+ memcpy(bp->dflt_mac_addr, bp->mac_addr, RTE_ETHER_ADDR_LEN);
+ HWRM_UNLOCK();
+
+ return rc;
+}
+
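+/*
+ * Notify the FW that the driver is bringing the port up or down; on the
+ * "up" path the response flags indicate whether a hot FW reset happened
+ * while the port was down.
+ */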
+int bnxt_hwrm_if_change(struct bnxt *bp, bool up)
+{
+ struct hwrm_func_drv_if_change_output *resp = bp->hwrm_cmd_resp_addr;
+ struct hwrm_func_drv_if_change_input req = {0};
+ uint32_t flags;
+ int rc;
+
+ if (!(bp->flags & BNXT_FLAG_FW_CAP_IF_CHANGE))
+ return 0;
+
+ /* Do not issue FUNC_DRV_IF_CHANGE during reset recovery.
+ * If FUNC_DRV_IF_CHANGE is sent with the "down" flag before
+ * FUNC_DRV_UNRGTR, the FW can reset before the driver has
+ * unregistered, so skip the notification in that window.
+ */
+ if (!up && (bp->flags & BNXT_FLAG_FW_RESET))
+ return 0;
+
+ HWRM_PREP(req, FUNC_DRV_IF_CHANGE, BNXT_USE_CHIMP_MB);
+
+ if (up)
+ req.flags =
+ rte_cpu_to_le_32(HWRM_FUNC_DRV_IF_CHANGE_INPUT_FLAGS_UP);
+
+ rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
+
+ HWRM_CHECK_RESULT();
+ flags = rte_le_to_cpu_32(resp->flags);
+ HWRM_UNLOCK();
+
+ if (!up)
+ return 0;
+
+ if (flags & HWRM_FUNC_DRV_IF_CHANGE_OUTPUT_FLAGS_HOT_FW_RESET_DONE) {
+ PMD_DRV_LOG(INFO, "FW reset happened while port was down\n");
+ bp->flags |= BNXT_FLAG_IF_CHANGE_HOT_FW_RESET_DONE;
+ }
+
+ return 0;
+}
+
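+/*
+ * Fetch the FW error-recovery parameters: the recovery owner (host or
+ * co-processor), the polling and wait intervals, and the register map
+ * used to track FW health and drive resets.
+ */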
+int bnxt_hwrm_error_recovery_qcfg(struct bnxt *bp)
+{
+ struct hwrm_error_recovery_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
+ struct bnxt_error_recovery_info *info = bp->recovery_info;
+ struct hwrm_error_recovery_qcfg_input req = {0};
+ uint32_t flags = 0;
+ unsigned int i;
+ int rc;
+
+ /* Older FW does not have error recovery support */
+ if (!(bp->flags & BNXT_FLAG_FW_CAP_ERROR_RECOVERY))
+ return 0;
+
+ if (!info) {
+ info = rte_zmalloc("bnxt_hwrm_error_recovery_qcfg",
+ sizeof(*info), 0);
+ if (info == NULL)
+ return -ENOMEM;
+ bp->recovery_info = info;
+ } else {
+ memset(info, 0, sizeof(*info));
+ }
+
+ HWRM_PREP(req, ERROR_RECOVERY_QCFG, BNXT_USE_CHIMP_MB);
+
+ rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
+
+ HWRM_CHECK_RESULT();
+
+ flags = rte_le_to_cpu_32(resp->flags);
+ if (flags & HWRM_ERROR_RECOVERY_QCFG_OUTPUT_FLAGS_HOST)
+ info->flags |= BNXT_FLAG_ERROR_RECOVERY_HOST;
+ else if (flags & HWRM_ERROR_RECOVERY_QCFG_OUTPUT_FLAGS_CO_CPU)
+ info->flags |= BNXT_FLAG_ERROR_RECOVERY_CO_CPU;
+
+ if ((info->flags & BNXT_FLAG_ERROR_RECOVERY_CO_CPU) &&
+ !(bp->flags & BNXT_FLAG_KONG_MB_EN)) {
+ rc = -EINVAL;
+ goto err;
+ }
+
+ /* FW returned values are in units of 100msec */
+ info->driver_polling_freq =
+ rte_le_to_cpu_32(resp->driver_polling_freq) * 100;
+ info->master_func_wait_period =
+ rte_le_to_cpu_32(resp->master_func_wait_period) * 100;
+ info->normal_func_wait_period =
+ rte_le_to_cpu_32(resp->normal_func_wait_period) * 100;
+ info->master_func_wait_period_after_reset =
+ rte_le_to_cpu_32(resp->master_func_wait_period_after_reset) * 100;
+ info->max_bailout_time_after_reset =
+ rte_le_to_cpu_32(resp->max_bailout_time_after_reset) * 100;
+ info->status_regs[BNXT_FW_STATUS_REG] =
+ rte_le_to_cpu_32(resp->fw_health_status_reg);
+ info->status_regs[BNXT_FW_HEARTBEAT_CNT_REG] =
+ rte_le_to_cpu_32(resp->fw_heartbeat_reg);
+ info->status_regs[BNXT_FW_RECOVERY_CNT_REG] =
+ rte_le_to_cpu_32(resp->fw_reset_cnt_reg);
+ info->status_regs[BNXT_FW_RESET_INPROG_REG] =
+ rte_le_to_cpu_32(resp->reset_inprogress_reg);
+ info->reg_array_cnt =
+ rte_le_to_cpu_32(resp->reg_array_cnt);
+
+ if (info->reg_array_cnt >= BNXT_NUM_RESET_REG) {
+ rc = -EINVAL;
+ goto err;
+ }
+
+ for (i = 0; i < info->reg_array_cnt; i++) {
+ info->reset_reg[i] =
+ rte_le_to_cpu_32(resp->reset_reg[i]);
+ info->reset_reg_val[i] =
+ rte_le_to_cpu_32(resp->reset_reg_val[i]);
+ info->delay_after_reset[i] =
+ resp->delay_after_reset[i];
+ }
+err:
+ HWRM_UNLOCK();
+
+ /* Map the FW status registers */
+ if (!rc)
+ rc = bnxt_map_fw_health_status_regs(bp);
+
+ if (rc) {
+ rte_free(bp->recovery_info);
+ bp->recovery_info = NULL;
+ }
+ return rc;
+}
+
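+/* Request a graceful, FW-driven chip self-reset; PF only. */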
+int bnxt_hwrm_fw_reset(struct bnxt *bp)
+{
+ struct hwrm_fw_reset_output *resp = bp->hwrm_cmd_resp_addr;
+ struct hwrm_fw_reset_input req = {0};
+ int rc;
+
+ if (!BNXT_PF(bp))
+ return -EOPNOTSUPP;
+
+ HWRM_PREP(req, FW_RESET, BNXT_USE_KONG(bp));
+
+ req.embedded_proc_type =
+ HWRM_FW_RESET_INPUT_EMBEDDED_PROC_TYPE_CHIP;
+ req.selfrst_status =
+ HWRM_FW_RESET_INPUT_SELFRST_STATUS_SELFRSTASAP;
+ req.flags = HWRM_FW_RESET_INPUT_FLAGS_RESET_GRACEFUL;
+
+ rc = bnxt_hwrm_send_message(bp, &req, sizeof(req),
+ BNXT_USE_KONG(bp));
+
+ HWRM_CHECK_RESULT();
+ HWRM_UNLOCK();
+
+ return rc;
+}
+
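+/*
+ * Read a PTP timestamp (TX path, RX path, or current time) from the FW;
+ * the 64-bit value is returned split across two 32-bit words.
+ */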
+int bnxt_hwrm_port_ts_query(struct bnxt *bp, uint8_t path, uint64_t *timestamp)
+{
+ struct hwrm_port_ts_query_output *resp = bp->hwrm_cmd_resp_addr;
+ struct hwrm_port_ts_query_input req = {0};
+ struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
+ uint32_t flags = 0;
+ int rc;
+
+ if (!ptp)
+ return 0;
+
+ HWRM_PREP(req, PORT_TS_QUERY, BNXT_USE_CHIMP_MB);
+
+ switch (path) {
+ case BNXT_PTP_FLAGS_PATH_TX:
+ flags |= HWRM_PORT_TS_QUERY_INPUT_FLAGS_PATH_TX;
+ break;
+ case BNXT_PTP_FLAGS_PATH_RX:
+ flags |= HWRM_PORT_TS_QUERY_INPUT_FLAGS_PATH_RX;
+ break;
+ case BNXT_PTP_FLAGS_CURRENT_TIME:
+ flags |= HWRM_PORT_TS_QUERY_INPUT_FLAGS_CURRENT_TIME;
+ break;
+ }
+
+ req.flags = rte_cpu_to_le_32(flags);
+ req.port_id = rte_cpu_to_le_16(bp->pf.port_id);
+
+ rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
+
+ HWRM_CHECK_RESULT();
+
+ if (timestamp) {
+ *timestamp = rte_le_to_cpu_32(resp->ptp_msg_ts[0]);
+ *timestamp |=
+ (uint64_t)(rte_le_to_cpu_32(resp->ptp_msg_ts[1])) << 32;
+ }
+ HWRM_UNLOCK();
+
+ return rc;
+}
+
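+/*
+ * Query advanced CFA flow-management capabilities (PF or trusted VF
+ * only) and record whether source L2 header filtering is supported.
+ */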
+int bnxt_hwrm_cfa_adv_flow_mgmt_qcaps(struct bnxt *bp)
+{
+ struct hwrm_cfa_adv_flow_mgnt_qcaps_output *resp =
+ bp->hwrm_cmd_resp_addr;
+ struct hwrm_cfa_adv_flow_mgnt_qcaps_input req = {0};
+ uint32_t flags = 0;
+ int rc = 0;
+
+ if (!(bp->flags & BNXT_FLAG_ADV_FLOW_MGMT))
+ return rc;
+
+ if (!(BNXT_PF(bp) || BNXT_VF_IS_TRUSTED(bp))) {
+ PMD_DRV_LOG(DEBUG,
+ "Not a PF or trusted VF. Command not supported\n");
+ return 0;
+ }
+
+ HWRM_PREP(req, CFA_ADV_FLOW_MGNT_QCAPS, BNXT_USE_KONG(bp));
+ rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_KONG(bp));
+
+ HWRM_CHECK_RESULT();
+ flags = rte_le_to_cpu_32(resp->flags);
+ HWRM_UNLOCK();
+
+ if (flags & HWRM_CFA_ADV_FLOW_MGNT_QCAPS_L2_HDR_SRC_FILTER_EN) {
+ bp->flow_flags |= BNXT_FLOW_FLAG_L2_HDR_SRC_FILTER_EN;
+ PMD_DRV_LOG(INFO, "Source L2 header filtering enabled\n");
+ }
+
+ return rc;
+}