#define HWRM_CMD_TIMEOUT 6000000
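/* Spec codes pack (major << 16) | (minor << 8) | update, e.g. 0x10803 is 1.8.3. */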
#define HWRM_SPEC_CODE_1_8_3 0x10803
#define HWRM_VERSION_1_9_1 0x10901
+#define HWRM_VERSION_1_9_2 0x10903
struct bnxt_plcmodes_cfg {
uint32_t flags;
return 1 << page_getenum(size);
}
+static void bnxt_hwrm_set_pg_attr(struct bnxt_ring_mem_info *rmem,
+ uint8_t *pg_attr,
+ uint64_t *pg_dir)
+{
+ if (rmem->nr_pages > 1) {
+ *pg_attr = 1;
+ *pg_dir = rte_cpu_to_le_64(rmem->pg_tbl_map);
+ } else {
+ *pg_dir = rte_cpu_to_le_64(rmem->dma_arr[0]);
+ }
+}
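+
+ /*
+  * Illustrative usage sketch (not part of this patch). The helper only
+  * writes *pg_attr on the indirect (nr_pages > 1) path, so callers rely
+  * on the destination having been zeroed for the direct, single-page
+  * case:
+  *
+  *     uint8_t lvl = 0;     // stays 0 (direct page) when nr_pages == 1
+  *     uint64_t dir = 0;
+  *     bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem, &lvl, &dir);
+  */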
+
/*
* HWRM Functions (sent to HWRM)
* These are named bnxt_hwrm_*() and return -1 if bnxt_hwrm_send_message()
uint16_t mb_trigger_offset = use_kong_mb ?
GRCPF_REG_KONG_COMM_TRIGGER : GRCPF_REG_CHIMP_COMM_TRIGGER;
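/*
 * Short commands: the full request is copied into a host DMA buffer and a
 * small hwrm_short_input descriptor referencing it is written to the
 * mailbox instead. This path is taken when the firmware requires it or
 * when the request no longer fits the mailbox (msg_len > max_req_len).
 */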
- if (bp->flags & BNXT_FLAG_SHORT_CMD) {
+ if (bp->flags & BNXT_FLAG_SHORT_CMD ||
+ msg_len > bp->max_req_len) {
void *short_cmd_req = bp->hwrm_short_cmd_req_addr;
- memset(short_cmd_req, 0, bp->max_req_len);
+ memset(short_cmd_req, 0, bp->hwrm_max_ext_req_len);
memcpy(short_cmd_req, req, msg_len);
short_input.req_type = rte_cpu_to_le_16(req->req_type);
for (i = 0; i < HWRM_CMD_TIMEOUT; i++) {
/* Sanity check on the resp->resp_len */
rte_rmb();
- if (resp->resp_len && resp->resp_len <=
- bp->max_resp_len) {
+ if (resp->resp_len && resp->resp_len <= bp->max_resp_len) {
/* Last byte of resp contains the valid key */
valid = (uint8_t *)resp + resp->resp_len - 1;
if (*valid == HWRM_RESP_VALID_KEY)
rc = __bnxt_hwrm_func_qcaps(bp);
if (!rc && bp->hwrm_spec_code >= HWRM_SPEC_CODE_1_8_3) {
+ rc = bnxt_alloc_ctx_mem(bp);
+ if (rc)
+ return rc;
+
rc = bnxt_hwrm_func_resc_qcaps(bp);
if (!rc)
bp->flags |= BNXT_FLAG_NEW_RM;
HWRM_PREP(req, FUNC_VF_CFG, BNXT_USE_CHIMP_MB);
- req.enables = rte_cpu_to_le_32
- (HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_RX_RINGS |
- HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_TX_RINGS |
- HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_STAT_CTXS |
- HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_CMPL_RINGS |
- HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_HW_RING_GRPS |
- HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_VNICS);
+ enables = HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_RX_RINGS |
+ HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_TX_RINGS |
+ HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_STAT_CTXS |
+ HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_CMPL_RINGS |
+ HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_VNICS;
+
+ if (BNXT_HAS_RING_GRPS(bp)) {
+ enables |= HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_HW_RING_GRPS;
+ req.num_hw_ring_grps = rte_cpu_to_le_16(bp->rx_nr_rings);
+ }
req.num_tx_rings = rte_cpu_to_le_16(bp->tx_nr_rings);
req.num_rx_rings = rte_cpu_to_le_16(bp->rx_nr_rings *
req.num_stat_ctxs = rte_cpu_to_le_16(bp->rx_nr_rings + bp->tx_nr_rings);
req.num_cmpl_rings = rte_cpu_to_le_16(bp->rx_nr_rings +
bp->tx_nr_rings);
- req.num_hw_ring_grps = rte_cpu_to_le_16(bp->rx_nr_rings);
req.num_vnics = rte_cpu_to_le_16(bp->rx_nr_rings);
if (bp->vf_resv_strategy ==
HWRM_FUNC_RESOURCE_QCAPS_OUTPUT_VF_RESV_STRATEGY_MINIMAL_STATIC) {
- enables = HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_VNICS |
- HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_L2_CTXS |
- HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_RSSCOS_CTXS;
- req.enables |= rte_cpu_to_le_32(enables);
+ enables |= HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_VNICS |
+ HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_L2_CTXS |
+ HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_RSSCOS_CTXS;
req.num_rsscos_ctxs = rte_cpu_to_le_16(BNXT_VF_RSV_NUM_RSS_CTX);
req.num_l2_ctxs = rte_cpu_to_le_16(BNXT_VF_RSV_NUM_L2_CTX);
req.num_vnics = rte_cpu_to_le_16(BNXT_VF_RSV_NUM_VNIC);
HWRM_FUNC_VF_CFG_INPUT_FLAGS_STAT_CTX_ASSETS_TEST |
HWRM_FUNC_VF_CFG_INPUT_FLAGS_VNIC_ASSETS_TEST;
+ if (test && BNXT_HAS_RING_GRPS(bp))
+ flags |= HWRM_FUNC_VF_CFG_INPUT_FLAGS_RING_GRP_ASSETS_TEST;
+
req.flags = rte_cpu_to_le_32(flags);
+ req.enables |= rte_cpu_to_le_32(enables);
rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
bp->max_vnics = rte_le_to_cpu_16(resp->max_vnics);
bp->max_stat_ctx = rte_le_to_cpu_16(resp->max_stat_ctx);
}
+ bp->max_nq_rings = rte_le_to_cpu_16(resp->max_msix);
bp->vf_resv_strategy = rte_le_to_cpu_16(resp->vf_reservation_strategy);
if (bp->vf_resv_strategy >
HWRM_FUNC_RESOURCE_QCAPS_OUTPUT_VF_RESV_STRATEGY_MINIMAL_STATIC)
rc = -EINVAL;
}
bp->max_req_len = rte_le_to_cpu_16(resp->max_req_win_len);
- max_resp_len = resp->max_resp_len;
+ bp->hwrm_max_ext_req_len = rte_le_to_cpu_16(resp->max_ext_req_len);
+ if (bp->hwrm_max_ext_req_len < HWRM_MAX_REQ_LEN)
+ bp->hwrm_max_ext_req_len = HWRM_MAX_REQ_LEN;
+
+ max_resp_len = rte_le_to_cpu_16(resp->max_resp_len);
dev_caps_cfg = rte_le_to_cpu_32(resp->dev_caps_cfg);
if (bp->max_resp_len != max_resp_len) {
(dev_caps_cfg &
HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_SHORT_CMD_REQUIRED)) {
PMD_DRV_LOG(DEBUG, "Short command supported\n");
+ bp->flags |= BNXT_FLAG_SHORT_CMD;
+ }
+
+ if (((dev_caps_cfg &
+ HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_SHORT_CMD_SUPPORTED) &&
+ (dev_caps_cfg &
+ HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_SHORT_CMD_REQUIRED)) ||
+ bp->hwrm_max_ext_req_len > HWRM_MAX_REQ_LEN) {
+ sprintf(type, "bnxt_hwrm_short_%04x:%02x:%02x:%02x",
+ bp->pdev->addr.domain, bp->pdev->addr.bus,
+ bp->pdev->addr.devid, bp->pdev->addr.function);
rte_free(bp->hwrm_short_cmd_req_addr);
- bp->hwrm_short_cmd_req_addr = rte_malloc(type,
- bp->max_req_len, 0);
+ bp->hwrm_short_cmd_req_addr =
+ rte_malloc(type, bp->hwrm_max_ext_req_len, 0);
if (bp->hwrm_short_cmd_req_addr == NULL) {
rc = -ENOMEM;
goto error;
rc = -ENOMEM;
goto error;
}
-
- bp->flags |= BNXT_FLAG_SHORT_CMD;
}
if (dev_caps_cfg &
HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_KONG_MB_CHNL_SUPPORTED) {
}
}
}
+
+ bp->max_tc = resp->max_configurable_queues;
+ bp->max_lltc = resp->max_configurable_lossless_queues;
+ if (bp->max_tc > BNXT_MAX_QUEUE)
+ bp->max_tc = BNXT_MAX_QUEUE;
+ bp->max_q = bp->max_tc;
+
PMD_DRV_LOG(DEBUG, "Tx Cos Queue to use: %d\n", bp->tx_cosq_id);
return rc;
uint32_t enables = 0;
struct hwrm_ring_alloc_input req = {.req_type = 0 };
struct hwrm_ring_alloc_output *resp = bp->hwrm_cmd_resp_addr;
+ struct rte_mempool *mb_pool;
+ uint16_t rx_buf_size;
HWRM_PREP(req, RING_ALLOC, BNXT_USE_CHIMP_MB);
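/*
 * Thor-based devices add two ring types: NQ (notification queue) rings,
 * to which completion rings are attached, and dedicated RX aggregation
 * rings. The cases below fill in the type-specific fields and enables.
 */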
switch (ring_type) {
case HWRM_RING_ALLOC_INPUT_RING_TYPE_TX:
+ req.ring_type = ring_type;
+ req.cmpl_ring_id = rte_cpu_to_le_16(cmpl_ring_id);
+ req.stat_ctx_id = rte_cpu_to_le_32(stats_ctx_id);
req.queue_id = rte_cpu_to_le_16(bp->tx_cosq_id);
- /* FALLTHROUGH */
+ if (stats_ctx_id != INVALID_STATS_CTX_ID)
+ enables |=
+ HWRM_RING_ALLOC_INPUT_ENABLES_STAT_CTX_ID_VALID;
+ break;
case HWRM_RING_ALLOC_INPUT_RING_TYPE_RX:
req.ring_type = ring_type;
req.cmpl_ring_id = rte_cpu_to_le_16(cmpl_ring_id);
- req.stat_ctx_id = rte_cpu_to_le_16(stats_ctx_id);
+ req.stat_ctx_id = rte_cpu_to_le_32(stats_ctx_id);
+ if (BNXT_CHIP_THOR(bp)) {
+ mb_pool = bp->rx_queues[0]->mb_pool;
+ rx_buf_size = rte_pktmbuf_data_room_size(mb_pool) -
+ RTE_PKTMBUF_HEADROOM;
+ req.rx_buf_size = rte_cpu_to_le_16(rx_buf_size);
+ enables |=
+ HWRM_RING_ALLOC_INPUT_ENABLES_RX_BUF_SIZE_VALID;
+ }
if (stats_ctx_id != INVALID_STATS_CTX_ID)
enables |=
- HWRM_RING_ALLOC_INPUT_ENABLES_STAT_CTX_ID_VALID;
+ HWRM_RING_ALLOC_INPUT_ENABLES_STAT_CTX_ID_VALID;
break;
case HWRM_RING_ALLOC_INPUT_RING_TYPE_L2_CMPL:
req.ring_type = ring_type;
- /*
- * TODO: Some HWRM versions crash with
- * HWRM_RING_ALLOC_INPUT_INT_MODE_POLL
- */
+ if (BNXT_HAS_NQ(bp)) {
+ /* Attach this completion ring to its NQ. */
+ req.nq_ring_id = rte_cpu_to_le_16(cmpl_ring_id);
+ enables |=
+ HWRM_RING_ALLOC_INPUT_ENABLES_NQ_RING_ID_VALID;
+ }
req.int_mode = HWRM_RING_ALLOC_INPUT_INT_MODE_MSIX;
break;
+ case HWRM_RING_ALLOC_INPUT_RING_TYPE_NQ:
+ req.ring_type = ring_type;
+ req.page_size = BNXT_PAGE_SHFT;
+ req.int_mode = HWRM_RING_ALLOC_INPUT_INT_MODE_MSIX;
+ break;
+ case HWRM_RING_ALLOC_INPUT_RING_TYPE_RX_AGG:
+ req.ring_type = ring_type;
+ req.rx_ring_id = rte_cpu_to_le_16(ring->fw_rx_ring_id);
+
+ mb_pool = bp->rx_queues[0]->mb_pool;
+ rx_buf_size = rte_pktmbuf_data_room_size(mb_pool) -
+ RTE_PKTMBUF_HEADROOM;
+ req.rx_buf_size = rte_cpu_to_le_16(rx_buf_size);
+
+ req.stat_ctx_id = rte_cpu_to_le_32(stats_ctx_id);
+ enables |= HWRM_RING_ALLOC_INPUT_ENABLES_RX_RING_ID_VALID |
+ HWRM_RING_ALLOC_INPUT_ENABLES_RX_BUF_SIZE_VALID |
+ HWRM_RING_ALLOC_INPUT_ENABLES_STAT_CTX_ID_VALID;
+ break;
default:
PMD_DRV_LOG(ERR, "hwrm alloc invalid ring type %d\n",
ring_type);
if (rc == 0 && resp->error_code)
rc = rte_le_to_cpu_16(resp->error_code);
switch (ring_type) {
- case HWRM_RING_FREE_INPUT_RING_TYPE_L2_CMPL:
+ case HWRM_RING_ALLOC_INPUT_RING_TYPE_L2_CMPL:
PMD_DRV_LOG(ERR,
"hwrm_ring_alloc cp failed. rc:%d\n", rc);
HWRM_UNLOCK();
return rc;
- case HWRM_RING_FREE_INPUT_RING_TYPE_RX:
+ case HWRM_RING_ALLOC_INPUT_RING_TYPE_RX:
PMD_DRV_LOG(ERR,
- "hwrm_ring_alloc rx failed. rc:%d\n", rc);
+ "hwrm_ring_alloc rx failed. rc:%d\n", rc);
HWRM_UNLOCK();
return rc;
- case HWRM_RING_FREE_INPUT_RING_TYPE_TX:
+ case HWRM_RING_ALLOC_INPUT_RING_TYPE_RX_AGG:
+ PMD_DRV_LOG(ERR,
+ "hwrm_ring_alloc rx agg failed. rc:%d\n",
+ rc);
+ HWRM_UNLOCK();
+ return rc;
+ case HWRM_RING_ALLOC_INPUT_RING_TYPE_TX:
PMD_DRV_LOG(ERR,
- "hwrm_ring_alloc tx failed. rc:%d\n", rc);
+ "hwrm_ring_alloc tx failed. rc:%d\n", rc);
+ HWRM_UNLOCK();
+ return rc;
+ case HWRM_RING_ALLOC_INPUT_RING_TYPE_NQ:
+ PMD_DRV_LOG(ERR,
+ "hwrm_ring_alloc nq failed. rc:%d\n", rc);
HWRM_UNLOCK();
return rc;
default:
PMD_DRV_LOG(ERR, "hwrm_ring_free tx failed. rc:%d\n",
rc);
return rc;
+ case HWRM_RING_FREE_INPUT_RING_TYPE_NQ:
+ PMD_DRV_LOG(ERR,
+ "hwrm_ring_free nq failed. rc:%d\n", rc);
+ return rc;
+ case HWRM_RING_FREE_INPUT_RING_TYPE_RX_AGG:
+ PMD_DRV_LOG(ERR,
+ "hwrm_ring_free agg failed. rc:%d\n", rc);
+ return rc;
default:
PMD_DRV_LOG(ERR, "Invalid ring, rc:%d\n", rc);
return rc;
HWRM_PREP(req, STAT_CTX_CLR_STATS, BNXT_USE_CHIMP_MB);
- req.stat_ctx_id = rte_cpu_to_le_16(cpr->hw_stats_ctx_id);
+ req.stat_ctx_id = rte_cpu_to_le_32(cpr->hw_stats_ctx_id);
rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
HWRM_CHECK_RESULT();
- cpr->hw_stats_ctx_id = rte_le_to_cpu_16(resp->stat_ctx_id);
+ cpr->hw_stats_ctx_id = rte_le_to_cpu_32(resp->stat_ctx_id);
HWRM_UNLOCK();
HWRM_PREP(req, STAT_CTX_FREE, BNXT_USE_CHIMP_MB);
- req.stat_ctx_id = rte_cpu_to_le_16(cpr->hw_stats_ctx_id);
+ req.stat_ctx_id = rte_cpu_to_le_32(cpr->hw_stats_ctx_id);
rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
struct hwrm_vnic_alloc_input req = { 0 };
struct hwrm_vnic_alloc_output *resp = bp->hwrm_cmd_resp_addr;
+ if (!BNXT_HAS_RING_GRPS(bp))
+ goto skip_ring_grps;
+
/* map ring groups to this vnic */
PMD_DRV_LOG(DEBUG, "Alloc VNIC. Start %x, End %x\n",
vnic->start_grp_id, vnic->end_grp_id);
vnic->rss_rule = (uint16_t)HWRM_NA_SIGNATURE;
vnic->cos_rule = (uint16_t)HWRM_NA_SIGNATURE;
vnic->lb_rule = (uint16_t)HWRM_NA_SIGNATURE;
+
+skip_ring_grps:
vnic->mru = bp->eth_dev->data->mtu + RTE_ETHER_HDR_LEN +
RTE_ETHER_CRC_LEN + VLAN_TAG_SIZE;
HWRM_PREP(req, VNIC_ALLOC, BNXT_USE_CHIMP_MB);
struct hwrm_vnic_cfg_output *resp = bp->hwrm_cmd_resp_addr;
uint32_t ctx_enable_flag = 0;
struct bnxt_plcmodes_cfg pmodes;
+ uint32_t enables = 0;
if (vnic->fw_vnic_id == INVALID_HW_RING_ID) {
PMD_DRV_LOG(DEBUG, "VNIC ID %x\n", vnic->fw_vnic_id);
HWRM_PREP(req, VNIC_CFG, BNXT_USE_CHIMP_MB);
+ if (BNXT_CHIP_THOR(bp)) {
+ struct bnxt_rx_queue *rxq = bp->eth_dev->data->rx_queues[0];
+ struct bnxt_rx_ring_info *rxr = rxq->rx_ring;
+ struct bnxt_cp_ring_info *cpr = bp->def_cp_ring;
+
+ req.default_rx_ring_id =
+ rte_cpu_to_le_16(rxr->rx_ring_struct->fw_ring_id);
+ req.default_cmpl_ring_id =
+ rte_cpu_to_le_16(cpr->cp_ring_struct->fw_ring_id);
+ enables = HWRM_VNIC_CFG_INPUT_ENABLES_DEFAULT_RX_RING_ID |
+ HWRM_VNIC_CFG_INPUT_ENABLES_DEFAULT_CMPL_RING_ID;
+ goto config_mru;
+ }
+
/* Only RSS support for now TBD: COS & LB */
- req.enables =
- rte_cpu_to_le_32(HWRM_VNIC_CFG_INPUT_ENABLES_DFLT_RING_GRP);
+ enables = HWRM_VNIC_CFG_INPUT_ENABLES_DFLT_RING_GRP;
if (vnic->lb_rule != 0xffff)
ctx_enable_flag |= HWRM_VNIC_CFG_INPUT_ENABLES_LB_RULE;
if (vnic->cos_rule != 0xffff)
ctx_enable_flag |= HWRM_VNIC_CFG_INPUT_ENABLES_COS_RULE;
- if (vnic->rss_rule != 0xffff) {
+ if (vnic->rss_rule != (uint16_t)HWRM_NA_SIGNATURE) {
ctx_enable_flag |= HWRM_VNIC_CFG_INPUT_ENABLES_MRU;
ctx_enable_flag |= HWRM_VNIC_CFG_INPUT_ENABLES_RSS_RULE;
}
- req.enables |= rte_cpu_to_le_32(ctx_enable_flag);
- req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
+ enables |= ctx_enable_flag;
req.dflt_ring_grp = rte_cpu_to_le_16(vnic->dflt_ring_grp);
req.rss_rule = rte_cpu_to_le_16(vnic->rss_rule);
req.cos_rule = rte_cpu_to_le_16(vnic->cos_rule);
req.lb_rule = rte_cpu_to_le_16(vnic->lb_rule);
+
+config_mru:
+ req.enables = rte_cpu_to_le_32(enables);
+ req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
req.mru = rte_cpu_to_le_16(vnic->mru);
/* Configure default VNIC only once. */
if (vnic->func_default && !(bp->flags & BNXT_FLAG_DFLT_VNIC_SET)) {
struct hwrm_vnic_rss_cos_lb_ctx_free_output *resp =
bp->hwrm_cmd_resp_addr;
- if (vnic->rss_rule == 0xffff) {
+ if (vnic->rss_rule == (uint16_t)HWRM_NA_SIGNATURE) {
PMD_DRV_LOG(DEBUG, "VNIC RSS Rule %x\n", vnic->rss_rule);
return rc;
}
HWRM_CHECK_RESULT();
HWRM_UNLOCK();
- vnic->rss_rule = INVALID_HW_RING_ID;
+ vnic->rss_rule = (uint16_t)HWRM_NA_SIGNATURE;
return rc;
}
struct hwrm_vnic_tpa_cfg_input req = {.req_type = 0 };
struct hwrm_vnic_tpa_cfg_output *resp = bp->hwrm_cmd_resp_addr;
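+ /* TPA configuration is skipped (a no-op) on Thor devices. */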
+ if (BNXT_CHIP_THOR(bp))
+ return 0;
+
HWRM_PREP(req, VNIC_TPA_CFG, BNXT_USE_CHIMP_MB);
if (enable) {
uint16_t idx;
uint32_t rc = 0;
+ if (!BNXT_HAS_RING_GRPS(bp))
+ return 0;
+
for (idx = 0; idx < bp->rx_cp_nr_rings; idx++) {
if (bp->grp_info[idx].fw_grp_id == INVALID_HW_RING_ID)
return rc;
}
+static void bnxt_free_nq_ring(struct bnxt *bp, struct bnxt_cp_ring_info *cpr)
+{
+ struct bnxt_ring *cp_ring = cpr->cp_ring_struct;
+
+ bnxt_hwrm_ring_free(bp, cp_ring,
+ HWRM_RING_FREE_INPUT_RING_TYPE_NQ);
+ cp_ring->fw_ring_id = INVALID_HW_RING_ID;
+ memset(cpr->cp_desc_ring, 0, cpr->cp_ring_struct->ring_size *
+ sizeof(*cpr->cp_desc_ring));
+ cpr->cp_raw_cons = 0;
+}
+
static void bnxt_free_cp_ring(struct bnxt *bp, struct bnxt_cp_ring_info *cpr)
{
struct bnxt_ring *cp_ring = cpr->cp_ring_struct;
ring = rxr->ag_ring_struct;
if (ring->fw_ring_id != INVALID_HW_RING_ID) {
bnxt_hwrm_ring_free(bp, ring,
+ BNXT_CHIP_THOR(bp) ?
+ HWRM_RING_FREE_INPUT_RING_TYPE_RX_AGG :
HWRM_RING_FREE_INPUT_RING_TYPE_RX);
ring->fw_ring_id = INVALID_HW_RING_ID;
memset(rxr->ag_buf_ring, 0,
rxr->ag_prod = 0;
bp->grp_info[queue_index].ag_fw_ring_id = INVALID_HW_RING_ID;
}
- if (cpr->cp_ring_struct->fw_ring_id != INVALID_HW_RING_ID)
+ if (cpr->cp_ring_struct->fw_ring_id != INVALID_HW_RING_ID) {
bnxt_free_cp_ring(bp, cpr);
+ if (rxq->nq_ring)
+ bnxt_free_nq_ring(bp, rxq->nq_ring);
+ }
bp->grp_info[queue_index].cp_fw_ring_id = INVALID_HW_RING_ID;
}
if (cpr->cp_ring_struct->fw_ring_id != INVALID_HW_RING_ID) {
bnxt_free_cp_ring(bp, cpr);
cpr->cp_ring_struct->fw_ring_id = INVALID_HW_RING_ID;
+ if (txq->nq_ring)
+ bnxt_free_nq_ring(bp, txq->nq_ring);
}
}
uint16_t i;
uint32_t rc = 0;
+ if (!BNXT_HAS_RING_GRPS(bp))
+ return 0;
+
for (i = 0; i < bp->rx_cp_nr_rings; i++) {
rc = bnxt_hwrm_ring_grp_alloc(bp, i);
if (rc)
{
struct hwrm_func_cfg_input req = {0};
struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
+ uint32_t enables;
int rc;
- req.enables = rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_MTU |
- HWRM_FUNC_CFG_INPUT_ENABLES_MRU |
- HWRM_FUNC_CFG_INPUT_ENABLES_NUM_RSSCOS_CTXS |
- HWRM_FUNC_CFG_INPUT_ENABLES_NUM_STAT_CTXS |
- HWRM_FUNC_CFG_INPUT_ENABLES_NUM_CMPL_RINGS |
- HWRM_FUNC_CFG_INPUT_ENABLES_NUM_TX_RINGS |
- HWRM_FUNC_CFG_INPUT_ENABLES_NUM_RX_RINGS |
- HWRM_FUNC_CFG_INPUT_ENABLES_NUM_L2_CTXS |
- HWRM_FUNC_CFG_INPUT_ENABLES_NUM_VNICS |
- HWRM_FUNC_CFG_INPUT_ENABLES_NUM_HW_RING_GRPS);
+ enables = HWRM_FUNC_CFG_INPUT_ENABLES_MTU |
+ HWRM_FUNC_CFG_INPUT_ENABLES_MRU |
+ HWRM_FUNC_CFG_INPUT_ENABLES_NUM_RSSCOS_CTXS |
+ HWRM_FUNC_CFG_INPUT_ENABLES_NUM_STAT_CTXS |
+ HWRM_FUNC_CFG_INPUT_ENABLES_NUM_CMPL_RINGS |
+ HWRM_FUNC_CFG_INPUT_ENABLES_NUM_TX_RINGS |
+ HWRM_FUNC_CFG_INPUT_ENABLES_NUM_RX_RINGS |
+ HWRM_FUNC_CFG_INPUT_ENABLES_NUM_L2_CTXS |
+ HWRM_FUNC_CFG_INPUT_ENABLES_NUM_VNICS;
+
+ if (BNXT_HAS_RING_GRPS(bp)) {
+ enables |= HWRM_FUNC_CFG_INPUT_ENABLES_NUM_HW_RING_GRPS;
+ req.num_hw_ring_grps = rte_cpu_to_le_16(bp->max_ring_grps);
+ } else if (BNXT_HAS_NQ(bp)) {
+ enables |= HWRM_FUNC_CFG_INPUT_ENABLES_NUM_MSIX;
+ req.num_msix = rte_cpu_to_le_16(bp->max_nq_rings);
+ }
+
req.flags = rte_cpu_to_le_32(bp->pf.func_cfg_flags);
req.mtu = rte_cpu_to_le_16(BNXT_MAX_MTU);
req.mru = rte_cpu_to_le_16(bp->eth_dev->data->mtu + RTE_ETHER_HDR_LEN +
req.num_rx_rings = rte_cpu_to_le_16(bp->max_rx_rings);
req.num_l2_ctxs = rte_cpu_to_le_16(bp->max_l2_ctx);
req.num_vnics = rte_cpu_to_le_16(bp->max_vnics);
- req.num_hw_ring_grps = rte_cpu_to_le_16(bp->max_ring_grps);
req.fid = rte_cpu_to_le_16(0xffff);
+ req.enables = rte_cpu_to_le_32(enables);
HWRM_PREP(req, FUNC_CFG, BNXT_USE_CHIMP_MB);
bp->pf.func_cfg_flags |=
HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_DISABLE;
rc = bnxt_hwrm_pf_func_cfg(bp, bp->max_tx_rings);
+ rc = __bnxt_hwrm_func_qcaps(bp);
return rc;
}
return 0;
}
+#define BNXT_RTE_MEMZONE_FLAG (RTE_MEMZONE_1GB | RTE_MEMZONE_IOVA_CONTIG)
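+
+ /*
+  * Thor keeps QP, SRQ, CQ, VNIC, stat and TQM context structures in host
+  * memory ("backing store"). FUNC_BACKING_STORE_QCAPS reports the entry
+  * counts and sizes the firmware needs; FUNC_BACKING_STORE_CFG later
+  * hands the firmware the page tables backing that memory.
+  */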
+int bnxt_hwrm_func_backing_store_qcaps(struct bnxt *bp)
+{
+ struct hwrm_func_backing_store_qcaps_input req = {0};
+ struct hwrm_func_backing_store_qcaps_output *resp =
+ bp->hwrm_cmd_resp_addr;
+ int rc;
+
+ if (!BNXT_CHIP_THOR(bp) ||
+ bp->hwrm_spec_code < HWRM_VERSION_1_9_2 ||
+ BNXT_VF(bp) ||
+ bp->ctx)
+ return 0;
+
+ HWRM_PREP(req, FUNC_BACKING_STORE_QCAPS, BNXT_USE_CHIMP_MB);
+ rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
+ HWRM_CHECK_RESULT_SILENT();
+
+ if (!rc) {
+ struct bnxt_ctx_pg_info *ctx_pg;
+ struct bnxt_ctx_mem_info *ctx;
+ int total_alloc_len;
+ int i;
+
+ total_alloc_len = sizeof(*ctx);
+ ctx = rte_malloc("bnxt_ctx_mem", total_alloc_len,
+ RTE_CACHE_LINE_SIZE);
+ if (!ctx) {
+ rc = -ENOMEM;
+ goto ctx_err;
+ }
+ memset(ctx, 0, total_alloc_len);
+
+ ctx_pg = rte_malloc("bnxt_ctx_pg_mem",
+ sizeof(*ctx_pg) * BNXT_MAX_Q,
+ RTE_CACHE_LINE_SIZE);
+ if (!ctx_pg) {
+ rc = -ENOMEM;
+ goto ctx_err;
+ }
+ for (i = 0; i < BNXT_MAX_Q; i++, ctx_pg++)
+ ctx->tqm_mem[i] = ctx_pg;
+
+ bp->ctx = ctx;
+ ctx->qp_max_entries = rte_le_to_cpu_32(resp->qp_max_entries);
+ ctx->qp_min_qp1_entries =
+ rte_le_to_cpu_16(resp->qp_min_qp1_entries);
+ ctx->qp_max_l2_entries =
+ rte_le_to_cpu_16(resp->qp_max_l2_entries);
+ ctx->qp_entry_size = rte_le_to_cpu_16(resp->qp_entry_size);
+ ctx->srq_max_l2_entries =
+ rte_le_to_cpu_16(resp->srq_max_l2_entries);
+ ctx->srq_max_entries = rte_le_to_cpu_32(resp->srq_max_entries);
+ ctx->srq_entry_size = rte_le_to_cpu_16(resp->srq_entry_size);
+ ctx->cq_max_l2_entries =
+ rte_le_to_cpu_16(resp->cq_max_l2_entries);
+ ctx->cq_max_entries = rte_le_to_cpu_32(resp->cq_max_entries);
+ ctx->cq_entry_size = rte_le_to_cpu_16(resp->cq_entry_size);
+ ctx->vnic_max_vnic_entries =
+ rte_le_to_cpu_16(resp->vnic_max_vnic_entries);
+ ctx->vnic_max_ring_table_entries =
+ rte_le_to_cpu_16(resp->vnic_max_ring_table_entries);
+ ctx->vnic_entry_size = rte_le_to_cpu_16(resp->vnic_entry_size);
+ ctx->stat_max_entries =
+ rte_le_to_cpu_32(resp->stat_max_entries);
+ ctx->stat_entry_size = rte_le_to_cpu_16(resp->stat_entry_size);
+ ctx->tqm_entry_size = rte_le_to_cpu_16(resp->tqm_entry_size);
+ ctx->tqm_min_entries_per_ring =
+ rte_le_to_cpu_32(resp->tqm_min_entries_per_ring);
+ ctx->tqm_max_entries_per_ring =
+ rte_le_to_cpu_32(resp->tqm_max_entries_per_ring);
+ ctx->tqm_entries_multiple = resp->tqm_entries_multiple;
+ if (!ctx->tqm_entries_multiple)
+ ctx->tqm_entries_multiple = 1;
+ ctx->mrav_max_entries =
+ rte_le_to_cpu_32(resp->mrav_max_entries);
+ ctx->mrav_entry_size = rte_le_to_cpu_16(resp->mrav_entry_size);
+ ctx->tim_entry_size = rte_le_to_cpu_16(resp->tim_entry_size);
+ ctx->tim_max_entries = rte_le_to_cpu_32(resp->tim_max_entries);
+ } else {
+ rc = 0;
+ }
+ctx_err:
+ HWRM_UNLOCK();
+ return rc;
+}
+
+int bnxt_hwrm_func_backing_store_cfg(struct bnxt *bp, uint32_t enables)
+{
+ struct hwrm_func_backing_store_cfg_input req = {0};
+ struct hwrm_func_backing_store_cfg_output *resp =
+ bp->hwrm_cmd_resp_addr;
+ struct bnxt_ctx_mem_info *ctx = bp->ctx;
+ struct bnxt_ctx_pg_info *ctx_pg;
+ uint32_t *num_entries;
+ uint64_t *pg_dir;
+ uint8_t *pg_attr;
+ uint32_t ena;
+ int i, rc;
+
+ if (!ctx)
+ return 0;
+
+ HWRM_PREP(req, FUNC_BACKING_STORE_CFG, BNXT_USE_CHIMP_MB);
+ req.enables = rte_cpu_to_le_32(enables);
+
+ if (enables & HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_QP) {
+ ctx_pg = &ctx->qp_mem;
+ req.qp_num_entries = rte_cpu_to_le_32(ctx_pg->entries);
+ req.qp_num_qp1_entries =
+ rte_cpu_to_le_16(ctx->qp_min_qp1_entries);
+ req.qp_num_l2_entries =
+ rte_cpu_to_le_16(ctx->qp_max_l2_entries);
+ req.qp_entry_size = rte_cpu_to_le_16(ctx->qp_entry_size);
+ bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
+ &req.qpc_pg_size_qpc_lvl,
+ &req.qpc_page_dir);
+ }
+
+ if (enables & HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_SRQ) {
+ ctx_pg = &ctx->srq_mem;
+ req.srq_num_entries = rte_cpu_to_le_32(ctx_pg->entries);
+ req.srq_num_l2_entries =
+ rte_cpu_to_le_16(ctx->srq_max_l2_entries);
+ req.srq_entry_size = rte_cpu_to_le_16(ctx->srq_entry_size);
+ bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
+ &req.srq_pg_size_srq_lvl,
+ &req.srq_page_dir);
+ }
+
+ if (enables & HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_CQ) {
+ ctx_pg = &ctx->cq_mem;
+ req.cq_num_entries = rte_cpu_to_le_32(ctx_pg->entries);
+ req.cq_num_l2_entries =
+ rte_cpu_to_le_16(ctx->cq_max_l2_entries);
+ req.cq_entry_size = rte_cpu_to_le_16(ctx->cq_entry_size);
+ bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
+ &req.cq_pg_size_cq_lvl,
+ &req.cq_page_dir);
+ }
+
+ if (enables & HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_VNIC) {
+ ctx_pg = &ctx->vnic_mem;
+ req.vnic_num_vnic_entries =
+ rte_cpu_to_le_16(ctx->vnic_max_vnic_entries);
+ req.vnic_num_ring_table_entries =
+ rte_cpu_to_le_16(ctx->vnic_max_ring_table_entries);
+ req.vnic_entry_size = rte_cpu_to_le_16(ctx->vnic_entry_size);
+ bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
+ &req.vnic_pg_size_vnic_lvl,
+ &req.vnic_page_dir);
+ }
+
+ if (enables & HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_STAT) {
+ ctx_pg = &ctx->stat_mem;
+ req.stat_num_entries = rte_cpu_to_le_32(ctx->stat_max_entries);
+ req.stat_entry_size = rte_cpu_to_le_16(ctx->stat_entry_size);
+ bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
+ &req.stat_pg_size_stat_lvl,
+ &req.stat_page_dir);
+ }
+
+ req.tqm_entry_size = rte_cpu_to_le_16(ctx->tqm_entry_size);
+
+ /*
+  * The tqm_sp_* fields, the per-ring tqm_ring0..7 fields and their
+  * enable bits are laid out consecutively in the request, so a single
+  * pointer/bit walk programs all nine TQM contexts.
+  */
+ num_entries = &req.tqm_sp_num_entries;
+ pg_attr = &req.tqm_sp_pg_size_tqm_sp_lvl;
+ pg_dir = &req.tqm_sp_page_dir;
+ ena = HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_TQM_SP;
+ for (i = 0; i < 9; i++, num_entries++, pg_attr++, pg_dir++, ena <<= 1) {
+ if (!(enables & ena))
+ continue;
+
+ ctx_pg = ctx->tqm_mem[i];
+ *num_entries = rte_cpu_to_le_32(ctx_pg->entries);
+ bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem, pg_attr, pg_dir);
+ }
+
+ rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
+ HWRM_CHECK_RESULT();
+ HWRM_UNLOCK();
+ if (rc)
+ rc = -EIO;
+ return rc;
+}
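+
+ /*
+  * Illustrative sketch (not part of this patch) of how the two backing
+  * store calls are expected to chain together, e.g. from a helper along
+  * the lines of the bnxt_alloc_ctx_mem() referenced earlier:
+  *
+  *     rc = bnxt_hwrm_func_backing_store_qcaps(bp);
+  *     if (rc || !bp->ctx)
+  *             return rc;
+  *     // ... allocate qp/srq/cq/vnic/stat/tqm pages into bp->ctx ...
+  *     enables = HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_QP |
+  *               HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_SRQ | ...;
+  *     rc = bnxt_hwrm_func_backing_store_cfg(bp, enables);
+  */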
+
int bnxt_hwrm_ext_port_qstats(struct bnxt *bp)
{
struct hwrm_port_qstats_ext_input req = {0};