Newer firmware advertises the number of TQM rings to allocate
context memory for. Use the firmware-specified value, falling back
to the old value derived from "bp->max_q" when the firmware does
not provide one.
Fixes: f8168ca0e690 ("net/bnxt: support thor controller")
Cc: stable@dpdk.org
Signed-off-by: Kalesh AP <kalesh-anakkur.purayil@broadcom.com>
Reviewed-by: Somnath Kotur <somnath.kotur@broadcom.com>
Reviewed-by: Ajit Khaparde <ajit.khaparde@broadcom.com>
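
As a minimal standalone sketch of the sizing rule the diff below
implements (tqm_ring_count and the sample values are hypothetical,
for illustration only; reading the extra "+ 1" slot as the slow-path
TQM ring is an inference, not something the patch states):

    #include <stdint.h>
    #include <stdio.h>

    /*
     * Hypothetical helper, not driver code: prefer the fast-path ring
     * count the firmware advertises, and fall back to the legacy
     * bp->max_q derivation when the field reads zero.
     */
    static uint8_t tqm_ring_count(uint8_t fw_fp_rings, uint8_t max_q)
    {
            uint8_t fp_rings = fw_fp_rings ? fw_fp_rings : max_q;

            /* + 1 assumed to cover the slow-path TQM ring */
            return fp_rings + 1;
    }

    int main(void)
    {
            /* Newer firmware advertising 5 fast-path rings: 6 slots. */
            printf("%u\n", (unsigned)tqm_ring_count(5, 8));
            /* Older firmware (field reads 0): fall back, 9 slots. */
            printf("%u\n", (unsigned)tqm_ring_count(0, 8));
            return 0;
    }

With the fallback in place, old firmware keeps the previous
"bp->max_q"-based sizing, while newer firmware can size the context
memory allocation to the ring count it actually uses.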
#define BNXT_MAX_TC 8
#define BNXT_MAX_QUEUE 8
#define BNXT_MAX_TC_Q (BNXT_MAX_TC + 1)
-#define BNXT_MAX_Q (bp->max_q + 1)
#define BNXT_PAGE_SHFT 12
#define BNXT_PAGE_SIZE (1 << BNXT_PAGE_SHFT)
#define MAX_CTX_PAGES (BNXT_PAGE_SIZE / 8)
uint16_t tim_entry_size;
uint32_t tim_max_entries;
uint8_t tqm_entries_multiple;
+ uint8_t tqm_fp_rings_count;
uint32_t flags;
#define BNXT_CTX_FLAG_INITED 0x01
rte_memzone_free(bp->ctx->vnic_mem.ring_mem.pg_tbl_mz);
rte_memzone_free(bp->ctx->stat_mem.ring_mem.pg_tbl_mz);
- for (i = 0; i < BNXT_MAX_Q; i++) {
+ for (i = 0; i < bp->ctx->tqm_fp_rings_count + 1; i++) {
if (bp->ctx->tqm_mem[i])
rte_memzone_free(bp->ctx->tqm_mem[i]->ring_mem.mz);
}
entries = bnxt_roundup(entries, ctx->tqm_entries_multiple);
entries = clamp_t(uint32_t, entries, ctx->tqm_min_entries_per_ring,
ctx->tqm_max_entries_per_ring);
- for (i = 0, ena = 0; i < BNXT_MAX_Q; i++) {
+ for (i = 0, ena = 0; i < ctx->tqm_fp_rings_count + 1; i++) {
ctx_pg = ctx->tqm_mem[i];
- /* use min tqm entries for now. */
ctx_pg->entries = entries;
mem_size = ctx->tqm_entry_size * ctx_pg->entries;
rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg, mem_size, "tqm_mem", i);
struct bnxt_ctx_pg_info *ctx_pg;
struct bnxt_ctx_mem_info *ctx;
int total_alloc_len;
- int rc, i;
+ int rc, i, tqm_rings;
if (!BNXT_CHIP_THOR(bp) ||
bp->hwrm_spec_code < HWRM_VERSION_1_9_2 ||
goto ctx_err;
}
- ctx_pg = rte_malloc("bnxt_ctx_pg_mem",
- sizeof(*ctx_pg) * BNXT_MAX_Q,
- RTE_CACHE_LINE_SIZE);
- if (!ctx_pg) {
- rc = -ENOMEM;
- goto ctx_err;
- }
- for (i = 0; i < BNXT_MAX_Q; i++, ctx_pg++)
- ctx->tqm_mem[i] = ctx_pg;
-
- bp->ctx = ctx;
ctx->qp_max_entries = rte_le_to_cpu_32(resp->qp_max_entries);
ctx->qp_min_qp1_entries =
rte_le_to_cpu_16(resp->qp_min_qp1_entries);
ctx->mrav_entry_size = rte_le_to_cpu_16(resp->mrav_entry_size);
ctx->tim_entry_size = rte_le_to_cpu_16(resp->tim_entry_size);
ctx->tim_max_entries = rte_le_to_cpu_32(resp->tim_max_entries);
+ ctx->tqm_fp_rings_count = resp->tqm_fp_rings_count;
+
+ if (!ctx->tqm_fp_rings_count)
+ ctx->tqm_fp_rings_count = bp->max_q;
+
+ tqm_rings = ctx->tqm_fp_rings_count + 1;
+
+ ctx_pg = rte_malloc("bnxt_ctx_pg_mem",
+ sizeof(*ctx_pg) * tqm_rings,
+ RTE_CACHE_LINE_SIZE);
+ if (!ctx_pg) {
+ rc = -ENOMEM;
+ goto ctx_err;
+ }
+ for (i = 0; i < tqm_rings; i++, ctx_pg++)
+ ctx->tqm_mem[i] = ctx_pg;
+
+ bp->ctx = ctx;
ctx_err:
HWRM_UNLOCK();
return rc;