mempool/octeontx2: add context dump support
Author:     Jerin Jacob <jerinj@marvell.com>
AuthorDate: Sat, 22 Jun 2019 13:24:10 +0000 (18:54 +0530)
Commit:     Thomas Monjalon <thomas@monjalon.net>
CommitDate: Tue, 25 Jun 2019 21:35:57 +0000 (23:35 +0200)
Add a helper function to dump aura and pool context for NPA debugging.
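
A minimal sketch of a call site (hypothetical; the npa_debug_dump()
wrapper below is not part of this patch and only illustrates how a
driver debug path could invoke the new helper):

    /* Hypothetical debug hook (not part of this patch): dump every
     * enabled aura and pool context of an NPA LF. "lf" is assumed
     * to be a valid otx2_npa_lf handle owned by the mempool driver.
     */
    static void
    npa_debug_dump(struct otx2_npa_lf *lf)
    {
            if (otx2_mempool_ctx_dump(lf))
                    otx2_err("NPA context dump failed");
    }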

Signed-off-by: Jerin Jacob <jerinj@marvell.com>
Signed-off-by: Vivek Sharma <viveksharma@marvell.com>
drivers/mempool/octeontx2/Makefile
drivers/mempool/octeontx2/meson.build
drivers/mempool/octeontx2/otx2_mempool.h
drivers/mempool/octeontx2/otx2_mempool_debug.c [new file with mode: 0644]
drivers/mempool/octeontx2/otx2_mempool_irq.c

diff --git a/drivers/mempool/octeontx2/Makefile b/drivers/mempool/octeontx2/Makefile
index 86950b2..b86d469 100644
--- a/drivers/mempool/octeontx2/Makefile
+++ b/drivers/mempool/octeontx2/Makefile
@@ -29,7 +29,8 @@ LIBABIVER := 1
 #
 SRCS-$(CONFIG_RTE_LIBRTE_OCTEONTX2_MEMPOOL) += \
        otx2_mempool.c          \
-       otx2_mempool_irq.c
+       otx2_mempool_irq.c      \
+       otx2_mempool_debug.c
 
 LDLIBS += -lrte_eal -lrte_mempool -lrte_mbuf
 LDLIBS += -lrte_common_octeontx2 -lrte_kvargs -lrte_bus_pci
diff --git a/drivers/mempool/octeontx2/meson.build b/drivers/mempool/octeontx2/meson.build
index 3f93b50..ab306b7 100644
--- a/drivers/mempool/octeontx2/meson.build
+++ b/drivers/mempool/octeontx2/meson.build
@@ -4,6 +4,7 @@
 
 sources = files('otx2_mempool.c',
                'otx2_mempool_irq.c',
+               'otx2_mempool_debug.c'
                )
 
 extra_flags = []
diff --git a/drivers/mempool/octeontx2/otx2_mempool.h b/drivers/mempool/octeontx2/otx2_mempool.h
index 41542cf..efaa308 100644
--- a/drivers/mempool/octeontx2/otx2_mempool.h
+++ b/drivers/mempool/octeontx2/otx2_mempool.h
@@ -202,4 +202,7 @@ int otx2_npa_lf_fini(void);
 int otx2_npa_register_irqs(struct otx2_npa_lf *lf);
 void otx2_npa_unregister_irqs(struct otx2_npa_lf *lf);
 
+/* Debug */
+int otx2_mempool_ctx_dump(struct otx2_npa_lf *lf);
+
 #endif /* __OTX2_MEMPOOL_H__ */
diff --git a/drivers/mempool/octeontx2/otx2_mempool_debug.c b/drivers/mempool/octeontx2/otx2_mempool_debug.c
new file mode 100644
index 0000000..eef61ef
--- /dev/null
+++ b/drivers/mempool/octeontx2/otx2_mempool_debug.c
@@ -0,0 +1,135 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(C) 2019 Marvell International Ltd.
+ */
+
+#include "otx2_mempool.h"
+
+#define npa_dump(fmt, ...) fprintf(stderr, fmt "\n", ##__VA_ARGS__)
+
+static inline void
+npa_lf_pool_dump(struct npa_pool_s *pool)
+{
+       npa_dump("W0: Stack base\t\t0x%"PRIx64"", pool->stack_base);
+       npa_dump("W1: ena \t\t%d\nW1: nat_align \t\t%d\nW1: stack_caching \t%d",
+               pool->ena, pool->nat_align, pool->stack_caching);
+       npa_dump("W1: stack_way_mask\t%d\nW1: buf_offset\t\t%d",
+               pool->stack_way_mask, pool->buf_offset);
+       npa_dump("W1: buf_size \t\t%d", pool->buf_size);
+
+       npa_dump("W2: stack_max_pages \t%d\nW2: stack_pages\t\t%d",
+               pool->stack_max_pages, pool->stack_pages);
+
+       npa_dump("W3: op_pc \t\t0x%"PRIx64"", (uint64_t)pool->op_pc);
+
+       npa_dump("W4: stack_offset\t%d\nW4: shift\t\t%d\nW4: avg_level\t\t%d",
+               pool->stack_offset, pool->shift, pool->avg_level);
+       npa_dump("W4: avg_con \t\t%d\nW4: fc_ena\t\t%d\nW4: fc_stype\t\t%d",
+               pool->avg_con, pool->fc_ena, pool->fc_stype);
+       npa_dump("W4: fc_hyst_bits\t%d\nW4: fc_up_crossing\t%d",
+               pool->fc_hyst_bits, pool->fc_up_crossing);
+       npa_dump("W4: update_time\t\t%d\n", pool->update_time);
+
+       npa_dump("W5: fc_addr\t\t0x%"PRIx64"\n", pool->fc_addr);
+
+       npa_dump("W6: ptr_start\t\t0x%"PRIx64"\n", pool->ptr_start);
+
+       npa_dump("W7: ptr_end\t\t0x%"PRIx64"\n", pool->ptr_end);
+       npa_dump("W8: err_int\t\t%d\nW8: err_int_ena\t\t%d",
+               pool->err_int, pool->err_int_ena);
+       npa_dump("W8: thresh_int\t\t%d", pool->thresh_int);
+
+       npa_dump("W8: thresh_int_ena\t%d\nW8: thresh_up\t\t%d",
+               pool->thresh_int_ena, pool->thresh_up);
+       npa_dump("W8: thresh_qint_idx\t%d\nW8: err_qint_idx\t%d",
+               pool->thresh_qint_idx, pool->err_qint_idx);
+}
+
+static inline void
+npa_lf_aura_dump(struct npa_aura_s *aura)
+{
+       npa_dump("W0: Pool addr\t\t0x%"PRIx64"\n", aura->pool_addr);
+
+       npa_dump("W1: ena\t\t\t%d\nW1: pool caching\t%d\nW1: pool way mask\t%d",
+               aura->ena, aura->pool_caching, aura->pool_way_mask);
+       npa_dump("W1: avg con\t\t%d\nW1: pool drop ena\t%d",
+               aura->avg_con, aura->pool_drop_ena);
+       npa_dump("W1: aura drop ena\t%d", aura->aura_drop_ena);
+       npa_dump("W1: bp_ena\t\t%d\nW1: aura drop\t\t%d\nW1: aura shift\t\t%d",
+               aura->bp_ena, aura->aura_drop, aura->shift);
+       npa_dump("W1: avg_level\t\t%d\n", aura->avg_level);
+
+       npa_dump("W2: count\t\t%"PRIx64"\nW2: nix0_bpid\t\t%d",
+               (uint64_t)aura->count, aura->nix0_bpid);
+       npa_dump("W2: nix1_bpid\t\t%d", aura->nix1_bpid);
+
+       npa_dump("W3: limit\t\t%"PRIx64"\nW3: bp\t\t\t%d\nW3: fc_ena\t\t%d\n",
+               (uint64_t)aura->limit, aura->bp, aura->fc_ena);
+       npa_dump("W3: fc_up_crossing\t%d\nW3: fc_stype\t\t%d",
+               aura->fc_up_crossing, aura->fc_stype);
+
+       npa_dump("W3: fc_hyst_bits\t%d", aura->fc_hyst_bits);
+
+       npa_dump("W4: fc_addr\t\t0x%"PRIx64"\n", aura->fc_addr);
+
+       npa_dump("W5: pool_drop\t\t%d\nW5: update_time\t\t%d",
+               aura->pool_drop, aura->update_time);
+       npa_dump("W5: err_int\t\t%d",  aura->err_int);
+       npa_dump("W5: err_int_ena\t\t%d\nW5: thresh_int\t\t%d",
+               aura->err_int_ena, aura->thresh_int);
+       npa_dump("W5: thresh_int_ena\t%d", aura->thresh_int_ena);
+
+       npa_dump("W5: thresh_up\t\t%d\nW5: thresh_qint_idx\t%d",
+               aura->thresh_up, aura->thresh_qint_idx);
+       npa_dump("W5: err_qint_idx\t%d", aura->err_qint_idx);
+
+       npa_dump("W6: thresh\t\t%"PRIx64"\n", (uint64_t)aura->thresh);
+}
+
+int
+otx2_mempool_ctx_dump(struct otx2_npa_lf *lf)
+{
+       struct npa_aq_enq_req *aq;
+       struct npa_aq_enq_rsp *rsp;
+       uint32_t q;
+       int rc = 0;
+
+       for (q = 0; q < lf->nr_pools; q++) {
+               /* Skip disabled POOL */
+               if (rte_bitmap_get(lf->npa_bmp, q))
+                       continue;
+
+               aq = otx2_mbox_alloc_msg_npa_aq_enq(lf->mbox);
+               aq->aura_id = q;
+               aq->ctype = NPA_AQ_CTYPE_POOL;
+               aq->op = NPA_AQ_INSTOP_READ;
+
+               rc = otx2_mbox_process_msg(lf->mbox, (void *)&rsp);
+               if (rc) {
+                       otx2_err("Failed to get pool(%d) context", q);
+                       return rc;
+               }
+               npa_dump("============== pool=%d ===============\n", q);
+               npa_lf_pool_dump(&rsp->pool);
+       }
+
+       for (q = 0; q < lf->nr_pools; q++) {
+               /* Skip disabled AURA */
+               if (rte_bitmap_get(lf->npa_bmp, q))
+                       continue;
+
+               aq = otx2_mbox_alloc_msg_npa_aq_enq(lf->mbox);
+               aq->aura_id = q;
+               aq->ctype = NPA_AQ_CTYPE_AURA;
+               aq->op = NPA_AQ_INSTOP_READ;
+
+               rc = otx2_mbox_process_msg(lf->mbox, (void *)&rsp);
+               if (rc) {
+                       otx2_err("Failed to get aura(%d) context", q);
+                       return rc;
+               }
+               npa_dump("============== aura=%d ===============\n", q);
+               npa_lf_aura_dump(&rsp->aura);
+       }
+
+       return rc;
+}
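
Each context read above follows the same NPA admin queue (AQ) mailbox
sequence: allocate an enqueue request, select the context type and the
READ opcode, then process the mailbox and decode the response. A
minimal sketch of that sequence for a single aura (the
npa_aura_ctx_read() helper is hypothetical and only isolates the
pattern; it assumes a valid LF handle and an enabled aura id):

    static int
    npa_aura_ctx_read(struct otx2_npa_lf *lf, uint32_t aura_id,
                      struct npa_aura_s *aura)
    {
            struct npa_aq_enq_req *aq;
            struct npa_aq_enq_rsp *rsp;
            int rc;

            /* Allocate an AQ enqueue request on the LF mailbox */
            aq = otx2_mbox_alloc_msg_npa_aq_enq(lf->mbox);
            aq->aura_id = aura_id;
            aq->ctype = NPA_AQ_CTYPE_AURA; /* read the aura context */
            aq->op = NPA_AQ_INSTOP_READ;

            /* Fire the mailbox and wait for the response */
            rc = otx2_mbox_process_msg(lf->mbox, (void *)&rsp);
            if (rc)
                    return rc;

            *aura = rsp->aura; /* snapshot of the aura context */
            return 0;
    }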
diff --git a/drivers/mempool/octeontx2/otx2_mempool_irq.c b/drivers/mempool/octeontx2/otx2_mempool_irq.c
index c026e1e..ce41044 100644
--- a/drivers/mempool/octeontx2/otx2_mempool_irq.c
+++ b/drivers/mempool/octeontx2/otx2_mempool_irq.c
@@ -199,6 +199,7 @@ npa_lf_q_irq(void *param)
 
        /* Clear interrupt */
        otx2_write64(intr, lf->base + NPA_LF_QINTX_INT(qintx));
+       otx2_mempool_ctx_dump(lf);
 }
 
 static int
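
With this hook in place, otx2_mempool_ctx_dump() runs from
npa_lf_q_irq() right after the interrupt is acknowledged, so the dump
captures the aura and pool contexts at the moment an NPA queue (QINT)
interrupt fires (QINTs carry the error and threshold events routed via
the err_qint_idx/thresh_qint_idx fields dumped above).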