#include "roc_api.h"
#include "roc_priv.h"
+/* Single registered NPA LF-init callback; invoked during NPA LF bring-up
+ * (see the lf_init_cb check in the device-init path). Only one callback
+ * may be registered at a time.
+ */
+static roc_npa_lf_init_cb_t lf_init_cb;
+
+/* Register a callback to be run when the NPA LF is initialized.
+ *
+ * @param cb callback to store in the single lf_init_cb slot
+ * @return 0 on success, -EEXIST if a callback is already registered
+ *
+ * NOTE(review): not thread-safe — the check-then-set on lf_init_cb is
+ * unsynchronized; presumably registration happens at single-threaded
+ * init time — confirm with callers.
+ */
+int
+roc_npa_lf_init_cb_register(roc_npa_lf_init_cb_t cb)
+{
+ if (lf_init_cb != NULL)
+ return -EEXIST;
+
+ lf_init_cb = cb;
+ return 0;
+}
+
void
roc_npa_aura_op_range_set(uint64_t aura_handle, uint64_t start_iova,
uint64_t end_iova)
return 0;
}
+/* Reset the operation-performance counter (op_pc) of the pool backing
+ * the given aura handle.
+ *
+ * Issues an admin-queue WRITE on the pool context with op_pc = 0 and a
+ * full op_pc field mask, then forces an NDC-NPA cache sync for the LF so
+ * hardware-cached context is flushed.
+ *
+ * @param aura_handle aura handle whose pool counter is reset
+ * @return 0 on success; NPA_ERR_PARAM if no NPA LF is attached;
+ *         -ENOSPC if an mbox message cannot be allocated; a negative
+ *         mbox_process() error; NPA_ERR_AURA_POOL_FINI if the AQ write
+ *         or the NDC sync is rejected by the AF.
+ */
+int
+roc_npa_pool_op_pc_reset(uint64_t aura_handle)
+{
+ struct npa_lf *lf = idev_npa_obj_get();
+ struct npa_aq_enq_req *pool_req;
+ struct npa_aq_enq_rsp *pool_rsp;
+ struct ndc_sync_op *ndc_req;
+ struct mbox_dev *mdev;
+ int rc = -ENOSPC, off;
+ struct mbox *mbox;
+
+ if (lf == NULL)
+ return NPA_ERR_PARAM;
+
+ mbox = lf->mbox;
+ mdev = &mbox->dev[0];
+ plt_npa_dbg("lf=%p aura_handle=0x%" PRIx64, lf, aura_handle);
+
+ pool_req = mbox_alloc_msg_npa_aq_enq(mbox);
+ if (pool_req == NULL)
+ return rc;
+ pool_req->aura_id = roc_npa_aura_handle_to_aura(aura_handle);
+ pool_req->ctype = NPA_AQ_CTYPE_POOL;
+ pool_req->op = NPA_AQ_INSTOP_WRITE;
+ /* Zero the counter; set every bit of the op_pc mask so the AQ write
+ * touches only (and all of) the op_pc field. The message buffer is
+ * presumably zero-initialized by mbox_alloc_msg_npa_aq_enq, making
+ * ~mask == all-ones — TODO confirm against mbox alloc semantics.
+ */
+ pool_req->pool.op_pc = 0;
+ pool_req->pool_mask.op_pc = ~pool_req->pool_mask.op_pc;
+
+ rc = mbox_process(mbox);
+ if (rc < 0)
+ return rc;
+
+ /* Locate the AQ response just past the mbox header in the rx region. */
+ off = mbox->rx_start +
+ PLT_ALIGN(sizeof(struct mbox_hdr), MBOX_MSG_ALIGN);
+ pool_rsp = (struct npa_aq_enq_rsp *)((uintptr_t)mdev->mbase + off);
+
+ if (pool_rsp->hdr.rc != 0)
+ return NPA_ERR_AURA_POOL_FINI;
+
+ /* Sync NDC-NPA for LF */
+ ndc_req = mbox_alloc_msg_ndc_sync_op(mbox);
+ if (ndc_req == NULL)
+ return -ENOSPC;
+ ndc_req->npa_lf_sync = 1;
+ rc = mbox_process(mbox);
+ if (rc) {
+ plt_err("Error on NDC-NPA LF sync, rc %d", rc);
+ return NPA_ERR_AURA_POOL_FINI;
+ }
+ return 0;
+}
static inline char *
npa_stack_memzone_name(struct npa_lf *lf, int pool_id, char *name)
{
{
const char *mz_name = npa_stack_memzone_name(lf, pool_id, name);
- return plt_memzone_reserve_cache_align(mz_name, size);
+ return plt_memzone_reserve_aligned(mz_name, size, 0, ROC_ALIGN);
}
static inline int
/* Block size should be cache line aligned and in range of 128B-128KB */
if (block_size % ROC_ALIGN || block_size < 128 ||
- block_size > 128 * 1024)
+ block_size > ROC_NPA_MAX_BLOCK_SZ)
return NPA_ERR_INVALID_BLOCK_SZ;
pos = 0;
/* Update aura fields */
aura->pool_addr = pool_id; /* AF will translate to associated poolctx */
aura->ena = 1;
- aura->shift = __builtin_clz(block_count) - 8;
+ aura->shift = plt_log2_u32(block_count);
+ aura->shift = aura->shift < 8 ? 0 : aura->shift - 8;
aura->limit = block_count;
aura->pool_caching = 1;
aura->err_int_ena = BIT(NPA_AURA_ERR_INT_AURA_ADD_OVER);
aura->err_int_ena |= BIT(NPA_AURA_ERR_INT_AURA_ADD_UNDER);
aura->err_int_ena |= BIT(NPA_AURA_ERR_INT_AURA_FREE_UNDER);
aura->err_int_ena |= BIT(NPA_AURA_ERR_INT_POOL_DIS);
+ aura->avg_con = ROC_NPA_AVG_CONT;
/* Many to one reduction */
aura->err_qint_idx = aura_id % lf->qints;
pool->ena = 1;
pool->buf_size = block_size / ROC_ALIGN;
pool->stack_max_pages = stack_size;
- pool->shift = __builtin_clz(block_count) - 8;
+ pool->shift = plt_log2_u32(block_count);
+ pool->shift = pool->shift < 8 ? 0 : pool->shift - 8;
pool->ptr_start = 0;
pool->ptr_end = ~0;
pool->stack_caching = 1;
pool->err_int_ena = BIT(NPA_POOL_ERR_INT_OVFLS);
pool->err_int_ena |= BIT(NPA_POOL_ERR_INT_RANGE);
pool->err_int_ena |= BIT(NPA_POOL_ERR_INT_PERR);
+ pool->avg_con = ROC_NPA_AVG_CONT;
/* Many to one reduction */
pool->err_qint_idx = pool_id % lf->qints;
if (__atomic_fetch_add(&idev->npa_refcnt, 1, __ATOMIC_SEQ_CST) != 0)
return 0;
+ if (lf_init_cb) {
+ rc = (*lf_init_cb)(pci_dev);
+ if (rc)
+ goto fail;
+ }
+
rc = npa_attach(dev->mbox);
if (rc)
goto fail;
lf->pf_func = dev->pf_func;
lf->npa_msixoff = npa_msixoff;
- lf->intr_handle = &pci_dev->intr_handle;
+ lf->intr_handle = pci_dev->intr_handle;
lf->pci_dev = pci_dev;
idev->npa_pf_func = dev->pf_func;