struct plt_intr_handle *handle;
int rc, vec;
- handle = &pci_dev->intr_handle;
+ handle = pci_dev->intr_handle;
vec = lf->msixoff + CPT_LF_INT_VEC_MISC;
/* Clear err interrupt */
struct plt_intr_handle *handle;
int vec;
- handle = &pci_dev->intr_handle;
+ handle = pci_dev->intr_handle;
vec = lf->msixoff + CPT_LF_INT_VEC_MISC;
/* Clear err interrupt */
struct plt_intr_handle *handle;
int rc, vec;
- handle = &pci_dev->intr_handle;
+ handle = pci_dev->intr_handle;
vec = lf->msixoff + CPT_LF_INT_VEC_DONE;
struct plt_intr_handle *handle;
int vec;
- handle = &pci_dev->intr_handle;
+ handle = pci_dev->intr_handle;
vec = lf->msixoff + CPT_LF_INT_VEC_DONE;
return -EINVAL;
req = mbox_alloc_msg_cpt_lf_alloc(mbox);
+ if (!req)
+ return -ENOSPC;
+
req->nix_pf_func = 0;
if (inl_dev_sso && nix_inl_dev_pffunc_get())
req->sso_pf_func = nix_inl_dev_pffunc_get();
if (lf->nb_desc == 0 || lf->nb_desc > CPT_LF_MAX_NB_DESC)
lf->nb_desc = CPT_LF_DEFAULT_NB_DESC;
- /* Update nb_desc to next power of 2 to aid in pending queue checks */
- lf->nb_desc = plt_align32pow2(lf->nb_desc);
-
/* Allocate memory for instruction queue for CPT LF. */
iq_mem = plt_zmalloc(cpt_lf_iq_mem_calc(lf->nb_desc), ROC_ALIGN);
if (iq_mem == NULL)
}
+/* Flush the cached CPT context for @cptr through the LF's CTX_FLUSH CSR.
+ * @inval additionally requests invalidation of the cached entry.
+ * The context pointer is programmed as (@cptr >> 7), i.e. in 128-byte
+ * units -- assumes @cptr is 128-byte aligned; TODO confirm with callers.
+ * Returns 0 on success, -ENOTSUP when @lf is not initialized.
+ */
int
-roc_cpt_lf_ctx_flush(struct roc_cpt_lf *lf, uint64_t cptr)
+roc_cpt_lf_ctx_flush(struct roc_cpt_lf *lf, void *cptr, bool inval)
{
union cpt_lf_ctx_flush reg;
- if (lf == NULL)
+ if (lf == NULL) {
+ plt_err("Could not trigger CTX flush");
return -ENOTSUP;
+ }
reg.u = 0;
- reg.s.pf_func = lf->pf_func;
- reg.s.inval = 1;
- reg.s.cptr = cptr;
+ reg.s.inval = inval;
+ /* HW takes the context address in 128-byte units */
+ reg.s.cptr = (uintptr_t)cptr >> 7;
plt_write64(reg.u, lf->rbase + CPT_LF_CTX_FLUSH);
return 0;
}
+/* Trigger a reload of the CPT context at @cptr through the LF's
+ * CTX_RELOAD CSR. As with CTX flush, the pointer is programmed in
+ * 128-byte units (@cptr >> 7) -- assumes 128-byte alignment; TODO
+ * confirm with callers.
+ * Returns 0 on success, -ENOTSUP when @lf is not initialized.
+ */
+int
+roc_cpt_lf_ctx_reload(struct roc_cpt_lf *lf, void *cptr)
+{
+ union cpt_lf_ctx_reload reg;
+
+ if (lf == NULL) {
+ plt_err("Could not trigger CTX reload");
+ return -ENOTSUP;
+ }
+
+ reg.u = 0;
+ reg.s.cptr = (uintptr_t)cptr >> 7;
+
+ plt_write64(reg.u, lf->rbase + CPT_LF_CTX_RELOAD);
+
+ return 0;
+}
+
void
cpt_lf_fini(struct roc_cpt_lf *lf)
{
void
roc_cpt_iq_disable(struct roc_cpt_lf *lf)
{
+ volatile union cpt_lf_q_grp_ptr grp_ptr = {.u = 0x0};
+ volatile union cpt_lf_inprog lf_inprog = {.u = 0x0};
union cpt_lf_ctl lf_ctl = {.u = 0x0};
- union cpt_lf_q_grp_ptr grp_ptr;
- union cpt_lf_inprog lf_inprog;
int timeout = 20;
int cnt;
return 0;
}
+
+/* Write an SA context to CPT using the WRITE_SA microcode op
+ * (cn10k-style LMTST submission).
+ *
+ * @sa_dptr: source SA image; its 8-byte words are byte-swapped to
+ *           big endian into a scratch DMA buffer before submission.
+ * @sa_cptr: destination context pointer programmed into inst.w7.
+ * @sa_len:  SA length in bytes; the copy loop runs sa_len / 8 times,
+ *           so a multiple of 8 is assumed -- TODO confirm callers.
+ *
+ * Returns 0 on success, -ENOMEM on allocation failure, -ETIMEDOUT if
+ * the instruction does not complete as expected within ~60 seconds.
+ */
+int
+roc_cpt_ctx_write(struct roc_cpt_lf *lf, void *sa_dptr, void *sa_cptr,
+ uint16_t sa_len)
+{
+ uintptr_t lmt_base = lf->lmt_base;
+ union cpt_res_s res, *hw_res;
+ uint64_t lmt_arg, io_addr;
+ struct cpt_inst_s *inst;
+ uint16_t lmt_id;
+ uint64_t *dptr;
+ int i;
+
+ /* Derive the LMT line id from the LF's LMT base and build the
+ * instruction directly in the LMT line.
+ */
+ ROC_LMT_CPT_BASE_ID_GET(lmt_base, lmt_id);
+ inst = (struct cpt_inst_s *)lmt_base;
+
+ memset(inst, 0, sizeof(struct cpt_inst_s));
+
+ /* Result word polled by SW; NOTE(review): the VA is programmed as
+ * the DMA address -- assumes IOVA == VA mode, confirm.
+ */
+ hw_res = plt_zmalloc(sizeof(*hw_res), ROC_CPT_RES_ALIGN);
+ if (hw_res == NULL) {
+ plt_err("Couldn't allocate memory for result address");
+ return -ENOMEM;
+ }
+
+ dptr = plt_zmalloc(sa_len, 8);
+ if (dptr == NULL) {
+ plt_err("Couldn't allocate memory for SA dptr");
+ plt_free(hw_res);
+ return -ENOMEM;
+ }
+
+ /* Microcode expects the SA in big-endian 8-byte words */
+ for (i = 0; i < (sa_len / 8); i++)
+ dptr[i] = plt_cpu_to_be_64(((uint64_t *)sa_dptr)[i]);
+
+ /* Fill CPT_INST_S for WRITE_SA microcode op */
+ hw_res->cn10k.compcode = CPT_COMP_NOT_DONE;
+ inst->res_addr = (uint64_t)hw_res;
+ inst->dptr = (uint64_t)dptr;
+ inst->w4.s.param2 = sa_len >> 3;
+ inst->w4.s.dlen = sa_len;
+ inst->w4.s.opcode_major = ROC_IE_OT_MAJOR_OP_WRITE_SA;
+ inst->w4.s.opcode_minor = ROC_IE_OT_MINOR_OP_WRITE_SA;
+ inst->w7.s.cptr = (uint64_t)sa_cptr;
+ inst->w7.s.ctx_val = 1;
+ inst->w7.s.egrp = ROC_CPT_DFLT_ENG_GRP_SE_IE;
+
+ /* Submit exactly one instruction via LMTST + STEORL */
+ lmt_arg = ROC_CN10K_CPT_LMT_ARG | (uint64_t)lmt_id;
+ io_addr = lf->io_addr | ROC_CN10K_CPT_INST_DW_M1 << 4;
+
+ roc_lmt_submit_steorl(lmt_arg, io_addr);
+ plt_io_wmb();
+
+ /* Use 1 min timeout for the poll */
+ const uint64_t timeout = plt_tsc_cycles() + 60 * plt_tsc_hz();
+
+ /* Wait until CPT instruction completes */
+ do {
+ res.u64[0] = __atomic_load_n(&hw_res->u64[0], __ATOMIC_RELAXED);
+ if (unlikely(plt_tsc_cycles() > timeout))
+ break;
+ } while (res.cn10k.compcode == CPT_COMP_NOT_DONE);
+
+ plt_free(dptr);
+ plt_free(hw_res);
+
+ /* NOTE(review): WRITE_SA is expected to complete with
+ * CPT_COMP_WARN here; any other completion code (including a real
+ * HW error) is reported as a timeout -- confirm this is intended.
+ */
+ if (res.cn10k.compcode != CPT_COMP_WARN) {
+ plt_err("Write SA operation timed out");
+ return -ETIMEDOUT;
+ }
+
+ return 0;
+}
+
+/* Write a context/SA to CPT on cn9k-style LFs using the legacy LMTST
+ * (mov64 + LDEOR) submission path.
+ *
+ * @sa:      context image; NOTE(review): its DMA address is obtained
+ *           via rte_mempool_virt2iova(), so @sa is assumed to come
+ *           from an rte_mempool -- confirm with callers.
+ * @opcode:  microcode major opcode to issue.
+ * @ctx_len: context length in bytes; passed to the microcode in
+ *           8-byte words through the minor-opcode field.
+ * @egrp:    engine group to run the op on.
+ *
+ * Returns 0 on success, -ENOMEM on allocation failure, -ETIMEDOUT on
+ * poll timeout (~60 s), -EINVAL on a HW completion error, -ENOTSUP on
+ * a microcode completion error.
+ */
+int
+roc_on_cpt_ctx_write(struct roc_cpt_lf *lf, void *sa, uint8_t opcode,
+ uint16_t ctx_len, uint8_t egrp)
+{
+ union cpt_res_s res, *hw_res;
+ struct cpt_inst_s inst;
+ uint64_t lmt_status;
+ int ret = 0;
+
+ hw_res = plt_zmalloc(sizeof(*hw_res), ROC_CPT_RES_ALIGN);
+ if (unlikely(hw_res == NULL)) {
+ plt_err("Couldn't allocate memory for result address");
+ return -ENOMEM;
+ }
+
+ hw_res->cn9k.compcode = CPT_COMP_NOT_DONE;
+
+ /* Minor opcode carries the context length in 8-byte words */
+ inst.w4.s.opcode_major = opcode;
+ inst.w4.s.opcode_minor = ctx_len >> 3;
+ inst.w4.s.param1 = 0;
+ inst.w4.s.param2 = 0;
+ inst.w4.s.dlen = ctx_len;
+ inst.dptr = rte_mempool_virt2iova(sa);
+ inst.rptr = 0;
+ inst.w7.s.cptr = rte_mempool_virt2iova(sa);
+ inst.w7.s.egrp = egrp;
+
+ inst.w0.u64 = 0;
+ inst.w2.u64 = 0;
+ inst.w3.u64 = 0;
+ inst.res_addr = (uintptr_t)hw_res;
+
+ /* Ensure the instruction and result slot are visible before submit */
+ rte_io_wmb();
+
+ do {
+ /* Copy CPT command to LMTLINE */
+ roc_lmt_mov64((void *)lf->lmt_base, &inst);
+ /* LDEOR returns 0 when the LMTST did not take effect; retry */
+ lmt_status = roc_lmt_submit_ldeor(lf->io_addr);
+ } while (lmt_status == 0);
+
+ const uint64_t timeout = plt_tsc_cycles() + 60 * plt_tsc_hz();
+
+ /* Wait until CPT instruction completes */
+ do {
+ res.u64[0] = __atomic_load_n(&hw_res->u64[0], __ATOMIC_RELAXED);
+ if (unlikely(plt_tsc_cycles() > timeout)) {
+ plt_err("Request timed out");
+ ret = -ETIMEDOUT;
+ goto free;
+ }
+ } while (res.cn9k.compcode == CPT_COMP_NOT_DONE);
+
+ /* Decode hardware completion code */
+ if (unlikely(res.cn9k.compcode != CPT_COMP_GOOD)) {
+ ret = res.cn9k.compcode;
+ switch (ret) {
+ case CPT_COMP_INSTERR:
+ plt_err("Request failed with instruction error");
+ break;
+ case CPT_COMP_FAULT:
+ plt_err("Request failed with DMA fault");
+ break;
+ case CPT_COMP_HWERR:
+ plt_err("Request failed with hardware error");
+ break;
+ default:
+ plt_err("Request failed with unknown hardware completion code : 0x%x",
+ ret);
+ }
+ ret = -EINVAL;
+ goto free;
+ }
+
+ /* Decode microcode completion code */
+ if (unlikely(res.cn9k.uc_compcode != ROC_IE_ON_UCC_SUCCESS)) {
+ ret = res.cn9k.uc_compcode;
+ switch (ret) {
+ case ROC_IE_ON_AUTH_UNSUPPORTED:
+ plt_err("Invalid auth type");
+ break;
+ case ROC_IE_ON_ENCRYPT_UNSUPPORTED:
+ plt_err("Invalid encrypt type");
+ break;
+ default:
+ plt_err("Request failed with unknown microcode completion code : 0x%x",
+ ret);
+ }
+ ret = -ENOTSUP;
+ }
+
+free:
+ plt_free(hw_res);
+ return ret;
+}