struct nix *nix = roc_nix_to_nix_priv(roc_nix);
struct roc_nix_ipsec_cfg cfg;
size_t inb_sa_sz;
- int rc;
+ int rc, i;
+ void *sa;
/* CN9K SA size is different */
if (roc_model_is_cn9k())
plt_err("Failed to allocate memory for Inbound SA");
return -ENOMEM;
}
+ if (roc_model_is_cn10k()) {
+ for (i = 0; i < ipsec_in_max_spi; i++) {
+ sa = ((uint8_t *)nix->inb_sa_base) + (i * inb_sa_sz);
+ roc_ot_ipsec_inb_sa_init(sa, true);
+ }
+ }
memset(&cfg, 0, sizeof(cfg));
cfg.sa_size = inb_sa_sz;
uintptr_t
roc_nix_inl_inb_sa_base_get(struct roc_nix *roc_nix, bool inb_inl_dev)
{
- struct nix *nix = roc_nix_to_nix_priv(roc_nix);
struct idev_cfg *idev = idev_get_cfg();
struct nix_inl_dev *inl_dev;
+ struct nix *nix = NULL;
if (idev == NULL)
return 0;
- if (!nix->inl_inb_ena)
- return 0;
+ if (!inb_inl_dev && roc_nix == NULL)
+ return -EINVAL;
+
+ if (roc_nix) {
+ nix = roc_nix_to_nix_priv(roc_nix);
+ if (!nix->inl_inb_ena)
+ return 0;
+ }
- inl_dev = idev->nix_inl_dev;
if (inb_inl_dev) {
+ inl_dev = idev->nix_inl_dev;
/* Return inline dev sa base */
if (inl_dev)
return (uintptr_t)inl_dev->inb_sa_base;
uint32_t
roc_nix_inl_inb_sa_max_spi(struct roc_nix *roc_nix, bool inb_inl_dev)
{
- struct nix *nix = roc_nix_to_nix_priv(roc_nix);
struct idev_cfg *idev = idev_get_cfg();
struct nix_inl_dev *inl_dev;
+ struct nix *nix;
if (idev == NULL)
return 0;
- if (!nix->inl_inb_ena)
- return 0;
+ if (!inb_inl_dev && roc_nix == NULL)
+ return -EINVAL;
+
+ if (roc_nix) {
+ nix = roc_nix_to_nix_priv(roc_nix);
+ if (!nix->inl_inb_ena)
+ return 0;
+ }
- inl_dev = idev->nix_inl_dev;
if (inb_inl_dev) {
+ inl_dev = idev->nix_inl_dev;
if (inl_dev)
return inl_dev->ipsec_in_max_spi;
return 0;
uint32_t
roc_nix_inl_inb_sa_sz(struct roc_nix *roc_nix, bool inl_dev_sa)
{
	struct idev_cfg *idev = idev_get_cfg();
	struct nix_inl_dev *inl_dev;
	struct nix *nix;

	/* Nothing can be looked up without the idev config */
	if (idev == NULL)
		return 0;

	/* A NIX handle is mandatory when the per-port SA size is wanted.
	 * NOTE(review): -EINVAL is implicitly converted to a large uint32_t
	 * value because the return type is unsigned — confirm callers do not
	 * interpret it as a valid SA size.
	 */
	if (!inl_dev_sa && roc_nix == NULL)
		return -EINVAL;

	/* Per-port inbound SA size */
	if (roc_nix != NULL) {
		nix = roc_nix_to_nix_priv(roc_nix);
		if (!inl_dev_sa)
			return nix->inb_sa_sz;
	}

	/* Inline device inbound SA size */
	if (inl_dev_sa) {
		inl_dev = idev->nix_inl_dev;
		return inl_dev != NULL ? inl_dev->inb_sa_sz : 0;
	}

	return 0;
}
nix->inl_inb_ena = false;
+ /* Flush Inbound CTX cache entries */
+ roc_nix_cpt_ctx_cache_sync(roc_nix);
+
/* Disable Inbound SA */
return nix_inl_sa_tbl_release(roc_nix);
}
void *sa_base;
size_t sa_sz;
int i, j, rc;
+ void *sa;
if (idev == NULL)
return -ENOTSUP;
/* Retrieve inline device if present */
inl_dev = idev->nix_inl_dev;
sso_pffunc = inl_dev ? inl_dev->dev.pf_func : idev_sso_pffunc_get();
+ /* Use sso_pffunc if explicitly requested */
+ if (roc_nix->ipsec_out_sso_pffunc)
+ sso_pffunc = idev_sso_pffunc_get();
+
if (!sso_pffunc) {
plt_err("Failed to setup inline outb, need either "
"inline device or sso device");
eng_grpmask = (1ULL << ROC_CPT_DFLT_ENG_GRP_SE |
1ULL << ROC_CPT_DFLT_ENG_GRP_SE_IE |
1ULL << ROC_CPT_DFLT_ENG_GRP_AE);
- rc = cpt_lfs_alloc(dev, eng_grpmask, blkaddr, true);
+ rc = cpt_lfs_alloc(dev, eng_grpmask, blkaddr,
+ !roc_nix->ipsec_out_sso_pffunc);
if (rc) {
plt_err("Failed to alloc CPT LF resources, rc=%d", rc);
goto lf_detach;
plt_err("Outbound SA base alloc failed");
goto lf_fini;
}
+ if (roc_model_is_cn10k()) {
+ for (i = 0; i < roc_nix->ipsec_out_max_sa; i++) {
+ sa = ((uint8_t *)sa_base) + (i * sa_sz);
+ roc_ot_ipsec_outb_sa_init(sa);
+ }
+ }
nix->outb_sa_base = sa_base;
nix->outb_sa_sz = sa_sz;
inl_rq->first_skip = rq->first_skip;
inl_rq->later_skip = rq->later_skip;
inl_rq->lpb_size = rq->lpb_size;
+ inl_rq->lpb_drop_ena = true;
+ inl_rq->spb_ena = rq->spb_ena;
+ inl_rq->spb_aura_handle = rq->spb_aura_handle;
+ inl_rq->spb_size = rq->spb_size;
+ inl_rq->spb_drop_ena = !!rq->spb_ena;
if (!roc_model_is_cn9k()) {
uint64_t aura_limit =
roc_npa_aura_op_limit_get(inl_rq->aura_handle);
uint64_t aura_shift = plt_log2_u32(aura_limit);
+ uint64_t aura_drop, drop_pc;
if (aura_shift < 8)
aura_shift = 0;
else
aura_shift = aura_shift - 8;
- /* Set first pass RQ to drop when half of the buffers are in
+ /* Set first pass RQ to drop after part of buffers are in
* use to avoid metabuf alloc failure. This is needed as long
- * as we cannot use different
+ * as we cannot use different aura.
*/
- inl_rq->red_pass = (aura_limit / 2) >> aura_shift;
- inl_rq->red_drop = ((aura_limit / 2) - 1) >> aura_shift;
+ drop_pc = inl_dev->lpb_drop_pc;
+ aura_drop = ((aura_limit * drop_pc) / 100) >> aura_shift;
+ roc_npa_aura_drop_set(inl_rq->aura_handle, aura_drop, true);
+ }
+
+ if (inl_rq->spb_ena) {
+ uint64_t aura_limit =
+ roc_npa_aura_op_limit_get(inl_rq->spb_aura_handle);
+ uint64_t aura_shift = plt_log2_u32(aura_limit);
+ uint64_t aura_drop, drop_pc;
+
+ if (aura_shift < 8)
+ aura_shift = 0;
+ else
+ aura_shift = aura_shift - 8;
+
+ /* Set first pass RQ to drop after part of buffers are in
+ * use to avoid metabuf alloc failure. This is needed as long
+ * as we cannot use different aura.
+ */
+ drop_pc = inl_dev->spb_drop_pc;
+ aura_drop = ((aura_limit * drop_pc) / 100) >> aura_shift;
+ roc_npa_aura_drop_set(inl_rq->spb_aura_handle, aura_drop, true);
}
/* Enable IPSec */
inl_rq->tag_mask = 0xFFF00000;
inl_rq->tt = SSO_TT_ORDERED;
inl_rq->hwgrp = 0;
- inl_rq->wqe_skip = 1;
+ inl_rq->wqe_skip = inl_dev->wqe_skip;
inl_rq->sso_ena = true;
/* Prepare and send RQ init mbox */
if (rc)
plt_err("Failed to disable inline device rq, rc=%d", rc);
+ roc_npa_aura_drop_set(inl_rq->aura_handle, 0, false);
+ if (inl_rq->spb_ena)
+ roc_npa_aura_drop_set(inl_rq->spb_aura_handle, 0, false);
+
/* Flush NIX LF for CN10K */
- if (roc_model_is_cn10k())
- plt_write64(0, inl_dev->nix_base + NIX_LF_OP_VWQE_FLUSH);
+ nix_rq_vwqe_flush(rq, inl_dev->vwqe_interval);
return rc;
}
roc_nix_inl_sa_sync(struct roc_nix *roc_nix, void *sa, bool inb,
enum roc_nix_inl_sa_sync_op op)
{
- struct nix *nix = roc_nix_to_nix_priv(roc_nix);
- struct roc_cpt_lf *outb_lf = nix->cpt_lf_base;
+ struct idev_cfg *idev = idev_get_cfg();
+ struct nix_inl_dev *inl_dev = NULL;
+ struct roc_cpt_lf *outb_lf = NULL;
union cpt_lf_ctx_reload reload;
union cpt_lf_ctx_flush flush;
+ bool get_inl_lf = true;
uintptr_t rbase;
+ struct nix *nix;
/* Nothing much to do on cn9k */
if (roc_model_is_cn9k()) {
return 0;
}
- if (!inb && !outb_lf)
+ if (idev)
+ inl_dev = idev->nix_inl_dev;
+
+ if (!inl_dev && roc_nix == NULL)
return -EINVAL;
- /* Performing op via outbound lf is enough
- * when inline dev is not in use.
- */
- if (outb_lf && !nix->inb_inl_dev) {
+ if (roc_nix) {
+ nix = roc_nix_to_nix_priv(roc_nix);
+ outb_lf = nix->cpt_lf_base;
+ if (inb && !nix->inb_inl_dev)
+ get_inl_lf = false;
+ }
+
+ if (inb && get_inl_lf) {
+ outb_lf = NULL;
+ if (inl_dev && inl_dev->attach_cptlf)
+ outb_lf = &inl_dev->cpt_lf;
+ }
+
+ if (outb_lf) {
rbase = outb_lf->rbase;
flush.u = 0;
}
return 0;
}
+ plt_err("Could not get CPT LF for SA sync");
+ return -ENOTSUP;
+}
+
+int
+roc_nix_inl_ctx_write(struct roc_nix *roc_nix, void *sa_dptr, void *sa_cptr,
+ bool inb, uint16_t sa_len)
+{
+ struct idev_cfg *idev = idev_get_cfg();
+ struct nix_inl_dev *inl_dev = NULL;
+ struct roc_cpt_lf *outb_lf = NULL;
+ union cpt_lf_ctx_flush flush;
+ bool get_inl_lf = true;
+ uintptr_t rbase;
+ struct nix *nix;
+ int rc;
+ /* Nothing much to do on cn9k */
+ if (roc_model_is_cn9k()) {
+ plt_atomic_thread_fence(__ATOMIC_ACQ_REL);
+ return 0;
+ }
+
+ if (idev)
+ inl_dev = idev->nix_inl_dev;
+
+ if (!inl_dev && roc_nix == NULL)
+ return -EINVAL;
+
+ if (roc_nix) {
+ nix = roc_nix_to_nix_priv(roc_nix);
+ outb_lf = nix->cpt_lf_base;
+
+ if (inb && !nix->inb_inl_dev)
+ get_inl_lf = false;
+ }
+
+ if (inb && get_inl_lf) {
+ outb_lf = NULL;
+ if (inl_dev && inl_dev->attach_cptlf)
+ outb_lf = &inl_dev->cpt_lf;
+ }
+
+ if (outb_lf) {
+ rbase = outb_lf->rbase;
+ flush.u = 0;
+
+ rc = roc_cpt_ctx_write(outb_lf, sa_dptr, sa_cptr, sa_len);
+ if (rc)
+ return rc;
+ /* Trigger CTX flush to write dirty data back to DRAM */
+ flush.s.cptr = ((uintptr_t)sa_cptr) >> 7;
+ plt_write64(flush.u, rbase + CPT_LF_CTX_FLUSH);
+
+ return 0;
+ }
+ plt_nix_dbg("Could not get CPT LF for CTX write");
return -ENOTSUP;
}