#include "roc_api.h"
#include "roc_priv.h"
+uint32_t soft_exp_consumer_cnt;
+
PLT_STATIC_ASSERT(ROC_NIX_INL_ONF_IPSEC_INB_SA_SZ ==
1UL << ROC_NIX_INL_ONF_IPSEC_INB_SA_SZ_LOG2);
PLT_STATIC_ASSERT(ROC_NIX_INL_ONF_IPSEC_INB_SA_SZ == 512);
static int
nix_inl_inb_sa_tbl_setup(struct roc_nix *roc_nix)
{
- uint16_t ipsec_in_max_spi = roc_nix->ipsec_in_max_spi;
+ uint32_t ipsec_in_min_spi = roc_nix->ipsec_in_min_spi;
+ uint32_t ipsec_in_max_spi = roc_nix->ipsec_in_max_spi;
struct nix *nix = roc_nix_to_nix_priv(roc_nix);
struct roc_nix_ipsec_cfg cfg;
+ uint64_t max_sa, i;
size_t inb_sa_sz;
+ void *sa;
int rc;
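+
+ /* Round the SPI range up to a power of two so a simple mask
+ * (max_sa - 1) maps any SPI to a table slot, e.g. SPIs 1..4096
+ * give max_sa = 4096 and mask 0xFFF
+ */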
+ max_sa = plt_align32pow2(ipsec_in_max_spi - ipsec_in_min_spi + 1);
+
/* CN9K SA size is different */
if (roc_model_is_cn9k())
inb_sa_sz = ROC_NIX_INL_ONF_IPSEC_INB_SA_SZ;
/* Alloc contiguous memory for Inbound SA's */
nix->inb_sa_sz = inb_sa_sz;
- nix->inb_sa_base = plt_zmalloc(inb_sa_sz * ipsec_in_max_spi,
+ nix->inb_spi_mask = max_sa - 1;
+ nix->inb_sa_base = plt_zmalloc(inb_sa_sz * max_sa,
ROC_NIX_INL_SA_BASE_ALIGN);
if (!nix->inb_sa_base) {
plt_err("Failed to allocate memory for Inbound SA");
return -ENOMEM;
}
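+
+ /* Pre-initialize every SA slot with cn10k (OT) inbound defaults so
+ * lookups on SPIs without an installed SA see a well-defined context
+ */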
+ if (roc_model_is_cn10k()) {
+ for (i = 0; i < max_sa; i++) {
+ sa = ((uint8_t *)nix->inb_sa_base) + (i * inb_sa_sz);
+ roc_ot_ipsec_inb_sa_init(sa, true);
+ }
+ }
memset(&cfg, 0, sizeof(cfg));
cfg.sa_size = inb_sa_sz;
cfg.iova = (uintptr_t)nix->inb_sa_base;
- cfg.max_sa = ipsec_in_max_spi + 1;
+ cfg.max_sa = max_sa;
cfg.tt = SSO_TT_ORDERED;
/* Setup device specific inb SA table */
uintptr_t
roc_nix_inl_inb_sa_base_get(struct roc_nix *roc_nix, bool inb_inl_dev)
{
- struct nix *nix = roc_nix_to_nix_priv(roc_nix);
struct idev_cfg *idev = idev_get_cfg();
struct nix_inl_dev *inl_dev;
+ struct nix *nix = NULL;
if (idev == NULL)
return 0;
- if (!nix->inl_inb_ena)
- return 0;
+ if (!inb_inl_dev && roc_nix == NULL)
+ return -EINVAL;
+
+ if (roc_nix) {
+ nix = roc_nix_to_nix_priv(roc_nix);
+ if (!nix->inl_inb_ena)
+ return 0;
+ }
- inl_dev = idev->nix_inl_dev;
if (inb_inl_dev) {
+ inl_dev = idev->nix_inl_dev;
/* Return inline dev sa base */
if (inl_dev)
return (uintptr_t)inl_dev->inb_sa_base;
}
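+/* Return the SPI mask used for SA table indexing (0 on failure) and
+ * report the valid [min_spi, max_spi] window via the out params; e.g.
+ * a 1..4096 SPI range yields mask 0xFFF
+ */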
uint32_t
-roc_nix_inl_inb_sa_max_spi(struct roc_nix *roc_nix, bool inb_inl_dev)
+roc_nix_inl_inb_spi_range(struct roc_nix *roc_nix, bool inb_inl_dev,
+ uint32_t *min_spi, uint32_t *max_spi)
{
- struct nix *nix = roc_nix_to_nix_priv(roc_nix);
struct idev_cfg *idev = idev_get_cfg();
+ uint32_t min = 0, max = 0, mask = 0;
struct nix_inl_dev *inl_dev;
+ struct nix *nix = NULL;
if (idev == NULL)
return 0;
- if (!nix->inl_inb_ena)
- return 0;
+ if (!inb_inl_dev && roc_nix == NULL)
+ return -EINVAL;
inl_dev = idev->nix_inl_dev;
if (inb_inl_dev) {
- if (inl_dev)
- return inl_dev->ipsec_in_max_spi;
- return 0;
+ if (inl_dev == NULL)
+ goto exit;
+ min = inl_dev->ipsec_in_min_spi;
+ max = inl_dev->ipsec_in_max_spi;
+ mask = inl_dev->inb_spi_mask;
+ } else {
+ nix = roc_nix_to_nix_priv(roc_nix);
+ if (!nix->inl_inb_ena)
+ goto exit;
+ min = roc_nix->ipsec_in_min_spi;
+ max = roc_nix->ipsec_in_max_spi;
+ mask = nix->inb_spi_mask;
}
-
- return roc_nix->ipsec_in_max_spi;
+exit:
+ if (min_spi)
+ *min_spi = min;
+ if (max_spi)
+ *max_spi = max;
+ return mask;
}
uint32_t
roc_nix_inl_inb_sa_sz(struct roc_nix *roc_nix, bool inl_dev_sa)
{
- struct nix *nix = roc_nix_to_nix_priv(roc_nix);
struct idev_cfg *idev = idev_get_cfg();
struct nix_inl_dev *inl_dev;
+ struct nix *nix;
if (idev == NULL)
return 0;
- if (!inl_dev_sa)
- return nix->inb_sa_sz;
+ if (!inl_dev_sa && roc_nix == NULL)
+ return -EINVAL;
- inl_dev = idev->nix_inl_dev;
- if (inl_dev_sa && inl_dev)
- return inl_dev->inb_sa_sz;
+ if (roc_nix) {
+ nix = roc_nix_to_nix_priv(roc_nix);
+ if (!inl_dev_sa)
+ return nix->inb_sa_sz;
+ }
+
+ if (inl_dev_sa) {
+ inl_dev = idev->nix_inl_dev;
+ if (inl_dev)
+ return inl_dev->inb_sa_sz;
+ }
- /* On error */
return 0;
}
uintptr_t
roc_nix_inl_inb_sa_get(struct roc_nix *roc_nix, bool inb_inl_dev, uint32_t spi)
{
+ uint32_t max_spi = 0, min_spi = 0, mask;
uintptr_t sa_base;
- uint32_t max_spi;
uint64_t sz;
sa_base = roc_nix_inl_inb_sa_base_get(roc_nix, inb_inl_dev);
if (!sa_base)
return 0;
- /* Check if SPI is in range */
- max_spi = roc_nix_inl_inb_sa_max_spi(roc_nix, inb_inl_dev);
- if (spi > max_spi) {
- plt_err("Inbound SA SPI %u exceeds max %u", spi, max_spi);
- return 0;
- }
-
/* Get SA size */
sz = roc_nix_inl_inb_sa_sz(roc_nix, inb_inl_dev);
if (!sz)
return 0;
+ if (roc_nix && roc_nix->custom_sa_action)
+ return (sa_base + (spi * sz));
+
+ /* Check if SPI is in range */
+ mask = roc_nix_inl_inb_spi_range(roc_nix, inb_inl_dev, &min_spi,
+ &max_spi);
+ if (spi > max_spi || spi < min_spi)
+ plt_nix_dbg("Inbound SA SPI %u not in range (%u..%u)", spi,
+ min_spi, max_spi);
+
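+ /* e.g. with mask 0xFFF and sz 1024, SPI 0x1005 maps to
+ * sa_base + 5 * 1024
+ */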
/* Basic logic of SPI->SA for now */
- return (sa_base + (spi * sz));
+ return (sa_base + ((spi & mask) * sz));
+}
+
+int
+roc_nix_reassembly_configure(uint32_t max_wait_time, uint16_t max_frags)
+{
+ struct idev_cfg *idev = idev_get_cfg();
+ struct roc_cpt_rxc_time_cfg cfg;
+ struct roc_cpt *roc_cpt;
+
+ PLT_SET_USED(max_frags);
+ if (idev == NULL)
+ return -ENOTSUP;
+
+ roc_cpt = idev->cpt;
+ if (!roc_cpt) {
+ plt_err("Cannot support inline inbound, cryptodev not probed");
+ return -ENOTSUP;
+ }
+
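+ /* Derive the RXC timer step so that ROC_NIX_INL_REAS_ACTIVE_LIMIT
+ * ticks span max_wait_time (assumed here to be in ms)
+ */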
+ cfg.step = (max_wait_time * 1000 / ROC_NIX_INL_REAS_ACTIVE_LIMIT);
+ cfg.zombie_limit = ROC_NIX_INL_REAS_ZOMBIE_LIMIT;
+ cfg.zombie_thres = ROC_NIX_INL_REAS_ZOMBIE_THRESHOLD;
+ cfg.active_limit = ROC_NIX_INL_REAS_ACTIVE_LIMIT;
+ cfg.active_thres = ROC_NIX_INL_REAS_ACTIVE_THRESHOLD;
+
+ return roc_cpt_rxc_time_cfg(roc_cpt, &cfg);
}
int
nix->inl_inb_ena = false;
+ /* Flush Inbound CTX cache entries */
+ roc_nix_cpt_ctx_cache_sync(roc_nix);
+
/* Disable Inbound SA */
return nix_inl_sa_tbl_release(roc_nix);
}
struct dev *dev = &nix->dev;
struct msix_offset_rsp *rsp;
struct nix_inl_dev *inl_dev;
+ size_t sa_sz, ring_sz;
uint16_t sso_pffunc;
uint8_t eng_grpmask;
- uint64_t blkaddr;
+ uint64_t blkaddr, i;
+ uint64_t *ring_base;
uint16_t nb_lf;
void *sa_base;
- size_t sa_sz;
- int i, j, rc;
+ int j, rc;
+ void *sa;
if (idev == NULL)
return -ENOTSUP;
/* Retrieve inline device if present */
inl_dev = idev->nix_inl_dev;
sso_pffunc = inl_dev ? inl_dev->dev.pf_func : idev_sso_pffunc_get();
+ /* Use sso_pffunc if explicitly requested */
+ if (roc_nix->ipsec_out_sso_pffunc)
+ sso_pffunc = idev_sso_pffunc_get();
+
if (!sso_pffunc) {
plt_err("Failed to setup inline outb, need either "
"inline device or sso device");
eng_grpmask = (1ULL << ROC_CPT_DFLT_ENG_GRP_SE |
1ULL << ROC_CPT_DFLT_ENG_GRP_SE_IE |
1ULL << ROC_CPT_DFLT_ENG_GRP_AE);
- rc = cpt_lfs_alloc(dev, eng_grpmask, blkaddr, true);
+ rc = cpt_lfs_alloc(dev, eng_grpmask, blkaddr,
+ !roc_nix->ipsec_out_sso_pffunc);
if (rc) {
plt_err("Failed to alloc CPT LF resources, rc=%d", rc);
goto lf_detach;
plt_err("Outbound SA base alloc failed");
goto lf_fini;
}
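+ /* Pre-initialize every outbound SA slot with cn10k (OT) defaults */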
+ if (roc_model_is_cn10k()) {
+ for (i = 0; i < roc_nix->ipsec_out_max_sa; i++) {
+ sa = ((uint8_t *)sa_base) + (i * sa_sz);
+ roc_ot_ipsec_outb_sa_init(sa);
+ }
+ }
nix->outb_sa_base = sa_base;
nix->outb_sa_sz = sa_sz;
nix->nb_cpt_lf = nb_lf;
nix->outb_err_sso_pffunc = sso_pffunc;
nix->inl_outb_ena = true;
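+ /* One soft expiry ring per ROC_IPSEC_ERR_RING_MAX_ENTRY SAs (rounded
+ * up); each port owns a fixed window of ring slots in the inline dev
+ */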
+ nix->outb_se_ring_cnt =
+ roc_nix->ipsec_out_max_sa / ROC_IPSEC_ERR_RING_MAX_ENTRY + 1;
+ nix->outb_se_ring_base =
+ roc_nix->port_id * ROC_NIX_SOFT_EXP_PER_PORT_MAX_RINGS;
+
+ if (inl_dev == NULL || !inl_dev->set_soft_exp_poll) {
+ nix->outb_se_ring_cnt = 0;
+ return 0;
+ }
+
+ /* Allocate memory to be used as a ring buffer to poll for
+ * soft expiry events from ucode
+ */
+ ring_sz = (ROC_IPSEC_ERR_RING_MAX_ENTRY + 1) * sizeof(uint64_t);
+ ring_base = inl_dev->sa_soft_exp_ring;
+ for (i = 0; i < nix->outb_se_ring_cnt; i++) {
+ ring_base[nix->outb_se_ring_base + i] =
+ PLT_U64_CAST(plt_zmalloc(ring_sz, 0));
+ if (!ring_base[nix->outb_se_ring_base + i]) {
+ plt_err("Couldn't allocate memory for soft exp ring");
+ while (i--)
+ plt_free(PLT_PTR_CAST(
+ ring_base[nix->outb_se_ring_base + i]));
+ rc = -ENOMEM;
+ goto lf_fini;
+ }
+ }
+
return 0;
lf_fini:
{
struct nix *nix = roc_nix_to_nix_priv(roc_nix);
struct roc_cpt_lf *lf_base = nix->cpt_lf_base;
+ struct idev_cfg *idev = idev_get_cfg();
struct dev *dev = &nix->dev;
+ struct nix_inl_dev *inl_dev;
+ uint64_t *ring_base;
int i, rc, ret = 0;
if (!nix->inl_outb_ena)
plt_free(nix->outb_sa_base);
nix->outb_sa_base = NULL;
+ if (idev && idev->nix_inl_dev && nix->outb_se_ring_cnt) {
+ inl_dev = idev->nix_inl_dev;
+ ring_base = inl_dev->sa_soft_exp_ring;
+ ring_base += nix->outb_se_ring_base;
+
+ for (i = 0; i < nix->outb_se_ring_cnt; i++) {
+ if (ring_base[i])
+ plt_free(PLT_PTR_CAST(ring_base[i]));
+ }
+ }
+
ret |= rc;
return ret;
}
roc_nix_inl_dev_rq_get(struct roc_nix_rq *rq)
{
struct idev_cfg *idev = idev_get_cfg();
+ int port_id = rq->roc_nix->port_id;
struct nix_inl_dev *inl_dev;
struct roc_nix_rq *inl_rq;
+ uint16_t inl_rq_id;
struct dev *dev;
int rc;
if (!inl_dev)
return 0;
- /* Just take reference if already inited */
- if (inl_dev->rq_refs) {
- inl_dev->rq_refs++;
- rq->inl_dev_ref = true;
+ /* Check if this RQ is already holding reference */
+ if (rq->inl_dev_refs)
return 0;
- }
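+ /* Map this port to an inline dev RQ: one RQ per port when the
+ * inline device has multiple RQs, else all ports share RQ 0
+ */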
+ inl_rq_id = inl_dev->nb_rqs > 1 ? port_id : 0;
dev = &inl_dev->dev;
- inl_rq = &inl_dev->rq;
+ inl_rq = &inl_dev->rqs[inl_rq_id];
+
+ /* Just take reference if already inited */
+ if (inl_rq->inl_dev_refs) {
+ inl_rq->inl_dev_refs++;
+ rq->inl_dev_refs = 1;
+ return 0;
+ }
memset(inl_rq, 0, sizeof(struct roc_nix_rq));
/* Take RQ pool attributes from the first ethdev RQ */
- inl_rq->qid = 0;
+ inl_rq->qid = inl_rq_id;
inl_rq->aura_handle = rq->aura_handle;
inl_rq->first_skip = rq->first_skip;
inl_rq->later_skip = rq->later_skip;
inl_rq->lpb_size = rq->lpb_size;
+ inl_rq->lpb_drop_ena = true;
+ inl_rq->spb_ena = rq->spb_ena;
+ inl_rq->spb_aura_handle = rq->spb_aura_handle;
+ inl_rq->spb_size = rq->spb_size;
+ inl_rq->spb_drop_ena = !!rq->spb_ena;
if (!roc_model_is_cn9k()) {
uint64_t aura_limit =
roc_npa_aura_op_limit_get(inl_rq->aura_handle);
uint64_t aura_shift = plt_log2_u32(aura_limit);
+ uint64_t aura_drop, drop_pc;
+
+ if (aura_shift < 8)
+ aura_shift = 0;
+ else
+ aura_shift = aura_shift - 8;
+
+ /* Set first pass RQ to drop after a fraction of the buffers are in
+ * use to avoid metabuf alloc failure. This is needed as long
+ * as we cannot use a different aura.
+ */
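+ /* e.g. aura_limit 4096 (aura_shift 12 - 8 = 4) and drop_pc 90
+ * give aura_drop = (4096 * 90 / 100) >> 4 = 230
+ */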
+ drop_pc = inl_dev->lpb_drop_pc;
+ aura_drop = ((aura_limit * drop_pc) / 100) >> aura_shift;
+ roc_npa_aura_drop_set(inl_rq->aura_handle, aura_drop, true);
+ }
+
+ if (inl_rq->spb_ena) {
+ uint64_t aura_limit =
+ roc_npa_aura_op_limit_get(inl_rq->spb_aura_handle);
+ uint64_t aura_shift = plt_log2_u32(aura_limit);
+ uint64_t aura_drop, drop_pc;
if (aura_shift < 8)
aura_shift = 0;
else
aura_shift = aura_shift - 8;
- /* Set first pass RQ to drop when half of the buffers are in
+ /* Set first pass RQ to drop after a fraction of the buffers are in
* use to avoid metabuf alloc failure. This is needed as long
- * as we cannot use different
+ * as we cannot use a different aura.
*/
- inl_rq->red_pass = (aura_limit / 2) >> aura_shift;
- inl_rq->red_drop = ((aura_limit / 2) - 1) >> aura_shift;
+ drop_pc = inl_dev->spb_drop_pc;
+ aura_drop = ((aura_limit * drop_pc) / 100) >> aura_shift;
+ roc_npa_aura_drop_set(inl_rq->spb_aura_handle, aura_drop, true);
}
/* Enable IPSec */
inl_rq->flow_tag_width = 20;
/* Special tag mask */
- inl_rq->tag_mask = 0xFFF00000;
+ inl_rq->tag_mask = rq->tag_mask;
inl_rq->tt = SSO_TT_ORDERED;
inl_rq->hwgrp = 0;
- inl_rq->wqe_skip = 1;
+ inl_rq->wqe_skip = inl_dev->wqe_skip;
inl_rq->sso_ena = true;
/* Prepare and send RQ init mbox */
return rc;
}
- inl_dev->rq_refs++;
- rq->inl_dev_ref = true;
+ inl_rq->inl_dev_refs++;
+ rq->inl_dev_refs = 1;
return 0;
}
roc_nix_inl_dev_rq_put(struct roc_nix_rq *rq)
{
struct idev_cfg *idev = idev_get_cfg();
+ int port_id = rq->roc_nix->port_id;
struct nix_inl_dev *inl_dev;
struct roc_nix_rq *inl_rq;
+ uint16_t inl_rq_id;
struct dev *dev;
int rc;
if (idev == NULL)
return 0;
- if (!rq->inl_dev_ref)
+ if (!rq->inl_dev_refs)
return 0;
inl_dev = idev->nix_inl_dev;
return -EFAULT;
}
- rq->inl_dev_ref = false;
- inl_dev->rq_refs--;
- if (inl_dev->rq_refs)
+ dev = &inl_dev->dev;
+ inl_rq_id = inl_dev->nb_rqs > 1 ? port_id : 0;
+ inl_rq = &inl_dev->rqs[inl_rq_id];
+
+ rq->inl_dev_refs = 0;
+ inl_rq->inl_dev_refs--;
+ if (inl_rq->inl_dev_refs)
return 0;
- dev = &inl_dev->dev;
- inl_rq = &inl_dev->rq;
/* There are no more references, disable RQ */
rc = nix_rq_ena_dis(dev, inl_rq, false);
if (rc)
plt_err("Failed to disable inline device rq, rc=%d", rc);
+ roc_npa_aura_drop_set(inl_rq->aura_handle, 0, false);
+ if (inl_rq->spb_ena)
+ roc_npa_aura_drop_set(inl_rq->spb_aura_handle, 0, false);
+
/* Flush NIX LF for CN10K */
- if (roc_model_is_cn10k())
- plt_write64(0, inl_dev->nix_base + NIX_LF_OP_VWQE_FLUSH);
+ nix_rq_vwqe_flush(rq, inl_dev->vwqe_interval);
return rc;
}
-uint64_t
-roc_nix_inl_dev_rq_limit_get(void)
+void
+roc_nix_inb_mode_set(struct roc_nix *roc_nix, bool use_inl_dev)
+{
+ struct nix *nix = roc_nix_to_nix_priv(roc_nix);
+
+ /* Info used by NPC flow rule add */
+ nix->inb_inl_dev = use_inl_dev;
+}
+
+int
+roc_nix_inl_outb_soft_exp_poll_switch(struct roc_nix *roc_nix, bool poll)
{
+ struct nix *nix = roc_nix_to_nix_priv(roc_nix);
struct idev_cfg *idev = idev_get_cfg();
struct nix_inl_dev *inl_dev;
- struct roc_nix_rq *inl_rq;
+ uint16_t ring_idx, i;
if (!idev || !idev->nix_inl_dev)
return 0;
inl_dev = idev->nix_inl_dev;
- if (!inl_dev->rq_refs)
- return 0;
- inl_rq = &inl_dev->rq;
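+ /* Set/clear this port's rings in the bitmap so the soft expiry
+ * poll thread knows which rings to scan
+ */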
+ for (i = 0; i < nix->outb_se_ring_cnt; i++) {
+ ring_idx = nix->outb_se_ring_base + i;
- return roc_npa_aura_op_limit_get(inl_rq->aura_handle);
-}
+ if (poll)
+ plt_bitmap_set(inl_dev->soft_exp_ring_bmap, ring_idx);
+ else
+ plt_bitmap_clear(inl_dev->soft_exp_ring_bmap, ring_idx);
+ }
-void
-roc_nix_inb_mode_set(struct roc_nix *roc_nix, bool use_inl_dev)
-{
- struct nix *nix = roc_nix_to_nix_priv(roc_nix);
+ if (poll)
+ soft_exp_consumer_cnt++;
+ else
+ soft_exp_consumer_cnt--;
- /* Info used by NPC flow rule add */
- nix->inb_inl_dev = use_inl_dev;
+ return 0;
}
bool
}
struct roc_nix_rq *
-roc_nix_inl_dev_rq(void)
+roc_nix_inl_dev_rq(struct roc_nix *roc_nix)
{
struct idev_cfg *idev = idev_get_cfg();
+ int port_id = roc_nix->port_id;
struct nix_inl_dev *inl_dev;
+ struct roc_nix_rq *inl_rq;
+ uint16_t inl_rq_id;
if (idev != NULL) {
inl_dev = idev->nix_inl_dev;
- if (inl_dev != NULL && inl_dev->rq_refs)
- return &inl_dev->rq;
+ if (inl_dev != NULL) {
+ inl_rq_id = inl_dev->nb_rqs > 1 ? port_id : 0;
+ inl_rq = &inl_dev->rqs[inl_rq_id];
+ if (inl_rq->inl_dev_refs)
+ return inl_rq;
+ }
}
return NULL;
memset(&cfg, 0, sizeof(cfg));
cfg.sa_size = nix->inb_sa_sz;
cfg.iova = (uintptr_t)nix->inb_sa_base;
- cfg.max_sa = roc_nix->ipsec_in_max_spi + 1;
+ cfg.max_sa = nix->inb_spi_mask + 1;
cfg.tt = tt;
cfg.tag_const = tag_const;
roc_nix_inl_sa_sync(struct roc_nix *roc_nix, void *sa, bool inb,
enum roc_nix_inl_sa_sync_op op)
{
- struct nix *nix = roc_nix_to_nix_priv(roc_nix);
- struct roc_cpt_lf *outb_lf = nix->cpt_lf_base;
+ struct idev_cfg *idev = idev_get_cfg();
+ struct nix_inl_dev *inl_dev = NULL;
+ struct roc_cpt_lf *outb_lf = NULL;
union cpt_lf_ctx_reload reload;
union cpt_lf_ctx_flush flush;
+ bool get_inl_lf = true;
uintptr_t rbase;
+ struct nix *nix;
/* Nothing much to do on cn9k */
if (roc_model_is_cn9k()) {
return 0;
}
- if (!inb && !outb_lf)
+ if (idev)
+ inl_dev = idev->nix_inl_dev;
+
+ if (!inl_dev && roc_nix == NULL)
return -EINVAL;
- /* Performing op via outbound lf is enough
- * when inline dev is not in use.
- */
- if (outb_lf && !nix->inb_inl_dev) {
+ if (roc_nix) {
+ nix = roc_nix_to_nix_priv(roc_nix);
+ outb_lf = nix->cpt_lf_base;
+ if (inb && !nix->inb_inl_dev)
+ get_inl_lf = false;
+ }
+
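+ /* For inbound SAs owned by the inline device, sync via its CPT LF
+ * instead of the ethdev outbound LF
+ */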
+ if (inb && get_inl_lf) {
+ outb_lf = NULL;
+ if (inl_dev && inl_dev->attach_cptlf)
+ outb_lf = &inl_dev->cpt_lf;
+ }
+
+ if (outb_lf) {
rbase = outb_lf->rbase;
flush.u = 0;
}
return 0;
}
+ plt_err("Could not get CPT LF for SA sync");
+ return -ENOTSUP;
+}
+
+int
+roc_nix_inl_ctx_write(struct roc_nix *roc_nix, void *sa_dptr, void *sa_cptr,
+ bool inb, uint16_t sa_len)
+{
+ struct idev_cfg *idev = idev_get_cfg();
+ struct nix_inl_dev *inl_dev = NULL;
+ struct roc_cpt_lf *outb_lf = NULL;
+ union cpt_lf_ctx_flush flush;
+ bool get_inl_lf = true;
+ uintptr_t rbase;
+ struct nix *nix;
+ int rc;
+
+ /* Nothing much to do on cn9k */
+ if (roc_model_is_cn9k()) {
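+ /* Ensure prior SA stores are visible before hardware use */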
+ plt_atomic_thread_fence(__ATOMIC_ACQ_REL);
+ return 0;
+ }
+ if (idev)
+ inl_dev = idev->nix_inl_dev;
+
+ if (!inl_dev && roc_nix == NULL)
+ return -EINVAL;
+
+ if (roc_nix) {
+ nix = roc_nix_to_nix_priv(roc_nix);
+ outb_lf = nix->cpt_lf_base;
+
+ if (inb && !nix->inb_inl_dev)
+ get_inl_lf = false;
+ }
+
+ if (inb && get_inl_lf) {
+ outb_lf = NULL;
+ if (inl_dev && inl_dev->attach_cptlf)
+ outb_lf = &inl_dev->cpt_lf;
+ }
+
+ if (outb_lf) {
+ rbase = outb_lf->rbase;
+ flush.u = 0;
+
+ rc = roc_cpt_ctx_write(outb_lf, sa_dptr, sa_cptr, sa_len);
+ if (rc)
+ return rc;
+ /* Trigger CTX flush to write dirty data back to DRAM */
+ flush.s.cptr = ((uintptr_t)sa_cptr) >> 7;
+ plt_write64(flush.u, rbase + CPT_LF_CTX_FLUSH);
+
+ return 0;
+ }
+ plt_nix_dbg("Could not get CPT LF for CTX write");
return -ENOTSUP;
}
+int
+roc_nix_inl_ts_pkind_set(struct roc_nix *roc_nix, bool ts_ena, bool inb_inl_dev)
+{
+ struct idev_cfg *idev = idev_get_cfg();
+ struct nix_inl_dev *inl_dev = NULL;
+ void *sa, *sa_base = NULL;
+ struct nix *nix = NULL;
+ uint32_t max_spi = 0;
+ uint32_t rq_refs = 0;
+ uint8_t pkind = 0;
+ int i;
+
+ if (roc_model_is_cn9k())
+ return 0;
+
+ if (!inb_inl_dev && (roc_nix == NULL))
+ return -EINVAL;
+
+ if (inb_inl_dev) {
+ if ((idev == NULL) || (idev->nix_inl_dev == NULL))
+ return 0;
+ inl_dev = idev->nix_inl_dev;
+ } else {
+ nix = roc_nix_to_nix_priv(roc_nix);
+ if (!nix->inl_inb_ena)
+ return 0;
+ sa_base = nix->inb_sa_base;
+ max_spi = roc_nix->ipsec_in_max_spi;
+ }
+
+ if (inl_dev) {
+ for (i = 0; i < inl_dev->nb_rqs; i++)
+ rq_refs += inl_dev->rqs[i].inl_dev_refs;
+
+ if (rq_refs == 0) {
+ inl_dev->ts_ena = ts_ena;
+ max_spi = inl_dev->ipsec_in_max_spi;
+ sa_base = inl_dev->inb_sa_base;
+ } else if (inl_dev->ts_ena != ts_ena) {
+ if (inl_dev->ts_ena)
+ plt_err("Inline device is already configured with TS enabled");
+ else
+ plt_err("Inline device is already configured with TS disabled");
+ return -ENOTSUP;
+ } else {
+ return 0;
+ }
+ }
+
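+ /* Pick the CPT pkind matching the timestamp setting and propagate
+ * it to every SA in the table
+ */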
+ pkind = ts_ena ? ROC_IE_OT_CPT_TS_PKIND : ROC_IE_OT_CPT_PKIND;
+
+ sa = (uint8_t *)sa_base;
+ if (pkind == ((struct roc_ot_ipsec_inb_sa *)sa)->w0.s.pkind)
+ return 0;
+
+ for (i = 0; i < max_spi; i++) {
+ sa = ((uint8_t *)sa_base) +
+ (i * ROC_NIX_INL_OT_IPSEC_INB_SA_SZ);
+ ((struct roc_ot_ipsec_inb_sa *)sa)->w0.s.pkind = pkind;
+ }
+ return 0;
+}
+
void
roc_nix_inl_dev_lock(void)
{