if (roc_model_is_cn10k()) {
for (i = 0; i < ipsec_in_max_spi; i++) {
sa = ((uint8_t *)nix->inb_sa_base) + (i * inb_sa_sz);
- roc_nix_inl_inb_sa_init(sa);
+ roc_ot_ipsec_inb_sa_init(sa, true);
}
}
/* Retrieve inline device if present */
inl_dev = idev->nix_inl_dev;
sso_pffunc = inl_dev ? inl_dev->dev.pf_func : idev_sso_pffunc_get();
+ /* Use sso_pffunc if explicitly requested */
+ if (roc_nix->ipsec_out_sso_pffunc)
+ sso_pffunc = idev_sso_pffunc_get();
+
if (!sso_pffunc) {
plt_err("Failed to setup inline outb, need either "
"inline device or sso device");
eng_grpmask = (1ULL << ROC_CPT_DFLT_ENG_GRP_SE |
1ULL << ROC_CPT_DFLT_ENG_GRP_SE_IE |
1ULL << ROC_CPT_DFLT_ENG_GRP_AE);
- rc = cpt_lfs_alloc(dev, eng_grpmask, blkaddr, true);
+ rc = cpt_lfs_alloc(dev, eng_grpmask, blkaddr,
+ !roc_nix->ipsec_out_sso_pffunc);
if (rc) {
plt_err("Failed to alloc CPT LF resources, rc=%d", rc);
goto lf_detach;
if (roc_model_is_cn10k()) {
for (i = 0; i < roc_nix->ipsec_out_max_sa; i++) {
sa = ((uint8_t *)sa_base) + (i * sa_sz);
- roc_nix_inl_outb_sa_init(sa);
+ roc_ot_ipsec_outb_sa_init(sa);
}
}
nix->outb_sa_base = sa_base;
inl_rq->first_skip = rq->first_skip;
inl_rq->later_skip = rq->later_skip;
inl_rq->lpb_size = rq->lpb_size;
+ inl_rq->lpb_drop_ena = true;
+ inl_rq->spb_ena = rq->spb_ena;
+ inl_rq->spb_aura_handle = rq->spb_aura_handle;
+ inl_rq->spb_size = rq->spb_size;
+ inl_rq->spb_drop_ena = !!rq->spb_ena;
if (!roc_model_is_cn9k()) {
uint64_t aura_limit =
roc_npa_aura_op_limit_get(inl_rq->aura_handle);
uint64_t aura_shift = plt_log2_u32(aura_limit);
+ uint64_t aura_drop, drop_pc;
+
+ if (aura_shift < 8)
+ aura_shift = 0;
+ else
+ aura_shift = aura_shift - 8;
+
+ /* Set first pass RQ to drop after part of the buffers are in
+ * use, to avoid metabuf alloc failure. This is needed as long
+ * as we cannot use a different aura.
+ */
+ drop_pc = inl_dev->lpb_drop_pc;
+ aura_drop = ((aura_limit * drop_pc) / 100) >> aura_shift;
+ roc_npa_aura_drop_set(inl_rq->aura_handle, aura_drop, true);
+ }
+
+ if (inl_rq->spb_ena) {
+ uint64_t aura_limit =
+ roc_npa_aura_op_limit_get(inl_rq->spb_aura_handle);
+ uint64_t aura_shift = plt_log2_u32(aura_limit);
+ uint64_t aura_drop, drop_pc;
if (aura_shift < 8)
aura_shift = 0;
else
aura_shift = aura_shift - 8;
- /* Set first pass RQ to drop when half of the buffers are in
+ /* Set first pass RQ to drop after part of the buffers are in
* use to avoid metabuf alloc failure. This is needed as long
- * as we cannot use different
+ * as we cannot use a different aura.
*/
- inl_rq->red_pass = (aura_limit / 2) >> aura_shift;
- inl_rq->red_drop = ((aura_limit / 2) - 1) >> aura_shift;
+ drop_pc = inl_dev->spb_drop_pc;
+ aura_drop = ((aura_limit * drop_pc) / 100) >> aura_shift;
+ roc_npa_aura_drop_set(inl_rq->spb_aura_handle, aura_drop, true);
}
/* Enable IPSec */
if (rc)
plt_err("Failed to disable inline device rq, rc=%d", rc);
+ roc_npa_aura_drop_set(inl_rq->aura_handle, 0, false);
+ if (inl_rq->spb_ena)
+ roc_npa_aura_drop_set(inl_rq->spb_aura_handle, 0, false);
+
/* Flush NIX LF for CN10K */
nix_rq_vwqe_flush(rq, inl_dev->vwqe_interval);
return -ENOTSUP;
}
-void
-roc_nix_inl_inb_sa_init(struct roc_ot_ipsec_inb_sa *sa)
-{
- size_t offset;
-
- memset(sa, 0, sizeof(struct roc_ot_ipsec_inb_sa));
-
- offset = offsetof(struct roc_ot_ipsec_inb_sa, ctx);
- sa->w0.s.hw_ctx_off = offset / ROC_CTX_UNIT_8B;
- sa->w0.s.ctx_push_size = sa->w0.s.hw_ctx_off + 1;
- sa->w0.s.ctx_size = ROC_IE_OT_CTX_ILEN;
- sa->w0.s.aop_valid = 1;
-}
-
-void
-roc_nix_inl_outb_sa_init(struct roc_ot_ipsec_outb_sa *sa)
-{
- size_t offset;
-
- memset(sa, 0, sizeof(struct roc_ot_ipsec_outb_sa));
-
- offset = offsetof(struct roc_ot_ipsec_outb_sa, ctx);
- sa->w0.s.ctx_push_size = (offset / ROC_CTX_UNIT_8B);
- sa->w0.s.ctx_size = ROC_IE_OT_CTX_ILEN;
- sa->w0.s.aop_valid = 1;
-}
-
void
roc_nix_inl_dev_lock(void)
{