1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(C) 2021 Marvell.
/* Compile-time sanity checks: each inline-IPsec SA size must equal
 * 1 << its _LOG2 companion so SA table indexing can use shifts, and the
 * inbound SA sizes must match the fixed hardware layouts (512B for the
 * ONF/CN9K format, 1024B for the OT/CN10K format).
 */
8 PLT_STATIC_ASSERT(ROC_NIX_INL_ONF_IPSEC_INB_SA_SZ ==
9 1UL << ROC_NIX_INL_ONF_IPSEC_INB_SA_SZ_LOG2);
10 PLT_STATIC_ASSERT(ROC_NIX_INL_ONF_IPSEC_INB_SA_SZ == 512);
11 PLT_STATIC_ASSERT(ROC_NIX_INL_ONF_IPSEC_OUTB_SA_SZ ==
12 1UL << ROC_NIX_INL_ONF_IPSEC_OUTB_SA_SZ_LOG2);
13 PLT_STATIC_ASSERT(ROC_NIX_INL_OT_IPSEC_INB_SA_SZ ==
14 1UL << ROC_NIX_INL_OT_IPSEC_INB_SA_SZ_LOG2);
15 PLT_STATIC_ASSERT(ROC_NIX_INL_OT_IPSEC_INB_SA_SZ == 1024);
16 PLT_STATIC_ASSERT(ROC_NIX_INL_OT_IPSEC_OUTB_SA_SZ ==
17 1UL << ROC_NIX_INL_OT_IPSEC_OUTB_SA_SZ_LOG2);
/* Allocate and program the inbound SA table for this NIX LF.
 *
 * Picks the per-SA size by SoC family (ONF layout on CN9K, OT layout
 * otherwise), allocates one contiguous zeroed region sized for
 * ipsec_in_max_spi entries, then pushes base/size/max_sa to hardware via
 * roc_nix_lf_inl_ipsec_cfg(). On config failure the table memory is
 * freed and inb_sa_base reset to NULL so fini paths see a clean state.
 */
20 nix_inl_inb_sa_tbl_setup(struct roc_nix *roc_nix)
22 uint16_t ipsec_in_max_spi = roc_nix->ipsec_in_max_spi;
23 struct nix *nix = roc_nix_to_nix_priv(roc_nix);
24 struct roc_nix_ipsec_cfg cfg;
28 /* CN9K SA size is different */
29 if (roc_model_is_cn9k())
30 inb_sa_sz = ROC_NIX_INL_ONF_IPSEC_INB_SA_SZ;
32 inb_sa_sz = ROC_NIX_INL_OT_IPSEC_INB_SA_SZ;
34 /* Alloc contiguous memory for Inbound SA's */
35 nix->inb_sa_sz = inb_sa_sz;
36 nix->inb_sa_base = plt_zmalloc(inb_sa_sz * ipsec_in_max_spi,
37 ROC_NIX_INL_SA_BASE_ALIGN);
38 if (!nix->inb_sa_base) {
39 plt_err("Failed to allocate memory for Inbound SA");
43 memset(&cfg, 0, sizeof(cfg));
44 cfg.sa_size = inb_sa_sz;
45 cfg.iova = (uintptr_t)nix->inb_sa_base;
/* max_sa is spi-range + 1 so SPI == ipsec_in_max_spi is addressable;
 * NOTE(review): allocation above sizes for ipsec_in_max_spi entries
 * only — confirm the +1 entry is accounted for by the caller/allocator.
 */
46 cfg.max_sa = ipsec_in_max_spi + 1;
47 cfg.tt = SSO_TT_ORDERED;
49 /* Setup device specific inb SA table */
50 rc = roc_nix_lf_inl_ipsec_cfg(roc_nix, &cfg, true);
52 plt_err("Failed to setup NIX Inbound SA conf, rc=%d", rc);
/* Error unwind: release the table we just allocated */
58 plt_free(nix->inb_sa_base);
59 nix->inb_sa_base = NULL;
/* Disable inline inbound IPsec on the NIX LF (NULL cfg, enable=false)
 * and free the inbound SA table allocated by nix_inl_inb_sa_tbl_setup().
 * inb_sa_base is reset to NULL to guard against double free.
 */
64 nix_inl_sa_tbl_release(struct roc_nix *roc_nix)
66 struct nix *nix = roc_nix_to_nix_priv(roc_nix);
69 rc = roc_nix_lf_inl_ipsec_cfg(roc_nix, NULL, false);
71 plt_err("Failed to disable Inbound inline ipsec, rc=%d", rc);
75 plt_free(nix->inb_sa_base);
76 nix->inb_sa_base = NULL;
/* Return the array of CPT LFs set up for inline outbound on this NIX,
 * or fail (path elided in this view) when outbound inline is not
 * enabled or no CPT LF base has been configured yet.
 */
81 roc_nix_inl_outb_lf_base_get(struct roc_nix *roc_nix)
83 struct nix *nix = roc_nix_to_nix_priv(roc_nix);
85 /* NIX Inline config needs to be done */
86 if (!nix->inl_outb_ena || !nix->cpt_lf_base)
89 return (struct roc_cpt_lf *)nix->cpt_lf_base;
/* Return the outbound SA table base address as an integer (uintptr_t). */
93 roc_nix_inl_outb_sa_base_get(struct roc_nix *roc_nix)
95 struct nix *nix = roc_nix_to_nix_priv(roc_nix);
97 return (uintptr_t)nix->outb_sa_base;
/* Return the inbound SA table base.
 *
 * When @inb_inl_dev is set the base of the shared inline device's SA
 * table is returned (selection branch elided in this view); otherwise
 * the NIX LF's own table. Requires inline inbound to be enabled.
 */
101 roc_nix_inl_inb_sa_base_get(struct roc_nix *roc_nix, bool inb_inl_dev)
103 struct nix *nix = roc_nix_to_nix_priv(roc_nix);
104 struct idev_cfg *idev = idev_get_cfg();
105 struct nix_inl_dev *inl_dev;
110 if (!nix->inl_inb_ena)
113 inl_dev = idev->nix_inl_dev;
115 /* Return inline dev sa base */
117 return (uintptr_t)inl_dev->inb_sa_base;
121 return (uintptr_t)nix->inb_sa_base;
/* Return the maximum inbound SPI supported: the inline device's limit
 * when @inb_inl_dev selects it (branch condition elided in this view),
 * otherwise this NIX's configured ipsec_in_max_spi. Requires inline
 * inbound to be enabled.
 */
125 roc_nix_inl_inb_sa_max_spi(struct roc_nix *roc_nix, bool inb_inl_dev)
127 struct nix *nix = roc_nix_to_nix_priv(roc_nix);
128 struct idev_cfg *idev = idev_get_cfg();
129 struct nix_inl_dev *inl_dev;
134 if (!nix->inl_inb_ena)
137 inl_dev = idev->nix_inl_dev;
140 return inl_dev->ipsec_in_max_spi;
144 return roc_nix->ipsec_in_max_spi;
/* Return the per-entry inbound SA size: the NIX LF's own inb_sa_sz by
 * default, or the inline device's inb_sa_sz when @inl_dev_sa is set and
 * an inline device is present.
 */
148 roc_nix_inl_inb_sa_sz(struct roc_nix *roc_nix, bool inl_dev_sa)
150 struct nix *nix = roc_nix_to_nix_priv(roc_nix);
151 struct idev_cfg *idev = idev_get_cfg();
152 struct nix_inl_dev *inl_dev;
158 return nix->inb_sa_sz;
160 inl_dev = idev->nix_inl_dev;
161 if (inl_dev_sa && inl_dev)
162 return inl_dev->inb_sa_sz;
/* Look up the inbound SA pointer for @spi.
 *
 * Resolves the SA base (NIX LF or inline device per @inb_inl_dev),
 * validates @spi against the max SPI, then computes the entry address
 * with a direct SPI-indexed scheme: base + spi * sa_size.
 */
169 roc_nix_inl_inb_sa_get(struct roc_nix *roc_nix, bool inb_inl_dev, uint32_t spi)
175 sa_base = roc_nix_inl_inb_sa_base_get(roc_nix, inb_inl_dev);
176 /* Check if SA base exists */
180 /* Check if SPI is in range */
181 max_spi = roc_nix_inl_inb_sa_max_spi(roc_nix, inb_inl_dev);
183 plt_err("Inbound SA SPI %u exceeds max %u", spi, max_spi);
188 sz = roc_nix_inl_inb_sa_sz(roc_nix, inb_inl_dev);
192 /* Basic logic of SPI->SA for now */
193 return (sa_base + (spi * sz));
/* Initialize inline inbound IPsec for this NIX.
 *
 * Requires a probed CPT device (cryptodev) because the one-time inbound
 * inline configuration is done in the CPT PF. Sets the per-model CPT
 * param1 (CN9K max L2 size vs CN10K/OT inbound param union), performs
 * the one-time CPT inbound config (-EEXIST tolerated as "already
 * configured"), sets up the inbound SA table, and finally marks inline
 * inbound enabled.
 */
197 roc_nix_inl_inb_init(struct roc_nix *roc_nix)
199 struct nix *nix = roc_nix_to_nix_priv(roc_nix);
200 struct idev_cfg *idev = idev_get_cfg();
201 struct roc_cpt *roc_cpt;
208 /* Unless we have another mechanism to trigger
209 * onetime Inline config in CPTPF, we cannot
210 * support without CPT being probed.
214 plt_err("Cannot support inline inbound, cryptodev not probed");
218 if (roc_model_is_cn9k()) {
219 param1 = ROC_ONF_IPSEC_INB_MAX_L2_SZ;
221 union roc_ot_ipsec_inb_param1 u;
224 u.s.esp_trailer_disable = 1;
228 /* Do onetime Inbound Inline config in CPTPF */
229 rc = roc_cpt_inline_ipsec_inb_cfg(roc_cpt, param1, 0);
/* -EEXIST means CPT PF was already configured by someone else; that
 * is acceptable here.
 */
230 if (rc && rc != -EEXIST) {
231 plt_err("Failed to setup inbound lf, rc=%d", rc);
235 /* Setup Inbound SA table */
236 rc = nix_inl_inb_sa_tbl_setup(roc_nix);
240 nix->inl_inb_ena = true;
/* Tear down inline inbound IPsec: no-op if not enabled; otherwise clear
 * the enable flag first, then disable and free the inbound SA table.
 */
245 roc_nix_inl_inb_fini(struct roc_nix *roc_nix)
247 struct nix *nix = roc_nix_to_nix_priv(roc_nix);
249 if (!nix->inl_inb_ena)
252 nix->inl_inb_ena = false;
254 /* Disable Inbound SA */
255 return nix_inl_sa_tbl_release(roc_nix);
/* Initialize inline outbound IPsec for this NIX.
 *
 * Sequence:
 *  1. Resolve the SSO pffunc to report errors to (inline device if
 *     present, else the shared SSO device) — fail if neither exists.
 *  2. Attach and allocate outb_nb_crypto_qs CPT LFs on the CPT block
 *     matching this NIX (CPT1 for NIX1, else CPT0) with the default
 *     SE/SE_IE/AE engine-group mask, and fetch their MSI-X offsets.
 *  3. Initialize each CPT LF's instruction queue, bind it to this NIX
 *     pffunc via cpt_lf_outb_cfg(), and enable the queue.
 *  4. If ipsec_out_max_sa is set, allocate the contiguous outbound SA
 *     table (ONF-size entries on CN9K, OT-size otherwise).
 *  5. Publish lf_base/nb_lf/sso_pffunc and set inl_outb_ena.
 *
 * Error unwind (tail labels) de-initializes already-inited LFs in
 * reverse order, then frees and detaches the CPT LF resources.
 */
259 roc_nix_inl_outb_init(struct roc_nix *roc_nix)
261 struct nix *nix = roc_nix_to_nix_priv(roc_nix);
262 struct idev_cfg *idev = idev_get_cfg();
263 struct roc_cpt_lf *lf_base, *lf;
264 struct dev *dev = &nix->dev;
265 struct msix_offset_rsp *rsp;
266 struct nix_inl_dev *inl_dev;
278 nb_lf = roc_nix->outb_nb_crypto_qs;
279 blkaddr = nix->is_nix1 ? RVU_BLOCK_ADDR_CPT1 : RVU_BLOCK_ADDR_CPT0;
281 /* Retrieve inline device if present */
282 inl_dev = idev->nix_inl_dev;
283 sso_pffunc = inl_dev ? inl_dev->dev.pf_func : idev_sso_pffunc_get();
285 plt_err("Failed to setup inline outb, need either "
286 "inline device or sso device");
290 /* Attach CPT LF for outbound */
291 rc = cpt_lfs_attach(dev, blkaddr, true, nb_lf);
293 plt_err("Failed to attach CPT LF for inline outb, rc=%d", rc);
298 eng_grpmask = (1ULL << ROC_CPT_DFLT_ENG_GRP_SE |
299 1ULL << ROC_CPT_DFLT_ENG_GRP_SE_IE |
300 1ULL << ROC_CPT_DFLT_ENG_GRP_AE);
301 rc = cpt_lfs_alloc(dev, eng_grpmask, blkaddr, true);
303 plt_err("Failed to alloc CPT LF resources, rc=%d", rc);
307 /* Get msix offsets */
308 rc = cpt_get_msix_offset(dev, &rsp);
310 plt_err("Failed to get CPT LF msix offset, rc=%d", rc);
/* Cache per-LF MSI-X offsets; NIX1 uses the CPT1 offset array */
314 mbox_memcpy(nix->cpt_msixoff,
315 nix->is_nix1 ? rsp->cpt1_lf_msixoff : rsp->cptlf_msixoff,
316 sizeof(nix->cpt_msixoff));
318 /* Alloc required num of cpt lfs */
319 lf_base = plt_zmalloc(nb_lf * sizeof(struct roc_cpt_lf), 0);
321 plt_err("Failed to alloc cpt lf memory");
326 /* Initialize CPT LF's */
327 for (i = 0; i < nb_lf; i++) {
331 lf->nb_desc = roc_nix->outb_nb_desc;
333 lf->msixoff = nix->cpt_msixoff[i];
334 lf->pci_dev = nix->pci_dev;
336 /* Setup CPT LF instruction queue */
337 rc = cpt_lf_init(lf);
339 plt_err("Failed to initialize CPT LF, rc=%d", rc);
343 /* Associate this CPT LF with NIX PFFUNC */
344 rc = cpt_lf_outb_cfg(dev, sso_pffunc, nix->dev.pf_func, i,
347 plt_err("Failed to setup CPT LF->(NIX,SSO) link, rc=%d",
353 roc_cpt_iq_enable(lf);
/* Outbound SA table is optional: skip if no SAs requested */
356 if (!roc_nix->ipsec_out_max_sa)
359 /* CN9K SA size is different */
360 if (roc_model_is_cn9k())
361 sa_sz = ROC_NIX_INL_ONF_IPSEC_OUTB_SA_SZ;
363 sa_sz = ROC_NIX_INL_OT_IPSEC_OUTB_SA_SZ;
364 /* Alloc contiguous memory of outbound SA */
365 sa_base = plt_zmalloc(sa_sz * roc_nix->ipsec_out_max_sa,
366 ROC_NIX_INL_SA_BASE_ALIGN);
368 plt_err("Outbound SA base alloc failed");
371 nix->outb_sa_base = sa_base;
372 nix->outb_sa_sz = sa_sz;
376 nix->cpt_lf_base = lf_base;
377 nix->nb_cpt_lf = nb_lf;
378 nix->outb_err_sso_pffunc = sso_pffunc;
379 nix->inl_outb_ena = true;
/* Error unwind: fini LFs inited so far (reverse order), then free
 * and detach CPT LF resources. rc |= keeps the original error code
 * visible even if cleanup also fails.
 */
383 for (j = i - 1; j >= 0; j--)
384 cpt_lf_fini(&lf_base[j]);
387 rc |= cpt_lfs_free(dev);
389 rc |= cpt_lfs_detach(dev);
/* Tear down inline outbound IPsec: no-op if not enabled. Clears the
 * enable flag, drains/finis every CPT LF instruction queue, frees then
 * detaches the CPT LF resources (each failure is logged), releases the
 * LF array, and finally frees the outbound SA table.
 */
394 roc_nix_inl_outb_fini(struct roc_nix *roc_nix)
396 struct nix *nix = roc_nix_to_nix_priv(roc_nix);
397 struct roc_cpt_lf *lf_base = nix->cpt_lf_base;
398 struct dev *dev = &nix->dev;
401 if (!nix->inl_outb_ena)
404 nix->inl_outb_ena = false;
406 /* Cleanup CPT LF instruction queue */
407 for (i = 0; i < nix->nb_cpt_lf; i++)
408 cpt_lf_fini(&lf_base[i]);
410 /* Free LF resources */
411 rc = cpt_lfs_free(dev);
413 plt_err("Failed to free CPT LF resources, rc=%d", rc);
417 rc = cpt_lfs_detach(dev);
419 plt_err("Failed to detach CPT LF, rc=%d", rc);
423 nix->cpt_lf_base = NULL;
426 /* Free outbound SA base */
427 plt_free(nix->outb_sa_base);
428 nix->outb_sa_base = NULL;
/* Return true if a NIX inline device has been probed and registered in
 * the idev config (false when idev itself is absent; guard elided in
 * this view).
 */
435 roc_nix_inl_dev_is_probed(void)
437 struct idev_cfg *idev = idev_get_cfg();
442 return !!idev->nix_inl_dev;
/* Return whether inline inbound IPsec is enabled on this NIX. */
446 roc_nix_inl_inb_is_enabled(struct roc_nix *roc_nix)
448 struct nix *nix = roc_nix_to_nix_priv(roc_nix);
450 return nix->inl_inb_ena;
/* Return whether inline outbound IPsec is enabled on this NIX. */
454 roc_nix_inl_outb_is_enabled(struct roc_nix *roc_nix)
456 struct nix *nix = roc_nix_to_nix_priv(roc_nix);
458 return nix->inl_outb_ena;
/* Take (or create) a reference on the inline device's shared RQ on
 * behalf of ethdev RQ @rq.
 *
 * No-op when there is no inline device. If the inline RQ is already
 * inited (rq_refs non-zero) only the refcount/flag is updated. On
 * first use the inline RQ is built from scratch:
 *  - pool attributes (aura, skips, lpb size) are copied from the first
 *    ethdev RQ;
 *  - on CN10K, first-pass RED drop/pass levels are derived from the
 *    aura limit (drop around half the buffers) to avoid metabuf alloc
 *    failure;
 *  - IPsec header parsing, a 20-bit flow tag with special mask
 *    0xFFF00000, ordered TT, wqe_skip=1 and SSO are enabled;
 *  - the RQ config mbox (CN9K vs CN10K variant) is prepared and sent.
 * On success @rq->inl_dev_ref is set so the put path knows to release.
 */
462 roc_nix_inl_dev_rq_get(struct roc_nix_rq *rq)
464 struct idev_cfg *idev = idev_get_cfg();
465 struct nix_inl_dev *inl_dev;
466 struct roc_nix_rq *inl_rq;
473 inl_dev = idev->nix_inl_dev;
474 /* Nothing to do if no inline device */
478 /* Just take reference if already inited */
479 if (inl_dev->rq_refs) {
481 rq->inl_dev_ref = true;
486 inl_rq = &inl_dev->rq;
487 memset(inl_rq, 0, sizeof(struct roc_nix_rq));
489 /* Take RQ pool attributes from the first ethdev RQ */
491 inl_rq->aura_handle = rq->aura_handle;
492 inl_rq->first_skip = rq->first_skip;
493 inl_rq->later_skip = rq->later_skip;
494 inl_rq->lpb_size = rq->lpb_size;
496 if (!roc_model_is_cn9k()) {
497 uint64_t aura_limit =
498 roc_npa_aura_op_limit_get(inl_rq->aura_handle);
499 uint64_t aura_shift = plt_log2_u32(aura_limit);
/* Scale shift down so RED thresholds retain 8 bits of precision */
504 aura_shift = aura_shift - 8;
506 /* Set first pass RQ to drop when half of the buffers are in
507 * use to avoid metabuf alloc failure. This is needed as long
508 * as we cannot use different
510 inl_rq->red_pass = (aura_limit / 2) >> aura_shift;
511 inl_rq->red_drop = ((aura_limit / 2) - 1) >> aura_shift;
515 inl_rq->ipsech_ena = true;
517 inl_rq->flow_tag_width = 20;
518 /* Special tag mask */
519 inl_rq->tag_mask = 0xFFF00000;
520 inl_rq->tt = SSO_TT_ORDERED;
522 inl_rq->wqe_skip = 1;
523 inl_rq->sso_ena = true;
525 /* Prepare and send RQ init mbox */
526 if (roc_model_is_cn9k())
527 rc = nix_rq_cn9k_cfg(dev, inl_rq, inl_dev->qints, false, true);
529 rc = nix_rq_cfg(dev, inl_rq, inl_dev->qints, false, true);
531 plt_err("Failed to prepare aq_enq msg, rc=%d", rc);
535 rc = mbox_process(dev->mbox);
537 plt_err("Failed to send aq_enq msg, rc=%d", rc);
542 rq->inl_dev_ref = true;
/* Drop @rq's reference on the inline device RQ (no-op if it holds
 * none). It is an error for the inline device to have disappeared
 * while references are outstanding. When the last reference goes away
 * the shared RQ is disabled, and on CN10K the NIX LF VWQE path is
 * flushed via the NIX_LF_OP_VWQE_FLUSH register.
 */
547 roc_nix_inl_dev_rq_put(struct roc_nix_rq *rq)
549 struct idev_cfg *idev = idev_get_cfg();
550 struct nix_inl_dev *inl_dev;
551 struct roc_nix_rq *inl_rq;
558 if (!rq->inl_dev_ref)
561 inl_dev = idev->nix_inl_dev;
562 /* Inline device should be there if we have ref */
564 plt_err("Failed to find inline device with refs");
568 rq->inl_dev_ref = false;
/* Other holders remain; nothing more to do until last ref drops */
570 if (inl_dev->rq_refs)
574 inl_rq = &inl_dev->rq;
575 /* There are no more references, disable RQ */
576 rc = nix_rq_ena_dis(dev, inl_rq, false);
578 plt_err("Failed to disable inline device rq, rc=%d", rc);
580 /* Flush NIX LF for CN10K */
581 if (roc_model_is_cn10k())
582 plt_write64(0, inl_dev->nix_base + NIX_LF_OP_VWQE_FLUSH);
/* Return the NPA aura buffer limit of the inline device's shared RQ,
 * or a default (elided in this view) when there is no inline device or
 * its RQ is not yet in use.
 */
588 roc_nix_inl_dev_rq_limit_get(void)
590 struct idev_cfg *idev = idev_get_cfg();
591 struct nix_inl_dev *inl_dev;
592 struct roc_nix_rq *inl_rq;
594 if (!idev || !idev->nix_inl_dev)
597 inl_dev = idev->nix_inl_dev;
598 if (!inl_dev->rq_refs)
601 inl_rq = &inl_dev->rq;
603 return roc_npa_aura_op_limit_get(inl_rq->aura_handle);
/* Record whether inbound inline traffic for this NIX goes through the
 * shared inline device; consumed later by NPC flow rule add.
 */
607 roc_nix_inb_mode_set(struct roc_nix *roc_nix, bool use_inl_dev)
609 struct nix *nix = roc_nix_to_nix_priv(roc_nix);
611 /* Info used by NPC flow rule add */
612 nix->inb_inl_dev = use_inl_dev;
/* Return whether this NIX's inbound inline path uses the inline device. */
616 roc_nix_inb_is_with_inl_dev(struct roc_nix *roc_nix)
618 struct nix *nix = roc_nix_to_nix_priv(roc_nix);
620 return nix->inb_inl_dev;
/* Return the inline device's shared RQ when the device exists and the
 * RQ is referenced (return statements elided in this view; presumably
 * NULL otherwise — confirm against full source).
 */
624 roc_nix_inl_dev_rq(void)
626 struct idev_cfg *idev = idev_get_cfg();
627 struct nix_inl_dev *inl_dev;
630 inl_dev = idev->nix_inl_dev;
631 if (inl_dev != NULL && inl_dev->rq_refs)
/* Return the SSO pffunc that inline outbound errors are steered to
 * (recorded during roc_nix_inl_outb_init()).
 */
639 roc_nix_inl_outb_sso_pffunc_get(struct roc_nix *roc_nix)
641 struct nix *nix = roc_nix_to_nix_priv(roc_nix);
643 return nix->outb_err_sso_pffunc;
/* Register the SSO work callback on the inline device.
 *
 * Idempotent for an identical (cb, args) pair; rejects registration
 * (path elided in this view) when a different callback is already
 * installed. Requires an inline device to be present.
 */
647 roc_nix_inl_cb_register(roc_nix_inl_sso_work_cb_t cb, void *args)
649 struct idev_cfg *idev = idev_get_cfg();
650 struct nix_inl_dev *inl_dev;
655 inl_dev = idev->nix_inl_dev;
659 /* Be silent if registration called with same cb and args */
660 if (inl_dev->work_cb == cb && inl_dev->cb_args == args)
663 /* Don't allow registration again if registered with different cb */
664 if (inl_dev->work_cb)
667 inl_dev->work_cb = cb;
668 inl_dev->cb_args = args;
/* Unregister the SSO work callback from the inline device. Only the
 * exact (cb, args) pair currently installed may be removed; a mismatch
 * is rejected (path elided in this view).
 */
673 roc_nix_inl_cb_unregister(roc_nix_inl_sso_work_cb_t cb, void *args)
675 struct idev_cfg *idev = idev_get_cfg();
676 struct nix_inl_dev *inl_dev;
681 inl_dev = idev->nix_inl_dev;
685 if (inl_dev->work_cb != cb || inl_dev->cb_args != args)
688 inl_dev->work_cb = NULL;
689 inl_dev->cb_args = NULL;
/* Re-program the NIX LF inbound IPsec config with a new SSO tag
 * constant (and, per signature, a TT value — parameter list partially
 * elided in this view), reusing the already-configured SA base/size and
 * max-SA count. Silently succeeds when inline inbound is not enabled.
 */
694 roc_nix_inl_inb_tag_update(struct roc_nix *roc_nix, uint32_t tag_const,
697 struct nix *nix = roc_nix_to_nix_priv(roc_nix);
698 struct roc_nix_ipsec_cfg cfg;
700 /* Be silent if inline inbound not enabled */
701 if (!nix->inl_inb_ena)
704 memset(&cfg, 0, sizeof(cfg));
705 cfg.sa_size = nix->inb_sa_sz;
706 cfg.iova = (uintptr_t)nix->inb_sa_base;
707 cfg.max_sa = roc_nix->ipsec_in_max_spi + 1;
709 cfg.tag_const = tag_const;
711 return roc_nix_lf_inl_ipsec_cfg(roc_nix, &cfg, true);
/* Synchronize an SA context with the CPT hardware context cache.
 *
 * On CN9K there is no context cache to manage, so a full memory fence
 * is enough. Otherwise, when an outbound CPT LF exists and the inline
 * device is not in use, the op is issued through that LF's register
 * base: FLUSH/FLUSH_INVAL write CPT_LF_CTX_FLUSH and RELOAD writes
 * CPT_LF_CTX_RELOAD, each with the SA pointer encoded as a 128-byte-
 * aligned cptr (address >> 7). Inbound ops with no outbound LF fall
 * through to a path elided in this view.
 */
715 roc_nix_inl_sa_sync(struct roc_nix *roc_nix, void *sa, bool inb,
716 enum roc_nix_inl_sa_sync_op op)
718 struct nix *nix = roc_nix_to_nix_priv(roc_nix);
719 struct roc_cpt_lf *outb_lf = nix->cpt_lf_base;
720 union cpt_lf_ctx_reload reload;
721 union cpt_lf_ctx_flush flush;
724 /* Nothing much to do on cn9k */
725 if (roc_model_is_cn9k()) {
726 plt_atomic_thread_fence(__ATOMIC_ACQ_REL);
730 if (!inb && !outb_lf)
733 /* Performing op via outbound lf is enough
734 * when inline dev is not in use.
736 if (outb_lf && !nix->inb_inl_dev) {
737 rbase = outb_lf->rbase;
742 case ROC_NIX_INL_SA_OP_FLUSH_INVAL:
745 case ROC_NIX_INL_SA_OP_FLUSH:
/* cptr is the SA address in units of 128B cache lines */
746 flush.s.cptr = ((uintptr_t)sa) >> 7;
747 plt_write64(flush.u, rbase + CPT_LF_CTX_FLUSH);
749 case ROC_NIX_INL_SA_OP_RELOAD:
750 reload.s.cptr = ((uintptr_t)sa) >> 7;
751 plt_write64(reload.u, rbase + CPT_LF_CTX_RELOAD);
/* Acquire the global inline-device spinlock (guard for absent idev is
 * elided in this view). Pair with roc_nix_inl_dev_unlock().
 */
763 roc_nix_inl_dev_lock(void)
765 struct idev_cfg *idev = idev_get_cfg();
768 plt_spinlock_lock(&idev->nix_inl_dev_lock);
/* Release the global inline-device spinlock taken by
 * roc_nix_inl_dev_lock().
 */
772 roc_nix_inl_dev_unlock(void)
774 struct idev_cfg *idev = idev_get_cfg();
777 plt_spinlock_unlock(&idev->nix_inl_dev_lock);