/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(C) 2021 Marvell.
 */

#include "roc_api.h"
#include "roc_priv.h"

PLT_STATIC_ASSERT(ROC_NIX_INL_ONF_IPSEC_INB_SA_SZ ==
		  1UL << ROC_NIX_INL_ONF_IPSEC_INB_SA_SZ_LOG2);
PLT_STATIC_ASSERT(ROC_NIX_INL_ONF_IPSEC_INB_SA_SZ == 512);
PLT_STATIC_ASSERT(ROC_NIX_INL_ONF_IPSEC_OUTB_SA_SZ ==
		  1UL << ROC_NIX_INL_ONF_IPSEC_OUTB_SA_SZ_LOG2);
PLT_STATIC_ASSERT(ROC_NIX_INL_OT_IPSEC_INB_SA_SZ ==
		  1UL << ROC_NIX_INL_OT_IPSEC_INB_SA_SZ_LOG2);
PLT_STATIC_ASSERT(ROC_NIX_INL_OT_IPSEC_INB_SA_SZ == 1024);
PLT_STATIC_ASSERT(ROC_NIX_INL_OT_IPSEC_OUTB_SA_SZ ==
		  1UL << ROC_NIX_INL_OT_IPSEC_OUTB_SA_SZ_LOG2);
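
/*
 * The asserts above pin the SA sizes to powers of two: ONF (cn9k)
 * inbound SAs are 512B = 1UL << 9 and OT (cn10k) inbound SAs are
 * 1024B = 1UL << 10, so SA table offsets can be computed with plain
 * shifts of the *_LOG2 macros (a sketch of the intent, not from the
 * hardware documentation).
 */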

static int
nix_inl_inb_sa_tbl_setup(struct roc_nix *roc_nix)
{
	uint32_t ipsec_in_min_spi = roc_nix->ipsec_in_min_spi;
	uint32_t ipsec_in_max_spi = roc_nix->ipsec_in_max_spi;
	struct nix *nix = roc_nix_to_nix_priv(roc_nix);
	struct roc_nix_ipsec_cfg cfg;
	size_t inb_sa_sz;
	uint32_t max_sa;
	int rc, i;
	void *sa;

	max_sa = plt_align32pow2(ipsec_in_max_spi - ipsec_in_min_spi + 1);

	/* CN9K SA size is different */
	if (roc_model_is_cn9k())
		inb_sa_sz = ROC_NIX_INL_ONF_IPSEC_INB_SA_SZ;
	else
		inb_sa_sz = ROC_NIX_INL_OT_IPSEC_INB_SA_SZ;

	/* Alloc contiguous memory for Inbound SA's */
	nix->inb_sa_sz = inb_sa_sz;
	nix->inb_spi_mask = max_sa - 1;
	nix->inb_sa_base = plt_zmalloc(inb_sa_sz * max_sa,
				       ROC_NIX_INL_SA_BASE_ALIGN);
	if (!nix->inb_sa_base) {
		plt_err("Failed to allocate memory for Inbound SA");
		return -ENOMEM;
	}

	if (roc_model_is_cn10k()) {
		for (i = 0; i < max_sa; i++) {
			sa = ((uint8_t *)nix->inb_sa_base) + (i * inb_sa_sz);
			roc_ot_ipsec_inb_sa_init(sa, true);
		}
	}

	memset(&cfg, 0, sizeof(cfg));
	cfg.sa_size = inb_sa_sz;
	cfg.iova = (uintptr_t)nix->inb_sa_base;
	cfg.max_sa = max_sa;
	cfg.tt = SSO_TT_ORDERED;

	/* Setup device specific inb SA table */
	rc = roc_nix_lf_inl_ipsec_cfg(roc_nix, &cfg, true);
	if (rc) {
		plt_err("Failed to setup NIX Inbound SA conf, rc=%d", rc);
		goto free_mem;
	}

	return 0;
free_mem:
	plt_free(nix->inb_sa_base);
	nix->inb_sa_base = NULL;
	return rc;
}
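
/*
 * Worked example of the SPI->index math above (illustrative numbers):
 * with ipsec_in_min_spi = 0 and ipsec_in_max_spi = 100, 101 SAs are
 * needed, so max_sa = plt_align32pow2(101) = 128 and inb_spi_mask = 127.
 * SPI 130 would alias to slot 130 & 127 = 2, which is why out-of-range
 * SPIs only draw a warning in roc_nix_inl_inb_sa_get() further below.
 */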

static int
nix_inl_sa_tbl_release(struct roc_nix *roc_nix)
{
	struct nix *nix = roc_nix_to_nix_priv(roc_nix);
	int rc;

	rc = roc_nix_lf_inl_ipsec_cfg(roc_nix, NULL, false);
	if (rc) {
		plt_err("Failed to disable Inbound inline ipsec, rc=%d", rc);
		return rc;
	}

	plt_free(nix->inb_sa_base);
	nix->inb_sa_base = NULL;
	return 0;
}

struct roc_cpt_lf *
roc_nix_inl_outb_lf_base_get(struct roc_nix *roc_nix)
{
	struct nix *nix = roc_nix_to_nix_priv(roc_nix);

	/* NIX Inline config needs to be done */
	if (!nix->inl_outb_ena || !nix->cpt_lf_base)
		return NULL;

	return (struct roc_cpt_lf *)nix->cpt_lf_base;
}

uintptr_t
roc_nix_inl_outb_sa_base_get(struct roc_nix *roc_nix)
{
	struct nix *nix = roc_nix_to_nix_priv(roc_nix);

	return (uintptr_t)nix->outb_sa_base;
}

uintptr_t
roc_nix_inl_inb_sa_base_get(struct roc_nix *roc_nix, bool inb_inl_dev)
{
	struct idev_cfg *idev = idev_get_cfg();
	struct nix_inl_dev *inl_dev;
	struct nix *nix = NULL;

	if (idev == NULL)
		return 0;

	if (!inb_inl_dev && roc_nix == NULL)
		return -EINVAL;

	if (roc_nix) {
		nix = roc_nix_to_nix_priv(roc_nix);
		if (!nix->inl_inb_ena)
			return 0;
	}

	if (inb_inl_dev) {
		inl_dev = idev->nix_inl_dev;
		/* Return inline dev sa base */
		if (inl_dev)
			return (uintptr_t)inl_dev->inb_sa_base;
		return 0;
	}

	return (uintptr_t)nix->inb_sa_base;
}

uint32_t
roc_nix_inl_inb_spi_range(struct roc_nix *roc_nix, bool inb_inl_dev,
			  uint32_t *min_spi, uint32_t *max_spi)
{
	struct idev_cfg *idev = idev_get_cfg();
	uint32_t min = 0, max = 0, mask = 0;
	struct nix_inl_dev *inl_dev;
	struct nix *nix = NULL;

	if (idev == NULL)
		return 0;

	if (!inb_inl_dev && roc_nix == NULL)
		return 0;

	inl_dev = idev->nix_inl_dev;
	if (inb_inl_dev) {
		if (inl_dev == NULL)
			goto exit;
		min = inl_dev->ipsec_in_min_spi;
		max = inl_dev->ipsec_in_max_spi;
		mask = inl_dev->inb_spi_mask;
	} else {
		nix = roc_nix_to_nix_priv(roc_nix);
		if (!nix->inl_inb_ena)
			goto exit;
		min = roc_nix->ipsec_in_min_spi;
		max = roc_nix->ipsec_in_max_spi;
		mask = nix->inb_spi_mask;
	}
exit:
	if (min_spi)
		*min_spi = min;
	if (max_spi)
		*max_spi = max;
	return mask;
}

uint32_t
roc_nix_inl_inb_sa_sz(struct roc_nix *roc_nix, bool inl_dev_sa)
{
	struct idev_cfg *idev = idev_get_cfg();
	struct nix_inl_dev *inl_dev;
	struct nix *nix;

	if (idev == NULL)
		return 0;

	if (!inl_dev_sa && roc_nix == NULL)
		return 0;

	if (roc_nix) {
		nix = roc_nix_to_nix_priv(roc_nix);
		if (!inl_dev_sa)
			return nix->inb_sa_sz;
	}

	if (inl_dev_sa) {
		inl_dev = idev->nix_inl_dev;
		if (inl_dev)
			return inl_dev->inb_sa_sz;
	}

	return 0;
}

uintptr_t
roc_nix_inl_inb_sa_get(struct roc_nix *roc_nix, bool inb_inl_dev, uint32_t spi)
{
	uint32_t max_spi, min_spi, mask;
	uintptr_t sa_base;
	uint64_t sz;

	sa_base = roc_nix_inl_inb_sa_base_get(roc_nix, inb_inl_dev);
	/* Check if SA base exists */
	if (!sa_base)
		return 0;

	/* Check if SPI is in range */
	mask = roc_nix_inl_inb_spi_range(roc_nix, inb_inl_dev, &min_spi,
					 &max_spi);
	if (spi > max_spi || spi < min_spi)
		plt_warn("Inbound SA SPI %u not in range (%u..%u)", spi,
			 min_spi, max_spi);

	/* Get SA size */
	sz = roc_nix_inl_inb_sa_sz(roc_nix, inb_inl_dev);
	if (!sz)
		return 0;

	/* Basic logic of SPI->SA for now */
	return (sa_base + ((spi & mask) * sz));
}
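
/*
 * Usage sketch (hypothetical caller): look up the inbound SA slot for a
 * SPI before programming it.
 *
 *	uintptr_t sa = roc_nix_inl_inb_sa_get(roc_nix, false, spi);
 *
 *	if (!sa)
 *		return -ENOENT;
 *	// sa points into the contiguous table at ((spi & mask) * sa_sz)
 */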

int
roc_nix_inl_inb_init(struct roc_nix *roc_nix)
{
	struct nix *nix = roc_nix_to_nix_priv(roc_nix);
	struct idev_cfg *idev = idev_get_cfg();
	struct roc_cpt *roc_cpt;
	uint16_t param1;
	int rc;

	if (idev == NULL)
		return -ENOTSUP;

	/* Unless we have another mechanism to trigger
	 * onetime Inline config in CPTPF, we cannot
	 * support without CPT being probed.
	 */
	roc_cpt = idev->cpt;
	if (!roc_cpt) {
		plt_err("Cannot support inline inbound, cryptodev not probed");
		return -ENOTSUP;
	}

	if (roc_model_is_cn9k()) {
		param1 = ROC_ONF_IPSEC_INB_MAX_L2_SZ;
	} else {
		union roc_ot_ipsec_inb_param1 u;

		u.u16 = 0;
		u.s.esp_trailer_disable = 1;
		param1 = u.u16;
	}

	/* Do onetime Inbound Inline config in CPTPF */
	rc = roc_cpt_inline_ipsec_inb_cfg(roc_cpt, param1, 0);
	if (rc && rc != -EEXIST) {
		plt_err("Failed to setup inbound lf, rc=%d", rc);
		return rc;
	}

	/* Setup Inbound SA table */
	rc = nix_inl_inb_sa_tbl_setup(roc_nix);
	if (rc)
		return rc;

	nix->inl_inb_ena = true;
	return 0;
}

int
roc_nix_inl_inb_fini(struct roc_nix *roc_nix)
{
	struct nix *nix = roc_nix_to_nix_priv(roc_nix);

	if (!nix->inl_inb_ena)
		return 0;

	nix->inl_inb_ena = false;

	/* Flush Inbound CTX cache entries */
	roc_nix_cpt_ctx_cache_sync(roc_nix);

	/* Disable Inbound SA */
	return nix_inl_sa_tbl_release(roc_nix);
}
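
/*
 * Expected inbound bring-up/tear-down pairing (sketch):
 *
 *	rc = roc_nix_inl_inb_init(roc_nix);	// once per port
 *	...
 *	rc = roc_nix_inl_inb_fini(roc_nix);	// on port close
 *
 * Init fails with -ENOTSUP when no CPT device has been probed, since
 * the onetime inline config in CPTPF cannot be triggered without it.
 */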

int
roc_nix_inl_outb_init(struct roc_nix *roc_nix)
{
	struct nix *nix = roc_nix_to_nix_priv(roc_nix);
	struct idev_cfg *idev = idev_get_cfg();
	struct roc_cpt_lf *lf_base, *lf;
	struct dev *dev = &nix->dev;
	struct msix_offset_rsp *rsp;
	struct nix_inl_dev *inl_dev;
	uint16_t sso_pffunc;
	uint8_t eng_grpmask;
	uint64_t blkaddr;
	uint16_t nb_lf;
	void *sa_base;
	size_t sa_sz;
	int i, j, rc;
	void *sa;

	if (idev == NULL)
		return -ENOTSUP;

	nb_lf = roc_nix->outb_nb_crypto_qs;
	blkaddr = nix->is_nix1 ? RVU_BLOCK_ADDR_CPT1 : RVU_BLOCK_ADDR_CPT0;

	/* Retrieve inline device if present */
	inl_dev = idev->nix_inl_dev;
	sso_pffunc = inl_dev ? inl_dev->dev.pf_func : idev_sso_pffunc_get();
	/* Use sso_pffunc if explicitly requested */
	if (roc_nix->ipsec_out_sso_pffunc)
		sso_pffunc = idev_sso_pffunc_get();

	if (!sso_pffunc) {
		plt_err("Failed to setup inline outb, need either "
			"inline device or sso device");
		return -ENOTSUP;
	}

	/* Attach CPT LF for outbound */
	rc = cpt_lfs_attach(dev, blkaddr, true, nb_lf);
	if (rc) {
		plt_err("Failed to attach CPT LF for inline outb, rc=%d", rc);
		return rc;
	}

	/* Alloc CPT LF */
	eng_grpmask = (1ULL << ROC_CPT_DFLT_ENG_GRP_SE |
		       1ULL << ROC_CPT_DFLT_ENG_GRP_SE_IE |
		       1ULL << ROC_CPT_DFLT_ENG_GRP_AE);
	rc = cpt_lfs_alloc(dev, eng_grpmask, blkaddr,
			   !roc_nix->ipsec_out_sso_pffunc);
	if (rc) {
		plt_err("Failed to alloc CPT LF resources, rc=%d", rc);
		goto lf_detach;
	}

	/* Get msix offsets */
	rc = cpt_get_msix_offset(dev, &rsp);
	if (rc) {
		plt_err("Failed to get CPT LF msix offset, rc=%d", rc);
		goto lf_free;
	}

	mbox_memcpy(nix->cpt_msixoff,
		    nix->is_nix1 ? rsp->cpt1_lf_msixoff : rsp->cptlf_msixoff,
		    sizeof(nix->cpt_msixoff));

	/* Alloc required num of cpt lfs */
	lf_base = plt_zmalloc(nb_lf * sizeof(struct roc_cpt_lf), 0);
	if (!lf_base) {
		plt_err("Failed to alloc cpt lf memory");
		rc = -ENOMEM;
		goto lf_free;
	}

	/* Initialize CPT LF's */
	for (i = 0; i < nb_lf; i++) {
		lf = &lf_base[i];

		lf->lf_id = i;
		lf->nb_desc = roc_nix->outb_nb_desc;
		lf->dev = &nix->dev;
		lf->msixoff = nix->cpt_msixoff[i];
		lf->pci_dev = nix->pci_dev;

		/* Setup CPT LF instruction queue */
		rc = cpt_lf_init(lf);
		if (rc) {
			plt_err("Failed to initialize CPT LF, rc=%d", rc);
			goto lf_fini;
		}

		/* Associate this CPT LF with NIX PFFUNC */
		rc = cpt_lf_outb_cfg(dev, sso_pffunc, nix->dev.pf_func, i,
				     true);
		if (rc) {
			plt_err("Failed to setup CPT LF->(NIX,SSO) link, rc=%d",
				rc);
			goto lf_fini;
		}

		/* Enable IQ */
		roc_cpt_iq_enable(lf);
	}

	if (!roc_nix->ipsec_out_max_sa)
		goto skip_sa_alloc;

	/* CN9K SA size is different */
	if (roc_model_is_cn9k())
		sa_sz = ROC_NIX_INL_ONF_IPSEC_OUTB_SA_SZ;
	else
		sa_sz = ROC_NIX_INL_OT_IPSEC_OUTB_SA_SZ;
	/* Alloc contiguous memory of outbound SA */
	sa_base = plt_zmalloc(sa_sz * roc_nix->ipsec_out_max_sa,
			      ROC_NIX_INL_SA_BASE_ALIGN);
	if (!sa_base) {
		plt_err("Outbound SA base alloc failed");
		goto lf_fini;
	}
	if (roc_model_is_cn10k()) {
		for (i = 0; i < roc_nix->ipsec_out_max_sa; i++) {
			sa = ((uint8_t *)sa_base) + (i * sa_sz);
			roc_ot_ipsec_outb_sa_init(sa);
		}
	}
	nix->outb_sa_base = sa_base;
	nix->outb_sa_sz = sa_sz;

skip_sa_alloc:
	nix->cpt_lf_base = lf_base;
	nix->nb_cpt_lf = nb_lf;
	nix->outb_err_sso_pffunc = sso_pffunc;
	nix->inl_outb_ena = true;
	return 0;

lf_fini:
	for (j = i - 1; j >= 0; j--)
		cpt_lf_fini(&lf_base[j]);
	plt_free(lf_base);
lf_free:
	rc |= cpt_lfs_free(dev);
lf_detach:
	rc |= cpt_lfs_detach(dev);
	return rc;
}
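
/*
 * Usage sketch (hypothetical): after outbound init succeeds, callers
 * retrieve the CPT LF queues and SA table set up above.
 *
 *	rc = roc_nix_inl_outb_init(roc_nix);
 *	if (rc)
 *		return rc;
 *	lf_base = roc_nix_inl_outb_lf_base_get(roc_nix);
 *	sa_base = roc_nix_inl_outb_sa_base_get(roc_nix);
 */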

int
roc_nix_inl_outb_fini(struct roc_nix *roc_nix)
{
	struct nix *nix = roc_nix_to_nix_priv(roc_nix);
	struct roc_cpt_lf *lf_base = nix->cpt_lf_base;
	struct dev *dev = &nix->dev;
	int i, rc, ret = 0;

	if (!nix->inl_outb_ena)
		return 0;

	nix->inl_outb_ena = false;

	/* Cleanup CPT LF instruction queue */
	for (i = 0; i < nix->nb_cpt_lf; i++)
		cpt_lf_fini(&lf_base[i]);

	/* Free LF resources */
	rc = cpt_lfs_free(dev);
	if (rc)
		plt_err("Failed to free CPT LF resources, rc=%d", rc);
	ret |= rc;

	/* Detach LF */
	rc = cpt_lfs_detach(dev);
	if (rc)
		plt_err("Failed to detach CPT LF, rc=%d", rc);
	ret |= rc;

	/* Free LF memory */
	plt_free(lf_base);
	nix->cpt_lf_base = NULL;
	nix->nb_cpt_lf = 0;

	/* Free outbound SA base */
	plt_free(nix->outb_sa_base);
	nix->outb_sa_base = NULL;

	return ret;
}

bool
roc_nix_inl_dev_is_probed(void)
{
	struct idev_cfg *idev = idev_get_cfg();

	if (idev == NULL)
		return 0;

	return !!idev->nix_inl_dev;
}

bool
roc_nix_inl_inb_is_enabled(struct roc_nix *roc_nix)
{
	struct nix *nix = roc_nix_to_nix_priv(roc_nix);

	return nix->inl_inb_ena;
}

bool
roc_nix_inl_outb_is_enabled(struct roc_nix *roc_nix)
{
	struct nix *nix = roc_nix_to_nix_priv(roc_nix);

	return nix->inl_outb_ena;
}

int
roc_nix_inl_dev_rq_get(struct roc_nix_rq *rq)
{
	struct idev_cfg *idev = idev_get_cfg();
	struct nix_inl_dev *inl_dev;
	struct roc_nix_rq *inl_rq;
	struct dev *dev;
	int rc;

	if (idev == NULL)
		return 0;

	inl_dev = idev->nix_inl_dev;
	/* Nothing to do if no inline device */
	if (!inl_dev)
		return 0;

	/* Just take reference if already inited */
	if (inl_dev->rq_refs) {
		inl_dev->rq_refs++;
		rq->inl_dev_ref = true;
		return 0;
	}

	dev = &inl_dev->dev;
	inl_rq = &inl_dev->rq;
	memset(inl_rq, 0, sizeof(struct roc_nix_rq));

	/* Take RQ pool attributes from the first ethdev RQ */
	inl_rq->qid = 0;
	inl_rq->aura_handle = rq->aura_handle;
	inl_rq->first_skip = rq->first_skip;
	inl_rq->later_skip = rq->later_skip;
	inl_rq->lpb_size = rq->lpb_size;
	inl_rq->lpb_drop_ena = true;
	inl_rq->spb_ena = rq->spb_ena;
	inl_rq->spb_aura_handle = rq->spb_aura_handle;
	inl_rq->spb_size = rq->spb_size;
	inl_rq->spb_drop_ena = !!rq->spb_ena;

	if (!roc_model_is_cn9k()) {
		uint64_t aura_limit =
			roc_npa_aura_op_limit_get(inl_rq->aura_handle);
		uint64_t aura_shift = plt_log2_u32(aura_limit);
		uint64_t aura_drop, drop_pc;

		if (aura_shift < 8)
			aura_shift = 0;
		else
			aura_shift = aura_shift - 8;

		/* Set first pass RQ to drop after part of buffers are in
		 * use to avoid metabuf alloc failure. This is needed as long
		 * as we cannot use different aura.
		 */
		drop_pc = inl_dev->lpb_drop_pc;
		aura_drop = ((aura_limit * drop_pc) / 100) >> aura_shift;
		roc_npa_aura_drop_set(inl_rq->aura_handle, aura_drop, true);
	}

	if (inl_rq->spb_ena) {
		uint64_t aura_limit =
			roc_npa_aura_op_limit_get(inl_rq->spb_aura_handle);
		uint64_t aura_shift = plt_log2_u32(aura_limit);
		uint64_t aura_drop, drop_pc;

		if (aura_shift < 8)
			aura_shift = 0;
		else
			aura_shift = aura_shift - 8;

		/* Set first pass RQ to drop after part of buffers are in
		 * use to avoid metabuf alloc failure. This is needed as long
		 * as we cannot use different aura.
		 */
		drop_pc = inl_dev->spb_drop_pc;
		aura_drop = ((aura_limit * drop_pc) / 100) >> aura_shift;
		roc_npa_aura_drop_set(inl_rq->spb_aura_handle, aura_drop, true);
	}

	inl_rq->ipsech_ena = true;

	inl_rq->flow_tag_width = 20;
	/* Special tag mask */
	inl_rq->tag_mask = rq->tag_mask;
	inl_rq->tt = SSO_TT_ORDERED;
	inl_rq->hwgrp = 0;
	inl_rq->wqe_skip = inl_dev->wqe_skip;
	inl_rq->sso_ena = true;

	/* Prepare and send RQ init mbox */
	if (roc_model_is_cn9k())
		rc = nix_rq_cn9k_cfg(dev, inl_rq, inl_dev->qints, false, true);
	else
		rc = nix_rq_cfg(dev, inl_rq, inl_dev->qints, false, true);
	if (rc) {
		plt_err("Failed to prepare aq_enq msg, rc=%d", rc);
		return rc;
	}

	rc = mbox_process(dev->mbox);
	if (rc) {
		plt_err("Failed to send aq_enq msg, rc=%d", rc);
		return rc;
	}

	inl_dev->rq_refs++;
	rq->inl_dev_ref = true;
	return 0;
}
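
/*
 * Worked example of the aura drop math above (illustrative numbers):
 * for aura_limit = 4096 and lpb_drop_pc = 90, aura_shift =
 * plt_log2_u32(4096) - 8 = 4, so aura_drop = ((4096 * 90) / 100) >> 4 =
 * 230, i.e. the first pass RQ starts dropping once roughly 90% of the
 * aura buffers are in use.
 */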

int
roc_nix_inl_dev_rq_put(struct roc_nix_rq *rq)
{
	struct idev_cfg *idev = idev_get_cfg();
	struct nix_inl_dev *inl_dev;
	struct roc_nix_rq *inl_rq;
	struct dev *dev;
	int rc;

	if (idev == NULL)
		return 0;

	if (!rq->inl_dev_ref)
		return 0;

	inl_dev = idev->nix_inl_dev;
	/* Inline device should be there if we have ref */
	if (!inl_dev) {
		plt_err("Failed to find inline device with refs");
		return -EFAULT;
	}

	rq->inl_dev_ref = false;
	inl_dev->rq_refs--;
	if (inl_dev->rq_refs)
		return 0;

	dev = &inl_dev->dev;
	inl_rq = &inl_dev->rq;
	/* There are no more references, disable RQ */
	rc = nix_rq_ena_dis(dev, inl_rq, false);
	if (rc)
		plt_err("Failed to disable inline device rq, rc=%d", rc);

	roc_npa_aura_drop_set(inl_rq->aura_handle, 0, false);
	if (inl_rq->spb_ena)
		roc_npa_aura_drop_set(inl_rq->spb_aura_handle, 0, false);

	/* Flush NIX LF for CN10K */
	nix_rq_vwqe_flush(rq, inl_dev->vwqe_interval);

	return rc;
}
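
/*
 * roc_nix_inl_dev_rq_get()/..._put() are reference counted (sketch of
 * the expected pairing): the first ethdev RQ seeds the inline device RQ
 * config, later callers only bump rq_refs, and the RQ is disabled only
 * when the last reference is dropped.
 *
 *	rc = roc_nix_inl_dev_rq_get(rq);
 *	...
 *	rc = roc_nix_inl_dev_rq_put(rq);
 */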

uint64_t
roc_nix_inl_dev_rq_limit_get(void)
{
	struct idev_cfg *idev = idev_get_cfg();
	struct nix_inl_dev *inl_dev;
	struct roc_nix_rq *inl_rq;

	if (!idev || !idev->nix_inl_dev)
		return 0;

	inl_dev = idev->nix_inl_dev;
	if (!inl_dev->rq_refs)
		return 0;

	inl_rq = &inl_dev->rq;

	return roc_npa_aura_op_limit_get(inl_rq->aura_handle);
}

void
roc_nix_inb_mode_set(struct roc_nix *roc_nix, bool use_inl_dev)
{
	struct nix *nix = roc_nix_to_nix_priv(roc_nix);

	/* Info used by NPC flow rule add */
	nix->inb_inl_dev = use_inl_dev;
}

bool
roc_nix_inb_is_with_inl_dev(struct roc_nix *roc_nix)
{
	struct nix *nix = roc_nix_to_nix_priv(roc_nix);

	return nix->inb_inl_dev;
}

struct roc_nix_rq *
roc_nix_inl_dev_rq(void)
{
	struct idev_cfg *idev = idev_get_cfg();
	struct nix_inl_dev *inl_dev;

	if (idev != NULL) {
		inl_dev = idev->nix_inl_dev;
		if (inl_dev != NULL && inl_dev->rq_refs)
			return &inl_dev->rq;
	}

	return NULL;
}

uint16_t
roc_nix_inl_outb_sso_pffunc_get(struct roc_nix *roc_nix)
{
	struct nix *nix = roc_nix_to_nix_priv(roc_nix);

	return nix->outb_err_sso_pffunc;
}

int
roc_nix_inl_cb_register(roc_nix_inl_sso_work_cb_t cb, void *args)
{
	struct idev_cfg *idev = idev_get_cfg();
	struct nix_inl_dev *inl_dev;

	if (idev == NULL)
		return -EIO;

	inl_dev = idev->nix_inl_dev;
	if (!inl_dev)
		return -EIO;

	/* Be silent if registration called with same cb and args */
	if (inl_dev->work_cb == cb && inl_dev->cb_args == args)
		return 0;

	/* Don't allow registration again if registered with different cb */
	if (inl_dev->work_cb)
		return -EBUSY;

	inl_dev->work_cb = cb;
	inl_dev->cb_args = args;
	return 0;
}

int
roc_nix_inl_cb_unregister(roc_nix_inl_sso_work_cb_t cb, void *args)
{
	struct idev_cfg *idev = idev_get_cfg();
	struct nix_inl_dev *inl_dev;

	if (idev == NULL)
		return -ENOENT;

	inl_dev = idev->nix_inl_dev;
	if (!inl_dev)
		return -ENOENT;

	if (inl_dev->work_cb != cb || inl_dev->cb_args != args)
		return -EINVAL;

	inl_dev->work_cb = NULL;
	inl_dev->cb_args = NULL;
	return 0;
}
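
/*
 * Usage sketch (hypothetical handler; the exact callback prototype is
 * defined by roc_nix_inl_sso_work_cb_t): a single handler receives SSO
 * work from the inline device, and unregister must pass the same
 * (cb, args) pair that was registered.
 *
 *	rc = roc_nix_inl_cb_register(inl_work_cb, dev);
 *	...
 *	rc = roc_nix_inl_cb_unregister(inl_work_cb, dev);
 */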

int
roc_nix_inl_inb_tag_update(struct roc_nix *roc_nix, uint32_t tag_const,
			   uint8_t tt)
{
	struct nix *nix = roc_nix_to_nix_priv(roc_nix);
	struct roc_nix_ipsec_cfg cfg;

	/* Be silent if inline inbound not enabled */
	if (!nix->inl_inb_ena)
		return 0;

	memset(&cfg, 0, sizeof(cfg));
	cfg.sa_size = nix->inb_sa_sz;
	cfg.iova = (uintptr_t)nix->inb_sa_base;
	cfg.max_sa = nix->inb_spi_mask + 1;
	cfg.tt = tt;
	cfg.tag_const = tag_const;

	return roc_nix_lf_inl_ipsec_cfg(roc_nix, &cfg, true);
}

int
roc_nix_inl_sa_sync(struct roc_nix *roc_nix, void *sa, bool inb,
		    enum roc_nix_inl_sa_sync_op op)
{
	struct idev_cfg *idev = idev_get_cfg();
	struct nix_inl_dev *inl_dev = NULL;
	struct roc_cpt_lf *outb_lf = NULL;
	union cpt_lf_ctx_reload reload;
	union cpt_lf_ctx_flush flush;
	bool get_inl_lf = true;
	uintptr_t rbase;
	struct nix *nix;

	/* Nothing much to do on cn9k */
	if (roc_model_is_cn9k()) {
		plt_atomic_thread_fence(__ATOMIC_ACQ_REL);
		return 0;
	}

	if (idev)
		inl_dev = idev->nix_inl_dev;

	if (!inl_dev && roc_nix == NULL)
		return -EINVAL;

	if (roc_nix) {
		nix = roc_nix_to_nix_priv(roc_nix);
		outb_lf = nix->cpt_lf_base;
		if (inb && !nix->inb_inl_dev)
			get_inl_lf = false;
	}

	if (inb && get_inl_lf) {
		outb_lf = NULL;
		if (inl_dev && inl_dev->attach_cptlf)
			outb_lf = &inl_dev->cpt_lf;
	}

	if (outb_lf) {
		rbase = outb_lf->rbase;

		flush.u = 0;
		reload.u = 0;
		switch (op) {
		case ROC_NIX_INL_SA_OP_FLUSH_INVAL:
			flush.s.inval = 1;
			/* fall through */
		case ROC_NIX_INL_SA_OP_FLUSH:
			flush.s.cptr = ((uintptr_t)sa) >> 7;
			plt_write64(flush.u, rbase + CPT_LF_CTX_FLUSH);
			break;
		case ROC_NIX_INL_SA_OP_RELOAD:
			reload.s.cptr = ((uintptr_t)sa) >> 7;
			plt_write64(reload.u, rbase + CPT_LF_CTX_RELOAD);
			break;
		default:
			return -EINVAL;
		}
		return 0;
	}
	plt_err("Could not get CPT LF for SA sync");
	return -ENOTSUP;
}
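
/*
 * Note on the cptr encoding above: CPT_LF_CTX_FLUSH/RELOAD take the
 * context address in 128-byte units, hence the '>> 7'. SA bases
 * allocated in this file satisfy that alignment via
 * ROC_NIX_INL_SA_BASE_ALIGN.
 */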

int
roc_nix_inl_ctx_write(struct roc_nix *roc_nix, void *sa_dptr, void *sa_cptr,
		      bool inb, uint16_t sa_len)
{
	struct idev_cfg *idev = idev_get_cfg();
	struct nix_inl_dev *inl_dev = NULL;
	struct roc_cpt_lf *outb_lf = NULL;
	union cpt_lf_ctx_flush flush;
	bool get_inl_lf = true;
	uintptr_t rbase;
	struct nix *nix;
	int rc;

	/* Nothing much to do on cn9k */
	if (roc_model_is_cn9k()) {
		plt_atomic_thread_fence(__ATOMIC_ACQ_REL);
		return 0;
	}

	if (idev)
		inl_dev = idev->nix_inl_dev;

	if (!inl_dev && roc_nix == NULL)
		return -EINVAL;

	if (roc_nix) {
		nix = roc_nix_to_nix_priv(roc_nix);
		outb_lf = nix->cpt_lf_base;

		if (inb && !nix->inb_inl_dev)
			get_inl_lf = false;
	}

	if (inb && get_inl_lf) {
		outb_lf = NULL;
		if (inl_dev && inl_dev->attach_cptlf)
			outb_lf = &inl_dev->cpt_lf;
	}

	if (outb_lf) {
		rbase = outb_lf->rbase;
		flush.u = 0;

		rc = roc_cpt_ctx_write(outb_lf, sa_dptr, sa_cptr, sa_len);
		if (rc)
			return rc;

		/* Trigger CTX flush to write dirty data back to DRAM */
		flush.s.cptr = ((uintptr_t)sa_cptr) >> 7;
		plt_write64(flush.u, rbase + CPT_LF_CTX_FLUSH);
		return 0;
	}
	plt_nix_dbg("Could not get CPT LF for CTX write");
	return -ENOTSUP;
}
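
/*
 * Usage sketch (hypothetical): update a live SA by writing a prepared
 * scratch copy through CPT, which also flushes the CTX cache entry so
 * hardware and DRAM stay coherent.
 *
 *	rc = roc_nix_inl_ctx_write(roc_nix, sa_dptr, sa, inb, sa_len);
 *	if (rc)
 *		return rc;
 */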

void
roc_nix_inl_dev_lock(void)
{
	struct idev_cfg *idev = idev_get_cfg();

	if (idev != NULL)
		plt_spinlock_lock(&idev->nix_inl_dev_lock);
}

void
roc_nix_inl_dev_unlock(void)
{
	struct idev_cfg *idev = idev_get_cfg();

	if (idev != NULL)
		plt_spinlock_unlock(&idev->nix_inl_dev_lock);
}