1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(C) 2021 Marvell.
/* Compile-time invariants: each inline IPsec SA size constant must equal
 * 1 << its corresponding _LOG2 constant, and the fixed sizes are pinned
 * (ONF inbound SA = 512B, OT inbound SA = 1024B). The rest of this file
 * indexes SA tables by (spi * sa_sz), so these must never drift apart.
 */
8 PLT_STATIC_ASSERT(ROC_NIX_INL_ONF_IPSEC_INB_SA_SZ ==
9 1UL << ROC_NIX_INL_ONF_IPSEC_INB_SA_SZ_LOG2);
10 PLT_STATIC_ASSERT(ROC_NIX_INL_ONF_IPSEC_INB_SA_SZ == 512);
11 PLT_STATIC_ASSERT(ROC_NIX_INL_ONF_IPSEC_OUTB_SA_SZ ==
12 1UL << ROC_NIX_INL_ONF_IPSEC_OUTB_SA_SZ_LOG2);
13 PLT_STATIC_ASSERT(ROC_NIX_INL_OT_IPSEC_INB_SA_SZ ==
14 1UL << ROC_NIX_INL_OT_IPSEC_INB_SA_SZ_LOG2);
15 PLT_STATIC_ASSERT(ROC_NIX_INL_OT_IPSEC_INB_SA_SZ == 1024);
16 PLT_STATIC_ASSERT(ROC_NIX_INL_OT_IPSEC_OUTB_SA_SZ ==
17 1UL << ROC_NIX_INL_OT_IPSEC_OUTB_SA_SZ_LOG2);
/* Allocate and configure the contiguous inbound SA table for this NIX LF.
 *
 * SA entry size is model dependent: ONF format on CN9K, OT format
 * otherwise. The table holds roc_nix->ipsec_in_max_spi entries and is
 * handed to the device via roc_nix_lf_inl_ipsec_cfg(). On failure the
 * allocated table is freed and nix->inb_sa_base reset to NULL.
 */
20 nix_inl_inb_sa_tbl_setup(struct roc_nix *roc_nix)
22 uint16_t ipsec_in_max_spi = roc_nix->ipsec_in_max_spi;
23 struct nix *nix = roc_nix_to_nix_priv(roc_nix);
24 struct roc_nix_ipsec_cfg cfg;
29 /* CN9K SA size is different */
30 if (roc_model_is_cn9k())
31 inb_sa_sz = ROC_NIX_INL_ONF_IPSEC_INB_SA_SZ;
33 inb_sa_sz = ROC_NIX_INL_OT_IPSEC_INB_SA_SZ;
35 /* Alloc contiguous memory for Inbound SA's */
36 nix->inb_sa_sz = inb_sa_sz;
37 nix->inb_sa_base = plt_zmalloc(inb_sa_sz * ipsec_in_max_spi,
38 ROC_NIX_INL_SA_BASE_ALIGN);
39 if (!nix->inb_sa_base) {
40 plt_err("Failed to allocate memory for Inbound SA");
/* CN10K OT SAs need one-time init of each entry before use */
43 if (roc_model_is_cn10k()) {
44 for (i = 0; i < ipsec_in_max_spi; i++) {
45 sa = ((uint8_t *)nix->inb_sa_base) + (i * inb_sa_sz);
46 roc_ot_ipsec_inb_sa_init(sa, true);
50 memset(&cfg, 0, sizeof(cfg));
51 cfg.sa_size = inb_sa_sz;
52 cfg.iova = (uintptr_t)nix->inb_sa_base;
/* max_sa is +1 since SPI 0..ipsec_in_max_spi inclusive are addressable */
53 cfg.max_sa = ipsec_in_max_spi + 1;
54 cfg.tt = SSO_TT_ORDERED;
56 /* Setup device specific inb SA table */
57 rc = roc_nix_lf_inl_ipsec_cfg(roc_nix, &cfg, true);
59 plt_err("Failed to setup NIX Inbound SA conf, rc=%d", rc);
/* Error path: release the SA table so a retry starts clean */
65 plt_free(nix->inb_sa_base);
66 nix->inb_sa_base = NULL;
/* Disable inline inbound IPsec on the LF and free the inbound SA table.
 * Counterpart of nix_inl_inb_sa_tbl_setup().
 */
73 struct nix *nix = roc_nix_to_nix_priv(roc_nix);
76 rc = roc_nix_lf_inl_ipsec_cfg(roc_nix, NULL, false);
78 plt_err("Failed to disable Inbound inline ipsec, rc=%d", rc);
82 plt_free(nix->inb_sa_base);
83 nix->inb_sa_base = NULL;
/* Return the base of the outbound CPT LF array for this NIX, or fail the
 * guard when inline outbound is not yet configured (inl_outb_ena unset or
 * no CPT LF base allocated).
 */
88 roc_nix_inl_outb_lf_base_get(struct roc_nix *roc_nix)
90 struct nix *nix = roc_nix_to_nix_priv(roc_nix);
92 /* NIX Inline config needs to be done */
93 if (!nix->inl_outb_ena || !nix->cpt_lf_base)
96 return (struct roc_cpt_lf *)nix->cpt_lf_base;
/* Return the outbound SA table base address as an iova/uintptr_t. */
100 roc_nix_inl_outb_sa_base_get(struct roc_nix *roc_nix)
102 struct nix *nix = roc_nix_to_nix_priv(roc_nix);
104 return (uintptr_t)nix->outb_sa_base;
/* Return the inbound SA table base.
 *
 * When inb_inl_dev is true the shared inline device's SA base is
 * returned (if an inline device is present); otherwise the per-NIX
 * table is returned, provided inline inbound is enabled on that NIX.
 */
108 roc_nix_inl_inb_sa_base_get(struct roc_nix *roc_nix, bool inb_inl_dev)
110 struct idev_cfg *idev = idev_get_cfg();
111 struct nix_inl_dev *inl_dev;
112 struct nix *nix = NULL;
/* A NIX handle is mandatory unless the inline-device table is wanted */
117 if (!inb_inl_dev && roc_nix == NULL)
121 nix = roc_nix_to_nix_priv(roc_nix);
122 if (!nix->inl_inb_ena)
127 inl_dev = idev->nix_inl_dev;
128 /* Return inline dev sa base */
130 return (uintptr_t)inl_dev->inb_sa_base;
/* Fall back to this NIX LF's own inbound SA table */
134 return (uintptr_t)nix->inb_sa_base;
/* Return the max inbound SPI supported: the inline device's limit when
 * inb_inl_dev is set, else this NIX's ipsec_in_max_spi (valid only when
 * inline inbound is enabled on it).
 */
138 roc_nix_inl_inb_sa_max_spi(struct roc_nix *roc_nix, bool inb_inl_dev)
140 struct idev_cfg *idev = idev_get_cfg();
141 struct nix_inl_dev *inl_dev;
147 if (!inb_inl_dev && roc_nix == NULL)
151 nix = roc_nix_to_nix_priv(roc_nix);
152 if (!nix->inl_inb_ena)
157 inl_dev = idev->nix_inl_dev;
159 return inl_dev->ipsec_in_max_spi;
163 return roc_nix->ipsec_in_max_spi;
/* Return the per-entry inbound SA size: the NIX LF's inb_sa_sz, or the
 * inline device's inb_sa_sz when inl_dev_sa is set.
 */
167 roc_nix_inl_inb_sa_sz(struct roc_nix *roc_nix, bool inl_dev_sa)
169 struct idev_cfg *idev = idev_get_cfg();
170 struct nix_inl_dev *inl_dev;
176 if (!inl_dev_sa && roc_nix == NULL)
180 nix = roc_nix_to_nix_priv(roc_nix);
182 return nix->inb_sa_sz;
186 inl_dev = idev->nix_inl_dev;
188 return inl_dev->inb_sa_sz;
/* Look up the inbound SA for a given SPI.
 *
 * Validates that an SA base exists and that the SPI is within the
 * configured maximum, then returns sa_base + spi * sa_sz — i.e. a
 * direct SPI-indexed table for now (no hashing/masking).
 */
195 roc_nix_inl_inb_sa_get(struct roc_nix *roc_nix, bool inb_inl_dev, uint32_t spi)
201 sa_base = roc_nix_inl_inb_sa_base_get(roc_nix, inb_inl_dev);
202 /* Check if SA base exists */
206 /* Check if SPI is in range */
207 max_spi = roc_nix_inl_inb_sa_max_spi(roc_nix, inb_inl_dev);
209 plt_err("Inbound SA SPI %u exceeds max %u", spi, max_spi);
214 sz = roc_nix_inl_inb_sa_sz(roc_nix, inb_inl_dev);
218 /* Basic logic of SPI->SA for now */
219 return (sa_base + (spi * sz));
/* Enable inline inbound IPsec for this NIX.
 *
 * Requires the CPT PF to have been probed (it performs the one-time
 * inbound inline config). param1 is model specific: max L2 size on
 * CN9K, OT param1 union (esp_trailer_disable) on newer silicon. On
 * success the inbound SA table is set up and inl_inb_ena is set.
 */
223 roc_nix_inl_inb_init(struct roc_nix *roc_nix)
225 struct nix *nix = roc_nix_to_nix_priv(roc_nix);
226 struct idev_cfg *idev = idev_get_cfg();
227 struct roc_cpt *roc_cpt;
234 /* Unless we have another mechanism to trigger
235 * onetime Inline config in CPTPF, we cannot
236 * support without CPT being probed.
240 plt_err("Cannot support inline inbound, cryptodev not probed");
244 if (roc_model_is_cn9k()) {
245 param1 = ROC_ONF_IPSEC_INB_MAX_L2_SZ;
247 union roc_ot_ipsec_inb_param1 u;
250 u.s.esp_trailer_disable = 1;
254 /* Do onetime Inbound Inline config in CPTPF */
255 rc = roc_cpt_inline_ipsec_inb_cfg(roc_cpt, param1, 0);
/* -EEXIST means CPTPF was already configured — not an error */
256 if (rc && rc != -EEXIST) {
257 plt_err("Failed to setup inbound lf, rc=%d", rc);
261 /* Setup Inbound SA table */
262 rc = nix_inl_inb_sa_tbl_setup(roc_nix);
266 nix->inl_inb_ena = true;
/* Disable inline inbound IPsec: clear the enable flag, flush inbound CPT
 * CTX cache entries, then disable and free the inbound SA table.
 * No-op guard when inline inbound was never enabled.
 */
271 roc_nix_inl_inb_fini(struct roc_nix *roc_nix)
273 struct nix *nix = roc_nix_to_nix_priv(roc_nix);
275 if (!nix->inl_inb_ena)
278 nix->inl_inb_ena = false;
280 /* Flush Inbound CTX cache entries */
281 roc_nix_cpt_ctx_cache_sync(roc_nix);
283 /* Disable Inbound SA */
284 return nix_inl_sa_tbl_release(roc_nix);
/* Enable inline outbound IPsec for this NIX.
 *
 * Steps visible here:
 *  1. Pick CPT block (CPT1 for NIX1, else CPT0) and the SSO pffunc used
 *     for error reporting (inline dev's, or idev SSO — forced to idev
 *     SSO when ipsec_out_sso_pffunc is requested).
 *  2. Attach + alloc outb_nb_crypto_qs CPT LFs with SE/SE_IE/AE engine
 *     group mask, fetch their MSIX offsets.
 *  3. Init each LF's instruction queue, link it to NIX/SSO pffuncs and
 *     enable it.
 *  4. Optionally allocate the outbound SA table (ONF sz on CN9K, OT sz
 *     otherwise; per-entry OT init on CN10K).
 * Error path unwinds LF init, then frees/detaches CPT LFs.
 */
288 roc_nix_inl_outb_init(struct roc_nix *roc_nix)
290 struct nix *nix = roc_nix_to_nix_priv(roc_nix);
291 struct idev_cfg *idev = idev_get_cfg();
292 struct roc_cpt_lf *lf_base, *lf;
293 struct dev *dev = &nix->dev;
294 struct msix_offset_rsp *rsp;
295 struct nix_inl_dev *inl_dev;
308 nb_lf = roc_nix->outb_nb_crypto_qs;
309 blkaddr = nix->is_nix1 ? RVU_BLOCK_ADDR_CPT1 : RVU_BLOCK_ADDR_CPT0;
311 /* Retrieve inline device if present */
312 inl_dev = idev->nix_inl_dev;
313 sso_pffunc = inl_dev ? inl_dev->dev.pf_func : idev_sso_pffunc_get();
314 /* Use sso_pffunc if explicitly requested */
315 if (roc_nix->ipsec_out_sso_pffunc)
316 sso_pffunc = idev_sso_pffunc_get();
319 plt_err("Failed to setup inline outb, need either "
320 "inline device or sso device");
324 /* Attach CPT LF for outbound */
325 rc = cpt_lfs_attach(dev, blkaddr, true, nb_lf);
327 plt_err("Failed to attach CPT LF for inline outb, rc=%d", rc);
/* Engine groups needed for outbound IPsec processing */
332 eng_grpmask = (1ULL << ROC_CPT_DFLT_ENG_GRP_SE |
333 1ULL << ROC_CPT_DFLT_ENG_GRP_SE_IE |
334 1ULL << ROC_CPT_DFLT_ENG_GRP_AE);
335 rc = cpt_lfs_alloc(dev, eng_grpmask, blkaddr,
336 !roc_nix->ipsec_out_sso_pffunc);
338 plt_err("Failed to alloc CPT LF resources, rc=%d", rc);
342 /* Get msix offsets */
343 rc = cpt_get_msix_offset(dev, &rsp);
345 plt_err("Failed to get CPT LF msix offset, rc=%d", rc);
/* Cache the per-LF MSIX offsets for the CPT block we attached to */
349 mbox_memcpy(nix->cpt_msixoff,
350 nix->is_nix1 ? rsp->cpt1_lf_msixoff : rsp->cptlf_msixoff,
351 sizeof(nix->cpt_msixoff));
353 /* Alloc required num of cpt lfs */
354 lf_base = plt_zmalloc(nb_lf * sizeof(struct roc_cpt_lf), 0);
356 plt_err("Failed to alloc cpt lf memory");
361 /* Initialize CPT LF's */
362 for (i = 0; i < nb_lf; i++) {
366 lf->nb_desc = roc_nix->outb_nb_desc;
368 lf->msixoff = nix->cpt_msixoff[i];
369 lf->pci_dev = nix->pci_dev;
371 /* Setup CPT LF instruction queue */
372 rc = cpt_lf_init(lf);
374 plt_err("Failed to initialize CPT LF, rc=%d", rc);
378 /* Associate this CPT LF with NIX PFFUNC */
379 rc = cpt_lf_outb_cfg(dev, sso_pffunc, nix->dev.pf_func, i,
382 plt_err("Failed to setup CPT LF->(NIX,SSO) link, rc=%d",
388 roc_cpt_iq_enable(lf);
/* SA table is optional; skip when no outbound SAs requested */
391 if (!roc_nix->ipsec_out_max_sa)
394 /* CN9K SA size is different */
395 if (roc_model_is_cn9k())
396 sa_sz = ROC_NIX_INL_ONF_IPSEC_OUTB_SA_SZ;
398 sa_sz = ROC_NIX_INL_OT_IPSEC_OUTB_SA_SZ;
399 /* Alloc contiguous memory of outbound SA */
400 sa_base = plt_zmalloc(sa_sz * roc_nix->ipsec_out_max_sa,
401 ROC_NIX_INL_SA_BASE_ALIGN);
403 plt_err("Outbound SA base alloc failed");
/* CN10K OT SAs need one-time init of each entry */
406 if (roc_model_is_cn10k()) {
407 for (i = 0; i < roc_nix->ipsec_out_max_sa; i++) {
408 sa = ((uint8_t *)sa_base) + (i * sa_sz);
409 roc_ot_ipsec_outb_sa_init(sa);
412 nix->outb_sa_base = sa_base;
413 nix->outb_sa_sz = sa_sz;
/* Publish state only after everything above succeeded */
417 nix->cpt_lf_base = lf_base;
418 nix->nb_cpt_lf = nb_lf;
419 nix->outb_err_sso_pffunc = sso_pffunc;
420 nix->inl_outb_ena = true;
/* Error unwind: fini LFs initialized so far, then free/detach all LFs */
424 for (j = i - 1; j >= 0; j--)
425 cpt_lf_fini(&lf_base[j]);
428 rc |= cpt_lfs_free(dev);
430 rc |= cpt_lfs_detach(dev);
/* Disable inline outbound IPsec: fini every CPT LF instruction queue,
 * free and detach the CPT LF resources, then release the outbound SA
 * table. No-op guard when inline outbound was never enabled.
 */
435 roc_nix_inl_outb_fini(struct roc_nix *roc_nix)
437 struct nix *nix = roc_nix_to_nix_priv(roc_nix);
438 struct roc_cpt_lf *lf_base = nix->cpt_lf_base;
439 struct dev *dev = &nix->dev;
442 if (!nix->inl_outb_ena)
445 nix->inl_outb_ena = false;
447 /* Cleanup CPT LF instruction queue */
448 for (i = 0; i < nix->nb_cpt_lf; i++)
449 cpt_lf_fini(&lf_base[i]);
451 /* Free LF resources */
452 rc = cpt_lfs_free(dev);
454 plt_err("Failed to free CPT LF resources, rc=%d", rc);
458 rc = cpt_lfs_detach(dev);
460 plt_err("Failed to detach CPT LF, rc=%d", rc);
464 nix->cpt_lf_base = NULL;
467 /* Free outbound SA base */
468 plt_free(nix->outb_sa_base);
469 nix->outb_sa_base = NULL;
/* Return true if a NIX inline device has been probed and registered
 * with the idev config.
 */
476 roc_nix_inl_dev_is_probed(void)
478 struct idev_cfg *idev = idev_get_cfg();
483 return !!idev->nix_inl_dev;
/* Return whether inline inbound IPsec is enabled on this NIX. */
487 roc_nix_inl_inb_is_enabled(struct roc_nix *roc_nix)
489 struct nix *nix = roc_nix_to_nix_priv(roc_nix);
491 return nix->inl_inb_ena;
/* Return whether inline outbound IPsec is enabled on this NIX. */
495 roc_nix_inl_outb_is_enabled(struct roc_nix *roc_nix)
497 struct nix *nix = roc_nix_to_nix_priv(roc_nix);
499 return nix->inl_outb_ena;
/* Take a reference on (and lazily initialize) the shared inline-device RQ.
 *
 * The first caller initializes the inline device's single RQ from the
 * first ethdev RQ's pool attributes (aura, skips, lpb/spb settings),
 * programs first-pass aura drop thresholds (non-CN9K), and sends the RQ
 * init mbox. Subsequent callers just bump the refcount and mark the
 * ethdev RQ as referencing the inline device.
 */
503 roc_nix_inl_dev_rq_get(struct roc_nix_rq *rq)
505 struct idev_cfg *idev = idev_get_cfg();
506 struct nix_inl_dev *inl_dev;
507 struct roc_nix_rq *inl_rq;
514 inl_dev = idev->nix_inl_dev;
515 /* Nothing to do if no inline device */
519 /* Just take reference if already inited */
520 if (inl_dev->rq_refs) {
522 rq->inl_dev_ref = true;
527 inl_rq = &inl_dev->rq;
528 memset(inl_rq, 0, sizeof(struct roc_nix_rq));
530 /* Take RQ pool attributes from the first ethdev RQ */
532 inl_rq->aura_handle = rq->aura_handle;
533 inl_rq->first_skip = rq->first_skip;
534 inl_rq->later_skip = rq->later_skip;
535 inl_rq->lpb_size = rq->lpb_size;
536 inl_rq->lpb_drop_ena = true;
537 inl_rq->spb_ena = rq->spb_ena;
538 inl_rq->spb_aura_handle = rq->spb_aura_handle;
539 inl_rq->spb_size = rq->spb_size;
540 inl_rq->spb_drop_ena = !!rq->spb_ena;
542 if (!roc_model_is_cn9k()) {
543 uint64_t aura_limit =
544 roc_npa_aura_op_limit_get(inl_rq->aura_handle);
545 uint64_t aura_shift = plt_log2_u32(aura_limit);
546 uint64_t aura_drop, drop_pc;
551 aura_shift = aura_shift - 8;
553 /* Set first pass RQ to drop after part of buffers are in
554 * use to avoid metabuf alloc failure. This is needed as long
555 * as we cannot use different aura.
557 drop_pc = inl_dev->lpb_drop_pc;
/* Drop threshold = drop_pc% of the aura limit, scaled by shift */
558 aura_drop = ((aura_limit * drop_pc) / 100) >> aura_shift;
559 roc_npa_aura_drop_set(inl_rq->aura_handle, aura_drop, true);
/* Same first-pass drop threshold for the SPB aura when enabled */
562 if (inl_rq->spb_ena) {
563 uint64_t aura_limit =
564 roc_npa_aura_op_limit_get(inl_rq->spb_aura_handle);
565 uint64_t aura_shift = plt_log2_u32(aura_limit);
566 uint64_t aura_drop, drop_pc;
571 aura_shift = aura_shift - 8;
573 /* Set first pass RQ to drop after part of buffers are in
574 * use to avoid metabuf alloc failure. This is needed as long
575 * as we cannot use different aura.
577 drop_pc = inl_dev->spb_drop_pc;
578 aura_drop = ((aura_limit * drop_pc) / 100) >> aura_shift;
579 roc_npa_aura_drop_set(inl_rq->spb_aura_handle, aura_drop, true);
/* Inline-RQ specifics: IPsec header parsing, 20-bit flow tag with
 * fixed upper tag mask, ordered SSO scheduling, configured WQE skip.
 */
583 inl_rq->ipsech_ena = true;
585 inl_rq->flow_tag_width = 20;
586 /* Special tag mask */
587 inl_rq->tag_mask = 0xFFF00000;
588 inl_rq->tt = SSO_TT_ORDERED;
590 inl_rq->wqe_skip = inl_dev->wqe_skip;
591 inl_rq->sso_ena = true;
593 /* Prepare and send RQ init mbox */
594 if (roc_model_is_cn9k())
595 rc = nix_rq_cn9k_cfg(dev, inl_rq, inl_dev->qints, false, true);
597 rc = nix_rq_cfg(dev, inl_rq, inl_dev->qints, false, true);
599 plt_err("Failed to prepare aq_enq msg, rc=%d", rc);
603 rc = mbox_process(dev->mbox);
605 plt_err("Failed to send aq_enq msg, rc=%d", rc);
610 rq->inl_dev_ref = true;
/* Drop this ethdev RQ's reference on the shared inline-device RQ.
 *
 * When the last reference goes away, the inline RQ is disabled, both
 * aura drop thresholds are cleared, and the NIX LF is flushed
 * (CN10K vwqe flush). An inline device must exist if a reference was
 * held — its absence is reported as an error.
 */
615 roc_nix_inl_dev_rq_put(struct roc_nix_rq *rq)
617 struct idev_cfg *idev = idev_get_cfg();
618 struct nix_inl_dev *inl_dev;
619 struct roc_nix_rq *inl_rq;
626 if (!rq->inl_dev_ref)
629 inl_dev = idev->nix_inl_dev;
630 /* Inline device should be there if we have ref */
632 plt_err("Failed to find inline device with refs");
636 rq->inl_dev_ref = false;
/* Other references remain — keep the inline RQ alive */
638 if (inl_dev->rq_refs)
642 inl_rq = &inl_dev->rq;
643 /* There are no more references, disable RQ */
644 rc = nix_rq_ena_dis(dev, inl_rq, false);
646 plt_err("Failed to disable inline device rq, rc=%d", rc);
/* Clear first-pass drop thresholds set in roc_nix_inl_dev_rq_get() */
648 roc_npa_aura_drop_set(inl_rq->aura_handle, 0, false);
650 roc_npa_aura_drop_set(inl_rq->spb_aura_handle, 0, false);
652 /* Flush NIX LF for CN10K */
653 nix_rq_vwqe_flush(rq, inl_dev->vwqe_interval);
/* Return the aura op limit of the inline device's RQ, guarded on the
 * inline device being present and its RQ being referenced.
 */
659 roc_nix_inl_dev_rq_limit_get(void)
661 struct idev_cfg *idev = idev_get_cfg();
662 struct nix_inl_dev *inl_dev;
663 struct roc_nix_rq *inl_rq;
665 if (!idev || !idev->nix_inl_dev)
668 inl_dev = idev->nix_inl_dev;
669 if (!inl_dev->rq_refs)
672 inl_rq = &inl_dev->rq;
674 return roc_npa_aura_op_limit_get(inl_rq->aura_handle);
/* Record whether this NIX should use the inline device for inbound
 * traffic; consumed later by NPC flow rule add.
 */
678 roc_nix_inb_mode_set(struct roc_nix *roc_nix, bool use_inl_dev)
680 struct nix *nix = roc_nix_to_nix_priv(roc_nix);
682 /* Info used by NPC flow rule add */
683 nix->inb_inl_dev = use_inl_dev;
/* Return whether inbound traffic for this NIX goes via the inline device. */
687 roc_nix_inb_is_with_inl_dev(struct roc_nix *roc_nix)
689 struct nix *nix = roc_nix_to_nix_priv(roc_nix);
691 return nix->inb_inl_dev;
/* Return the shared inline-device RQ, but only when the inline device
 * exists and its RQ is currently referenced (i.e. initialized).
 */
695 roc_nix_inl_dev_rq(void)
697 struct idev_cfg *idev = idev_get_cfg();
698 struct nix_inl_dev *inl_dev;
701 inl_dev = idev->nix_inl_dev;
702 if (inl_dev != NULL && inl_dev->rq_refs)
/* Return the SSO pffunc used for outbound inline error reporting. */
710 roc_nix_inl_outb_sso_pffunc_get(struct roc_nix *roc_nix)
712 struct nix *nix = roc_nix_to_nix_priv(roc_nix);
714 return nix->outb_err_sso_pffunc;
/* Register the SSO work callback on the inline device.
 *
 * Idempotent for the same (cb, args) pair; registration with a
 * different callback while one is already set is rejected.
 */
718 roc_nix_inl_cb_register(roc_nix_inl_sso_work_cb_t cb, void *args)
720 struct idev_cfg *idev = idev_get_cfg();
721 struct nix_inl_dev *inl_dev;
726 inl_dev = idev->nix_inl_dev;
730 /* Be silent if registration called with same cb and args */
731 if (inl_dev->work_cb == cb && inl_dev->cb_args == args)
734 /* Don't allow registration again if registered with different cb */
735 if (inl_dev->work_cb)
738 inl_dev->work_cb = cb;
739 inl_dev->cb_args = args;
/* Unregister the SSO work callback; only succeeds when both cb and args
 * match the currently registered pair.
 */
744 roc_nix_inl_cb_unregister(roc_nix_inl_sso_work_cb_t cb, void *args)
746 struct idev_cfg *idev = idev_get_cfg();
747 struct nix_inl_dev *inl_dev;
752 inl_dev = idev->nix_inl_dev;
756 if (inl_dev->work_cb != cb || inl_dev->cb_args != args)
759 inl_dev->work_cb = NULL;
760 inl_dev->cb_args = NULL;
/* Re-program the inbound inline IPsec config with an updated tag
 * constant, keeping the existing SA table (size/iova/max_sa) settings.
 * Silently does nothing when inline inbound is not enabled.
 */
765 roc_nix_inl_inb_tag_update(struct roc_nix *roc_nix, uint32_t tag_const,
768 struct nix *nix = roc_nix_to_nix_priv(roc_nix);
769 struct roc_nix_ipsec_cfg cfg;
771 /* Be silent if inline inbound not enabled */
772 if (!nix->inl_inb_ena)
775 memset(&cfg, 0, sizeof(cfg));
776 cfg.sa_size = nix->inb_sa_sz;
777 cfg.iova = (uintptr_t)nix->inb_sa_base;
778 cfg.max_sa = roc_nix->ipsec_in_max_spi + 1;
780 cfg.tag_const = tag_const;
782 return roc_nix_lf_inl_ipsec_cfg(roc_nix, &cfg, true);
/* Synchronize a SA with the CPT CTX cache (flush/flush+inval/reload).
 *
 * On CN9K there is no CTX cache to manage, so only a full memory fence
 * is issued. Otherwise a CPT LF is selected — this NIX's outbound LF by
 * default, or the inline device's LF for inbound SAs when the NIX uses
 * the inline device — and the op is performed by writing the SA's
 * 128B-aligned pointer (address >> 7) to the LF's CTX_FLUSH/CTX_RELOAD
 * register. Fails with an error when no suitable CPT LF is found.
 */
786 roc_nix_inl_sa_sync(struct roc_nix *roc_nix, void *sa, bool inb,
787 enum roc_nix_inl_sa_sync_op op)
789 struct idev_cfg *idev = idev_get_cfg();
790 struct nix_inl_dev *inl_dev = NULL;
791 struct roc_cpt_lf *outb_lf = NULL;
792 union cpt_lf_ctx_reload reload;
793 union cpt_lf_ctx_flush flush;
794 bool get_inl_lf = true;
798 /* Nothing much to do on cn9k */
799 if (roc_model_is_cn9k()) {
800 plt_atomic_thread_fence(__ATOMIC_ACQ_REL);
805 inl_dev = idev->nix_inl_dev;
807 if (!inl_dev && roc_nix == NULL)
811 nix = roc_nix_to_nix_priv(roc_nix);
812 outb_lf = nix->cpt_lf_base;
/* Inbound SA owned by the NIX itself — don't use the inline dev LF */
813 if (inb && !nix->inb_inl_dev)
817 if (inb && get_inl_lf) {
819 if (inl_dev && inl_dev->attach_cptlf)
820 outb_lf = &inl_dev->cpt_lf;
824 rbase = outb_lf->rbase;
829 case ROC_NIX_INL_SA_OP_FLUSH_INVAL:
832 case ROC_NIX_INL_SA_OP_FLUSH:
833 flush.s.cptr = ((uintptr_t)sa) >> 7;
834 plt_write64(flush.u, rbase + CPT_LF_CTX_FLUSH);
836 case ROC_NIX_INL_SA_OP_RELOAD:
837 reload.s.cptr = ((uintptr_t)sa) >> 7;
838 plt_write64(reload.u, rbase + CPT_LF_CTX_RELOAD);
845 plt_err("Could not get CPT LF for SA sync");
/* Write SA context from a software copy (sa_dptr) to the HW SA (sa_cptr)
 * via the CPT CTX write mechanism, then flush the CTX cache entry so
 * dirty data reaches DRAM.
 *
 * CN9K has no CTX cache: only a memory fence is issued. LF selection
 * mirrors roc_nix_inl_sa_sync(): this NIX's outbound LF, or the inline
 * device's LF for inbound when the NIX uses the inline device. When no
 * CPT LF is available, a debug message is logged (not a hard error).
 */
850 roc_nix_inl_ctx_write(struct roc_nix *roc_nix, void *sa_dptr, void *sa_cptr,
851 bool inb, uint16_t sa_len)
853 struct idev_cfg *idev = idev_get_cfg();
854 struct nix_inl_dev *inl_dev = NULL;
855 struct roc_cpt_lf *outb_lf = NULL;
856 union cpt_lf_ctx_flush flush;
857 bool get_inl_lf = true;
862 /* Nothing much to do on cn9k */
863 if (roc_model_is_cn9k()) {
864 plt_atomic_thread_fence(__ATOMIC_ACQ_REL);
869 inl_dev = idev->nix_inl_dev;
871 if (!inl_dev && roc_nix == NULL)
875 nix = roc_nix_to_nix_priv(roc_nix);
876 outb_lf = nix->cpt_lf_base;
878 if (inb && !nix->inb_inl_dev)
882 if (inb && get_inl_lf) {
884 if (inl_dev && inl_dev->attach_cptlf)
885 outb_lf = &inl_dev->cpt_lf;
889 rbase = outb_lf->rbase;
892 rc = roc_cpt_ctx_write(outb_lf, sa_dptr, sa_cptr, sa_len);
895 /* Trigger CTX flush to write dirty data back to DRAM */
896 flush.s.cptr = ((uintptr_t)sa_cptr) >> 7;
897 plt_write64(flush.u, rbase + CPT_LF_CTX_FLUSH);
901 plt_nix_dbg("Could not get CPT LF for CTX write");
/* Acquire the idev-wide inline device spinlock. */
906 roc_nix_inl_dev_lock(void)
908 struct idev_cfg *idev = idev_get_cfg();
911 plt_spinlock_lock(&idev->nix_inl_dev_lock);
/* Release the idev-wide inline device spinlock. */
915 roc_nix_inl_dev_unlock(void)
917 struct idev_cfg *idev = idev_get_cfg();
920 plt_spinlock_unlock(&idev->nix_inl_dev_lock);