/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(C) 2021 Marvell.
 */

#include "roc_api.h"
#include "roc_priv.h"

PLT_STATIC_ASSERT(ROC_NIX_INL_ONF_IPSEC_INB_SA_SZ ==
		  1UL << ROC_NIX_INL_ONF_IPSEC_INB_SA_SZ_LOG2);
PLT_STATIC_ASSERT(ROC_NIX_INL_ONF_IPSEC_INB_SA_SZ == 512);
PLT_STATIC_ASSERT(ROC_NIX_INL_ONF_IPSEC_OUTB_SA_SZ ==
		  1UL << ROC_NIX_INL_ONF_IPSEC_OUTB_SA_SZ_LOG2);
PLT_STATIC_ASSERT(ROC_NIX_INL_OT_IPSEC_INB_SA_SZ ==
		  1UL << ROC_NIX_INL_OT_IPSEC_INB_SA_SZ_LOG2);
PLT_STATIC_ASSERT(ROC_NIX_INL_OT_IPSEC_INB_SA_SZ == 1024);
PLT_STATIC_ASSERT(ROC_NIX_INL_OT_IPSEC_OUTB_SA_SZ ==
		  1UL << ROC_NIX_INL_OT_IPSEC_OUTB_SA_SZ_LOG2);

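/* The *_LOG2 asserts above keep each SA size an exact power of two
 * (512 == 1UL << 9, 1024 == 1UL << 10), so an SA address can be derived
 * with a shift (sa = base + (spi << LOG2)) rather than a multiply on the
 * fast path.
 */
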
static int
nix_inl_inb_sa_tbl_setup(struct roc_nix *roc_nix)
{
	uint16_t ipsec_in_max_spi = roc_nix->ipsec_in_max_spi;
	struct nix *nix = roc_nix_to_nix_priv(roc_nix);
	struct roc_nix_ipsec_cfg cfg;
	size_t inb_sa_sz;
	int rc, i;
	void *sa;

	/* CN9K SA size is different */
	if (roc_model_is_cn9k())
		inb_sa_sz = ROC_NIX_INL_ONF_IPSEC_INB_SA_SZ;
	else
		inb_sa_sz = ROC_NIX_INL_OT_IPSEC_INB_SA_SZ;

	/* Alloc contiguous memory for Inbound SAs */
	nix->inb_sa_sz = inb_sa_sz;
	nix->inb_sa_base = plt_zmalloc(inb_sa_sz * ipsec_in_max_spi,
				       ROC_NIX_INL_SA_BASE_ALIGN);
	if (!nix->inb_sa_base) {
		plt_err("Failed to allocate memory for Inbound SA");
		return -ENOMEM;
	}

	/* On CN10K, pre-initialize each SA slot */
	if (roc_model_is_cn10k()) {
		for (i = 0; i < ipsec_in_max_spi; i++) {
			sa = ((uint8_t *)nix->inb_sa_base) + (i * inb_sa_sz);
			roc_nix_inl_inb_sa_init(sa);
		}
	}

	memset(&cfg, 0, sizeof(cfg));
	cfg.sa_size = inb_sa_sz;
	cfg.iova = (uintptr_t)nix->inb_sa_base;
	cfg.max_sa = ipsec_in_max_spi + 1;
	cfg.tt = SSO_TT_ORDERED;

	/* Setup device specific inb SA table */
	rc = roc_nix_lf_inl_ipsec_cfg(roc_nix, &cfg, true);
	if (rc) {
		plt_err("Failed to setup NIX Inbound SA conf, rc=%d", rc);
		goto free_mem;
	}

	return 0;
free_mem:
	plt_free(nix->inb_sa_base);
	nix->inb_sa_base = NULL;
	return rc;
}

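/* Sizing example (illustrative numbers): with ipsec_in_max_spi = 4096 on
 * CN10K, the table above is 4096 * 1024 B = 4 MB of zeroed memory aligned
 * to ROC_NIX_INL_SA_BASE_ALIGN, and the SA for SPI n sits at
 * inb_sa_base + n * 1024.
 */
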
static int
nix_inl_sa_tbl_release(struct roc_nix *roc_nix)
{
	struct nix *nix = roc_nix_to_nix_priv(roc_nix);
	int rc;

	rc = roc_nix_lf_inl_ipsec_cfg(roc_nix, NULL, false);
	if (rc) {
		plt_err("Failed to disable Inbound inline ipsec, rc=%d", rc);
		return rc;
	}

	plt_free(nix->inb_sa_base);
	nix->inb_sa_base = NULL;
	return 0;
}

struct roc_cpt_lf *
roc_nix_inl_outb_lf_base_get(struct roc_nix *roc_nix)
{
	struct nix *nix = roc_nix_to_nix_priv(roc_nix);

	/* NIX Inline config needs to be done */
	if (!nix->inl_outb_ena || !nix->cpt_lf_base)
		return NULL;

	return (struct roc_cpt_lf *)nix->cpt_lf_base;
}

uintptr_t
roc_nix_inl_outb_sa_base_get(struct roc_nix *roc_nix)
{
	struct nix *nix = roc_nix_to_nix_priv(roc_nix);

	return (uintptr_t)nix->outb_sa_base;
}

uintptr_t
roc_nix_inl_inb_sa_base_get(struct roc_nix *roc_nix, bool inb_inl_dev)
{
	struct nix *nix = roc_nix_to_nix_priv(roc_nix);
	struct idev_cfg *idev = idev_get_cfg();
	struct nix_inl_dev *inl_dev;

	if (idev == NULL)
		return 0;

	if (!nix->inl_inb_ena)
		return 0;

	inl_dev = idev->nix_inl_dev;
	if (inb_inl_dev) {
		/* Return inline dev sa base */
		if (inl_dev)
			return (uintptr_t)inl_dev->inb_sa_base;
		return 0;
	}

	return (uintptr_t)nix->inb_sa_base;
}

uint32_t
roc_nix_inl_inb_sa_max_spi(struct roc_nix *roc_nix, bool inb_inl_dev)
{
	struct nix *nix = roc_nix_to_nix_priv(roc_nix);
	struct idev_cfg *idev = idev_get_cfg();
	struct nix_inl_dev *inl_dev;

	if (idev == NULL)
		return 0;

	if (!nix->inl_inb_ena)
		return 0;

	inl_dev = idev->nix_inl_dev;
	if (inb_inl_dev) {
		if (inl_dev)
			return inl_dev->ipsec_in_max_spi;
		return 0;
	}

	return roc_nix->ipsec_in_max_spi;
}

uint32_t
roc_nix_inl_inb_sa_sz(struct roc_nix *roc_nix, bool inl_dev_sa)
{
	struct nix *nix = roc_nix_to_nix_priv(roc_nix);
	struct idev_cfg *idev = idev_get_cfg();
	struct nix_inl_dev *inl_dev;

	if (idev == NULL)
		return 0;

	if (!inl_dev_sa)
		return nix->inb_sa_sz;

	inl_dev = idev->nix_inl_dev;
	if (inl_dev_sa && inl_dev)
		return inl_dev->inb_sa_sz;

	return 0;
}

uintptr_t
roc_nix_inl_inb_sa_get(struct roc_nix *roc_nix, bool inb_inl_dev, uint32_t spi)
{
	uintptr_t sa_base;
	uint32_t max_spi;
	uint64_t sz;

	sa_base = roc_nix_inl_inb_sa_base_get(roc_nix, inb_inl_dev);
	/* Check if SA base exists */
	if (!sa_base)
		return 0;

	/* Check if SPI is in range */
	max_spi = roc_nix_inl_inb_sa_max_spi(roc_nix, inb_inl_dev);
	if (spi > max_spi) {
		plt_err("Inbound SA SPI %u exceeds max %u", spi, max_spi);
		return 0;
	}

	/* Get SA size */
	sz = roc_nix_inl_inb_sa_sz(roc_nix, inb_inl_dev);
	if (!sz)
		return 0;

	/* Basic logic of SPI->SA for now */
	return (sa_base + (spi * sz));
}

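/* Usage sketch (illustrative): fetch the SA slot for SPI 10 from the
 * inline device table and bail out if it is absent or out of range:
 *
 *	uintptr_t sa = roc_nix_inl_inb_sa_get(roc_nix, true, 10);
 *
 *	if (!sa)
 *		return -ENOENT;
 */
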
int
roc_nix_inl_inb_init(struct roc_nix *roc_nix)
{
	struct nix *nix = roc_nix_to_nix_priv(roc_nix);
	struct idev_cfg *idev = idev_get_cfg();
	struct roc_cpt *roc_cpt;
	uint16_t param1;
	int rc;

	if (idev == NULL)
		return -ENOTSUP;

	/* Unless we have another mechanism to trigger
	 * onetime Inline config in CPTPF, we cannot
	 * support without CPT being probed.
	 */
	roc_cpt = idev->cpt;
	if (!roc_cpt) {
		plt_err("Cannot support inline inbound, cryptodev not probed");
		return -ENOTSUP;
	}

	if (roc_model_is_cn9k()) {
		param1 = ROC_ONF_IPSEC_INB_MAX_L2_SZ;
	} else {
		union roc_ot_ipsec_inb_param1 u;

		u.u16 = 0;
		u.s.esp_trailer_disable = 1;
		param1 = u.u16;
	}

	/* Do onetime Inbound Inline config in CPTPF */
	rc = roc_cpt_inline_ipsec_inb_cfg(roc_cpt, param1, 0);
	if (rc && rc != -EEXIST) {
		plt_err("Failed to setup inbound lf, rc=%d", rc);
		return rc;
	}

	/* Setup Inbound SA table */
	rc = nix_inl_inb_sa_tbl_setup(roc_nix);
	if (rc)
		return rc;

	nix->inl_inb_ena = true;
	return 0;
}

int
roc_nix_inl_inb_fini(struct roc_nix *roc_nix)
{
	struct nix *nix = roc_nix_to_nix_priv(roc_nix);

	if (!nix->inl_inb_ena)
		return 0;

	nix->inl_inb_ena = false;

	/* Disable Inbound SA */
	return nix_inl_sa_tbl_release(roc_nix);
}

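/* Usage sketch (illustrative): inbound bring-up and teardown pair as
 * follows on a configured roc_nix handle:
 *
 *	rc = roc_nix_inl_inb_init(roc_nix);
 *	if (rc)
 *		return rc;
 *	...
 *	rc = roc_nix_inl_inb_fini(roc_nix);
 */
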
int
roc_nix_inl_outb_init(struct roc_nix *roc_nix)
{
	struct nix *nix = roc_nix_to_nix_priv(roc_nix);
	struct idev_cfg *idev = idev_get_cfg();
	struct roc_cpt_lf *lf_base, *lf;
	struct dev *dev = &nix->dev;
	struct msix_offset_rsp *rsp;
	struct nix_inl_dev *inl_dev;
	uint16_t sso_pffunc;
	uint8_t eng_grpmask;
	uint64_t blkaddr;
	uint16_t nb_lf;
	size_t sa_sz;
	void *sa_base, *sa;
	int i, j, rc;

	if (idev == NULL)
		return -ENOTSUP;

	nb_lf = roc_nix->outb_nb_crypto_qs;
	blkaddr = nix->is_nix1 ? RVU_BLOCK_ADDR_CPT1 : RVU_BLOCK_ADDR_CPT0;

	/* Retrieve inline device if present */
	inl_dev = idev->nix_inl_dev;
	sso_pffunc = inl_dev ? inl_dev->dev.pf_func : idev_sso_pffunc_get();
	if (!sso_pffunc) {
		plt_err("Failed to setup inline outb, need either "
			"inline device or sso device");
		return -ENOTSUP;
	}

	/* Attach CPT LF for outbound */
	rc = cpt_lfs_attach(dev, blkaddr, true, nb_lf);
	if (rc) {
		plt_err("Failed to attach CPT LF for inline outb, rc=%d", rc);
		return rc;
	}

	/* Alloc CPT LFs */
	eng_grpmask = (1ULL << ROC_CPT_DFLT_ENG_GRP_SE |
		       1ULL << ROC_CPT_DFLT_ENG_GRP_SE_IE |
		       1ULL << ROC_CPT_DFLT_ENG_GRP_AE);
	rc = cpt_lfs_alloc(dev, eng_grpmask, blkaddr, true);
	if (rc) {
		plt_err("Failed to alloc CPT LF resources, rc=%d", rc);
		goto lf_detach;
	}

	/* Get msix offsets */
	rc = cpt_get_msix_offset(dev, &rsp);
	if (rc) {
		plt_err("Failed to get CPT LF msix offset, rc=%d", rc);
		goto lf_free;
	}

	mbox_memcpy(nix->cpt_msixoff,
		    nix->is_nix1 ? rsp->cpt1_lf_msixoff : rsp->cptlf_msixoff,
		    sizeof(nix->cpt_msixoff));

	/* Alloc required num of cpt lfs */
	lf_base = plt_zmalloc(nb_lf * sizeof(struct roc_cpt_lf), 0);
	if (!lf_base) {
		plt_err("Failed to alloc cpt lf memory");
		rc = -ENOMEM;
		goto lf_free;
	}

	/* Initialize CPT LFs */
	for (i = 0; i < nb_lf; i++) {
		lf = &lf_base[i];

		lf->lf_id = i;
		lf->nb_desc = roc_nix->outb_nb_desc;
		lf->dev = &nix->dev;
		lf->msixoff = nix->cpt_msixoff[i];
		lf->pci_dev = nix->pci_dev;

		/* Setup CPT LF instruction queue */
		rc = cpt_lf_init(lf);
		if (rc) {
			plt_err("Failed to initialize CPT LF, rc=%d", rc);
			goto lf_fini;
		}

		/* Associate this CPT LF with NIX PFFUNC */
		rc = cpt_lf_outb_cfg(dev, sso_pffunc, nix->dev.pf_func, i,
				     true);
		if (rc) {
			plt_err("Failed to setup CPT LF->(NIX,SSO) link, rc=%d",
				rc);
			goto lf_fini;
		}

		/* Enable IQ */
		roc_cpt_iq_enable(lf);
	}

	if (!roc_nix->ipsec_out_max_sa)
		goto skip_sa_alloc;

	/* CN9K SA size is different */
	if (roc_model_is_cn9k())
		sa_sz = ROC_NIX_INL_ONF_IPSEC_OUTB_SA_SZ;
	else
		sa_sz = ROC_NIX_INL_OT_IPSEC_OUTB_SA_SZ;

	/* Alloc contiguous memory of outbound SA */
	sa_base = plt_zmalloc(sa_sz * roc_nix->ipsec_out_max_sa,
			      ROC_NIX_INL_SA_BASE_ALIGN);
	if (!sa_base) {
		plt_err("Outbound SA base alloc failed");
		goto lf_fini;
	}

	/* On CN10K, pre-initialize each SA slot */
	if (roc_model_is_cn10k()) {
		for (i = 0; i < roc_nix->ipsec_out_max_sa; i++) {
			sa = ((uint8_t *)sa_base) + (i * sa_sz);
			roc_nix_inl_outb_sa_init(sa);
		}
	}
	nix->outb_sa_base = sa_base;
	nix->outb_sa_sz = sa_sz;

skip_sa_alloc:
	nix->cpt_lf_base = lf_base;
	nix->nb_cpt_lf = nb_lf;
	nix->outb_err_sso_pffunc = sso_pffunc;
	nix->inl_outb_ena = true;
	return 0;

lf_fini:
	for (j = i - 1; j >= 0; j--)
		cpt_lf_fini(&lf_base[j]);
	plt_free(lf_base);
lf_free:
	rc |= cpt_lfs_free(dev);
lf_detach:
	rc |= cpt_lfs_detach(dev);
	return rc;
}

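/* Usage sketch (illustrative): after outbound bring-up, the datapath
 * fetches the CPT LFs and SA base set up above:
 *
 *	rc = roc_nix_inl_outb_init(roc_nix);
 *	if (rc)
 *		return rc;
 *	lf_base = roc_nix_inl_outb_lf_base_get(roc_nix);
 *	sa_base = roc_nix_inl_outb_sa_base_get(roc_nix);
 */
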
int
roc_nix_inl_outb_fini(struct roc_nix *roc_nix)
{
	struct nix *nix = roc_nix_to_nix_priv(roc_nix);
	struct roc_cpt_lf *lf_base = nix->cpt_lf_base;
	struct dev *dev = &nix->dev;
	int i, rc, ret = 0;

	if (!nix->inl_outb_ena)
		return 0;

	nix->inl_outb_ena = false;

	/* Cleanup CPT LF instruction queue */
	for (i = 0; i < nix->nb_cpt_lf; i++)
		cpt_lf_fini(&lf_base[i]);

	/* Free LF resources */
	rc = cpt_lfs_free(dev);
	if (rc)
		plt_err("Failed to free CPT LF resources, rc=%d", rc);
	ret |= rc;

	/* Detach LF */
	rc = cpt_lfs_detach(dev);
	if (rc)
		plt_err("Failed to detach CPT LF, rc=%d", rc);
	ret |= rc;

	/* Free LF memory */
	plt_free(lf_base);
	nix->cpt_lf_base = NULL;

	/* Free outbound SA base */
	plt_free(nix->outb_sa_base);
	nix->outb_sa_base = NULL;

	return ret;
}

bool
roc_nix_inl_dev_is_probed(void)
{
	struct idev_cfg *idev = idev_get_cfg();

	if (idev == NULL)
		return 0;

	return !!idev->nix_inl_dev;
}

bool
roc_nix_inl_inb_is_enabled(struct roc_nix *roc_nix)
{
	struct nix *nix = roc_nix_to_nix_priv(roc_nix);

	return nix->inl_inb_ena;
}

bool
roc_nix_inl_outb_is_enabled(struct roc_nix *roc_nix)
{
	struct nix *nix = roc_nix_to_nix_priv(roc_nix);

	return nix->inl_outb_ena;
}

int
roc_nix_inl_dev_rq_get(struct roc_nix_rq *rq)
{
	struct idev_cfg *idev = idev_get_cfg();
	struct nix_inl_dev *inl_dev;
	struct roc_nix_rq *inl_rq;
	struct dev *dev;
	int rc;

	if (idev == NULL)
		return 0;

	inl_dev = idev->nix_inl_dev;
	/* Nothing to do if no inline device */
	if (!inl_dev)
		return 0;

	/* Just take reference if already inited */
	if (inl_dev->rq_refs) {
		inl_dev->rq_refs++;
		rq->inl_dev_ref = true;
		return 0;
	}

	dev = &inl_dev->dev;
	inl_rq = &inl_dev->rq;
	memset(inl_rq, 0, sizeof(struct roc_nix_rq));

	/* Take RQ pool attributes from the first ethdev RQ */
	inl_rq->aura_handle = rq->aura_handle;
	inl_rq->first_skip = rq->first_skip;
	inl_rq->later_skip = rq->later_skip;
	inl_rq->lpb_size = rq->lpb_size;

	if (!roc_model_is_cn9k()) {
		uint64_t aura_limit =
			roc_npa_aura_op_limit_get(inl_rq->aura_handle);
		uint64_t aura_shift = plt_log2_u32(aura_limit);

		if (aura_shift < 8)
			aura_shift = 0;
		else
			aura_shift = aura_shift - 8;

		/* Set first pass RQ to drop when half of the buffers are in
		 * use to avoid metabuf alloc failure. This is needed as long
		 * as we cannot use a different aura.
		 */
		inl_rq->red_pass = (aura_limit / 2) >> aura_shift;
		inl_rq->red_drop = ((aura_limit / 2) - 1) >> aura_shift;
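
		/* Worked example (illustrative): for aura_limit = 4096,
		 * plt_log2_u32() gives 12, so aura_shift = 12 - 8 = 4 and
		 * red_pass = 2048 >> 4 = 128, red_drop = 2047 >> 4 = 127.
		 */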
	}

	/* Enable IPSec */
	inl_rq->ipsech_ena = true;

	inl_rq->flow_tag_width = 20;
	/* Special tag mask */
	inl_rq->tag_mask = 0xFFF00000;
	inl_rq->tt = SSO_TT_ORDERED;
	inl_rq->wqe_skip = 1;
	inl_rq->sso_ena = true;

	/* Prepare and send RQ init mbox */
	if (roc_model_is_cn9k())
		rc = nix_rq_cn9k_cfg(dev, inl_rq, inl_dev->qints, false, true);
	else
		rc = nix_rq_cfg(dev, inl_rq, inl_dev->qints, false, true);
	if (rc) {
		plt_err("Failed to prepare aq_enq msg, rc=%d", rc);
		return rc;
	}

	rc = mbox_process(dev->mbox);
	if (rc) {
		plt_err("Failed to send aq_enq msg, rc=%d", rc);
		return rc;
	}

	inl_dev->rq_refs++;
	rq->inl_dev_ref = true;
	return 0;
}

int
roc_nix_inl_dev_rq_put(struct roc_nix_rq *rq)
{
	struct idev_cfg *idev = idev_get_cfg();
	struct nix_inl_dev *inl_dev;
	struct roc_nix_rq *inl_rq;
	struct dev *dev;
	int rc;

	if (idev == NULL)
		return 0;

	if (!rq->inl_dev_ref)
		return 0;

	inl_dev = idev->nix_inl_dev;
	/* Inline device should be there if we have ref */
	if (!inl_dev) {
		plt_err("Failed to find inline device with refs");
		return -EFAULT;
	}

	rq->inl_dev_ref = false;
	inl_dev->rq_refs--;
	if (inl_dev->rq_refs)
		return 0;

	dev = &inl_dev->dev;
	inl_rq = &inl_dev->rq;
	/* There are no more references, disable RQ */
	rc = nix_rq_ena_dis(dev, inl_rq, false);
	if (rc)
		plt_err("Failed to disable inline device rq, rc=%d", rc);

	/* Flush NIX LF for CN10K */
	if (roc_model_is_cn10k())
		plt_write64(0, inl_dev->nix_base + NIX_LF_OP_VWQE_FLUSH);

	return rc;
}

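/* Reference pairing sketch (illustrative): the first successful
 * roc_nix_inl_dev_rq_get() initializes the inline device RQ; later calls
 * only bump the refcount. Each ethdev RQ that took a reference must call
 * roc_nix_inl_dev_rq_put() on teardown; the last put disables the RQ.
 */
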
uint64_t
roc_nix_inl_dev_rq_limit_get(void)
{
	struct idev_cfg *idev = idev_get_cfg();
	struct nix_inl_dev *inl_dev;
	struct roc_nix_rq *inl_rq;

	if (!idev || !idev->nix_inl_dev)
		return 0;

	inl_dev = idev->nix_inl_dev;
	if (!inl_dev->rq_refs)
		return 0;

	inl_rq = &inl_dev->rq;
	return roc_npa_aura_op_limit_get(inl_rq->aura_handle);
}

void
roc_nix_inb_mode_set(struct roc_nix *roc_nix, bool use_inl_dev)
{
	struct nix *nix = roc_nix_to_nix_priv(roc_nix);

	/* Info used by NPC flow rule add */
	nix->inb_inl_dev = use_inl_dev;
}

bool
roc_nix_inb_is_with_inl_dev(struct roc_nix *roc_nix)
{
	struct nix *nix = roc_nix_to_nix_priv(roc_nix);

	return nix->inb_inl_dev;
}

struct roc_nix_rq *
roc_nix_inl_dev_rq(void)
{
	struct idev_cfg *idev = idev_get_cfg();
	struct nix_inl_dev *inl_dev;

	if (idev != NULL) {
		inl_dev = idev->nix_inl_dev;
		if (inl_dev != NULL && inl_dev->rq_refs)
			return &inl_dev->rq;
	}

	return NULL;
}

uint16_t
roc_nix_inl_outb_sso_pffunc_get(struct roc_nix *roc_nix)
{
	struct nix *nix = roc_nix_to_nix_priv(roc_nix);

	return nix->outb_err_sso_pffunc;
}

int
roc_nix_inl_cb_register(roc_nix_inl_sso_work_cb_t cb, void *args)
{
	struct idev_cfg *idev = idev_get_cfg();
	struct nix_inl_dev *inl_dev;

	if (idev == NULL)
		return -EIO;

	inl_dev = idev->nix_inl_dev;
	if (!inl_dev)
		return -EIO;

	/* Be silent if registration called with same cb and args */
	if (inl_dev->work_cb == cb && inl_dev->cb_args == args)
		return 0;

	/* Don't allow registration again if registered with different cb */
	if (inl_dev->work_cb)
		return -EBUSY;

	inl_dev->work_cb = cb;
	inl_dev->cb_args = args;
	return 0;
}

int
roc_nix_inl_cb_unregister(roc_nix_inl_sso_work_cb_t cb, void *args)
{
	struct idev_cfg *idev = idev_get_cfg();
	struct nix_inl_dev *inl_dev;

	if (idev == NULL)
		return -ENOENT;

	inl_dev = idev->nix_inl_dev;
	if (!inl_dev)
		return -ENOENT;

	if (inl_dev->work_cb != cb || inl_dev->cb_args != args)
		return -EINVAL;

	inl_dev->work_cb = NULL;
	inl_dev->cb_args = NULL;
	return 0;
}

int
roc_nix_inl_inb_tag_update(struct roc_nix *roc_nix, uint32_t tag_const,
			   uint8_t tt)
{
	struct nix *nix = roc_nix_to_nix_priv(roc_nix);
	struct roc_nix_ipsec_cfg cfg;

	/* Be silent if inline inbound not enabled */
	if (!nix->inl_inb_ena)
		return 0;

	memset(&cfg, 0, sizeof(cfg));
	cfg.sa_size = nix->inb_sa_sz;
	cfg.iova = (uintptr_t)nix->inb_sa_base;
	cfg.max_sa = roc_nix->ipsec_in_max_spi + 1;
	cfg.tt = tt;
	cfg.tag_const = tag_const;

	return roc_nix_lf_inl_ipsec_cfg(roc_nix, &cfg, true);
}

int
roc_nix_inl_sa_sync(struct roc_nix *roc_nix, void *sa, bool inb,
		    enum roc_nix_inl_sa_sync_op op)
{
	struct nix *nix = roc_nix_to_nix_priv(roc_nix);
	struct roc_cpt_lf *outb_lf = nix->cpt_lf_base;
	struct idev_cfg *idev = idev_get_cfg();
	struct nix_inl_dev *inl_dev = NULL;
	union cpt_lf_ctx_reload reload;
	union cpt_lf_ctx_flush flush;
	uintptr_t rbase;

	/* Nothing much to do on cn9k */
	if (roc_model_is_cn9k()) {
		plt_atomic_thread_fence(__ATOMIC_ACQ_REL);
		return 0;
	}

	if (inb && nix->inb_inl_dev) {
		outb_lf = NULL;
		if (idev)
			inl_dev = idev->nix_inl_dev;
		if (inl_dev)
			outb_lf = &inl_dev->cpt_lf;
	}

	if (outb_lf) {
		rbase = outb_lf->rbase;

		flush.u = 0;
		reload.u = 0;
		switch (op) {
		case ROC_NIX_INL_SA_OP_FLUSH_INVAL:
			flush.s.inval = 1;
			/* Fall through */
		case ROC_NIX_INL_SA_OP_FLUSH:
			flush.s.cptr = ((uintptr_t)sa) >> 7;
			plt_write64(flush.u, rbase + CPT_LF_CTX_FLUSH);
			break;
		case ROC_NIX_INL_SA_OP_RELOAD:
			reload.s.cptr = ((uintptr_t)sa) >> 7;
			plt_write64(reload.u, rbase + CPT_LF_CTX_RELOAD);
			break;
		default:
			return -EINVAL;
		}
		return 0;
	}
	plt_err("Could not get CPT LF for SA sync");
	return -ENOTSUP;
}

int
roc_nix_inl_ctx_write(struct roc_nix *roc_nix, void *sa_dptr, void *sa_cptr,
		      bool inb, uint16_t sa_len)
{
	struct nix *nix = roc_nix_to_nix_priv(roc_nix);
	struct roc_cpt_lf *outb_lf = nix->cpt_lf_base;
	struct idev_cfg *idev = idev_get_cfg();
	struct nix_inl_dev *inl_dev = NULL;
	union cpt_lf_ctx_flush flush;
	uintptr_t rbase;
	int rc;

	/* Nothing much to do on cn9k */
	if (roc_model_is_cn9k()) {
		plt_atomic_thread_fence(__ATOMIC_ACQ_REL);
		return 0;
	}

	if (inb && nix->inb_inl_dev) {
		outb_lf = NULL;
		if (idev)
			inl_dev = idev->nix_inl_dev;
		if (inl_dev && inl_dev->attach_cptlf)
			outb_lf = &inl_dev->cpt_lf;
	}

	if (outb_lf) {
		rbase = outb_lf->rbase;
		flush.u = 0;

		rc = roc_cpt_ctx_write(outb_lf, sa_dptr, sa_cptr, sa_len);
		if (rc)
			return rc;

		/* Trigger CTX flush to write dirty data back to DRAM */
		flush.s.cptr = ((uintptr_t)sa_cptr) >> 7;
		plt_write64(flush.u, rbase + CPT_LF_CTX_FLUSH);
		return 0;
	}
	plt_nix_dbg("Could not get CPT LF for CTX write");
	return -ENOTSUP;
}

void
roc_nix_inl_inb_sa_init(struct roc_ot_ipsec_inb_sa *sa)
{
	size_t offset;

	memset(sa, 0, sizeof(struct roc_ot_ipsec_inb_sa));

	offset = offsetof(struct roc_ot_ipsec_inb_sa, ctx);
	sa->w0.s.hw_ctx_off = offset / ROC_CTX_UNIT_8B;
	sa->w0.s.ctx_push_size = sa->w0.s.hw_ctx_off + 1;
	sa->w0.s.ctx_size = ROC_IE_OT_CTX_ILEN;
	sa->w0.s.aop_valid = 1;
}

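/* Worked example (hypothetical offset): if the hw ctx began 256 bytes
 * into the SA, hw_ctx_off would be 256 / 8 = 32 8-byte words and
 * ctx_push_size 33; the real value comes from offsetof() above.
 */
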
void
roc_nix_inl_outb_sa_init(struct roc_ot_ipsec_outb_sa *sa)
{
	size_t offset;

	memset(sa, 0, sizeof(struct roc_ot_ipsec_outb_sa));

	offset = offsetof(struct roc_ot_ipsec_outb_sa, ctx);
	sa->w0.s.ctx_push_size = (offset / ROC_CTX_UNIT_8B);
	sa->w0.s.ctx_size = ROC_IE_OT_CTX_ILEN;
	sa->w0.s.aop_valid = 1;
}

void
roc_nix_inl_dev_lock(void)
{
	struct idev_cfg *idev = idev_get_cfg();

	if (idev != NULL)
		plt_spinlock_lock(&idev->nix_inl_dev_lock);
}

void
roc_nix_inl_dev_unlock(void)
{
	struct idev_cfg *idev = idev_get_cfg();

	if (idev != NULL)
		plt_spinlock_unlock(&idev->nix_inl_dev_lock);
}