/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(C) 2021 Marvell.
 */

#include "roc_api.h"
#include "roc_priv.h"

uint32_t soft_exp_consumer_cnt;

PLT_STATIC_ASSERT(ROC_NIX_INL_ONF_IPSEC_INB_SA_SZ ==
		  1UL << ROC_NIX_INL_ONF_IPSEC_INB_SA_SZ_LOG2);
PLT_STATIC_ASSERT(ROC_NIX_INL_ONF_IPSEC_INB_SA_SZ == 512);
PLT_STATIC_ASSERT(ROC_NIX_INL_ONF_IPSEC_OUTB_SA_SZ ==
		  1UL << ROC_NIX_INL_ONF_IPSEC_OUTB_SA_SZ_LOG2);
PLT_STATIC_ASSERT(ROC_NIX_INL_OT_IPSEC_INB_SA_SZ ==
		  1UL << ROC_NIX_INL_OT_IPSEC_INB_SA_SZ_LOG2);
PLT_STATIC_ASSERT(ROC_NIX_INL_OT_IPSEC_INB_SA_SZ == 1024);
PLT_STATIC_ASSERT(ROC_NIX_INL_OT_IPSEC_OUTB_SA_SZ ==
		  1UL << ROC_NIX_INL_OT_IPSEC_OUTB_SA_SZ_LOG2);
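
/*
 * Note: the SA sizes are pinned to powers of two above so that the
 * SPI-to-SA lookup can be a mask-and-multiply on the datapath (see
 * roc_nix_inl_inb_sa_get() below). A minimal sketch of the resulting
 * table layout, assuming a base of inb_sa_base and N slots of size sz:
 *
 *	sa(i) = inb_sa_base + (i * sz), with 0 <= i < N and N a power of 2
 */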
static int
nix_inl_inb_sa_tbl_setup(struct roc_nix *roc_nix)
{
	uint32_t ipsec_in_min_spi = roc_nix->ipsec_in_min_spi;
	uint32_t ipsec_in_max_spi = roc_nix->ipsec_in_max_spi;
	struct nix *nix = roc_nix_to_nix_priv(roc_nix);
	struct roc_nix_ipsec_cfg cfg;
	size_t inb_sa_sz;
	uint32_t max_sa, i;
	void *sa;
	int rc;

	max_sa = plt_align32pow2(ipsec_in_max_spi - ipsec_in_min_spi + 1);

	/* CN9K SA size is different */
	if (roc_model_is_cn9k())
		inb_sa_sz = ROC_NIX_INL_ONF_IPSEC_INB_SA_SZ;
	else
		inb_sa_sz = ROC_NIX_INL_OT_IPSEC_INB_SA_SZ;

	/* Alloc contiguous memory for Inbound SAs */
	nix->inb_sa_sz = inb_sa_sz;
	nix->inb_spi_mask = max_sa - 1;
	nix->inb_sa_base = plt_zmalloc(inb_sa_sz * max_sa,
				       ROC_NIX_INL_SA_BASE_ALIGN);
	if (!nix->inb_sa_base) {
		plt_err("Failed to allocate memory for Inbound SA");
		return -ENOMEM;
	}

	if (roc_model_is_cn10k()) {
		for (i = 0; i < max_sa; i++) {
			sa = ((uint8_t *)nix->inb_sa_base) + (i * inb_sa_sz);
			roc_ot_ipsec_inb_sa_init(sa, true);
		}
	}

	memset(&cfg, 0, sizeof(cfg));
	cfg.sa_size = inb_sa_sz;
	cfg.iova = (uintptr_t)nix->inb_sa_base;
	cfg.max_sa = max_sa;
	cfg.tt = SSO_TT_ORDERED;

	/* Setup device specific inb SA table */
	rc = roc_nix_lf_inl_ipsec_cfg(roc_nix, &cfg, true);
	if (rc) {
		plt_err("Failed to setup NIX Inbound SA conf, rc=%d", rc);
		goto free_mem;
	}

	return 0;
free_mem:
	plt_free(nix->inb_sa_base);
	nix->inb_sa_base = NULL;
	return rc;
}
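
/*
 * Sizing example (illustrative numbers, not from the source): with
 * ipsec_in_min_spi = 1 and ipsec_in_max_spi = 100, the range holds 100
 * SPIs, plt_align32pow2() rounds that up to max_sa = 128 and hence
 * inb_spi_mask = 0x7f. An SPI then maps to slot (spi & 0x7f), so two
 * SPIs that differ by a multiple of 128 would alias to the same slot.
 */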
static int
nix_inl_sa_tbl_release(struct roc_nix *roc_nix)
{
	struct nix *nix = roc_nix_to_nix_priv(roc_nix);
	int rc;

	rc = roc_nix_lf_inl_ipsec_cfg(roc_nix, NULL, false);
	if (rc) {
		plt_err("Failed to disable Inbound inline ipsec, rc=%d", rc);
		return rc;
	}

	plt_free(nix->inb_sa_base);
	nix->inb_sa_base = NULL;
	return 0;
}
struct roc_cpt_lf *
roc_nix_inl_outb_lf_base_get(struct roc_nix *roc_nix)
{
	struct nix *nix = roc_nix_to_nix_priv(roc_nix);

	/* NIX Inline config needs to be done */
	if (!nix->inl_outb_ena || !nix->cpt_lf_base)
		return NULL;

	return (struct roc_cpt_lf *)nix->cpt_lf_base;
}
uintptr_t
roc_nix_inl_outb_sa_base_get(struct roc_nix *roc_nix)
{
	struct nix *nix = roc_nix_to_nix_priv(roc_nix);

	return (uintptr_t)nix->outb_sa_base;
}
uintptr_t
roc_nix_inl_inb_sa_base_get(struct roc_nix *roc_nix, bool inb_inl_dev)
{
	struct idev_cfg *idev = idev_get_cfg();
	struct nix_inl_dev *inl_dev;
	struct nix *nix = NULL;

	if (idev == NULL || (!inb_inl_dev && roc_nix == NULL))
		return 0;

	if (roc_nix) {
		nix = roc_nix_to_nix_priv(roc_nix);
		if (!nix->inl_inb_ena)
			return 0;
	}

	if (inb_inl_dev) {
		inl_dev = idev->nix_inl_dev;
		/* Return inline dev sa base */
		return inl_dev ? (uintptr_t)inl_dev->inb_sa_base : 0;
	}

	return (uintptr_t)nix->inb_sa_base;
}
uint32_t
roc_nix_inl_inb_spi_range(struct roc_nix *roc_nix, bool inb_inl_dev,
			  uint32_t *min_spi, uint32_t *max_spi)
{
	struct idev_cfg *idev = idev_get_cfg();
	uint32_t min = 0, max = 0, mask = 0;
	struct nix_inl_dev *inl_dev;
	struct nix *nix = NULL;

	if (idev == NULL || (!inb_inl_dev && roc_nix == NULL))
		goto exit;

	inl_dev = idev->nix_inl_dev;
	if (inb_inl_dev) {
		if (inl_dev == NULL)
			goto exit;
		min = inl_dev->ipsec_in_min_spi;
		max = inl_dev->ipsec_in_max_spi;
		mask = inl_dev->inb_spi_mask;
	} else {
		nix = roc_nix_to_nix_priv(roc_nix);
		if (!nix->inl_inb_ena)
			goto exit;
		min = roc_nix->ipsec_in_min_spi;
		max = roc_nix->ipsec_in_max_spi;
		mask = nix->inb_spi_mask;
	}
exit:
	if (min_spi)
		*min_spi = min;
	if (max_spi)
		*max_spi = max;
	return mask;
}
uint32_t
roc_nix_inl_inb_sa_sz(struct roc_nix *roc_nix, bool inl_dev_sa)
{
	struct idev_cfg *idev = idev_get_cfg();
	struct nix_inl_dev *inl_dev;
	struct nix *nix;

	if (idev == NULL || (!inl_dev_sa && roc_nix == NULL))
		return 0;

	if (roc_nix && !inl_dev_sa) {
		nix = roc_nix_to_nix_priv(roc_nix);
		return nix->inb_sa_sz;
	}

	if (inl_dev_sa) {
		inl_dev = idev->nix_inl_dev;
		if (inl_dev)
			return inl_dev->inb_sa_sz;
	}

	return 0;
}
uintptr_t
roc_nix_inl_inb_sa_get(struct roc_nix *roc_nix, bool inb_inl_dev, uint32_t spi)
{
	uint32_t max_spi = 0, min_spi = 0, mask;
	uintptr_t sa_base;
	uint64_t sz;

	sa_base = roc_nix_inl_inb_sa_base_get(roc_nix, inb_inl_dev);
	/* Check if SA base exists */
	if (!sa_base)
		return 0;

	/* Get SA size */
	sz = roc_nix_inl_inb_sa_sz(roc_nix, inb_inl_dev);
	if (!sz)
		return 0;

	if (roc_nix && roc_nix->custom_sa_action)
		return (sa_base + (spi * sz));

	/* Check if SPI is in range */
	mask = roc_nix_inl_inb_spi_range(roc_nix, inb_inl_dev, &min_spi,
					 &max_spi);
	if (spi > max_spi || spi < min_spi)
		plt_nix_dbg("Inbound SA SPI %u not in range (%u..%u)", spi,
			    min_spi, max_spi);

	/* Basic logic of SPI->SA for now */
	return (sa_base + ((spi & mask) * sz));
}
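
/*
 * Usage sketch (hypothetical caller, error handling elided): look up the
 * inbound SA for a received SPI and cast it to the model-specific type.
 *
 *	uintptr_t sa;
 *
 *	sa = roc_nix_inl_inb_sa_get(roc_nix, false, spi);
 *	if (!sa)
 *		return -ENOENT;
 *	// cn10k: (struct roc_ot_ipsec_inb_sa *)sa
 *	// cn9k:  (struct roc_onf_ipsec_inb_sa *)sa
 */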
int
roc_nix_reassembly_configure(uint32_t max_wait_time, uint16_t max_frags)
{
	struct idev_cfg *idev = idev_get_cfg();
	struct roc_cpt_rxc_time_cfg cfg;
	struct roc_cpt *roc_cpt;

	PLT_SET_USED(max_frags);
	if (idev == NULL)
		return -ENOTSUP;
	roc_cpt = idev->cpt;
	if (!roc_cpt) {
		plt_err("Cannot support inline inbound, cryptodev not probed");
		return -ENOTSUP;
	}

	cfg.step = (max_wait_time * 1000 / ROC_NIX_INL_REAS_ACTIVE_LIMIT);
	cfg.zombie_limit = ROC_NIX_INL_REAS_ZOMBIE_LIMIT;
	cfg.zombie_thres = ROC_NIX_INL_REAS_ZOMBIE_THRESHOLD;
	cfg.active_limit = ROC_NIX_INL_REAS_ACTIVE_LIMIT;
	cfg.active_thres = ROC_NIX_INL_REAS_ACTIVE_THRESHOLD;

	return roc_cpt_rxc_time_cfg(roc_cpt, &cfg);
}
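
/*
 * Worked example for the step computation above (assuming, purely for
 * illustration, that ROC_NIX_INL_REAS_ACTIVE_LIMIT were 0x800): a
 * max_wait_time of 100 ms gives step = (100 * 1000) / 0x800 = 48 with
 * integer division. The actual limit value is defined in the ROC headers.
 */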
int
roc_nix_inl_inb_init(struct roc_nix *roc_nix)
{
	struct nix *nix = roc_nix_to_nix_priv(roc_nix);
	struct idev_cfg *idev = idev_get_cfg();
	struct roc_cpt *roc_cpt;
	uint16_t param1;
	int rc;

	if (idev == NULL)
		return -ENOTSUP;

	/* Unless we have another mechanism to trigger
	 * onetime Inline config in CPTPF, we cannot
	 * support without CPT being probed.
	 */
	roc_cpt = idev->cpt;
	if (!roc_cpt) {
		plt_err("Cannot support inline inbound, cryptodev not probed");
		return -ENOTSUP;
	}

	if (roc_model_is_cn9k()) {
		param1 = ROC_ONF_IPSEC_INB_MAX_L2_SZ;
	} else {
		union roc_ot_ipsec_inb_param1 u;

		u.u16 = 0;
		u.s.esp_trailer_disable = 1;
		param1 = u.u16;
	}

	/* Do onetime Inbound Inline config in CPTPF */
	rc = roc_cpt_inline_ipsec_inb_cfg(roc_cpt, param1, 0);
	if (rc && rc != -EEXIST) {
		plt_err("Failed to setup inbound lf, rc=%d", rc);
		return rc;
	}

	/* Setup Inbound SA table */
	rc = nix_inl_inb_sa_tbl_setup(roc_nix);
	if (rc)
		return rc;

	nix->inl_inb_ena = true;
	return 0;
}
int
roc_nix_inl_inb_fini(struct roc_nix *roc_nix)
{
	struct nix *nix = roc_nix_to_nix_priv(roc_nix);

	if (!nix->inl_inb_ena)
		return 0;

	nix->inl_inb_ena = false;

	/* Flush Inbound CTX cache entries */
	roc_nix_cpt_ctx_cache_sync(roc_nix);

	/* Disable Inbound SA */
	return nix_inl_sa_tbl_release(roc_nix);
}
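
/*
 * Typical inbound bring-up/teardown order (sketch; assumes a CPT
 * cryptodev has already been probed into the idev framework):
 *
 *	rc = roc_nix_inl_inb_init(roc_nix);	// one-time CPTPF cfg + SA table
 *	...datapath uses roc_nix_inl_inb_sa_get()...
 *	rc = roc_nix_inl_inb_fini(roc_nix);	// CTX cache sync + table release
 */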
int
roc_nix_inl_outb_init(struct roc_nix *roc_nix)
{
	struct nix *nix = roc_nix_to_nix_priv(roc_nix);
	struct idev_cfg *idev = idev_get_cfg();
	struct roc_cpt_lf *lf_base, *lf;
	struct dev *dev = &nix->dev;
	struct msix_offset_rsp *rsp;
	struct nix_inl_dev *inl_dev;
	size_t sa_sz, ring_sz;
	uint64_t eng_grpmask;
	uint16_t sso_pffunc;
	uint64_t *ring_base;
	uint64_t blkaddr;
	uint16_t nb_lf;
	void *sa_base;
	int i, j, rc;
	void *sa;

	if (idev == NULL)
		return -ENOTSUP;

	nb_lf = roc_nix->outb_nb_crypto_qs;
	blkaddr = nix->is_nix1 ? RVU_BLOCK_ADDR_CPT1 : RVU_BLOCK_ADDR_CPT0;

	/* Retrieve inline device if present */
	inl_dev = idev->nix_inl_dev;
	sso_pffunc = inl_dev ? inl_dev->dev.pf_func : idev_sso_pffunc_get();
	/* Use sso_pffunc if explicitly requested */
	if (roc_nix->ipsec_out_sso_pffunc)
		sso_pffunc = idev_sso_pffunc_get();

	if (!sso_pffunc) {
		plt_err("Failed to setup inline outb, need either "
			"inline device or sso device");
		return -ENOTSUP;
	}

	/* Attach CPT LF for outbound */
	rc = cpt_lfs_attach(dev, blkaddr, true, nb_lf);
	if (rc) {
		plt_err("Failed to attach CPT LF for inline outb, rc=%d", rc);
		return rc;
	}

	/* Alloc CPT LFs */
	eng_grpmask = (1ULL << ROC_CPT_DFLT_ENG_GRP_SE |
		       1ULL << ROC_CPT_DFLT_ENG_GRP_SE_IE |
		       1ULL << ROC_CPT_DFLT_ENG_GRP_AE);
	rc = cpt_lfs_alloc(dev, eng_grpmask, blkaddr,
			   !roc_nix->ipsec_out_sso_pffunc);
	if (rc) {
		plt_err("Failed to alloc CPT LF resources, rc=%d", rc);
		goto lf_detach;
	}

	/* Get msix offsets */
	rc = cpt_get_msix_offset(dev, &rsp);
	if (rc) {
		plt_err("Failed to get CPT LF msix offset, rc=%d", rc);
		goto lf_free;
	}

	mbox_memcpy(nix->cpt_msixoff,
		    nix->is_nix1 ? rsp->cpt1_lf_msixoff : rsp->cptlf_msixoff,
		    sizeof(nix->cpt_msixoff));

	/* Alloc required num of cpt lfs */
	lf_base = plt_zmalloc(nb_lf * sizeof(struct roc_cpt_lf), 0);
	if (!lf_base) {
		plt_err("Failed to alloc cpt lf memory");
		rc = -ENOMEM;
		goto lf_free;
	}

	/* Initialize CPT LF's */
	for (i = 0; i < nb_lf; i++) {
		lf = &lf_base[i];

		lf->lf_id = i;
		lf->nb_desc = roc_nix->outb_nb_desc;
		lf->dev = &nix->dev;
		lf->msixoff = nix->cpt_msixoff[i];
		lf->pci_dev = nix->pci_dev;

		/* Setup CPT LF instruction queue */
		rc = cpt_lf_init(lf);
		if (rc) {
			plt_err("Failed to initialize CPT LF, rc=%d", rc);
			goto lf_fini;
		}

		/* Associate this CPT LF with NIX PFFUNC */
		rc = cpt_lf_outb_cfg(dev, sso_pffunc, nix->dev.pf_func, i,
				     true);
		if (rc) {
			plt_err("Failed to setup CPT LF->(NIX,SSO) link, rc=%d",
				rc);
			goto lf_fini;
		}

		/* Enable IQ */
		roc_cpt_iq_enable(lf);
	}

	if (!roc_nix->ipsec_out_max_sa)
		goto skip_sa_alloc;

	/* CN9K SA size is different */
	if (roc_model_is_cn9k())
		sa_sz = ROC_NIX_INL_ONF_IPSEC_OUTB_SA_SZ;
	else
		sa_sz = ROC_NIX_INL_OT_IPSEC_OUTB_SA_SZ;

	/* Alloc contiguous memory of outbound SA */
	sa_base = plt_zmalloc(sa_sz * roc_nix->ipsec_out_max_sa,
			      ROC_NIX_INL_SA_BASE_ALIGN);
	if (!sa_base) {
		plt_err("Outbound SA base alloc failed");
		goto lf_fini;
	}
	if (roc_model_is_cn10k()) {
		for (i = 0; i < roc_nix->ipsec_out_max_sa; i++) {
			sa = ((uint8_t *)sa_base) + (i * sa_sz);
			roc_ot_ipsec_outb_sa_init(sa);
		}
	}
	nix->outb_sa_base = sa_base;
	nix->outb_sa_sz = sa_sz;

skip_sa_alloc:
	nix->cpt_lf_base = lf_base;
	nix->nb_cpt_lf = nb_lf;
	nix->outb_err_sso_pffunc = sso_pffunc;
	nix->inl_outb_ena = true;
	nix->outb_se_ring_cnt =
		roc_nix->ipsec_out_max_sa / ROC_IPSEC_ERR_RING_MAX_ENTRY + 1;
	nix->outb_se_ring_base =
		roc_nix->port_id * ROC_NIX_SOFT_EXP_PER_PORT_MAX_RINGS;

	if (inl_dev == NULL || !inl_dev->set_soft_exp_poll) {
		nix->outb_se_ring_cnt = 0;
		return 0;
	}

	/* Allocate memory to be used as a ring buffer to poll for
	 * soft expiry event from ucode
	 */
	ring_sz = (ROC_IPSEC_ERR_RING_MAX_ENTRY + 1) * sizeof(uint64_t);
	ring_base = inl_dev->sa_soft_exp_ring;
	for (i = 0; i < nix->outb_se_ring_cnt; i++) {
		ring_base[nix->outb_se_ring_base + i] =
			PLT_U64_CAST(plt_zmalloc(ring_sz, 0));
		if (!ring_base[nix->outb_se_ring_base + i]) {
			plt_err("Couldn't allocate memory for soft exp ring");
			while (i--)
				plt_free(PLT_PTR_CAST(
					ring_base[nix->outb_se_ring_base + i]));
			return -ENOMEM;
		}
	}

	return 0;

lf_fini:
	for (j = i - 1; j >= 0; j--)
		cpt_lf_fini(&lf_base[j]);
	plt_free(lf_base);
lf_free:
	rc |= cpt_lfs_free(dev);
lf_detach:
	rc |= cpt_lfs_detach(dev);
	return rc;
}
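
/*
 * The unwind above mirrors the setup order: LFs initialized so far are
 * finalized (lf_fini), allocated LF resources are freed (lf_free), and
 * finally the LFs are detached (lf_detach). rc is OR-ed rather than
 * overwritten so the original failure reason is not lost to a cleanup
 * error.
 */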
int
roc_nix_inl_outb_fini(struct roc_nix *roc_nix)
{
	struct nix *nix = roc_nix_to_nix_priv(roc_nix);
	struct roc_cpt_lf *lf_base = nix->cpt_lf_base;
	struct idev_cfg *idev = idev_get_cfg();
	struct dev *dev = &nix->dev;
	struct nix_inl_dev *inl_dev;
	uint64_t *ring_base;
	int i, rc;

	if (!nix->inl_outb_ena)
		return 0;

	nix->inl_outb_ena = false;

	/* Cleanup CPT LF instruction queue */
	for (i = 0; i < nix->nb_cpt_lf; i++)
		cpt_lf_fini(&lf_base[i]);

	/* Free LF resources */
	rc = cpt_lfs_free(dev);
	if (rc)
		plt_err("Failed to free CPT LF resources, rc=%d", rc);

	/* Detach LF */
	rc = cpt_lfs_detach(dev);
	if (rc)
		plt_err("Failed to detach CPT LF, rc=%d", rc);

	/* Free LF memory */
	plt_free(lf_base);
	nix->cpt_lf_base = NULL;
	nix->nb_cpt_lf = 0;

	/* Free outbound SA base */
	plt_free(nix->outb_sa_base);
	nix->outb_sa_base = NULL;

	if (idev && idev->nix_inl_dev && nix->outb_se_ring_cnt) {
		inl_dev = idev->nix_inl_dev;
		ring_base = inl_dev->sa_soft_exp_ring;
		ring_base += nix->outb_se_ring_base;

		for (i = 0; i < nix->outb_se_ring_cnt; i++) {
			if (ring_base[i])
				plt_free(PLT_PTR_CAST(ring_base[i]));
		}
	}

	return rc;
}
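
/*
 * Note that teardown is best effort: free/detach failures are logged and
 * cleanup continues, so host memory (LF array, SA base, soft expiry
 * rings) is released even when a mailbox call fails.
 */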
bool
roc_nix_inl_dev_is_probed(void)
{
	struct idev_cfg *idev = idev_get_cfg();

	if (idev == NULL)
		return false;

	return !!idev->nix_inl_dev;
}
bool
roc_nix_inl_inb_is_enabled(struct roc_nix *roc_nix)
{
	struct nix *nix = roc_nix_to_nix_priv(roc_nix);

	return nix->inl_inb_ena;
}
bool
roc_nix_inl_outb_is_enabled(struct roc_nix *roc_nix)
{
	struct nix *nix = roc_nix_to_nix_priv(roc_nix);

	return nix->inl_outb_ena;
}
int
roc_nix_inl_dev_rq_get(struct roc_nix_rq *rq)
{
	struct idev_cfg *idev = idev_get_cfg();
	struct nix_inl_dev *inl_dev;
	struct roc_nix_rq *inl_rq;
	struct dev *dev;
	int rc;

	if (idev == NULL)
		return 0;

	inl_dev = idev->nix_inl_dev;
	/* Nothing to do if no inline device */
	if (!inl_dev)
		return 0;

	/* Just take reference if already inited */
	if (inl_dev->rq_refs) {
		inl_dev->rq_refs++;
		rq->inl_dev_ref = true;
		return 0;
	}

	dev = &inl_dev->dev;
	inl_rq = &inl_dev->rq;
	memset(inl_rq, 0, sizeof(struct roc_nix_rq));

	/* Take RQ pool attributes from the first ethdev RQ */
	inl_rq->aura_handle = rq->aura_handle;
	inl_rq->first_skip = rq->first_skip;
	inl_rq->later_skip = rq->later_skip;
	inl_rq->lpb_size = rq->lpb_size;
	inl_rq->lpb_drop_ena = true;
	inl_rq->spb_ena = rq->spb_ena;
	inl_rq->spb_aura_handle = rq->spb_aura_handle;
	inl_rq->spb_size = rq->spb_size;
	inl_rq->spb_drop_ena = !!rq->spb_ena;

	if (!roc_model_is_cn9k()) {
		uint64_t aura_limit =
			roc_npa_aura_op_limit_get(inl_rq->aura_handle);
		uint64_t aura_shift = plt_log2_u32(aura_limit);
		uint64_t aura_drop, drop_pc;

		if (aura_shift < 8)
			aura_shift = 0;
		else
			aura_shift = aura_shift - 8;

		/* Set first pass RQ to drop after part of buffers are in
		 * use to avoid metabuf alloc failure. This is needed as long
		 * as we cannot use different aura.
		 */
		drop_pc = inl_dev->lpb_drop_pc;
		aura_drop = ((aura_limit * drop_pc) / 100) >> aura_shift;
		roc_npa_aura_drop_set(inl_rq->aura_handle, aura_drop, true);
	}

	if (inl_rq->spb_ena) {
		uint64_t aura_limit =
			roc_npa_aura_op_limit_get(inl_rq->spb_aura_handle);
		uint64_t aura_shift = plt_log2_u32(aura_limit);
		uint64_t aura_drop, drop_pc;

		if (aura_shift < 8)
			aura_shift = 0;
		else
			aura_shift = aura_shift - 8;

		/* Set first pass RQ to drop after part of buffers are in
		 * use to avoid metabuf alloc failure. This is needed as long
		 * as we cannot use different aura.
		 */
		drop_pc = inl_dev->spb_drop_pc;
		aura_drop = ((aura_limit * drop_pc) / 100) >> aura_shift;
		roc_npa_aura_drop_set(inl_rq->spb_aura_handle, aura_drop, true);
	}

	inl_rq->ipsech_ena = true;
	inl_rq->flow_tag_width = 20;
	/* Special tag mask */
	inl_rq->tag_mask = rq->tag_mask;
	inl_rq->tt = SSO_TT_ORDERED;
	inl_rq->wqe_skip = inl_dev->wqe_skip;
	inl_rq->sso_ena = true;

	/* Prepare and send RQ init mbox */
	if (roc_model_is_cn9k())
		rc = nix_rq_cn9k_cfg(dev, inl_rq, inl_dev->qints, false, true);
	else
		rc = nix_rq_cfg(dev, inl_rq, inl_dev->qints, false, true);
	if (rc) {
		plt_err("Failed to prepare aq_enq msg, rc=%d", rc);
		return rc;
	}

	rc = mbox_process(dev->mbox);
	if (rc) {
		plt_err("Failed to send aq_enq msg, rc=%d", rc);
		return rc;
	}

	inl_dev->rq_refs++;
	rq->inl_dev_ref = true;
	return 0;
}
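
/*
 * Reference semantics (sketch): the first ethdev RQ to call
 * roc_nix_inl_dev_rq_get() programs the inline device RQ using its own
 * pool attributes; later callers only take a reference. The paired
 * roc_nix_inl_dev_rq_put() below drops a reference and disables the
 * inline device RQ when the last one goes away.
 */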
int
roc_nix_inl_dev_rq_put(struct roc_nix_rq *rq)
{
	struct idev_cfg *idev = idev_get_cfg();
	struct nix_inl_dev *inl_dev;
	struct roc_nix_rq *inl_rq;
	struct dev *dev;
	int rc;

	if (idev == NULL)
		return 0;

	if (!rq->inl_dev_ref)
		return 0;

	inl_dev = idev->nix_inl_dev;
	/* Inline device should be there if we have ref */
	if (!inl_dev) {
		plt_err("Failed to find inline device with refs");
		return -EFAULT;
	}

	rq->inl_dev_ref = false;
	inl_dev->rq_refs--;
	if (inl_dev->rq_refs)
		return 0;

	dev = &inl_dev->dev;
	inl_rq = &inl_dev->rq;
	/* There are no more references, disable RQ */
	rc = nix_rq_ena_dis(dev, inl_rq, false);
	if (rc)
		plt_err("Failed to disable inline device rq, rc=%d", rc);

	roc_npa_aura_drop_set(inl_rq->aura_handle, 0, false);
	if (inl_rq->spb_ena)
		roc_npa_aura_drop_set(inl_rq->spb_aura_handle, 0, false);

	/* Flush NIX LF for CN10K */
	nix_rq_vwqe_flush(rq, inl_dev->vwqe_interval);

	return rc;
}
uint64_t
roc_nix_inl_dev_rq_limit_get(void)
{
	struct idev_cfg *idev = idev_get_cfg();
	struct nix_inl_dev *inl_dev;
	struct roc_nix_rq *inl_rq;

	if (!idev || !idev->nix_inl_dev)
		return 0;

	inl_dev = idev->nix_inl_dev;
	if (!inl_dev->rq_refs)
		return 0;

	inl_rq = &inl_dev->rq;
	return roc_npa_aura_op_limit_get(inl_rq->aura_handle);
}
void
roc_nix_inb_mode_set(struct roc_nix *roc_nix, bool use_inl_dev)
{
	struct nix *nix = roc_nix_to_nix_priv(roc_nix);

	/* Info used by NPC flow rule add */
	nix->inb_inl_dev = use_inl_dev;
}
void
roc_nix_inl_outb_soft_exp_poll_switch(struct roc_nix *roc_nix, bool poll)
{
	struct nix *nix = roc_nix_to_nix_priv(roc_nix);
	struct idev_cfg *idev = idev_get_cfg();
	struct nix_inl_dev *inl_dev;
	uint16_t ring_idx, i;

	if (!idev || !idev->nix_inl_dev)
		return;

	inl_dev = idev->nix_inl_dev;

	for (i = 0; i < nix->outb_se_ring_cnt; i++) {
		ring_idx = nix->outb_se_ring_base + i;

		if (poll)
			plt_bitmap_set(inl_dev->soft_exp_ring_bmap, ring_idx);
		else
			plt_bitmap_clear(inl_dev->soft_exp_ring_bmap, ring_idx);
	}

	if (poll)
		soft_exp_consumer_cnt++;
	else
		soft_exp_consumer_cnt--;
}
bool
roc_nix_inb_is_with_inl_dev(struct roc_nix *roc_nix)
{
	struct nix *nix = roc_nix_to_nix_priv(roc_nix);

	return nix->inb_inl_dev;
}
struct roc_nix_rq *
roc_nix_inl_dev_rq(void)
{
	struct idev_cfg *idev = idev_get_cfg();
	struct nix_inl_dev *inl_dev;

	if (idev != NULL) {
		inl_dev = idev->nix_inl_dev;
		if (inl_dev != NULL && inl_dev->rq_refs)
			return &inl_dev->rq;
	}

	return NULL;
}
uint16_t
roc_nix_inl_outb_sso_pffunc_get(struct roc_nix *roc_nix)
{
	struct nix *nix = roc_nix_to_nix_priv(roc_nix);

	return nix->outb_err_sso_pffunc;
}
int
roc_nix_inl_cb_register(roc_nix_inl_sso_work_cb_t cb, void *args)
{
	struct idev_cfg *idev = idev_get_cfg();
	struct nix_inl_dev *inl_dev;

	if (idev == NULL)
		return -EFAULT;

	inl_dev = idev->nix_inl_dev;
	if (!inl_dev)
		return -EFAULT;

	/* Be silent if registration called with same cb and args */
	if (inl_dev->work_cb == cb && inl_dev->cb_args == args)
		return 0;

	/* Don't allow registration again if registered with different cb */
	if (inl_dev->work_cb)
		return -EBUSY;

	inl_dev->work_cb = cb;
	inl_dev->cb_args = args;
	return 0;
}
int
roc_nix_inl_cb_unregister(roc_nix_inl_sso_work_cb_t cb, void *args)
{
	struct idev_cfg *idev = idev_get_cfg();
	struct nix_inl_dev *inl_dev;

	if (idev == NULL)
		return -ENOENT;

	inl_dev = idev->nix_inl_dev;
	if (!inl_dev)
		return -ENOENT;

	if (inl_dev->work_cb != cb || inl_dev->cb_args != args)
		return -EINVAL;

	inl_dev->work_cb = NULL;
	inl_dev->cb_args = NULL;
	return 0;
}
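
/*
 * Usage sketch (my_work_cb/ctx are hypothetical and must match
 * roc_nix_inl_sso_work_cb_t):
 *
 *	rc = roc_nix_inl_cb_register(my_work_cb, ctx);
 *	...
 *	rc = roc_nix_inl_cb_unregister(my_work_cb, ctx);
 *
 * Re-registering the same (cb, args) pair is a silent no-op, while a
 * second registration with a different cb fails with -EBUSY.
 */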
int
roc_nix_inl_inb_tag_update(struct roc_nix *roc_nix, uint32_t tag_const,
			   uint8_t tt)
{
	struct nix *nix = roc_nix_to_nix_priv(roc_nix);
	struct roc_nix_ipsec_cfg cfg;

	/* Be silent if inline inbound not enabled */
	if (!nix->inl_inb_ena)
		return 0;

	memset(&cfg, 0, sizeof(cfg));
	cfg.sa_size = nix->inb_sa_sz;
	cfg.iova = (uintptr_t)nix->inb_sa_base;
	cfg.max_sa = nix->inb_spi_mask + 1;
	cfg.tt = tt;
	cfg.tag_const = tag_const;

	return roc_nix_lf_inl_ipsec_cfg(roc_nix, &cfg, true);
}
int
roc_nix_inl_sa_sync(struct roc_nix *roc_nix, void *sa, bool inb,
		    enum roc_nix_inl_sa_sync_op op)
{
	struct idev_cfg *idev = idev_get_cfg();
	struct nix_inl_dev *inl_dev = NULL;
	struct roc_cpt_lf *outb_lf = NULL;
	union cpt_lf_ctx_reload reload;
	union cpt_lf_ctx_flush flush;
	bool get_inl_lf = true;
	uintptr_t rbase;
	struct nix *nix;

	/* Nothing much to do on cn9k */
	if (roc_model_is_cn9k()) {
		plt_atomic_thread_fence(__ATOMIC_ACQ_REL);
		return 0;
	}

	if (idev)
		inl_dev = idev->nix_inl_dev;

	if (!inl_dev && roc_nix == NULL)
		return -EINVAL;

	if (roc_nix) {
		nix = roc_nix_to_nix_priv(roc_nix);
		outb_lf = nix->cpt_lf_base;
		if (inb && !nix->inb_inl_dev)
			get_inl_lf = false;
	}

	if (inb && get_inl_lf) {
		outb_lf = NULL;
		if (inl_dev && inl_dev->attach_cptlf)
			outb_lf = &inl_dev->cpt_lf;
	}

	if (outb_lf) {
		rbase = outb_lf->rbase;

		flush.u = 0;
		reload.u = 0;
		switch (op) {
		case ROC_NIX_INL_SA_OP_FLUSH_INVAL:
			flush.s.inval = 1;
			/* fall through */
		case ROC_NIX_INL_SA_OP_FLUSH:
			flush.s.cptr = ((uintptr_t)sa) >> 7;
			plt_write64(flush.u, rbase + CPT_LF_CTX_FLUSH);
			break;
		case ROC_NIX_INL_SA_OP_RELOAD:
			reload.s.cptr = ((uintptr_t)sa) >> 7;
			plt_write64(reload.u, rbase + CPT_LF_CTX_RELOAD);
			break;
		default:
			return -EINVAL;
		}
		return 0;
	}
	plt_err("Could not get CPT LF for SA sync");
	return -ENOTSUP;
}
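
/*
 * The ">> 7" above converts an SA pointer to a CPTR value: the CTX cache
 * operates on 128-byte lines, so the low 7 bits are dropped. SA bases
 * are allocated with ROC_NIX_INL_SA_BASE_ALIGN, which is expected to
 * provide at least this alignment.
 */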
int
roc_nix_inl_ctx_write(struct roc_nix *roc_nix, void *sa_dptr, void *sa_cptr,
		      bool inb, uint16_t sa_len)
{
	struct idev_cfg *idev = idev_get_cfg();
	struct nix_inl_dev *inl_dev = NULL;
	struct roc_cpt_lf *outb_lf = NULL;
	union cpt_lf_ctx_flush flush;
	bool get_inl_lf = true;
	uintptr_t rbase;
	struct nix *nix;
	int rc;

	/* Nothing much to do on cn9k */
	if (roc_model_is_cn9k()) {
		plt_atomic_thread_fence(__ATOMIC_ACQ_REL);
		return 0;
	}

	if (idev)
		inl_dev = idev->nix_inl_dev;

	if (!inl_dev && roc_nix == NULL)
		return -EINVAL;

	if (roc_nix) {
		nix = roc_nix_to_nix_priv(roc_nix);
		outb_lf = nix->cpt_lf_base;
		if (inb && !nix->inb_inl_dev)
			get_inl_lf = false;
	}

	if (inb && get_inl_lf) {
		outb_lf = NULL;
		if (inl_dev && inl_dev->attach_cptlf)
			outb_lf = &inl_dev->cpt_lf;
	}

	if (outb_lf) {
		rbase = outb_lf->rbase;

		rc = roc_cpt_ctx_write(outb_lf, sa_dptr, sa_cptr, sa_len);
		if (rc)
			return rc;
		/* Trigger CTX flush to write dirty data back to DRAM */
		flush.u = 0;
		flush.s.cptr = ((uintptr_t)sa_cptr) >> 7;
		plt_write64(flush.u, rbase + CPT_LF_CTX_FLUSH);
		return 0;
	}
	plt_nix_dbg("Could not get CPT LF for CTX write");
	return -ENOTSUP;
}
int
roc_nix_inl_ts_pkind_set(struct roc_nix *roc_nix, bool ts_ena, bool inb_inl_dev)
{
	struct idev_cfg *idev = idev_get_cfg();
	struct nix_inl_dev *inl_dev = NULL;
	void *sa, *sa_base = NULL;
	struct nix *nix = NULL;
	uint16_t max_spi = 0;
	uint8_t pkind;
	uint32_t i;

	if (roc_model_is_cn9k())
		return 0;

	if (!inb_inl_dev && (roc_nix == NULL))
		return -EINVAL;

	if (inb_inl_dev) {
		if ((idev == NULL) || (idev->nix_inl_dev == NULL))
			return 0;
		inl_dev = idev->nix_inl_dev;
	} else {
		nix = roc_nix_to_nix_priv(roc_nix);
		if (!nix->inl_inb_ena)
			return 0;
		sa_base = nix->inb_sa_base;
		max_spi = roc_nix->ipsec_in_max_spi;
	}

	if (inl_dev) {
		if (inl_dev->rq_refs == 0) {
			inl_dev->ts_ena = ts_ena;
			max_spi = inl_dev->ipsec_in_max_spi;
			sa_base = inl_dev->inb_sa_base;
		} else if (inl_dev->ts_ena != ts_ena) {
			if (inl_dev->ts_ena)
				plt_err("Inline device is already configured with TS enable");
			else
				plt_err("Inline device is already configured with TS disable");
			return -ENOTSUP;
		} else {
			return 0;
		}
	}

	pkind = ts_ena ? ROC_IE_OT_CPT_TS_PKIND : ROC_IE_OT_CPT_PKIND;

	sa = (uint8_t *)sa_base;
	if (pkind == ((struct roc_ot_ipsec_inb_sa *)sa)->w0.s.pkind)
		return 0;

	for (i = 0; i < max_spi; i++) {
		sa = ((uint8_t *)sa_base) +
		     (i * ROC_NIX_INL_OT_IPSEC_INB_SA_SZ);
		((struct roc_ot_ipsec_inb_sa *)sa)->w0.s.pkind = pkind;
	}
	return 0;
}
void
roc_nix_inl_dev_lock(void)
{
	struct idev_cfg *idev = idev_get_cfg();

	if (idev != NULL)
		plt_spinlock_lock(&idev->nix_inl_dev_lock);
}
void
roc_nix_inl_dev_unlock(void)
{
	struct idev_cfg *idev = idev_get_cfg();

	if (idev != NULL)
		plt_spinlock_unlock(&idev->nix_inl_dev_lock);
}
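
/*
 * Usage sketch: callers serialize access to the shared inline device,
 * e.g. around probe/teardown checks:
 *
 *	roc_nix_inl_dev_lock();
 *	...inspect or update idev->nix_inl_dev state...
 *	roc_nix_inl_dev_unlock();
 *
 * Both helpers are safe to call before the idev config exists (NULL
 * check above).
 */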