/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(C) 2021 Marvell.
 */

#include <unistd.h>

#include "roc_api.h"
#include "roc_priv.h"

#define NIX_AURA_DROP_PC_DFLT 40

/* Default Rx Config for Inline NIX LF */
#define NIX_INL_LF_RX_CFG                                                      \
        (ROC_NIX_LF_RX_CFG_DROP_RE | ROC_NIX_LF_RX_CFG_L2_LEN_ERR |            \
         ROC_NIX_LF_RX_CFG_IP6_UDP_OPT | ROC_NIX_LF_RX_CFG_DIS_APAD |          \
         ROC_NIX_LF_RX_CFG_CSUM_IL4 | ROC_NIX_LF_RX_CFG_CSUM_OL4 |             \
         ROC_NIX_LF_RX_CFG_LEN_IL4 | ROC_NIX_LF_RX_CFG_LEN_IL3 |               \
         ROC_NIX_LF_RX_CFG_LEN_OL4 | ROC_NIX_LF_RX_CFG_LEN_OL3)

extern uint32_t soft_exp_consumer_cnt;
static bool soft_exp_poll_thread_exit = true;

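/* Fetch the PF function of the inline device, or 0 if no inline
 * device has been probed.
 */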
uint16_t
nix_inl_dev_pffunc_get(void)
{
        struct idev_cfg *idev = idev_get_cfg();
        struct nix_inl_dev *inl_dev;

        if (idev != NULL) {
                inl_dev = idev->nix_inl_dev;
                if (inl_dev)
                        return inl_dev->dev.pf_func;
        }
        return 0;
}

uint16_t
roc_nix_inl_dev_pffunc_get(void)
{
        return nix_inl_dev_pffunc_get();
}

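/* Selftest work callback: stash the received work word into the
 * work_arr slot selected by the low tag bit so that the selftest can
 * verify that both queued works were delivered intact.
 */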
static void
nix_inl_selftest_work_cb(uint64_t *gw, void *args, uint32_t soft_exp_event)
{
        uintptr_t work = gw[1];

        PLT_SET_USED(soft_exp_event);
        *((uintptr_t *)args + (gw[0] & 0x1)) = work;

        plt_atomic_thread_fence(__ATOMIC_ACQ_REL);
}

static int
nix_inl_selftest(void)
{
        struct idev_cfg *idev = idev_get_cfg();
        roc_nix_inl_sso_work_cb_t save_cb;
        static uintptr_t work_arr[2];
        struct nix_inl_dev *inl_dev;
        void *save_cb_args;
        uint64_t add_work0;
        int rc = 0;

        if (idev == NULL)
                return -ENOTSUP;

        inl_dev = idev->nix_inl_dev;
        if (inl_dev == NULL)
                return -ENOTSUP;

        plt_info("Performing nix inl self test");

        /* Save and update cb to test cb */
        save_cb = inl_dev->work_cb;
        save_cb_args = inl_dev->cb_args;
        inl_dev->work_cb = nix_inl_selftest_work_cb;
        inl_dev->cb_args = work_arr;

        plt_atomic_thread_fence(__ATOMIC_ACQ_REL);

#define WORK_MAGIC1 0x335577ff0
#define WORK_MAGIC2 0xdeadbeef0

        /* Add work */
        add_work0 = ((uint64_t)(SSO_TT_ORDERED) << 32) | 0x0;
        roc_store_pair(add_work0, WORK_MAGIC1, inl_dev->sso_base);
        add_work0 = ((uint64_t)(SSO_TT_ORDERED) << 32) | 0x1;
        roc_store_pair(add_work0, WORK_MAGIC2, inl_dev->sso_base);

        /* Wait for the work IRQ handler to deliver both works */
        plt_delay_ms(10000);

        /* Check if we got expected work */
        if (work_arr[0] != WORK_MAGIC1 || work_arr[1] != WORK_MAGIC2) {
                plt_err("Failed to get expected work, [0]=%p [1]=%p",
                        (void *)work_arr[0], (void *)work_arr[1]);
                rc = -EFAULT;
                goto exit;
        }

        plt_info("Work, [0]=%p [1]=%p", (void *)work_arr[0],
                 (void *)work_arr[1]);

exit:
        /* Restore original cb */
        inl_dev->work_cb = save_cb;
        inl_dev->cb_args = save_cb_args;
        return rc;
}

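/* Request AF to flush the CPT CTX cache so that any cached SA
 * context is written back before the backing memory is reused.
 */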
static int
nix_inl_cpt_ctx_cache_sync(struct nix_inl_dev *inl_dev)
{
        struct mbox *mbox = (&inl_dev->dev)->mbox;
        struct msg_req *req;

        req = mbox_alloc_msg_cpt_ctx_cache_sync(mbox);
        if (req == NULL)
                return -ENOSPC;

        return mbox_process(mbox);
}

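/* Enable or disable inline inbound IPsec processing on the NIX LF.
 * On enable, the mailbox request carries the inbound SA table base,
 * the SA count and size (both as powers of two), the max frame size
 * and the SSO tag type to use for IPsec work.
 */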
static int
nix_inl_nix_ipsec_cfg(struct nix_inl_dev *inl_dev, bool ena)
{
        struct nix_inline_ipsec_lf_cfg *lf_cfg;
        struct mbox *mbox = (&inl_dev->dev)->mbox;
        uint64_t max_sa;
        uint32_t sa_w;

        lf_cfg = mbox_alloc_msg_nix_inline_ipsec_lf_cfg(mbox);
        if (lf_cfg == NULL)
                return -ENOSPC;

        if (ena) {
                max_sa = inl_dev->inb_spi_mask + 1;
                sa_w = plt_log2_u32(max_sa);

                lf_cfg->enable = 1;
                lf_cfg->sa_base_addr = (uintptr_t)inl_dev->inb_sa_base;
                lf_cfg->ipsec_cfg1.sa_idx_w = sa_w;
                /* CN9K SA size is different */
                if (roc_model_is_cn9k())
                        lf_cfg->ipsec_cfg0.lenm1_max = NIX_CN9K_MAX_HW_FRS - 1;
                else
                        lf_cfg->ipsec_cfg0.lenm1_max = NIX_RPM_MAX_HW_FRS - 1;
                lf_cfg->ipsec_cfg1.sa_idx_max = max_sa - 1;
                lf_cfg->ipsec_cfg0.sa_pow2_size =
                        plt_log2_u32(inl_dev->inb_sa_sz);

                lf_cfg->ipsec_cfg0.tag_const = 0;
                lf_cfg->ipsec_cfg0.tt = SSO_TT_ORDERED;
        } else {
                lf_cfg->enable = 0;
        }

        return mbox_process(mbox);
}

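/* Bring up the CPT LF used by the inline device. This LF only
 * submits control opcodes (e.g. SA writes), so the default queue
 * depth is sufficient.
 */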
static int
nix_inl_cpt_setup(struct nix_inl_dev *inl_dev)
{
        struct roc_cpt_lf *lf = &inl_dev->cpt_lf;
        struct dev *dev = &inl_dev->dev;
        uint8_t eng_grpmask;
        int rc;

        if (!inl_dev->attach_cptlf)
                return 0;

        /* Alloc CPT LF */
        eng_grpmask = (1ULL << ROC_CPT_DFLT_ENG_GRP_SE |
                       1ULL << ROC_CPT_DFLT_ENG_GRP_SE_IE |
                       1ULL << ROC_CPT_DFLT_ENG_GRP_AE);
        rc = cpt_lfs_alloc(dev, eng_grpmask, RVU_BLOCK_ADDR_CPT0, false);
        if (rc) {
                plt_err("Failed to alloc CPT LF resources, rc=%d", rc);
                return rc;
        }

        /* Setup CPT LF for submitting control opcode */
        lf = &inl_dev->cpt_lf;
        lf->nb_desc = 0; /* Set to default */
        lf->dev = &inl_dev->dev;
        lf->msixoff = inl_dev->cpt_msixoff;
        lf->pci_dev = inl_dev->pci_dev;

        rc = cpt_lf_init(lf);
        if (rc) {
                plt_err("Failed to initialize CPT LF, rc=%d", rc);
                goto lf_free;
        }

        roc_cpt_iq_enable(lf);
        return 0;
lf_free:
        rc |= cpt_lfs_free(dev);
        return rc;
}

static int
nix_inl_cpt_release(struct nix_inl_dev *inl_dev)
{
        struct roc_cpt_lf *lf = &inl_dev->cpt_lf;
        struct dev *dev = &inl_dev->dev;
        int rc;

        if (!inl_dev->attach_cptlf)
                return 0;

        /* Cleanup CPT LF queue */
        cpt_lf_fini(lf);

        /* Free LF resources */
        rc = cpt_lfs_free(dev);
        if (rc) {
                plt_err("Failed to free CPT LF resources, rc=%d", rc);
                return rc;
        }

        /* Detach LF */
        rc = cpt_lfs_detach(dev);
        if (rc)
                plt_err("Failed to detach CPT LF, rc=%d", rc);
        return rc;
}

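/* Bring up SSO for the inline device: one HWS (work slot) and one
 * HWGRP, an XAQ aura sized from the in-unit entry count, the
 * error/work IRQs, and finally the HWS<->HWGRP link.
 */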
static int
nix_inl_sso_setup(struct nix_inl_dev *inl_dev)
{
        struct sso_lf_alloc_rsp *sso_rsp;
        struct dev *dev = &inl_dev->dev;
        uint16_t hwgrp[1] = {0};
        int rc;

        /* Alloc SSOW LF */
        rc = sso_lf_alloc(dev, SSO_LF_TYPE_HWS, 1, NULL);
        if (rc) {
                plt_err("Failed to alloc SSO HWS, rc=%d", rc);
                return rc;
        }

        /* Alloc HWGRP LF */
        rc = sso_lf_alloc(dev, SSO_LF_TYPE_HWGRP, 1, (void **)&sso_rsp);
        if (rc) {
                plt_err("Failed to alloc SSO HWGRP, rc=%d", rc);
                goto free_ssow;
        }

        inl_dev->xaq_buf_size = sso_rsp->xaq_buf_size;
        inl_dev->xae_waes = sso_rsp->xaq_wq_entries;
        inl_dev->iue = sso_rsp->in_unit_entries;

        inl_dev->nb_xae = inl_dev->iue;
        rc = sso_hwgrp_init_xaq_aura(dev, &inl_dev->xaq, inl_dev->nb_xae,
                                     inl_dev->xae_waes, inl_dev->xaq_buf_size,
                                     1);
        if (rc) {
                plt_err("Failed to alloc SSO XAQ aura, rc=%d", rc);
                goto free_sso;
        }

        /* Setup xaq for hwgrps */
        rc = sso_hwgrp_alloc_xaq(dev, inl_dev->xaq.aura_handle, 1);
        if (rc) {
                plt_err("Failed to setup hwgrp xaq aura, rc=%d", rc);
                goto destroy_pool;
        }

        /* Register SSO, SSOW error and work irq's */
        rc = nix_inl_sso_register_irqs(inl_dev);
        if (rc) {
                plt_err("Failed to register sso irq's, rc=%d", rc);
                goto release_xaq;
        }

        /* Setup hwgrp->hws link */
        sso_hws_link_modify(0, inl_dev->ssow_base, NULL, hwgrp, 1, true);

        /* Enable HWGRP */
        plt_write64(0x1, inl_dev->sso_base + SSO_LF_GGRP_QCTL);

        return 0;

release_xaq:
        sso_hwgrp_release_xaq(&inl_dev->dev, 1);
destroy_pool:
        sso_hwgrp_free_xaq_aura(dev, &inl_dev->xaq, 0);
free_sso:
        sso_lf_free(dev, SSO_LF_TYPE_HWGRP, 1);
free_ssow:
        sso_lf_free(dev, SSO_LF_TYPE_HWS, 1);
        return rc;
}

static int
nix_inl_sso_release(struct nix_inl_dev *inl_dev)
{
        uint16_t hwgrp[1] = {0};

        /* Disable HWGRP */
        plt_write64(0, inl_dev->sso_base + SSO_LF_GGRP_QCTL);

        /* Unregister SSO/SSOW IRQ's */
        nix_inl_sso_unregister_irqs(inl_dev);

        /* Unlink hwgrp from hws */
        sso_hws_link_modify(0, inl_dev->ssow_base, NULL, hwgrp, 1, false);

        /* Release XAQ aura */
        sso_hwgrp_release_xaq(&inl_dev->dev, 1);

        /* Free SSO, SSOW LF's */
        sso_lf_free(&inl_dev->dev, SSO_LF_TYPE_HWS, 1);
        sso_lf_free(&inl_dev->dev, SSO_LF_TYPE_HWGRP, 1);

        /* Free the XAQ aura */
        sso_hwgrp_free_xaq_aura(&inl_dev->dev, &inl_dev->xaq, 0);

        return 0;
}

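/* Bring up the inline NIX LF: allocate it with one RQ per port (or a
 * single RQ when channel masking is used), register its IRQs, and
 * size/initialize the inbound SA table to cover the SPI range before
 * pointing the LF at it.
 */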
static int
nix_inl_nix_setup(struct nix_inl_dev *inl_dev)
{
        uint32_t ipsec_in_min_spi = inl_dev->ipsec_in_min_spi;
        uint32_t ipsec_in_max_spi = inl_dev->ipsec_in_max_spi;
        struct dev *dev = &inl_dev->dev;
        struct mbox *mbox = dev->mbox;
        struct nix_lf_alloc_rsp *rsp;
        struct nix_lf_alloc_req *req;
        struct nix_hw_info *hw_info;
        struct roc_nix_rq *rqs;
        uint64_t max_sa, i;
        size_t inb_sa_sz;
        void *sa;
        int rc = -ENOSPC;

        max_sa = plt_align32pow2(ipsec_in_max_spi - ipsec_in_min_spi + 1);

        /* Alloc NIX LF needed for single RQ */
        req = mbox_alloc_msg_nix_lf_alloc(mbox);
        if (req == NULL)
                return rc;

        /* We will have per-port RQ if it is not with channel masking */
        req->rq_cnt = inl_dev->nb_rqs;
        req->sq_cnt = 1;
        req->cq_cnt = 1;
        /* XQESZ is W16 */
        req->xqe_sz = NIX_XQESZ_W16;
        /* RSS size does not matter as this RQ is only for UCAST_IPSEC action */
        req->rss_sz = ROC_NIX_RSS_RETA_SZ_64;
        req->rss_grps = ROC_NIX_RSS_GRPS;
        req->npa_func = idev_npa_pffunc_get();
        req->sso_func = dev->pf_func;
        req->rx_cfg = NIX_INL_LF_RX_CFG;
        req->flags = NIX_LF_RSS_TAG_LSB_AS_ADDER;

        if (roc_errata_nix_has_no_drop_re())
                req->rx_cfg &= ~ROC_NIX_LF_RX_CFG_DROP_RE;

        rc = mbox_process_msg(mbox, (void *)&rsp);
        if (rc) {
                plt_err("Failed to alloc lf, rc=%d", rc);
                return rc;
        }

        inl_dev->lf_tx_stats = rsp->lf_tx_stats;
        inl_dev->lf_rx_stats = rsp->lf_rx_stats;
        inl_dev->qints = rsp->qints;
        inl_dev->cints = rsp->cints;

        /* Get VWQE info if supported */
        if (roc_model_is_cn10k()) {
                mbox_alloc_msg_nix_get_hw_info(mbox);
                rc = mbox_process_msg(mbox, (void *)&hw_info);
                if (rc) {
                        plt_err("Failed to get HW info, rc=%d", rc);
                        goto lf_free;
                }
                inl_dev->vwqe_interval = hw_info->vwqe_delay;
        }

        /* Register nix interrupts */
        rc = nix_inl_nix_register_irqs(inl_dev);
        if (rc) {
                plt_err("Failed to register nix irq's, rc=%d", rc);
                goto lf_free;
        }

        /* CN9K SA is different */
        if (roc_model_is_cn9k())
                inb_sa_sz = ROC_NIX_INL_ONF_IPSEC_INB_SA_SZ;
        else
                inb_sa_sz = ROC_NIX_INL_OT_IPSEC_INB_SA_SZ;

        /* Alloc contiguous memory for Inbound SA's */
        inl_dev->inb_sa_sz = inb_sa_sz;
        inl_dev->inb_spi_mask = max_sa - 1;
        inl_dev->inb_sa_base = plt_zmalloc(inb_sa_sz * max_sa,
                                           ROC_NIX_INL_SA_BASE_ALIGN);
        if (!inl_dev->inb_sa_base) {
                plt_err("Failed to allocate memory for Inbound SA");
                rc = -ENOMEM;
                goto unregister_irqs;
        }

        if (roc_model_is_cn10k()) {
                for (i = 0; i < max_sa; i++) {
                        sa = ((uint8_t *)inl_dev->inb_sa_base) +
                             (i * inb_sa_sz);
                        roc_ot_ipsec_inb_sa_init(sa, true);
                }
        }

        /* Setup device specific inb SA table */
        rc = nix_inl_nix_ipsec_cfg(inl_dev, true);
        if (rc) {
                plt_err("Failed to setup NIX Inbound SA conf, rc=%d", rc);
                goto free_mem;
        }

        /* Allocate memory for RQ's */
        rqs = plt_zmalloc(sizeof(struct roc_nix_rq) * PLT_MAX_ETHPORTS, 0);
        if (!rqs) {
                plt_err("Failed to allocate memory for RQ's");
                rc = -ENOMEM;
                goto free_mem;
        }
        inl_dev->rqs = rqs;

        return 0;

free_mem:
        plt_free(inl_dev->inb_sa_base);
        inl_dev->inb_sa_base = NULL;
unregister_irqs:
        nix_inl_nix_unregister_irqs(inl_dev);
lf_free:
        mbox_alloc_msg_nix_lf_free(mbox);
        rc |= mbox_process(mbox);
        return rc;
}

static int
nix_inl_nix_release(struct nix_inl_dev *inl_dev)
{
        struct dev *dev = &inl_dev->dev;
        struct mbox *mbox = dev->mbox;
        struct nix_lf_free_req *req;
        struct ndc_sync_op *ndc_req;
        int rc;

        /* Disable Inbound processing */
        rc = nix_inl_nix_ipsec_cfg(inl_dev, false);
        if (rc)
                plt_err("Failed to disable Inbound IPSec, rc=%d", rc);

        /* Sync NDC-NIX for LF */
        ndc_req = mbox_alloc_msg_ndc_sync_op(mbox);
        if (ndc_req == NULL)
                return -ENOSPC;
        ndc_req->nix_lf_rx_sync = 1;
        rc = mbox_process(mbox);
        if (rc)
                plt_err("Error on NDC-NIX-RX LF sync, rc %d", rc);

        /* Unregister IRQs */
        nix_inl_nix_unregister_irqs(inl_dev);

        /* By default all associated mcam rules are deleted */
        req = mbox_alloc_msg_nix_lf_free(mbox);
        if (req == NULL)
                return -ENOSPC;

        rc = mbox_process(mbox);
        if (rc)
                return rc;

        plt_free(inl_dev->rqs);
        plt_free(inl_dev->inb_sa_base);
        inl_dev->rqs = NULL;
        inl_dev->inb_sa_base = NULL;
        return 0;
}

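/* Attach NIX, SSO/SSOW and optionally CPT LFs to this PF func via
 * AF mailbox, then cache their MSIX offsets and BAR2 base addresses.
 */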
static int
nix_inl_lf_attach(struct nix_inl_dev *inl_dev)
{
        struct msix_offset_rsp *msix_rsp;
        struct dev *dev = &inl_dev->dev;
        struct mbox *mbox = dev->mbox;
        struct rsrc_attach_req *req;
        uint64_t nix_blkaddr;
        int rc = -ENOSPC;

        req = mbox_alloc_msg_attach_resources(mbox);
        if (req == NULL)
                return rc;
        req->modify = true;
        /* Attach 1 NIXLF, SSO HWS and SSO HWGRP */
        req->nixlf = true;
        req->ssow = 1;
        req->sso = 1;
        if (inl_dev->attach_cptlf) {
                req->cptlfs = 1;
                req->cpt_blkaddr = RVU_BLOCK_ADDR_CPT0;
        }

        rc = mbox_process(dev->mbox);
        if (rc)
                return rc;

        /* Get MSIX vector offsets */
        mbox_alloc_msg_msix_offset(mbox);
        rc = mbox_process_msg(dev->mbox, (void **)&msix_rsp);
        if (rc)
                return rc;

        inl_dev->nix_msixoff = msix_rsp->nix_msixoff;
        inl_dev->ssow_msixoff = msix_rsp->ssow_msixoff[0];
        inl_dev->sso_msixoff = msix_rsp->sso_msixoff[0];
        inl_dev->cpt_msixoff = msix_rsp->cptlf_msixoff[0];

        nix_blkaddr = nix_get_blkaddr(dev);
        inl_dev->is_nix1 = (nix_blkaddr == RVU_BLOCK_ADDR_NIX1);

        /* Update base addresses for LF's */
        inl_dev->nix_base = dev->bar2 + (nix_blkaddr << 20);
        inl_dev->ssow_base = dev->bar2 + (RVU_BLOCK_ADDR_SSOW << 20);
        inl_dev->sso_base = dev->bar2 + (RVU_BLOCK_ADDR_SSO << 20);
        inl_dev->cpt_base = dev->bar2 + (RVU_BLOCK_ADDR_CPT0 << 20);

        return 0;
}

static int
nix_inl_lf_detach(struct nix_inl_dev *inl_dev)
{
        struct dev *dev = &inl_dev->dev;
        struct mbox *mbox = dev->mbox;
        struct rsrc_detach_req *req;
        int rc = -ENOSPC;

        req = mbox_alloc_msg_detach_resources(mbox);
        if (req == NULL)
                return rc;
        req->partial = true;
        req->nixlf = true;
        req->ssow = true;
        req->sso = true;
        req->cptlfs = !!inl_dev->attach_cptlf;

        return mbox_process(dev->mbox);
}

static int
nix_inl_dev_wait_for_sso_empty(struct nix_inl_dev *inl_dev)
{
        uintptr_t sso_base = inl_dev->sso_base;
        int wait_ms = 3000;

        while (wait_ms > 0) {
                /* Break when empty */
                if (!plt_read64(sso_base + SSO_LF_GGRP_XAQ_CNT) &&
                    !plt_read64(sso_base + SSO_LF_GGRP_AQ_CNT))
                        return 0;
                plt_delay_us(1000);
                wait_ms -= 1;
        }

        return -ETIMEDOUT;
}

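/* Grow the SSO XAQ aura when a new packet pool (aura_handle) starts
 * feeding the inline device, or shrink back to the in-unit entry
 * count when called with aura_handle == 0. RQs are disabled and the
 * HWGRP drained before the aura is reinitialized.
 */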
int
roc_nix_inl_dev_xaq_realloc(uint64_t aura_handle)
{
        struct idev_cfg *idev = idev_get_cfg();
        struct nix_inl_dev *inl_dev;
        int rc, i;

        if (idev == NULL)
                return 0;

        inl_dev = idev->nix_inl_dev;
        /* Nothing to do if no inline device */
        if (!inl_dev)
                return 0;

        if (!aura_handle) {
                inl_dev->nb_xae = inl_dev->iue;
                goto no_pool;
        }

        /* Check if aura is already considered */
        for (i = 0; i < inl_dev->pkt_pools_cnt; i++) {
                if (inl_dev->pkt_pools[i] == aura_handle)
                        return 0;
        }

no_pool:
        /* Disable RQ if enabled */
        for (i = 0; i < inl_dev->nb_rqs; i++) {
                if (!inl_dev->rqs[i].inl_dev_refs)
                        continue;
                rc = nix_rq_ena_dis(&inl_dev->dev, &inl_dev->rqs[i], false);
                if (rc) {
                        plt_err("Failed to disable inline dev RQ %d, rc=%d", i,
                                rc);
                        return rc;
                }
        }

        /* Wait for events to be removed */
        rc = nix_inl_dev_wait_for_sso_empty(inl_dev);
        if (rc) {
                plt_err("Timeout waiting for inline device event cleanup");
                goto exit;
        }

        /* Disable HWGRP */
        plt_write64(0, inl_dev->sso_base + SSO_LF_GGRP_QCTL);

        if (aura_handle) {
                inl_dev->pkt_pools_cnt++;
                inl_dev->pkt_pools =
                        plt_realloc(inl_dev->pkt_pools,
                                    sizeof(uint64_t) * inl_dev->pkt_pools_cnt, 0);
                if (!inl_dev->pkt_pools)
                        inl_dev->pkt_pools_cnt = 0;
                else
                        inl_dev->pkt_pools[inl_dev->pkt_pools_cnt - 1] = aura_handle;
                inl_dev->nb_xae += roc_npa_aura_op_limit_get(aura_handle);
        }

        /* Realloc XAQ aura */
        rc = sso_hwgrp_init_xaq_aura(&inl_dev->dev, &inl_dev->xaq,
                                     inl_dev->nb_xae, inl_dev->xae_waes,
                                     inl_dev->xaq_buf_size, 1);
        if (rc) {
                plt_err("Failed to reinitialize xaq aura, rc=%d", rc);
                return rc;
        }

        /* Setup xaq for hwgrps */
        rc = sso_hwgrp_alloc_xaq(&inl_dev->dev, inl_dev->xaq.aura_handle, 1);
        if (rc) {
                plt_err("Failed to setup hwgrp xaq aura, rc=%d", rc);
                return rc;
        }

        /* Enable HWGRP */
        plt_write64(0x1, inl_dev->sso_base + SSO_LF_GGRP_QCTL);

exit:
        /* Re-enable RQ */
        for (i = 0; i < inl_dev->nb_rqs; i++) {
                if (!inl_dev->rqs[i].inl_dev_refs)
                        continue;

                rc = nix_rq_ena_dis(&inl_dev->dev, &inl_dev->rqs[i], true);
                if (rc)
                        plt_err("Failed to enable inline dev RQ %d, rc=%d", i,
                                rc);
        }

        return rc;
}

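/* Drain one outbound soft-expiry error ring: for each entry between
 * the consumer and producer positions, decode the SA pointer, hand it
 * to the registered work callback, clear the entry and advance the
 * consumer index in the ring header word.
 */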
static void
inl_outb_soft_exp_poll(struct nix_inl_dev *inl_dev, uint32_t ring_idx)
{
        union roc_ot_ipsec_err_ring_head head;
        struct roc_ot_ipsec_outb_sa *sa;
        uint16_t head_l, tail_l;
        uint64_t *ring_base;
        uint32_t port_id;

        port_id = ring_idx / ROC_NIX_SOFT_EXP_PER_PORT_MAX_RINGS;
        ring_base = PLT_PTR_CAST(inl_dev->sa_soft_exp_ring[ring_idx]);
        if (!ring_base) {
                plt_err("Invalid soft exp ring base");
                return;
        }

        head.u64 = __atomic_load_n(ring_base, __ATOMIC_ACQUIRE);
        head_l = head.s.head_pos;
        tail_l = head.s.tail_pos;

        while (tail_l != head_l) {
                union roc_ot_ipsec_err_ring_entry entry;
                int poll_counter = 0;

                while (poll_counter++ <
                       ROC_NIX_INL_SA_SOFT_EXP_ERR_MAX_POLL_COUNT) {
                        plt_delay_us(20);
                        entry.u64 = __atomic_load_n(ring_base + tail_l + 1,
                                                    __ATOMIC_ACQUIRE);
                        if (likely(entry.u64))
                                break;
                }

                entry.u64 = plt_be_to_cpu_64(entry.u64);
                sa = (struct roc_ot_ipsec_outb_sa *)(((uint64_t)entry.s.data1
                                                      << 32) |
                                                     (entry.s.data0 << 7));

                if (sa != NULL) {
                        uint64_t tmp = ~(uint32_t)0x0;

                        inl_dev->work_cb(&tmp, sa, (port_id << 8) | 0x1);
                        __atomic_store_n(ring_base + tail_l + 1, 0ULL,
                                         __ATOMIC_RELAXED);
                        __atomic_add_fetch((uint32_t *)ring_base, 1,
                                           __ATOMIC_ACQ_REL);
                } else {
                        plt_err("Invalid SA");
                }

                tail_l++;
        }
}

static void *
nix_inl_outb_poll_thread(void *args)
{
        struct nix_inl_dev *inl_dev = args;
        uint32_t poll_freq;
        uint32_t i;
        bool bit;

        poll_freq = inl_dev->soft_exp_poll_freq;

        while (!soft_exp_poll_thread_exit) {
                if (soft_exp_consumer_cnt) {
                        for (i = 0; i < ROC_NIX_INL_MAX_SOFT_EXP_RNGS; i++) {
                                bit = plt_bitmap_get(
                                        inl_dev->soft_exp_ring_bmap, i);
                                if (bit)
                                        inl_outb_soft_exp_poll(inl_dev, i);
                        }
                }
                usleep(poll_freq);
        }

        return NULL;
}

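/* Allocate the soft-expiry ring bitmap and ring pointer array and
 * spawn the control thread that polls the active rings.
 */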
static int
nix_inl_outb_poll_thread_setup(struct nix_inl_dev *inl_dev)
{
        struct plt_bitmap *bmap;
        size_t bmap_sz;
        uint8_t *mem;
        uint32_t i;
        int rc;

        /* Allocate a bitmap that the poll thread uses to get the port_id
         * corresponding to the inl_outb_soft_exp_ring
         */
        bmap_sz =
                plt_bitmap_get_memory_footprint(ROC_NIX_INL_MAX_SOFT_EXP_RNGS);
        mem = plt_zmalloc(bmap_sz, PLT_CACHE_LINE_SIZE);
        if (mem == NULL) {
                plt_err("soft expiry ring bmap alloc failed");
                rc = -ENOMEM;
                goto exit;
        }

        bmap = plt_bitmap_init(ROC_NIX_INL_MAX_SOFT_EXP_RNGS, mem, bmap_sz);
        if (!bmap) {
                plt_err("soft expiry ring bmap init failed");
                plt_free(mem);
                rc = -ENOMEM;
                goto exit;
        }

        inl_dev->soft_exp_ring_bmap_mem = mem;
        inl_dev->soft_exp_ring_bmap = bmap;
        inl_dev->sa_soft_exp_ring = plt_zmalloc(
                ROC_NIX_INL_MAX_SOFT_EXP_RNGS * sizeof(uint64_t), 0);
        if (!inl_dev->sa_soft_exp_ring) {
                plt_err("soft expiry ring pointer array alloc failed");
                plt_bitmap_free(bmap);
                plt_free(mem);
                rc = -ENOMEM;
                goto exit;
        }

        for (i = 0; i < ROC_NIX_INL_MAX_SOFT_EXP_RNGS; i++)
                plt_bitmap_clear(inl_dev->soft_exp_ring_bmap, i);

        soft_exp_consumer_cnt = 0;
        soft_exp_poll_thread_exit = false;
        inl_dev->soft_exp_poll_freq = 100;
        rc = plt_ctrl_thread_create(&inl_dev->soft_exp_poll_thread,
                                    "OUTB_SOFT_EXP_POLL_THREAD", NULL,
                                    nix_inl_outb_poll_thread, inl_dev);
        if (rc) {
                plt_bitmap_free(inl_dev->soft_exp_ring_bmap);
                plt_free(inl_dev->soft_exp_ring_bmap_mem);
        }

exit:
        return rc;
}

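/* Probe and fully bring up the inline device: base device init, LF
 * attach, NIX/SSO/CPT setup, optional soft-expiry poll thread and
 * optional selftest. On failure, completed stages are unwound in
 * reverse order.
 *
 * A caller (e.g. a PCI probe handler) might use it roughly as:
 *
 *	struct roc_nix_inl_dev *inl_dev;
 *
 *	inl_dev = plt_zmalloc(sizeof(*inl_dev), 0);
 *	inl_dev->pci_dev = pci_dev;
 *	inl_dev->attach_cptlf = true;
 *	rc = roc_nix_inl_dev_init(inl_dev);
 *
 * leaving the remaining fields zero for defaults.
 */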
int
roc_nix_inl_dev_init(struct roc_nix_inl_dev *roc_inl_dev)
{
        struct plt_pci_device *pci_dev;
        struct nix_inl_dev *inl_dev;
        struct idev_cfg *idev;
        int rc;

        pci_dev = roc_inl_dev->pci_dev;

        /* Skip probe if already done */
        idev = idev_get_cfg();
        if (idev == NULL)
                return -ENOTSUP;

        if (idev->nix_inl_dev) {
                plt_info("Skipping device %s, inline device already probed",
                         pci_dev->name);
                return -EEXIST;
        }

        PLT_STATIC_ASSERT(sizeof(struct nix_inl_dev) <= ROC_NIX_INL_MEM_SZ);

        inl_dev = (struct nix_inl_dev *)roc_inl_dev->reserved;
        memset(inl_dev, 0, sizeof(*inl_dev));

        inl_dev->pci_dev = pci_dev;
        inl_dev->ipsec_in_min_spi = roc_inl_dev->ipsec_in_min_spi;
        inl_dev->ipsec_in_max_spi = roc_inl_dev->ipsec_in_max_spi;
        inl_dev->selftest = roc_inl_dev->selftest;
        inl_dev->is_multi_channel = roc_inl_dev->is_multi_channel;
        inl_dev->channel = roc_inl_dev->channel;
        inl_dev->chan_mask = roc_inl_dev->chan_mask;
        inl_dev->attach_cptlf = roc_inl_dev->attach_cptlf;
        inl_dev->wqe_skip = roc_inl_dev->wqe_skip;
        inl_dev->spb_drop_pc = NIX_AURA_DROP_PC_DFLT;
        inl_dev->lpb_drop_pc = NIX_AURA_DROP_PC_DFLT;
        inl_dev->set_soft_exp_poll = roc_inl_dev->set_soft_exp_poll;
        inl_dev->nb_rqs = inl_dev->is_multi_channel ? 1 : PLT_MAX_ETHPORTS;

        if (roc_inl_dev->spb_drop_pc)
                inl_dev->spb_drop_pc = roc_inl_dev->spb_drop_pc;
        if (roc_inl_dev->lpb_drop_pc)
                inl_dev->lpb_drop_pc = roc_inl_dev->lpb_drop_pc;

        /* Initialize base device */
        rc = dev_init(&inl_dev->dev, pci_dev);
        if (rc) {
                plt_err("Failed to init roc device");
                goto error;
        }

        /* Attach LF resources */
        rc = nix_inl_lf_attach(inl_dev);
        if (rc) {
                plt_err("Failed to attach LF resources, rc=%d", rc);
                goto dev_cleanup;
        }

        /* Setup NIX LF */
        rc = nix_inl_nix_setup(inl_dev);
        if (rc)
                goto lf_detach;

        /* Setup SSO LF */
        rc = nix_inl_sso_setup(inl_dev);
        if (rc)
                goto nix_release;

        /* Setup CPT LF */
        rc = nix_inl_cpt_setup(inl_dev);
        if (rc)
                goto sso_release;

        if (inl_dev->set_soft_exp_poll) {
                rc = nix_inl_outb_poll_thread_setup(inl_dev);
                if (rc)
                        goto cpt_release;
        }

        /* Perform selftest if asked for */
        if (inl_dev->selftest) {
                rc = nix_inl_selftest();
                if (rc)
                        goto cpt_release;
        }

        idev->nix_inl_dev = inl_dev;

        return 0;

cpt_release:
        rc |= nix_inl_cpt_release(inl_dev);
sso_release:
        rc |= nix_inl_sso_release(inl_dev);
nix_release:
        rc |= nix_inl_nix_release(inl_dev);
lf_detach:
        rc |= nix_inl_lf_detach(inl_dev);
dev_cleanup:
        rc |= dev_fini(&inl_dev->dev, pci_dev);
error:
        return rc;
}

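/* Tear down the inline device in reverse order of init: stop the
 * soft-expiry poll thread, flush the CPT CTX cache, release SSO and
 * NIX resources, detach the LFs and finalize the base device.
 */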
int
roc_nix_inl_dev_fini(struct roc_nix_inl_dev *roc_inl_dev)
{
        struct plt_pci_device *pci_dev;
        struct nix_inl_dev *inl_dev;
        struct idev_cfg *idev;
        int rc;

        idev = idev_get_cfg();
        if (idev == NULL)
                return 0;

        if (!idev->nix_inl_dev ||
            PLT_PTR_DIFF(roc_inl_dev->reserved, idev->nix_inl_dev))
                return 0;

        inl_dev = idev->nix_inl_dev;
        pci_dev = inl_dev->pci_dev;

        if (inl_dev->set_soft_exp_poll) {
                soft_exp_poll_thread_exit = true;
                pthread_join(inl_dev->soft_exp_poll_thread, NULL);
                plt_bitmap_free(inl_dev->soft_exp_ring_bmap);
                plt_free(inl_dev->soft_exp_ring_bmap_mem);
                plt_free(inl_dev->sa_soft_exp_ring);
        }

        /* Flush Inbound CTX cache entries */
        nix_inl_cpt_ctx_cache_sync(inl_dev);

        /* Release SSO */
        rc = nix_inl_sso_release(inl_dev);

        /* Release NIX */
        rc |= nix_inl_nix_release(inl_dev);

        /* Detach LF's */
        rc |= nix_inl_lf_detach(inl_dev);

        /* Cleanup mbox */
        rc |= dev_fini(&inl_dev->dev, pci_dev);

        idev->nix_inl_dev = NULL;
        return rc;
}