/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(C) 2021 Marvell.
 */

#include "roc_api.h"
#include "roc_priv.h"

#define NIX_AURA_DROP_PC_DFLT 40

/* Default Rx Config for Inline NIX LF */
#define NIX_INL_LF_RX_CFG                                                      \
        (ROC_NIX_LF_RX_CFG_DROP_RE | ROC_NIX_LF_RX_CFG_L2_LEN_ERR |           \
         ROC_NIX_LF_RX_CFG_IP6_UDP_OPT | ROC_NIX_LF_RX_CFG_DIS_APAD |         \
         ROC_NIX_LF_RX_CFG_CSUM_IL4 | ROC_NIX_LF_RX_CFG_CSUM_OL4 |            \
         ROC_NIX_LF_RX_CFG_LEN_IL4 | ROC_NIX_LF_RX_CFG_LEN_IL3 |              \
         ROC_NIX_LF_RX_CFG_LEN_OL4 | ROC_NIX_LF_RX_CFG_LEN_OL3)
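
/* Fetch the RVU PF_FUNC of the inline device. Returns 0 when no inline
 * device has been probed.
 */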
uint16_t
nix_inl_dev_pffunc_get(void)
{
        struct idev_cfg *idev = idev_get_cfg();
        struct nix_inl_dev *inl_dev;

        if (idev != NULL) {
                inl_dev = idev->nix_inl_dev;
                if (inl_dev)
                        return inl_dev->dev.pf_func;
        }
        return 0;
}

uint16_t
roc_nix_inl_dev_pffunc_get(void)
{
        return nix_inl_dev_pffunc_get();
}
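
/* Self test mechanics: two work entries carrying known magic words are
 * injected into the inline device's SSO HWGRP. The SSO work interrupt
 * handler is expected to deliver them to nix_inl_selftest_work_cb(),
 * which files each work word into the work_arr[] slot picked by bit 0
 * of the tag. The test then checks that both magic words landed in the
 * expected slots.
 */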
static void
nix_inl_selftest_work_cb(uint64_t *gw, void *args)
{
        uintptr_t work = gw[1];

        /* Slot selected by tag LSB in gw[0] */
        *((uintptr_t *)args + (gw[0] & 0x1)) = work;

        plt_atomic_thread_fence(__ATOMIC_ACQ_REL);
}

static int
nix_inl_selftest(void)
{
        struct idev_cfg *idev = idev_get_cfg();
        roc_nix_inl_sso_work_cb_t save_cb;
        static uintptr_t work_arr[2];
        struct nix_inl_dev *inl_dev;
        void *save_cb_args;
        uint64_t add_work0;
        int rc = 0;

        if (idev == NULL)
                return -ENOTSUP;

        inl_dev = idev->nix_inl_dev;
        if (inl_dev == NULL)
                return -ENOTSUP;

        plt_info("Performing nix inl self test");

        /* Save and update cb to test cb */
        save_cb = inl_dev->work_cb;
        save_cb_args = inl_dev->cb_args;
        inl_dev->work_cb = nix_inl_selftest_work_cb;
        inl_dev->cb_args = work_arr;

        plt_atomic_thread_fence(__ATOMIC_ACQ_REL);

#define WORK_MAGIC1 0x335577ff0
#define WORK_MAGIC2 0xdeadbeef0

        /* Add two work entries with distinct tags and magic words */
        add_work0 = ((uint64_t)(SSO_TT_ORDERED) << 32) | 0x0;
        roc_store_pair(add_work0, WORK_MAGIC1, inl_dev->sso_base);
        add_work0 = ((uint64_t)(SSO_TT_ORDERED) << 32) | 0x1;
        roc_store_pair(add_work0, WORK_MAGIC2, inl_dev->sso_base);

        /* Wait for work to be delivered by the work interrupt handler */
        plt_delay_ms(10000);

        /* Check if we got expected work */
        if (work_arr[0] != WORK_MAGIC1 || work_arr[1] != WORK_MAGIC2) {
                plt_err("Failed to get expected work, [0]=%p [1]=%p",
                        (void *)work_arr[0], (void *)work_arr[1]);
                rc = -EFAULT;
                goto exit;
        }

        plt_info("Work, [0]=%p [1]=%p", (void *)work_arr[0],
                 (void *)work_arr[1]);
exit:
        /* Restore cb */
        inl_dev->work_cb = save_cb;
        inl_dev->cb_args = save_cb_args;
        return rc;
}
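
/* Sync the CPT CTX cache so no stale inbound SA data is held on-chip,
 * e.g. before the inline device is torn down.
 */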
static int
nix_inl_cpt_ctx_cache_sync(struct nix_inl_dev *inl_dev)
{
        struct mbox *mbox = (&inl_dev->dev)->mbox;
        struct msg_req *req;

        req = mbox_alloc_msg_cpt_ctx_cache_sync(mbox);
        if (req == NULL)
                return -ENOSPC;

        return mbox_process(mbox);
}
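
/* Program the NIX LF inbound IPsec config, i.e. the base address and
 * geometry of the contiguous inbound SA table. Hardware locates an SA
 * as sa_base + (sa_idx << sa_pow2_size), so both the table depth and
 * the per-SA size are powers of two. For example, min_spi=0 and
 * max_spi=1023 yield max_sa=1024 and sa_idx_w=10 (illustrative values,
 * not defaults).
 */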
static int
nix_inl_nix_ipsec_cfg(struct nix_inl_dev *inl_dev, bool ena)
{
        struct nix_inline_ipsec_lf_cfg *lf_cfg;
        struct mbox *mbox = (&inl_dev->dev)->mbox;
        uint64_t max_sa;
        uint32_t sa_w;

        lf_cfg = mbox_alloc_msg_nix_inline_ipsec_lf_cfg(mbox);
        if (lf_cfg == NULL)
                return -ENOSPC;

        if (ena) {
                max_sa = inl_dev->inb_spi_mask + 1;
                sa_w = plt_log2_u32(max_sa);

                lf_cfg->enable = 1;
                lf_cfg->sa_base_addr = (uintptr_t)inl_dev->inb_sa_base;
                lf_cfg->ipsec_cfg1.sa_idx_w = sa_w;
                /* CN9K SA size is different */
                if (roc_model_is_cn9k())
                        lf_cfg->ipsec_cfg0.lenm1_max = NIX_CN9K_MAX_HW_FRS - 1;
                else
                        lf_cfg->ipsec_cfg0.lenm1_max = NIX_RPM_MAX_HW_FRS - 1;
                lf_cfg->ipsec_cfg1.sa_idx_max = max_sa - 1;
                lf_cfg->ipsec_cfg0.sa_pow2_size =
                        plt_log2_u32(inl_dev->inb_sa_sz);

                lf_cfg->ipsec_cfg0.tag_const = 0;
                lf_cfg->ipsec_cfg0.tt = SSO_TT_ORDERED;
        } else {
                lf_cfg->enable = 0;
        }

        return mbox_process(mbox);
}
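
/* CPT LF setup: allocate one LF with the default SE, SE_IE and AE
 * engine groups and enable its instruction queue. The inline device
 * uses this LF for submitting control opcodes (e.g. SA manipulation).
 */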
static int
nix_inl_cpt_setup(struct nix_inl_dev *inl_dev)
{
        struct roc_cpt_lf *lf = &inl_dev->cpt_lf;
        struct dev *dev = &inl_dev->dev;
        uint8_t eng_grpmask;
        int rc;

        if (!inl_dev->attach_cptlf)
                return 0;

        /* Alloc CPT LF */
        eng_grpmask = (1ULL << ROC_CPT_DFLT_ENG_GRP_SE |
                       1ULL << ROC_CPT_DFLT_ENG_GRP_SE_IE |
                       1ULL << ROC_CPT_DFLT_ENG_GRP_AE);
        rc = cpt_lfs_alloc(dev, eng_grpmask, RVU_BLOCK_ADDR_CPT0, false);
        if (rc) {
                plt_err("Failed to alloc CPT LF resources, rc=%d", rc);
                return rc;
        }

        /* Setup CPT LF for submitting control opcode */
        lf = &inl_dev->cpt_lf;
        lf->lf_id = 0;
        lf->nb_desc = 0; /* Set to default */
        lf->dev = &inl_dev->dev;
        lf->msixoff = inl_dev->cpt_msixoff;
        lf->pci_dev = inl_dev->pci_dev;

        rc = cpt_lf_init(lf);
        if (rc) {
                plt_err("Failed to initialize CPT LF, rc=%d", rc);
                goto lf_free;
        }

        roc_cpt_iq_enable(lf);
        return 0;
lf_free:
        rc |= cpt_lfs_free(dev);
        return rc;
}
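
/* Teardown mirror of nix_inl_cpt_setup(): stop the LF queue, then free
 * and detach the CPT LF resources.
 */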
static int
nix_inl_cpt_release(struct nix_inl_dev *inl_dev)
{
        struct roc_cpt_lf *lf = &inl_dev->cpt_lf;
        struct dev *dev = &inl_dev->dev;
        int rc;

        if (!inl_dev->attach_cptlf)
                return 0;

        /* Cleanup CPT LF queue */
        cpt_lf_fini(lf);

        /* Free LF resources */
        rc = cpt_lfs_free(dev);
        if (rc) {
                plt_err("Failed to free CPT LF resources, rc=%d", rc);
                return rc;
        }

        /* Detach LF */
        rc = cpt_lfs_detach(dev);
        if (rc) {
                plt_err("Failed to detach CPT LF, rc=%d", rc);
                return rc;
        }

        return 0;
}
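
/* SSO setup: a single HWS (work slot) plus a single HWGRP (work group)
 * is sufficient to absorb inbound inline IPsec events. XAQ buffers
 * back the HWGRP's in-flight work; the initial count is derived from
 * the in-unit entries reported in the HWGRP alloc response.
 */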
static int
nix_inl_sso_setup(struct nix_inl_dev *inl_dev)
{
        struct sso_lf_alloc_rsp *sso_rsp;
        struct dev *dev = &inl_dev->dev;
        uint16_t hwgrp[1] = {0};
        int rc;

        /* Alloc SSOW LF */
        rc = sso_lf_alloc(dev, SSO_LF_TYPE_HWS, 1, NULL);
        if (rc) {
                plt_err("Failed to alloc SSO HWS, rc=%d", rc);
                return rc;
        }

        /* Alloc HWGRP LF */
        rc = sso_lf_alloc(dev, SSO_LF_TYPE_HWGRP, 1, (void **)&sso_rsp);
        if (rc) {
                plt_err("Failed to alloc SSO HWGRP, rc=%d", rc);
                goto free_ssow;
        }

        inl_dev->xaq_buf_size = sso_rsp->xaq_buf_size;
        inl_dev->xae_waes = sso_rsp->xaq_wq_entries;
        inl_dev->iue = sso_rsp->in_unit_entries;

        inl_dev->nb_xae = inl_dev->iue;
        rc = sso_hwgrp_init_xaq_aura(dev, &inl_dev->xaq, inl_dev->nb_xae,
                                     inl_dev->xae_waes, inl_dev->xaq_buf_size,
                                     1);
        if (rc) {
                plt_err("Failed to alloc SSO XAQ aura, rc=%d", rc);
                goto free_sso;
        }

        /* Setup xaq for hwgrps */
        rc = sso_hwgrp_alloc_xaq(dev, inl_dev->xaq.aura_handle, 1);
        if (rc) {
                plt_err("Failed to setup hwgrp xaq aura, rc=%d", rc);
                goto destroy_pool;
        }

        /* Register SSO, SSOW error and work irq's */
        rc = nix_inl_sso_register_irqs(inl_dev);
        if (rc) {
                plt_err("Failed to register sso irq's, rc=%d", rc);
                goto release_xaq;
        }

        /* Setup hwgrp->hws link */
        sso_hws_link_modify(0, inl_dev->ssow_base, NULL, hwgrp, 1, true);

        /* Enable HWGRP */
        plt_write64(0x1, inl_dev->sso_base + SSO_LF_GGRP_QCTL);

        return 0;

release_xaq:
        sso_hwgrp_release_xaq(&inl_dev->dev, 1);
destroy_pool:
        sso_hwgrp_free_xaq_aura(dev, &inl_dev->xaq, 0);
free_sso:
        sso_lf_free(dev, SSO_LF_TYPE_HWGRP, 1);
free_ssow:
        sso_lf_free(dev, SSO_LF_TYPE_HWS, 1);
        return rc;
}
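
/* Undo nix_inl_sso_setup() in reverse order: quiesce the HWGRP,
 * unregister IRQs, unlink the HWS, then release the XAQ buffers and
 * free the LFs and the XAQ aura.
 */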
static int
nix_inl_sso_release(struct nix_inl_dev *inl_dev)
{
        uint16_t hwgrp[1] = {0};

        /* Disable HWGRP */
        plt_write64(0, inl_dev->sso_base + SSO_LF_GGRP_QCTL);

        /* Unregister SSO/SSOW IRQ's */
        nix_inl_sso_unregister_irqs(inl_dev);

        /* Unlink hws */
        sso_hws_link_modify(0, inl_dev->ssow_base, NULL, hwgrp, 1, false);

        /* Release XAQ aura */
        sso_hwgrp_release_xaq(&inl_dev->dev, 1);

        /* Free SSO, SSOW LF's */
        sso_lf_free(&inl_dev->dev, SSO_LF_TYPE_HWS, 1);
        sso_lf_free(&inl_dev->dev, SSO_LF_TYPE_HWGRP, 1);

        /* Free the XAQ aura */
        sso_hwgrp_free_xaq_aura(&inl_dev->dev, &inl_dev->xaq, 0);

        return 0;
}
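
/* NIX LF setup: allocate a minimal LF (single RQ) plus a contiguous
 * inbound SA table covering the configured SPI range. The table depth
 * is rounded up to a power of two so an SPI maps to an SA index with a
 * plain mask, i.e. sa_idx = spi & inb_spi_mask. For example, min_spi=16
 * and max_spi=273 cover 258 SPIs, rounded up to max_sa=512 with
 * inb_spi_mask=0x1ff (illustrative values).
 */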
static int
nix_inl_nix_setup(struct nix_inl_dev *inl_dev)
{
        uint32_t ipsec_in_min_spi = inl_dev->ipsec_in_min_spi;
        uint32_t ipsec_in_max_spi = inl_dev->ipsec_in_max_spi;
        struct dev *dev = &inl_dev->dev;
        struct mbox *mbox = dev->mbox;
        struct nix_lf_alloc_rsp *rsp;
        struct nix_lf_alloc_req *req;
        struct nix_hw_info *hw_info;
        uint64_t max_sa, i;
        size_t inb_sa_sz;
        void *sa;
        int rc = -ENOSPC;

        max_sa = plt_align32pow2(ipsec_in_max_spi - ipsec_in_min_spi + 1);

        /* Alloc NIX LF needed for single RQ */
        req = mbox_alloc_msg_nix_lf_alloc(mbox);
        if (req == NULL)
                return rc;

        req->rq_cnt = 1;
        req->sq_cnt = 1;
        req->cq_cnt = 1;
        req->xqe_sz = NIX_XQESZ_W16;
        /* RSS size does not matter as this RQ is only for UCAST_IPSEC action */
        req->rss_sz = ROC_NIX_RSS_RETA_SZ_64;
        req->rss_grps = ROC_NIX_RSS_GRPS;
        req->npa_func = idev_npa_pffunc_get();
        req->sso_func = dev->pf_func;
        req->rx_cfg = NIX_INL_LF_RX_CFG;
        req->flags = NIX_LF_RSS_TAG_LSB_AS_ADDER;

        if (roc_model_is_cn10ka_a0() || roc_model_is_cnf10ka_a0() ||
            roc_model_is_cnf10kb_a0())
                req->rx_cfg &= ~ROC_NIX_LF_RX_CFG_DROP_RE;

        rc = mbox_process_msg(mbox, (void *)&rsp);
        if (rc) {
                plt_err("Failed to alloc lf, rc=%d", rc);
                return rc;
        }

        inl_dev->lf_tx_stats = rsp->lf_tx_stats;
        inl_dev->lf_rx_stats = rsp->lf_rx_stats;
        inl_dev->qints = rsp->qints;
        inl_dev->cints = rsp->cints;

        /* Get VWQE info if supported */
        if (roc_model_is_cn10k()) {
                mbox_alloc_msg_nix_get_hw_info(mbox);
                rc = mbox_process_msg(mbox, (void *)&hw_info);
                if (rc) {
                        plt_err("Failed to get HW info, rc=%d", rc);
                        goto lf_free;
                }
                inl_dev->vwqe_interval = hw_info->vwqe_delay;
        }

        /* Register nix interrupts */
        rc = nix_inl_nix_register_irqs(inl_dev);
        if (rc) {
                plt_err("Failed to register nix irq's, rc=%d", rc);
                goto lf_free;
        }

        /* CN9K SA is different */
        if (roc_model_is_cn9k())
                inb_sa_sz = ROC_NIX_INL_ONF_IPSEC_INB_SA_SZ;
        else
                inb_sa_sz = ROC_NIX_INL_OT_IPSEC_INB_SA_SZ;

        /* Alloc contiguous memory for Inbound SA's */
        inl_dev->inb_sa_sz = inb_sa_sz;
        inl_dev->inb_spi_mask = max_sa - 1;
        inl_dev->inb_sa_base = plt_zmalloc(inb_sa_sz * max_sa,
                                           ROC_NIX_INL_SA_BASE_ALIGN);
        if (!inl_dev->inb_sa_base) {
                plt_err("Failed to allocate memory for Inbound SA");
                rc = -ENOMEM;
                goto unregister_irqs;
        }

        if (roc_model_is_cn10k()) {
                for (i = 0; i < max_sa; i++) {
                        sa = ((uint8_t *)inl_dev->inb_sa_base) +
                             (i * inb_sa_sz);
                        roc_ot_ipsec_inb_sa_init(sa, true);
                }
        }
        /* Setup device specific inb SA table */
        rc = nix_inl_nix_ipsec_cfg(inl_dev, true);
        if (rc) {
                plt_err("Failed to setup NIX Inbound SA conf, rc=%d", rc);
                goto free_mem;
        }

        return 0;
free_mem:
        plt_free(inl_dev->inb_sa_base);
        inl_dev->inb_sa_base = NULL;
unregister_irqs:
        nix_inl_nix_unregister_irqs(inl_dev);
lf_free:
        mbox_alloc_msg_nix_lf_free(mbox);
        rc |= mbox_process(mbox);
        return rc;
}
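
/* Teardown mirror of nix_inl_nix_setup(): disable inbound IPsec
 * processing, sync the NIX NDC cache for this LF, unregister IRQs and
 * free the LF (the AF also deletes MCAM rules still targeting it).
 */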
static int
nix_inl_nix_release(struct nix_inl_dev *inl_dev)
{
        struct dev *dev = &inl_dev->dev;
        struct mbox *mbox = dev->mbox;
        struct nix_lf_free_req *req;
        struct ndc_sync_op *ndc_req;
        int rc = -ENOSPC;

        /* Disable Inbound processing */
        rc = nix_inl_nix_ipsec_cfg(inl_dev, false);
        if (rc)
                plt_err("Failed to disable Inbound IPSec, rc=%d", rc);

        /* Sync NDC-NIX for LF */
        ndc_req = mbox_alloc_msg_ndc_sync_op(mbox);
        if (ndc_req == NULL)
                return rc;
        ndc_req->nix_lf_rx_sync = 1;
        rc = mbox_process(mbox);
        if (rc)
                plt_err("Error on NDC-NIX-RX LF sync, rc %d", rc);

        /* Unregister IRQs */
        nix_inl_nix_unregister_irqs(inl_dev);

        /* By default all associated mcam rules are deleted */
        req = mbox_alloc_msg_nix_lf_free(mbox);
        if (req == NULL)
                return -ENOSPC;

        return mbox_process(mbox);
}
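
/* Attach one NIX LF, one SSO HWS, one SSO HWGRP and optionally one CPT
 * LF to this PF, then record each block's MSIX vector offset and BAR2
 * base. RVU exposes each block's LF region at bar2 + (blkaddr << 20).
 */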
static int
nix_inl_lf_attach(struct nix_inl_dev *inl_dev)
{
        struct msix_offset_rsp *msix_rsp;
        struct dev *dev = &inl_dev->dev;
        struct mbox *mbox = dev->mbox;
        struct rsrc_attach_req *req;
        uint64_t nix_blkaddr;
        int rc = -ENOSPC;

        req = mbox_alloc_msg_attach_resources(mbox);
        if (req == NULL)
                return rc;
        req->modify = true;
        /* Attach 1 NIXLF, SSO HWS and SSO HWGRP */
        req->nixlf = true;
        req->ssow = 1;
        req->sso = 1;
        if (inl_dev->attach_cptlf) {
                req->cptlfs = 1;
                req->cpt_blkaddr = RVU_BLOCK_ADDR_CPT0;
        }

        rc = mbox_process(dev->mbox);
        if (rc)
                return rc;

        /* Get MSIX vector offsets */
        mbox_alloc_msg_msix_offset(mbox);
        rc = mbox_process_msg(dev->mbox, (void **)&msix_rsp);
        if (rc)
                return rc;

        inl_dev->nix_msixoff = msix_rsp->nix_msixoff;
        inl_dev->ssow_msixoff = msix_rsp->ssow_msixoff[0];
        inl_dev->sso_msixoff = msix_rsp->sso_msixoff[0];
        inl_dev->cpt_msixoff = msix_rsp->cptlf_msixoff[0];

        nix_blkaddr = nix_get_blkaddr(dev);
        inl_dev->is_nix1 = (nix_blkaddr == RVU_BLOCK_ADDR_NIX1);

        /* Update base addresses for LF's */
        inl_dev->nix_base = dev->bar2 + (nix_blkaddr << 20);
        inl_dev->ssow_base = dev->bar2 + (RVU_BLOCK_ADDR_SSOW << 20);
        inl_dev->sso_base = dev->bar2 + (RVU_BLOCK_ADDR_SSO << 20);
        inl_dev->cpt_base = dev->bar2 + (RVU_BLOCK_ADDR_CPT0 << 20);

        return 0;
}

static int
nix_inl_lf_detach(struct nix_inl_dev *inl_dev)
{
        struct dev *dev = &inl_dev->dev;
        struct mbox *mbox = dev->mbox;
        struct rsrc_detach_req *req;
        int rc = -ENOSPC;

        req = mbox_alloc_msg_detach_resources(mbox);
        if (req == NULL)
                return rc;
        req->partial = true;
        req->nixlf = true;
        req->ssow = true;
        req->sso = true;
        req->cptlfs = !!inl_dev->attach_cptlf;

        return mbox_process(dev->mbox);
}
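
/* Poll the HWGRP XAQ and admission queue counters until both drain to
 * zero, giving in-flight inline events a bounded time to complete.
 */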
static int
nix_inl_dev_wait_for_sso_empty(struct nix_inl_dev *inl_dev)
{
        uintptr_t sso_base = inl_dev->sso_base;
        int wait_ms = 3000;

        while (wait_ms > 0) {
                /* Break when empty */
                if (!plt_read64(sso_base + SSO_LF_GGRP_XAQ_CNT) &&
                    !plt_read64(sso_base + SSO_LF_GGRP_AQ_CNT))
                        return 0;

                plt_delay_us(1000);
                wait_ms -= 1;
        }

        return -ETIMEDOUT;
}
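
/* Called when an Rx packet pool starts or stops being shared with the
 * inline device. Every buffer of such a pool can turn into an SSO
 * event, so the XAQ aura is resized to nb_xae = in-unit entries plus
 * the sum of the aura limits of all registered packet pools. The RQ
 * and HWGRP are paused around the realloc so that events are not lost.
 */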
int
roc_nix_inl_dev_xaq_realloc(uint64_t aura_handle)
{
        struct idev_cfg *idev = idev_get_cfg();
        struct nix_inl_dev *inl_dev;
        int rc = 0, i;

        if (idev == NULL)
                return 0;

        inl_dev = idev->nix_inl_dev;
        /* Nothing to do if no inline device */
        if (!inl_dev)
                return 0;

        if (!aura_handle) {
                inl_dev->nb_xae = inl_dev->iue;
                goto no_pool;
        }

        /* Check if aura is already considered */
        for (i = 0; i < inl_dev->pkt_pools_cnt; i++) {
                if (inl_dev->pkt_pools[i] == aura_handle)
                        return 0;
        }

no_pool:
        /* Disable RQ if enabled */
        if (inl_dev->rq_refs) {
                rc = nix_rq_ena_dis(&inl_dev->dev, &inl_dev->rq, false);
                if (rc) {
                        plt_err("Failed to disable inline dev RQ, rc=%d", rc);
                        return rc;
                }
        }

        /* Wait for events to be removed */
        rc = nix_inl_dev_wait_for_sso_empty(inl_dev);
        if (rc) {
                plt_err("Timeout waiting for inline device event cleanup");
                goto exit;
        }

        /* Disable HWGRP */
        plt_write64(0, inl_dev->sso_base + SSO_LF_GGRP_QCTL);

        inl_dev->pkt_pools_cnt++;
        inl_dev->pkt_pools =
                plt_realloc(inl_dev->pkt_pools,
                            sizeof(uint64_t) * inl_dev->pkt_pools_cnt, 0);
        if (!inl_dev->pkt_pools)
                inl_dev->pkt_pools_cnt = 0;
        else
                inl_dev->pkt_pools[inl_dev->pkt_pools_cnt - 1] = aura_handle;
        inl_dev->nb_xae += roc_npa_aura_op_limit_get(aura_handle);

        /* Realloc XAQ aura */
        rc = sso_hwgrp_init_xaq_aura(&inl_dev->dev, &inl_dev->xaq,
                                     inl_dev->nb_xae, inl_dev->xae_waes,
                                     inl_dev->xaq_buf_size, 1);
        if (rc) {
                plt_err("Failed to reinitialize xaq aura, rc=%d", rc);
                return rc;
        }

        /* Setup xaq for hwgrps */
        rc = sso_hwgrp_alloc_xaq(&inl_dev->dev, inl_dev->xaq.aura_handle, 1);
        if (rc) {
                plt_err("Failed to setup hwgrp xaq aura, rc=%d", rc);
                return rc;
        }

        /* Enable HWGRP */
        plt_write64(0x1, inl_dev->sso_base + SSO_LF_GGRP_QCTL);

exit:
        /* Re-enable RQ */
        if (inl_dev->rq_refs) {
                rc = nix_rq_ena_dis(&inl_dev->dev, &inl_dev->rq, true);
                if (rc)
                        plt_err("Failed to enable inline dev RQ, rc=%d", rc);
        }

        return rc;
}
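
/* Typical bring-up from a caller, as an illustrative sketch (assumes
 * `pci_dev` was already probed by the caller; field values are
 * examples, not defaults). The roc_nix_inl_dev struct embeds all
 * device state in its reserved area, so it must stay valid until
 * roc_nix_inl_dev_fini():
 *
 *	struct roc_nix_inl_dev inl_dev;
 *
 *	memset(&inl_dev, 0, sizeof(inl_dev));
 *	inl_dev.pci_dev = pci_dev;
 *	inl_dev.ipsec_in_min_spi = 0;
 *	inl_dev.ipsec_in_max_spi = 1023;
 *	inl_dev.attach_cptlf = true;
 *	rc = roc_nix_inl_dev_init(&inl_dev);
 *	if (rc)
 *		return rc;
 *	...
 *	rc = roc_nix_inl_dev_fini(&inl_dev);
 */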
int
roc_nix_inl_dev_init(struct roc_nix_inl_dev *roc_inl_dev)
{
        struct plt_pci_device *pci_dev;
        struct nix_inl_dev *inl_dev;
        struct idev_cfg *idev;
        int rc;

        pci_dev = roc_inl_dev->pci_dev;

        /* Skip probe if already done */
        idev = idev_get_cfg();
        if (idev == NULL)
                return -ENOTSUP;

        if (idev->nix_inl_dev) {
                plt_info("Skipping device %s, inline device already probed",
                         pci_dev->name);
                return -EEXIST;
        }

        PLT_STATIC_ASSERT(sizeof(struct nix_inl_dev) <= ROC_NIX_INL_MEM_SZ);

        inl_dev = (struct nix_inl_dev *)roc_inl_dev->reserved;
        memset(inl_dev, 0, sizeof(*inl_dev));

        inl_dev->pci_dev = pci_dev;
        inl_dev->ipsec_in_min_spi = roc_inl_dev->ipsec_in_min_spi;
        inl_dev->ipsec_in_max_spi = roc_inl_dev->ipsec_in_max_spi;
        inl_dev->selftest = roc_inl_dev->selftest;
        inl_dev->is_multi_channel = roc_inl_dev->is_multi_channel;
        inl_dev->channel = roc_inl_dev->channel;
        inl_dev->chan_mask = roc_inl_dev->chan_mask;
        inl_dev->attach_cptlf = roc_inl_dev->attach_cptlf;
        inl_dev->wqe_skip = roc_inl_dev->wqe_skip;
        inl_dev->spb_drop_pc = NIX_AURA_DROP_PC_DFLT;
        inl_dev->lpb_drop_pc = NIX_AURA_DROP_PC_DFLT;

        if (roc_inl_dev->spb_drop_pc)
                inl_dev->spb_drop_pc = roc_inl_dev->spb_drop_pc;
        if (roc_inl_dev->lpb_drop_pc)
                inl_dev->lpb_drop_pc = roc_inl_dev->lpb_drop_pc;

        /* Initialize base device */
        rc = dev_init(&inl_dev->dev, pci_dev);
        if (rc) {
                plt_err("Failed to init roc device");
                goto error;
        }

        /* Attach LF resources */
        rc = nix_inl_lf_attach(inl_dev);
        if (rc) {
                plt_err("Failed to attach LF resources, rc=%d", rc);
                goto dev_cleanup;
        }

        /* Setup NIX LF */
        rc = nix_inl_nix_setup(inl_dev);
        if (rc)
                goto lf_detach;

        /* Setup SSO LF */
        rc = nix_inl_sso_setup(inl_dev);
        if (rc)
                goto nix_release;

        /* Setup CPT LF */
        rc = nix_inl_cpt_setup(inl_dev);
        if (rc)
                goto sso_release;

        /* Perform selftest if asked for */
        if (inl_dev->selftest) {
                rc = nix_inl_selftest();
                if (rc)
                        goto cpt_release;
        }

        idev->nix_inl_dev = inl_dev;

        return 0;
cpt_release:
        rc |= nix_inl_cpt_release(inl_dev);
sso_release:
        rc |= nix_inl_sso_release(inl_dev);
nix_release:
        rc |= nix_inl_nix_release(inl_dev);
lf_detach:
        rc |= nix_inl_lf_detach(inl_dev);
dev_cleanup:
        rc |= dev_fini(&inl_dev->dev, pci_dev);
error:
        return rc;
}
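
/* Inverse of roc_nix_inl_dev_init(): flush the CPT CTX cache, release
 * SSO and NIX resources, detach LFs, finish the base device and clear
 * the idev reference.
 */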
int
roc_nix_inl_dev_fini(struct roc_nix_inl_dev *roc_inl_dev)
{
        struct plt_pci_device *pci_dev;
        struct nix_inl_dev *inl_dev;
        struct idev_cfg *idev;
        int rc;

        idev = idev_get_cfg();
        if (idev == NULL)
                return 0;

        if (!idev->nix_inl_dev ||
            PLT_PTR_DIFF(roc_inl_dev->reserved, idev->nix_inl_dev))
                return 0;

        inl_dev = idev->nix_inl_dev;
        pci_dev = inl_dev->pci_dev;

        /* Flush Inbound CTX cache entries */
        nix_inl_cpt_ctx_cache_sync(inl_dev);

        /* Release SSO */
        rc = nix_inl_sso_release(inl_dev);

        /* Release NIX */
        rc |= nix_inl_nix_release(inl_dev);

        /* Detach LF's */
        rc |= nix_inl_lf_detach(inl_dev);

        /* Cleanup mbox */
        rc |= dev_fini(&inl_dev->dev, pci_dev);
        if (rc)
                return rc;

        idev->nix_inl_dev = NULL;
        return 0;
}