1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(C) 2021 Marvell.
8 /* Default Rx Config for Inline NIX LF */
/* NOTE(review): per the flag names this enables outer/inner L3/L4
 * length and checksum verification, L2 length-error checking, IPv6
 * UDP-option parsing, disables alignment padding and drops packets
 * with receive errors (DROP_RE). Exact bit semantics are defined by
 * the ROC_NIX_LF_RX_CFG_* macros elsewhere in the tree — confirm there.
 */
9 #define NIX_INL_LF_RX_CFG \
10 (ROC_NIX_LF_RX_CFG_DROP_RE | ROC_NIX_LF_RX_CFG_L2_LEN_ERR | \
11 ROC_NIX_LF_RX_CFG_IP6_UDP_OPT | ROC_NIX_LF_RX_CFG_DIS_APAD | \
12 ROC_NIX_LF_RX_CFG_CSUM_IL4 | ROC_NIX_LF_RX_CFG_CSUM_OL4 | \
13 ROC_NIX_LF_RX_CFG_LEN_IL4 | ROC_NIX_LF_RX_CFG_LEN_IL3 | \
14 ROC_NIX_LF_RX_CFG_LEN_OL4 | ROC_NIX_LF_RX_CFG_LEN_OL3)
/* Return the PF_FUNC of the inline NIX device registered in the
 * per-process idev config.
 * NOTE(review): this view elides several lines; the full source
 * presumably NULL-checks idev and inl_dev before dereferencing and
 * returns a sentinel (e.g. 0) otherwise — confirm against the full file.
 */
17 nix_inl_dev_pffunc_get(void)
19 struct idev_cfg *idev = idev_get_cfg();
20 struct nix_inl_dev *inl_dev;
23 inl_dev = idev->nix_inl_dev;
25 return inl_dev->dev.pf_func;
/* Selftest SSO work callback.
 *
 * gw[1] carries the work pointer; bit 0 of gw[0] (the tag word) selects
 * which slot of the caller-provided two-entry array (args) receives it.
 */
31 nix_inl_selftest_work_cb(uint64_t *gw, void *args)
33 uintptr_t work = gw[1];
/* Store into slot 0 or 1 based on the tag LSB */
35 *((uintptr_t *)args + (gw[0] & 0x1)) = work;
/* Full fence so the polling test thread observes the store above */
37 plt_atomic_thread_fence(__ATOMIC_ACQ_REL);
/* Inline device selftest.
 *
 * Temporarily swaps the device work callback for a test callback,
 * injects two pieces of work (magic payloads) into the SSO via
 * store-pair, and verifies both magics arrive in work_arr[] through
 * the callback. The original callback/args are restored at the end.
 *
 * NOTE(review): this view elides lines — locals such as save_cb_args
 * and add_work0, NULL checks, the wait/delay before checking work_arr,
 * and the return paths are not visible; confirm against the full file.
 */
41 nix_inl_selftest(void)
43 struct idev_cfg *idev = idev_get_cfg();
44 roc_nix_inl_sso_work_cb_t save_cb;
/* static: storage must remain valid while the work callback writes it */
45 static uintptr_t work_arr[2];
46 struct nix_inl_dev *inl_dev;
54 inl_dev = idev->nix_inl_dev;
58 plt_info("Performing nix inl self test");
60 /* Save and update cb to test cb */
61 save_cb = inl_dev->work_cb;
62 save_cb_args = inl_dev->cb_args;
63 inl_dev->work_cb = nix_inl_selftest_work_cb;
64 inl_dev->cb_args = work_arr;
/* Publish the callback swap before injecting any work */
66 plt_atomic_thread_fence(__ATOMIC_ACQ_REL);
68 #define WORK_MAGIC1 0x335577ff0
69 #define WORK_MAGIC2 0xdeadbeef0
/* Add two works with ORDERED tag type; tag LSB picks work_arr slot */
72 add_work0 = ((uint64_t)(SSO_TT_ORDERED) << 32) | 0x0;
73 roc_store_pair(add_work0, WORK_MAGIC1, inl_dev->sso_base);
74 add_work0 = ((uint64_t)(SSO_TT_ORDERED) << 32) | 0x1;
75 roc_store_pair(add_work0, WORK_MAGIC2, inl_dev->sso_base);
79 /* Check if we got expected work */
80 if (work_arr[0] != WORK_MAGIC1 || work_arr[1] != WORK_MAGIC2) {
81 plt_err("Failed to get expected work, [0]=%p [1]=%p",
82 (void *)work_arr[0], (void *)work_arr[1]);
87 plt_info("Work, [0]=%p [1]=%p", (void *)work_arr[0],
/* Restore the saved callback and its argument */
92 inl_dev->work_cb = save_cb;
93 inl_dev->cb_args = save_cb_args;
/* Request a CPT context-cache sync over mbox (flushes cached inbound
 * SA contexts). Returns the result of mbox_process().
 * NOTE(review): the NULL check on the allocated request is elided in
 * this view — confirm against the full file.
 */
98 nix_inl_cpt_ctx_cache_sync(struct nix_inl_dev *inl_dev)
100 struct mbox *mbox = (&inl_dev->dev)->mbox;
103 req = mbox_alloc_msg_cpt_ctx_cache_sync(mbox);
107 return mbox_process(mbox);
/* Enable/disable inline inbound IPsec configuration on the inline NIX
 * LF via the NIX_INLINE_IPSEC_LF_CFG mbox message.
 *
 * Programs the inbound SA table base, SA index width (log2 of
 * ipsec_in_max_spi+1 rounded up to a power of two), max SA index,
 * per-SA size (power of two), tag constant and SSO tag type.
 * Returns the mbox_process() result.
 * NOTE(review): the 'ena' handling/else branch and the NULL check on
 * lf_cfg are elided in this view — confirm against the full file.
 */
111 nix_inl_nix_ipsec_cfg(struct nix_inl_dev *inl_dev, bool ena)
113 struct nix_inline_ipsec_lf_cfg *lf_cfg;
114 struct mbox *mbox = (&inl_dev->dev)->mbox;
117 lf_cfg = mbox_alloc_msg_nix_inline_ipsec_lf_cfg(mbox);
/* SA index width = log2(next pow2 of max SPI + 1) */
122 sa_w = plt_align32pow2(inl_dev->ipsec_in_max_spi + 1);
123 sa_w = plt_log2_u32(sa_w);
126 lf_cfg->sa_base_addr = (uintptr_t)inl_dev->inb_sa_base;
127 lf_cfg->ipsec_cfg1.sa_idx_w = sa_w;
128 /* CN9K SA size is different */
129 if (roc_model_is_cn9k())
130 lf_cfg->ipsec_cfg0.lenm1_max = NIX_CN9K_MAX_HW_FRS - 1;
132 lf_cfg->ipsec_cfg0.lenm1_max = NIX_RPM_MAX_HW_FRS - 1;
133 lf_cfg->ipsec_cfg1.sa_idx_max = inl_dev->ipsec_in_max_spi;
134 lf_cfg->ipsec_cfg0.sa_pow2_size =
135 plt_log2_u32(inl_dev->inb_sa_sz);
137 lf_cfg->ipsec_cfg0.tag_const = 0;
138 lf_cfg->ipsec_cfg0.tt = SSO_TT_ORDERED;
143 return mbox_process(mbox);
/* Allocate and initialize the inline device's CPT LF (used to submit
 * control opcodes), then enable its instruction queue.
 *
 * No-op when attach_cptlf is not requested. On cpt_lf_init() failure
 * the allocated LF resources are freed.
 * NOTE(review): early-return bodies, the rc declaration, and the goto
 * labels for the error path are elided in this view.
 */
147 nix_inl_cpt_setup(struct nix_inl_dev *inl_dev)
149 struct roc_cpt_lf *lf = &inl_dev->cpt_lf;
150 struct dev *dev = &inl_dev->dev;
/* Nothing to do if the CPT LF was not requested */
154 if (!inl_dev->attach_cptlf)
/* Allow SE, SE+IE and AE engine groups for this LF */
158 eng_grpmask = (1ULL << ROC_CPT_DFLT_ENG_GRP_SE |
159 1ULL << ROC_CPT_DFLT_ENG_GRP_SE_IE |
160 1ULL << ROC_CPT_DFLT_ENG_GRP_AE);
161 rc = cpt_lfs_alloc(dev, eng_grpmask, RVU_BLOCK_ADDR_CPT0, false);
163 plt_err("Failed to alloc CPT LF resources, rc=%d", rc);
167 /* Setup CPT LF for submitting control opcode */
168 lf = &inl_dev->cpt_lf;
170 lf->nb_desc = 0; /* Set to default */
171 lf->dev = &inl_dev->dev;
172 lf->msixoff = inl_dev->cpt_msixoff;
173 lf->pci_dev = inl_dev->pci_dev;
175 rc = cpt_lf_init(lf);
177 plt_err("Failed to initialize CPT LF, rc=%d", rc);
/* Enable the CPT instruction queue */
181 roc_cpt_iq_enable(lf);
/* Error unwind: release the LFs allocated above */
184 rc |= cpt_lfs_free(dev);
/* Tear down the inline device's CPT LF: free LF resources and detach
 * the CPT LF from the RVU PF/VF. No-op when attach_cptlf is unset.
 * NOTE(review): the queue-cleanup call under the comment at line 198
 * and the return statements are elided in this view.
 */
191 nix_inl_cpt_release(struct nix_inl_dev *inl_dev)
191 struct roc_cpt_lf *lf = &inl_dev->cpt_lf;
192 struct dev *dev = &inl_dev->dev;
195 if (!inl_dev->attach_cptlf)
198 /* Cleanup CPT LF queue */
201 /* Free LF resources */
202 rc = cpt_lfs_free(dev);
204 plt_err("Failed to free CPT LF resources, rc=%d", rc);
208 rc = cpt_lfs_detach(dev);
210 plt_err("Failed to detach CPT LF, rc=%d", rc);
/* Set up SSO resources for the inline device: one HWS (work slot) and
 * one HWGRP (work group 0), the XAQ aura backing the HWGRP, IRQs, the
 * HWGRP->HWS link, and finally flush-enable on the group.
 *
 * NOTE(review): error checks between steps and the goto labels for the
 * unwind sequence at the bottom are elided in this view.
 */
217 nix_inl_sso_setup(struct nix_inl_dev *inl_dev)
219 struct sso_lf_alloc_rsp *sso_rsp;
220 struct dev *dev = &inl_dev->dev;
221 uint16_t hwgrp[1] = {0};
/* Alloc single SSOW HWS */
226 rc = sso_lf_alloc(dev, SSO_LF_TYPE_HWS, 1, NULL);
228 plt_err("Failed to alloc SSO HWS, rc=%d", rc);
/* Alloc single SSO HWGRP; response carries XAQ geometry */
233 rc = sso_lf_alloc(dev, SSO_LF_TYPE_HWGRP, 1, (void **)&sso_rsp);
235 plt_err("Failed to alloc SSO HWGRP, rc=%d", rc);
239 inl_dev->xaq_buf_size = sso_rsp->xaq_buf_size;
240 inl_dev->xae_waes = sso_rsp->xaq_wq_entries;
241 inl_dev->iue = sso_rsp->in_unit_entries;
/* Size XAQ aura by in-unit entries reported by the HWGRP alloc */
243 xae_cnt = inl_dev->iue;
244 rc = sso_hwgrp_init_xaq_aura(dev, &inl_dev->xaq, xae_cnt,
245 inl_dev->xae_waes, inl_dev->xaq_buf_size,
248 plt_err("Failed to alloc SSO XAQ aura, rc=%d", rc);
252 /* Setup xaq for hwgrps */
253 rc = sso_hwgrp_alloc_xaq(dev, inl_dev->xaq.aura_handle, 1);
255 plt_err("Failed to setup hwgrp xaq aura, rc=%d", rc);
259 /* Register SSO, SSOW error and work irq's */
260 rc = nix_inl_sso_register_irqs(inl_dev);
262 plt_err("Failed to register sso irq's, rc=%d", rc);
266 /* Setup hwgrp->hws link */
267 sso_hws_link_modify(0, inl_dev->ssow_base, NULL, hwgrp, 1, true);
/* Enable flush on the group queue control register */
270 plt_write64(0x1, inl_dev->sso_base + SSO_LF_GGRP_QCTL);
/* Error unwind (labels elided): release XAQ, aura, then both LFs */
275 sso_hwgrp_release_xaq(&inl_dev->dev, 1);
277 sso_hwgrp_free_xaq_aura(dev, &inl_dev->xaq, 0);
279 sso_lf_free(dev, SSO_LF_TYPE_HWGRP, 1);
281 sso_lf_free(dev, SSO_LF_TYPE_HWS, 1);
/* Release SSO resources in reverse of nix_inl_sso_setup(): disable the
 * group queue, unregister IRQs, unlink HWGRP from HWS, release the XAQ
 * from the group, free both LF types, then free the XAQ aura itself.
 * NOTE(review): the return statement is elided in this view.
 */
286 nix_inl_sso_release(struct nix_inl_dev *inl_dev)
288 uint16_t hwgrp[1] = {0};
/* Disable the group queue control before tearing anything down */
291 plt_write64(0, inl_dev->sso_base + SSO_LF_GGRP_QCTL);
293 /* Unregister SSO/SSOW IRQ's */
294 nix_inl_sso_unregister_irqs(inl_dev);
/* Break the hwgrp->hws link set up during init */
297 sso_hws_link_modify(0, inl_dev->ssow_base, NULL, hwgrp, 1, false);
299 /* Release XAQ aura */
300 sso_hwgrp_release_xaq(&inl_dev->dev, 1);
302 /* Free SSO, SSOW LF's */
303 sso_lf_free(&inl_dev->dev, SSO_LF_TYPE_HWS, 1);
304 sso_lf_free(&inl_dev->dev, SSO_LF_TYPE_HWGRP, 1);
306 /* Free the XAQ aura */
307 sso_hwgrp_free_xaq_aura(&inl_dev->dev, &inl_dev->xaq, 0);
/* Set up the inline NIX LF: allocate the LF over mbox, fetch HW info
 * (CN10K), register NIX IRQs, allocate the contiguous inbound SA table,
 * initialize each SA (CN10K) and program the inbound IPsec config.
 *
 * NOTE(review): error checks after mbox calls, some locals (rc, i, sa,
 * inb_sa_sz), the req NULL check, and the goto labels for the unwind
 * sequence at the bottom are elided in this view.
 */
313 nix_inl_nix_setup(struct nix_inl_dev *inl_dev)
315 uint16_t ipsec_in_max_spi = inl_dev->ipsec_in_max_spi;
316 struct dev *dev = &inl_dev->dev;
317 struct mbox *mbox = dev->mbox;
318 struct nix_lf_alloc_rsp *rsp;
319 struct nix_lf_alloc_req *req;
320 struct nix_hw_info *hw_info;
325 /* Alloc NIX LF needed for single RQ */
326 req = mbox_alloc_msg_nix_lf_alloc(mbox);
333 req->xqe_sz = NIX_XQESZ_W16;
334 /* RSS size does not matter as this RQ is only for UCAST_IPSEC action */
335 req->rss_sz = ROC_NIX_RSS_RETA_SZ_64;
336 req->rss_grps = ROC_NIX_RSS_GRPS;
337 req->npa_func = idev_npa_pffunc_get();
338 req->sso_func = dev->pf_func;
339 req->rx_cfg = NIX_INL_LF_RX_CFG;
340 req->flags = NIX_LF_RSS_TAG_LSB_AS_ADDER;
/* DROP_RE is not usable on these A0 steppings; clear it */
342 if (roc_model_is_cn10ka_a0() || roc_model_is_cnf10ka_a0() ||
343 roc_model_is_cnf10kb_a0())
344 req->rx_cfg &= ~ROC_NIX_LF_RX_CFG_DROP_RE;
346 rc = mbox_process_msg(mbox, (void *)&rsp);
348 plt_err("Failed to alloc lf, rc=%d", rc);
/* Record LF capabilities from the alloc response */
352 inl_dev->lf_tx_stats = rsp->lf_tx_stats;
353 inl_dev->lf_rx_stats = rsp->lf_rx_stats;
354 inl_dev->qints = rsp->qints;
355 inl_dev->cints = rsp->cints;
357 /* Get VWQE info if supported */
358 if (roc_model_is_cn10k()) {
359 mbox_alloc_msg_nix_get_hw_info(mbox);
360 rc = mbox_process_msg(mbox, (void *)&hw_info);
362 plt_err("Failed to get HW info, rc=%d", rc);
365 inl_dev->vwqe_interval = hw_info->vwqe_delay;
368 /* Register nix interrupts */
369 rc = nix_inl_nix_register_irqs(inl_dev);
371 plt_err("Failed to register nix irq's, rc=%d", rc);
375 /* CN9K SA is different */
376 if (roc_model_is_cn9k())
377 inb_sa_sz = ROC_NIX_INL_ONF_IPSEC_INB_SA_SZ;
379 inb_sa_sz = ROC_NIX_INL_OT_IPSEC_INB_SA_SZ;
381 /* Alloc contiguous memory for Inbound SA's */
382 inl_dev->inb_sa_sz = inb_sa_sz;
383 inl_dev->inb_sa_base = plt_zmalloc(inb_sa_sz * ipsec_in_max_spi,
384 ROC_NIX_INL_SA_BASE_ALIGN);
385 if (!inl_dev->inb_sa_base) {
386 plt_err("Failed to allocate memory for Inbound SA");
388 goto unregister_irqs;
/* CN10K: pre-initialize every SA entry in the table */
391 if (roc_model_is_cn10k()) {
392 for (i = 0; i < ipsec_in_max_spi; i++) {
393 sa = ((uint8_t *)inl_dev->inb_sa_base) +
395 roc_nix_inl_inb_sa_init(sa);
398 /* Setup device specific inb SA table */
399 rc = nix_inl_nix_ipsec_cfg(inl_dev, true);
401 plt_err("Failed to setup NIX Inbound SA conf, rc=%d", rc);
/* Error unwind (labels elided): free SA table, IRQs, then the LF */
407 plt_free(inl_dev->inb_sa_base);
408 inl_dev->inb_sa_base = NULL;
410 nix_inl_nix_unregister_irqs(inl_dev);
412 mbox_alloc_msg_nix_lf_free(mbox);
413 rc |= mbox_process(mbox);
/* Release the inline NIX LF: disable inbound IPsec processing, sync
 * the NIX RX NDC for this LF, unregister IRQs and free the LF (which
 * also removes associated MCAM rules). Returns the final mbox result.
 * NOTE(review): NULL checks on the mbox requests and the rc
 * declaration are elided in this view.
 */
418 nix_inl_nix_release(struct nix_inl_dev *inl_dev)
420 struct dev *dev = &inl_dev->dev;
421 struct mbox *mbox = dev->mbox;
422 struct nix_lf_free_req *req;
423 struct ndc_sync_op *ndc_req;
426 /* Disable Inbound processing */
427 rc = nix_inl_nix_ipsec_cfg(inl_dev, false);
429 plt_err("Failed to disable Inbound IPSec, rc=%d", rc);
431 /* Sync NDC-NIX for LF */
432 ndc_req = mbox_alloc_msg_ndc_sync_op(mbox);
435 ndc_req->nix_lf_rx_sync = 1;
436 rc = mbox_process(mbox);
438 plt_err("Error on NDC-NIX-RX LF sync, rc %d", rc);
440 /* Unregister IRQs */
441 nix_inl_nix_unregister_irqs(inl_dev);
443 /* By default all associated mcam rules are deleted */
444 req = mbox_alloc_msg_nix_lf_free(mbox);
448 return mbox_process(mbox);
/* Attach RVU LF resources for the inline device (NIX LF, SSO HWS/HWGRP
 * and optionally a CPT LF), fetch their MSIX vector offsets and compute
 * each block's BAR2 base address.
 * NOTE(review): the req field assignments for NIX/SSO counts, NULL
 * checks and error returns are elided in this view.
 */
452 nix_inl_lf_attach(struct nix_inl_dev *inl_dev)
454 struct msix_offset_rsp *msix_rsp;
455 struct dev *dev = &inl_dev->dev;
456 struct mbox *mbox = dev->mbox;
457 struct rsrc_attach_req *req;
458 uint64_t nix_blkaddr;
461 req = mbox_alloc_msg_attach_resources(mbox);
465 /* Attach 1 NIXLF, SSO HWS and SSO HWGRP */
/* CPT LF is attached only when requested */
469 if (inl_dev->attach_cptlf) {
471 req->cpt_blkaddr = RVU_BLOCK_ADDR_CPT0;
474 rc = mbox_process(dev->mbox);
478 /* Get MSIX vector offsets */
479 mbox_alloc_msg_msix_offset(mbox);
480 rc = mbox_process_msg(dev->mbox, (void **)&msix_rsp);
484 inl_dev->nix_msixoff = msix_rsp->nix_msixoff;
485 inl_dev->ssow_msixoff = msix_rsp->ssow_msixoff[0];
486 inl_dev->sso_msixoff = msix_rsp->sso_msixoff[0];
487 inl_dev->cpt_msixoff = msix_rsp->cptlf_msixoff[0];
/* The NIX LF may land on NIX0 or NIX1; remember which */
489 nix_blkaddr = nix_get_blkaddr(dev);
490 inl_dev->is_nix1 = (nix_blkaddr == RVU_BLOCK_ADDR_NIX1);
492 /* Update base addresses for LF's */
493 inl_dev->nix_base = dev->bar2 + (nix_blkaddr << 20);
494 inl_dev->ssow_base = dev->bar2 + (RVU_BLOCK_ADDR_SSOW << 20);
495 inl_dev->sso_base = dev->bar2 + (RVU_BLOCK_ADDR_SSO << 20);
496 inl_dev->cpt_base = dev->bar2 + (RVU_BLOCK_ADDR_CPT0 << 20);
/* Detach all RVU LF resources attached by nix_inl_lf_attach(); CPT LFs
 * are detached only if they were attached. Returns the mbox result.
 * NOTE(review): the partial-detach flag and the NIX/SSO req fields are
 * elided in this view.
 */
502 nix_inl_lf_detach(struct nix_inl_dev *inl_dev)
504 struct dev *dev = &inl_dev->dev;
505 struct mbox *mbox = dev->mbox;
506 struct rsrc_detach_req *req;
509 req = mbox_alloc_msg_detach_resources(mbox);
516 req->cptlfs = !!inl_dev->attach_cptlf;
518 return mbox_process(dev->mbox);
/* Probe and initialize the singleton inline device.
 *
 * Skips probing if an inline device is already registered in idev.
 * Carves the internal nix_inl_dev out of the caller-supplied reserved
 * area, copies user parameters, initializes the base RVU device,
 * attaches LFs and sets up NIX, SSO and CPT, optionally runs the
 * selftest, and finally publishes the device in idev.
 *
 * NOTE(review): error checks between steps and the goto labels for the
 * unwind sequence at the bottom are elided in this view.
 */
522 roc_nix_inl_dev_init(struct roc_nix_inl_dev *roc_inl_dev)
524 struct plt_pci_device *pci_dev;
525 struct nix_inl_dev *inl_dev;
526 struct idev_cfg *idev;
529 pci_dev = roc_inl_dev->pci_dev;
531 /* Skip probe if already done */
532 idev = idev_get_cfg();
536 if (idev->nix_inl_dev) {
537 plt_info("Skipping device %s, inline device already probed",
/* The caller's reserved area must be able to hold the internal dev */
542 PLT_STATIC_ASSERT(sizeof(struct nix_inl_dev) <= ROC_NIX_INL_MEM_SZ);
544 inl_dev = (struct nix_inl_dev *)roc_inl_dev->reserved;
545 memset(inl_dev, 0, sizeof(*inl_dev));
/* Copy user-supplied parameters into the internal device */
547 inl_dev->pci_dev = pci_dev;
548 inl_dev->ipsec_in_max_spi = roc_inl_dev->ipsec_in_max_spi;
549 inl_dev->selftest = roc_inl_dev->selftest;
550 inl_dev->is_multi_channel = roc_inl_dev->is_multi_channel;
551 inl_dev->channel = roc_inl_dev->channel;
552 inl_dev->chan_mask = roc_inl_dev->chan_mask;
553 inl_dev->attach_cptlf = roc_inl_dev->attach_cptlf;
555 /* Initialize base device */
556 rc = dev_init(&inl_dev->dev, pci_dev);
558 plt_err("Failed to init roc device");
562 /* Attach LF resources */
563 rc = nix_inl_lf_attach(inl_dev);
565 plt_err("Failed to attach LF resources, rc=%d", rc);
/* Bring up NIX, SSO and CPT LFs in order */
570 rc = nix_inl_nix_setup(inl_dev);
575 rc = nix_inl_sso_setup(inl_dev);
580 rc = nix_inl_cpt_setup(inl_dev);
584 /* Perform selftest if asked for */
585 if (inl_dev->selftest) {
586 rc = nix_inl_selftest();
/* Publish the fully-initialized device in the idev config */
591 idev->nix_inl_dev = inl_dev;
/* Error unwind (labels elided): tear down in reverse order */
595 rc |= nix_inl_cpt_release(inl_dev);
597 rc |= nix_inl_sso_release(inl_dev);
599 rc |= nix_inl_nix_release(inl_dev);
601 rc |= nix_inl_lf_detach(inl_dev);
603 rc |= dev_fini(&inl_dev->dev, pci_dev);
/* Finalize and unregister the inline device.
 *
 * Validates that the caller owns the registered inline device (the
 * idev pointer must live inside roc_inl_dev->reserved), flushes the
 * CPT context cache, releases SSO/NIX resources, detaches LFs, finis
 * the base device and clears the idev registration.
 * NOTE(review): error checks between steps, the CPT LF release, and
 * the return statement are elided in this view.
 */
609 roc_nix_inl_dev_fini(struct roc_nix_inl_dev *roc_inl_dev)
611 struct plt_pci_device *pci_dev;
612 struct nix_inl_dev *inl_dev;
613 struct idev_cfg *idev;
616 idev = idev_get_cfg();
/* Reject if no device registered or it is not ours */
620 if (!idev->nix_inl_dev ||
621 PLT_PTR_DIFF(roc_inl_dev->reserved, idev->nix_inl_dev))
624 inl_dev = idev->nix_inl_dev;
625 pci_dev = inl_dev->pci_dev;
627 /* Flush Inbound CTX cache entries */
628 nix_inl_cpt_ctx_cache_sync(inl_dev);
/* Tear down in reverse order of init, accumulating rc */
631 rc = nix_inl_sso_release(inl_dev);
634 rc |= nix_inl_nix_release(inl_dev);
637 rc |= nix_inl_lf_detach(inl_dev);
640 rc |= dev_fini(&inl_dev->dev, pci_dev);
/* Drop the idev registration so a future probe can succeed */
644 idev->nix_inl_dev = NULL;