/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(C) 2021 Marvell.
 */

#include "roc_api.h"
#include "roc_priv.h"

#define XAQ_CACHE_CNT 0x7
/* Default Rx Config for Inline NIX LF */
#define NIX_INL_LF_RX_CFG                                                      \
	(ROC_NIX_LF_RX_CFG_DROP_RE | ROC_NIX_LF_RX_CFG_L2_LEN_ERR |            \
	 ROC_NIX_LF_RX_CFG_IP6_UDP_OPT | ROC_NIX_LF_RX_CFG_DIS_APAD |          \
	 ROC_NIX_LF_RX_CFG_CSUM_IL4 | ROC_NIX_LF_RX_CFG_CSUM_OL4 |             \
	 ROC_NIX_LF_RX_CFG_LEN_IL4 | ROC_NIX_LF_RX_CFG_LEN_IL3 |               \
	 ROC_NIX_LF_RX_CFG_LEN_OL4 | ROC_NIX_LF_RX_CFG_LEN_OL3)
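/* Fetch the PF_FUNC of the probed inline device, or 0 when no inline
 * device is registered in the per-process idev config.
 */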
uint16_t
nix_inl_dev_pffunc_get(void)
{
	struct idev_cfg *idev = idev_get_cfg();
	struct nix_inl_dev *inl_dev;

	if (idev != NULL) {
		inl_dev = idev->nix_inl_dev;
		if (inl_dev)
			return inl_dev->dev.pf_func;
	}
	return 0;
}
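/* Work callback installed only for the duration of the selftest. Each
 * received work word (gw[1]) is stored into one of two slots selected
 * by the low bit of the first gw word, so the test can verify both
 * enqueued magic values independently.
 */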
static void
nix_inl_selftest_work_cb(uint64_t *gw, void *args)
{
	uintptr_t work = gw[1];

	*((uintptr_t *)args + (gw[0] & 0x1)) = work;

	plt_atomic_thread_fence(__ATOMIC_ACQ_REL);
}
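/* Sanity check of the inline device SSO path: temporarily swap in the
 * test callback above, enqueue two magic work words with ordered tag
 * type via store-pair to the SSO LF base, then verify the callback
 * observed both of them. The upper 32 bits of the first store word
 * carry the tag type, the lower bits the tag.
 */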
static int
nix_inl_selftest(void)
{
	struct idev_cfg *idev = idev_get_cfg();
	roc_nix_inl_sso_work_cb_t save_cb;
	static uintptr_t work_arr[2];
	struct nix_inl_dev *inl_dev;
	void *save_cb_args;
	uint64_t add_work0;
	int rc = 0;

	if (idev == NULL)
		return -ENOTSUP;

	inl_dev = idev->nix_inl_dev;
	if (inl_dev == NULL)
		return -ENOTSUP;

	plt_info("Performing nix inl self test");

	/* Save and update cb to test cb */
	save_cb = inl_dev->work_cb;
	save_cb_args = inl_dev->cb_args;
	inl_dev->work_cb = nix_inl_selftest_work_cb;
	inl_dev->cb_args = work_arr;

	plt_atomic_thread_fence(__ATOMIC_ACQ_REL);

#define WORK_MAGIC1 0x335577ff0
#define WORK_MAGIC2 0xdeadbeef0

	/* Add work */
	add_work0 = ((uint64_t)(SSO_TT_ORDERED) << 32) | 0x0;
	roc_store_pair(add_work0, WORK_MAGIC1, inl_dev->sso_base);
	add_work0 = ((uint64_t)(SSO_TT_ORDERED) << 32) | 0x1;
	roc_store_pair(add_work0, WORK_MAGIC2, inl_dev->sso_base);

	/* Wait for the work callback to run; this fixed delay is an
	 * assumption restored for the elided wait step.
	 */
	plt_delay_ms(10000);

	/* Check if we got expected work */
	if (work_arr[0] != WORK_MAGIC1 || work_arr[1] != WORK_MAGIC2) {
		plt_err("Failed to get expected work, [0]=%p [1]=%p",
			(void *)work_arr[0], (void *)work_arr[1]);
		rc = -EFAULT;
		goto exit;
	}

	plt_info("Work, [0]=%p [1]=%p", (void *)work_arr[0],
		 (void *)work_arr[1]);
exit:
	/* Restore original callback state */
	inl_dev->work_cb = save_cb;
	inl_dev->cb_args = save_cb_args;
	return rc;
}
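/* Issue a CPT context cache sync mailbox request so that any cached
 * inbound SA context entries are written back before teardown.
 */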
static int
nix_inl_cpt_ctx_cache_sync(struct nix_inl_dev *inl_dev)
{
	struct mbox *mbox = (&inl_dev->dev)->mbox;
	struct msg_req *req;

	req = mbox_alloc_msg_cpt_ctx_cache_sync(mbox);
	if (req == NULL)
		return -ENOSPC;

	return mbox_process(mbox);
}
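/* Enable or disable inline inbound IPsec processing on the NIX LF. When
 * enabling, the SA table base, SA size, and maximum SA index are
 * programmed so RX hardware can index SAs directly by SPI.
 */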
static int
nix_inl_nix_ipsec_cfg(struct nix_inl_dev *inl_dev, bool ena)
{
	struct nix_inline_ipsec_lf_cfg *lf_cfg;
	struct mbox *mbox = (&inl_dev->dev)->mbox;
	uint32_t sa_w;

	lf_cfg = mbox_alloc_msg_nix_inline_ipsec_lf_cfg(mbox);
	if (lf_cfg == NULL)
		return -ENOSPC;

	if (ena) {
		sa_w = plt_align32pow2(inl_dev->ipsec_in_max_spi + 1);
		sa_w = plt_log2_u32(sa_w);

		lf_cfg->enable = 1;
		lf_cfg->sa_base_addr = (uintptr_t)inl_dev->inb_sa_base;
		lf_cfg->ipsec_cfg1.sa_idx_w = sa_w;
		/* CN9K SA size is different */
		if (roc_model_is_cn9k())
			lf_cfg->ipsec_cfg0.lenm1_max = NIX_CN9K_MAX_HW_FRS - 1;
		else
			lf_cfg->ipsec_cfg0.lenm1_max = NIX_RPM_MAX_HW_FRS - 1;
		lf_cfg->ipsec_cfg1.sa_idx_max = inl_dev->ipsec_in_max_spi;
		lf_cfg->ipsec_cfg0.sa_pow2_size =
			plt_log2_u32(inl_dev->inb_sa_sz);

		lf_cfg->ipsec_cfg0.tag_const = 0;
		lf_cfg->ipsec_cfg0.tt = SSO_TT_ORDERED;
	} else {
		lf_cfg->enable = 0;
	}

	return mbox_process(mbox);
}
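/* Attach and initialize a CPT LF for the inline device. This LF is used
 * to submit control opcodes (e.g. SA writes/flushes), so the default
 * descriptor count is sufficient.
 */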
static int
nix_inl_cpt_setup(struct nix_inl_dev *inl_dev)
{
	struct roc_cpt_lf *lf = &inl_dev->cpt_lf;
	struct dev *dev = &inl_dev->dev;
	uint8_t eng_grpmask;
	int rc;

	if (!inl_dev->attach_cptlf)
		return 0;

	/* Alloc CPT LF */
	eng_grpmask = (1ULL << ROC_CPT_DFLT_ENG_GRP_SE |
		       1ULL << ROC_CPT_DFLT_ENG_GRP_SE_IE |
		       1ULL << ROC_CPT_DFLT_ENG_GRP_AE);
	rc = cpt_lfs_alloc(dev, eng_grpmask, RVU_BLOCK_ADDR_CPT0, false);
	if (rc) {
		plt_err("Failed to alloc CPT LF resources, rc=%d", rc);
		return rc;
	}

	/* Setup CPT LF for submitting control opcode */
	lf = &inl_dev->cpt_lf;
	lf->lf_id = 0;
	lf->nb_desc = 0; /* Set to default */
	lf->dev = &inl_dev->dev;
	lf->msixoff = inl_dev->cpt_msixoff;
	lf->pci_dev = inl_dev->pci_dev;

	rc = cpt_lf_init(lf);
	if (rc) {
		plt_err("Failed to initialize CPT LF, rc=%d", rc);
		goto lf_free;
	}

	roc_cpt_iq_enable(lf);
	return 0;
lf_free:
	rc |= cpt_lfs_free(dev);
	return rc;
}
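/* Tear down the control CPT LF: finalize the queue, then free and
 * detach the LF resources from the RVU.
 */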
static int
nix_inl_cpt_release(struct nix_inl_dev *inl_dev)
{
	struct roc_cpt_lf *lf = &inl_dev->cpt_lf;
	struct dev *dev = &inl_dev->dev;
	int rc;

	if (!inl_dev->attach_cptlf)
		return 0;

	/* Cleanup CPT LF queue */
	cpt_lf_fini(lf);

	/* Free LF resources */
	rc = cpt_lfs_free(dev);
	if (rc)
		plt_err("Failed to free CPT LF resources, rc=%d", rc);

	/* Detach LF */
	rc = cpt_lfs_detach(dev);
	if (rc)
		plt_err("Failed to detach CPT LF, rc=%d", rc);

	return rc;
}
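/* Bring up the SSO side of the inline device: one HWS (work slot) and
 * one HWGRP (work group), an NPA-backed XAQ buffer pool sized from the
 * in-unit entry count, IRQ registration, and finally the HWS->HWGRP
 * link and queue enable.
 */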
static int
nix_inl_sso_setup(struct nix_inl_dev *inl_dev)
{
	struct sso_lf_alloc_rsp *sso_rsp;
	struct dev *dev = &inl_dev->dev;
	uint32_t xaq_cnt, count, aura;
	uint16_t hwgrp[1] = {0};
	struct npa_pool_s pool;
	uint64_t iova;
	int rc;

	/* Alloc SSOW LF */
	rc = sso_lf_alloc(dev, SSO_LF_TYPE_HWS, 1, NULL);
	if (rc) {
		plt_err("Failed to alloc SSO HWS, rc=%d", rc);
		return rc;
	}

	/* Alloc HWGRP LF */
	rc = sso_lf_alloc(dev, SSO_LF_TYPE_HWGRP, 1, (void **)&sso_rsp);
	if (rc) {
		plt_err("Failed to alloc SSO HWGRP, rc=%d", rc);
		goto free_ssow;
	}

	inl_dev->xaq_buf_size = sso_rsp->xaq_buf_size;
	inl_dev->xae_waes = sso_rsp->xaq_wq_entries;
	inl_dev->iue = sso_rsp->in_unit_entries;

	/* Create XAQ pool */
	xaq_cnt = XAQ_CACHE_CNT;
	xaq_cnt += inl_dev->iue / inl_dev->xae_waes;
	plt_sso_dbg("Configuring %d xaq buffers", xaq_cnt);

	inl_dev->xaq_mem = plt_zmalloc(inl_dev->xaq_buf_size * xaq_cnt,
				       inl_dev->xaq_buf_size);
	if (!inl_dev->xaq_mem) {
		rc = -ENOMEM;
		plt_err("Failed to alloc xaq buf mem");
		goto free_sso;
	}

	memset(&pool, 0, sizeof(struct npa_pool_s));
	pool.nat_align = 1;
	rc = roc_npa_pool_create(&inl_dev->xaq_aura, inl_dev->xaq_buf_size,
				 xaq_cnt, NULL, &pool);
	if (rc) {
		plt_err("Failed to alloc aura for XAQ, rc=%d", rc);
		goto free_mem;
	}

	/* Fill the XAQ buffers */
	iova = (uint64_t)inl_dev->xaq_mem;
	for (count = 0; count < xaq_cnt; count++) {
		roc_npa_aura_op_free(inl_dev->xaq_aura, 0, iova);
		iova += inl_dev->xaq_buf_size;
	}
	roc_npa_aura_op_range_set(inl_dev->xaq_aura, (uint64_t)inl_dev->xaq_mem,
				  iova);

	aura = roc_npa_aura_handle_to_aura(inl_dev->xaq_aura);

	/* Setup xaq for hwgrps */
	rc = sso_hwgrp_alloc_xaq(dev, aura, 1);
	if (rc) {
		plt_err("Failed to setup hwgrp xaq aura, rc=%d", rc);
		goto destroy_pool;
	}

	/* Register SSO, SSOW error and work irq's */
	rc = nix_inl_sso_register_irqs(inl_dev);
	if (rc) {
		plt_err("Failed to register sso irq's, rc=%d", rc);
		goto release_xaq;
	}

	/* Setup hwgrp->hws link */
	sso_hws_link_modify(0, inl_dev->ssow_base, NULL, hwgrp, 1, true);

	/* Enable HWGRP */
	plt_write64(0x1, inl_dev->sso_base + SSO_LF_GGRP_QCTL);

	return 0;

release_xaq:
	sso_hwgrp_release_xaq(&inl_dev->dev, 1);
destroy_pool:
	roc_npa_pool_destroy(inl_dev->xaq_aura);
	inl_dev->xaq_aura = 0;
free_mem:
	plt_free(inl_dev->xaq_mem);
	inl_dev->xaq_mem = NULL;
free_sso:
	sso_lf_free(dev, SSO_LF_TYPE_HWGRP, 1);
free_ssow:
	sso_lf_free(dev, SSO_LF_TYPE_HWS, 1);
	return rc;
}
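/* Reverse of nix_inl_sso_setup(): quiesce the HWGRP, drop IRQs and the
 * HWS link, release the XAQ aura, and free both SSO LF types.
 */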
static int
nix_inl_sso_release(struct nix_inl_dev *inl_dev)
{
	uint16_t hwgrp[1] = {0};

	/* Disable HWGRP */
	plt_write64(0, inl_dev->sso_base + SSO_LF_GGRP_QCTL);

	/* Unregister SSO/SSOW IRQ's */
	nix_inl_sso_unregister_irqs(inl_dev);

	/* Unlink hws */
	sso_hws_link_modify(0, inl_dev->ssow_base, NULL, hwgrp, 1, false);

	/* Release XAQ aura */
	sso_hwgrp_release_xaq(&inl_dev->dev, 1);

	/* Free SSO, SSOW LF's */
	sso_lf_free(&inl_dev->dev, SSO_LF_TYPE_HWS, 1);
	sso_lf_free(&inl_dev->dev, SSO_LF_TYPE_HWGRP, 1);

	return 0;
}
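/* Allocate a minimal NIX LF (single RQ) for inline inbound IPsec,
 * register its IRQs, carve out the contiguous inbound SA table indexed
 * by SPI, and program the inline IPsec LF configuration.
 */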
static int
nix_inl_nix_setup(struct nix_inl_dev *inl_dev)
{
	uint16_t ipsec_in_max_spi = inl_dev->ipsec_in_max_spi;
	struct dev *dev = &inl_dev->dev;
	struct mbox *mbox = dev->mbox;
	struct nix_lf_alloc_rsp *rsp;
	struct nix_lf_alloc_req *req;
	size_t inb_sa_sz;
	int i, rc = -ENOSPC;
	void *sa;

	/* Alloc NIX LF needed for single RQ */
	req = mbox_alloc_msg_nix_lf_alloc(mbox);
	if (req == NULL)
		return rc;

	req->rq_cnt = 1;
	req->sq_cnt = 1;
	req->cq_cnt = 1;
	/* XQE_SZ is W16 */
	req->xqe_sz = NIX_XQESZ_W16;
	/* RSS size does not matter as this RQ is only for UCAST_IPSEC action */
	req->rss_sz = ROC_NIX_RSS_RETA_SZ_64;
	req->rss_grps = ROC_NIX_RSS_GRPS;
	req->npa_func = idev_npa_pffunc_get();
	req->sso_func = dev->pf_func;
	req->rx_cfg = NIX_INL_LF_RX_CFG;
	req->flags = NIX_LF_RSS_TAG_LSB_AS_ADDER;

	if (roc_model_is_cn10ka_a0() || roc_model_is_cnf10ka_a0() ||
	    roc_model_is_cnf10kb_a0())
		req->rx_cfg &= ~ROC_NIX_LF_RX_CFG_DROP_RE;

	rc = mbox_process_msg(mbox, (void *)&rsp);
	if (rc) {
		plt_err("Failed to alloc lf, rc=%d", rc);
		return rc;
	}

	inl_dev->lf_tx_stats = rsp->lf_tx_stats;
	inl_dev->lf_rx_stats = rsp->lf_rx_stats;
	inl_dev->qints = rsp->qints;
	inl_dev->cints = rsp->cints;

	/* Register nix interrupts */
	rc = nix_inl_nix_register_irqs(inl_dev);
	if (rc) {
		plt_err("Failed to register nix irq's, rc=%d", rc);
		goto lf_free;
	}

	/* CN9K SA is different */
	if (roc_model_is_cn9k())
		inb_sa_sz = ROC_NIX_INL_ONF_IPSEC_INB_SA_SZ;
	else
		inb_sa_sz = ROC_NIX_INL_OT_IPSEC_INB_SA_SZ;

	/* Alloc contiguous memory for Inbound SA's */
	inl_dev->inb_sa_sz = inb_sa_sz;
	inl_dev->inb_sa_base = plt_zmalloc(inb_sa_sz * ipsec_in_max_spi,
					   ROC_NIX_INL_SA_BASE_ALIGN);
	if (!inl_dev->inb_sa_base) {
		plt_err("Failed to allocate memory for Inbound SA");
		rc = -ENOMEM;
		goto unregister_irqs;
	}

	if (roc_model_is_cn10k()) {
		for (i = 0; i < ipsec_in_max_spi; i++) {
			sa = ((uint8_t *)inl_dev->inb_sa_base) +
			     (i * inb_sa_sz);
			roc_nix_inl_inb_sa_init(sa);
		}
	}

	/* Setup device specific inb SA table */
	rc = nix_inl_nix_ipsec_cfg(inl_dev, true);
	if (rc) {
		plt_err("Failed to setup NIX Inbound SA conf, rc=%d", rc);
		goto free_mem;
	}

	return 0;
free_mem:
	plt_free(inl_dev->inb_sa_base);
	inl_dev->inb_sa_base = NULL;
unregister_irqs:
	nix_inl_nix_unregister_irqs(inl_dev);
lf_free:
	mbox_alloc_msg_nix_lf_free(mbox);
	rc |= mbox_process(mbox);
	return rc;
}
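/* Disable inline inbound processing, sync the NDC cache for the RX LF,
 * unregister IRQs, and free the NIX LF (which also deletes any MCAM
 * rules associated with it).
 */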
static int
nix_inl_nix_release(struct nix_inl_dev *inl_dev)
{
	struct dev *dev = &inl_dev->dev;
	struct mbox *mbox = dev->mbox;
	struct nix_lf_free_req *req;
	struct ndc_sync_op *ndc_req;
	int rc;

	/* Disable Inbound processing */
	rc = nix_inl_nix_ipsec_cfg(inl_dev, false);
	if (rc)
		plt_err("Failed to disable Inbound IPSec, rc=%d", rc);

	/* Sync NDC-NIX for LF */
	ndc_req = mbox_alloc_msg_ndc_sync_op(mbox);
	if (ndc_req == NULL)
		return -ENOSPC;
	ndc_req->nix_lf_rx_sync = 1;
	rc = mbox_process(mbox);
	if (rc)
		plt_err("Error on NDC-NIX-RX LF sync, rc %d", rc);

	/* Unregister IRQs */
	nix_inl_nix_unregister_irqs(inl_dev);

	/* By default all associated mcam rules are deleted */
	req = mbox_alloc_msg_nix_lf_free(mbox);
	if (req == NULL)
		return -ENOSPC;

	return mbox_process(mbox);
}
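/* Attach the RVU resources needed by the inline device (one NIX LF, one
 * SSO HWS, one SSO HWGRP, and optionally CPT LFs), then record the MSIX
 * vector offsets and BAR2 base addresses of each block.
 */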
static int
nix_inl_lf_attach(struct nix_inl_dev *inl_dev)
{
	struct msix_offset_rsp *msix_rsp;
	struct dev *dev = &inl_dev->dev;
	struct mbox *mbox = dev->mbox;
	struct rsrc_attach_req *req;
	uint64_t nix_blkaddr;
	int rc = -ENOSPC;

	req = mbox_alloc_msg_attach_resources(mbox);
	if (req == NULL)
		return rc;
	req->modify = true;
	/* Attach 1 NIXLF, SSO HWS and SSO HWGRP */
	req->nixlf = true;
	req->ssow = 1;
	req->sso = 1;
	if (inl_dev->attach_cptlf) {
		req->cptlfs = 1;
		req->cpt_blkaddr = RVU_BLOCK_ADDR_CPT0;
	}

	rc = mbox_process(dev->mbox);
	if (rc)
		return rc;

	/* Get MSIX vector offsets */
	mbox_alloc_msg_msix_offset(mbox);
	rc = mbox_process_msg(dev->mbox, (void **)&msix_rsp);
	if (rc)
		return rc;

	inl_dev->nix_msixoff = msix_rsp->nix_msixoff;
	inl_dev->ssow_msixoff = msix_rsp->ssow_msixoff[0];
	inl_dev->sso_msixoff = msix_rsp->sso_msixoff[0];
	inl_dev->cpt_msixoff = msix_rsp->cptlf_msixoff[0];

	nix_blkaddr = nix_get_blkaddr(dev);
	inl_dev->is_nix1 = (nix_blkaddr == RVU_BLOCK_ADDR_NIX1);

	/* Update base addresses for LF's */
	inl_dev->nix_base = dev->bar2 + (nix_blkaddr << 20);
	inl_dev->ssow_base = dev->bar2 + (RVU_BLOCK_ADDR_SSOW << 20);
	inl_dev->sso_base = dev->bar2 + (RVU_BLOCK_ADDR_SSO << 20);
	inl_dev->cpt_base = dev->bar2 + (RVU_BLOCK_ADDR_CPT0 << 20);

	return 0;
}
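/* Detach the LF resources acquired in nix_inl_lf_attach(). A partial
 * detach is requested so unrelated resources of the PF/VF are kept.
 */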
static int
nix_inl_lf_detach(struct nix_inl_dev *inl_dev)
{
	struct dev *dev = &inl_dev->dev;
	struct mbox *mbox = dev->mbox;
	struct rsrc_detach_req *req;
	int rc = -ENOSPC;

	req = mbox_alloc_msg_detach_resources(mbox);
	if (req == NULL)
		return rc;
	req->partial = true;
	req->nixlf = true;
	req->ssow = true;
	req->sso = true;
	req->cptlfs = !!inl_dev->attach_cptlf;

	return mbox_process(dev->mbox);
}
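/* Probe and initialize the inline device on the given PCI device. Only
 * one inline device is supported per process; its state lives in the
 * caller-provided roc_inl_dev->reserved area.
 *
 * A minimal usage sketch; the PCI probe plumbing around it is an
 * assumption, not part of this file:
 *
 *	struct roc_nix_inl_dev inl_dev = {0};
 *
 *	inl_dev.pci_dev = pci_dev;
 *	inl_dev.ipsec_in_max_spi = 4096;
 *	inl_dev.attach_cptlf = true;
 *	rc = roc_nix_inl_dev_init(&inl_dev);
 *	if (rc)
 *		return rc;
 */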
int
roc_nix_inl_dev_init(struct roc_nix_inl_dev *roc_inl_dev)
{
	struct plt_pci_device *pci_dev;
	struct nix_inl_dev *inl_dev;
	struct idev_cfg *idev;
	int rc;

	pci_dev = roc_inl_dev->pci_dev;

	/* Skip probe if already done */
	idev = idev_get_cfg();
	if (idev == NULL)
		return -ENOTSUP;

	if (idev->nix_inl_dev) {
		plt_info("Skipping device %s, inline device already probed",
			 pci_dev->name);
		return -EEXIST;
	}

	PLT_STATIC_ASSERT(sizeof(struct nix_inl_dev) <= ROC_NIX_INL_MEM_SZ);

	inl_dev = (struct nix_inl_dev *)roc_inl_dev->reserved;
	memset(inl_dev, 0, sizeof(*inl_dev));

	inl_dev->pci_dev = pci_dev;
	inl_dev->ipsec_in_max_spi = roc_inl_dev->ipsec_in_max_spi;
	inl_dev->selftest = roc_inl_dev->selftest;
	inl_dev->is_multi_channel = roc_inl_dev->is_multi_channel;
	inl_dev->channel = roc_inl_dev->channel;
	inl_dev->chan_mask = roc_inl_dev->chan_mask;
	inl_dev->attach_cptlf = roc_inl_dev->attach_cptlf;

	/* Initialize base device */
	rc = dev_init(&inl_dev->dev, pci_dev);
	if (rc) {
		plt_err("Failed to init roc device");
		goto error;
	}

	/* Attach LF resources */
	rc = nix_inl_lf_attach(inl_dev);
	if (rc) {
		plt_err("Failed to attach LF resources, rc=%d", rc);
		goto dev_cleanup;
	}

	/* Setup NIX LF */
	rc = nix_inl_nix_setup(inl_dev);
	if (rc)
		goto lf_detach;

	/* Setup SSO LF */
	rc = nix_inl_sso_setup(inl_dev);
	if (rc)
		goto nix_release;

	/* Setup CPT LF */
	rc = nix_inl_cpt_setup(inl_dev);
	if (rc)
		goto sso_release;

	/* Perform selftest if asked for */
	if (inl_dev->selftest) {
		rc = nix_inl_selftest();
		if (rc)
			goto cpt_release;
	}

	idev->nix_inl_dev = inl_dev;
	return 0;

cpt_release:
	rc |= nix_inl_cpt_release(inl_dev);
sso_release:
	rc |= nix_inl_sso_release(inl_dev);
nix_release:
	rc |= nix_inl_nix_release(inl_dev);
lf_detach:
	rc |= nix_inl_lf_detach(inl_dev);
dev_cleanup:
	rc |= dev_fini(&inl_dev->dev, pci_dev);
error:
	return rc;
}
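/* Tear down the inline device in reverse order of init and clear the
 * idev registration. Expects the same roc_nix_inl_dev handle that was
 * passed to roc_nix_inl_dev_init().
 */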
int
roc_nix_inl_dev_fini(struct roc_nix_inl_dev *roc_inl_dev)
{
	struct plt_pci_device *pci_dev;
	struct nix_inl_dev *inl_dev;
	struct idev_cfg *idev;
	int rc;

	idev = idev_get_cfg();
	if (idev == NULL)
		return 0;

	if (!idev->nix_inl_dev ||
	    PLT_PTR_DIFF(roc_inl_dev->reserved, idev->nix_inl_dev))
		return 0;

	inl_dev = idev->nix_inl_dev;
	pci_dev = inl_dev->pci_dev;

	/* Flush Inbound CTX cache entries */
	nix_inl_cpt_ctx_cache_sync(inl_dev);

	/* Release SSO */
	rc = nix_inl_sso_release(inl_dev);

	/* Release NIX */
	rc |= nix_inl_nix_release(inl_dev);

	/* Detach LF's */
	rc |= nix_inl_lf_detach(inl_dev);

	/* Cleanup mbox */
	rc |= dev_fini(&inl_dev->dev, pci_dev);
	if (rc)
		return rc;

	idev->nix_inl_dev = NULL;
	return 0;
}