drivers/common/cnxk/roc_nix_inl_dev.c
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(C) 2021 Marvell.
 */

#include "roc_api.h"
#include "roc_priv.h"

/* Default Rx Config for Inline NIX LF */
#define NIX_INL_LF_RX_CFG                                                      \
        (ROC_NIX_LF_RX_CFG_DROP_RE | ROC_NIX_LF_RX_CFG_L2_LEN_ERR |            \
         ROC_NIX_LF_RX_CFG_IP6_UDP_OPT | ROC_NIX_LF_RX_CFG_DIS_APAD |          \
         ROC_NIX_LF_RX_CFG_CSUM_IL4 | ROC_NIX_LF_RX_CFG_CSUM_OL4 |             \
         ROC_NIX_LF_RX_CFG_LEN_IL4 | ROC_NIX_LF_RX_CFG_LEN_IL3 |               \
         ROC_NIX_LF_RX_CFG_LEN_OL4 | ROC_NIX_LF_RX_CFG_LEN_OL3)
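
/*
 * Reading the flag names above (a sketch, not a hardware statement): the
 * default Rx config drops packets with receive errors (DROP_RE), flags L2
 * length errors, parses IPv6 UDP options, disables alignment padding, and
 * enables inner/outer L4 checksum plus L3/L4 length verification on Rx.
 */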

uint16_t
nix_inl_dev_pffunc_get(void)
{
        struct idev_cfg *idev = idev_get_cfg();
        struct nix_inl_dev *inl_dev;

        if (idev != NULL) {
                inl_dev = idev->nix_inl_dev;
                if (inl_dev)
                        return inl_dev->dev.pf_func;
        }
        return 0;
}

static void
nix_inl_selftest_work_cb(uint64_t *gw, void *args)
{
        uintptr_t work = gw[1];

        *((uintptr_t *)args + (gw[0] & 0x1)) = work;

        plt_atomic_thread_fence(__ATOMIC_ACQ_REL);
}
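
/*
 * In the callback above, gw[0] carries the tag word and gw[1] the work
 * pointer; bit 0 of the tag selects which slot of the two-entry result
 * array (passed via args) receives the work. The selftest below queues
 * two works tagged 0x0 and 0x1 so each lands in its own slot.
 */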

static int
nix_inl_selftest(void)
{
        struct idev_cfg *idev = idev_get_cfg();
        roc_nix_inl_sso_work_cb_t save_cb;
        static uintptr_t work_arr[2];
        struct nix_inl_dev *inl_dev;
        void *save_cb_args;
        uint64_t add_work0;
        int rc = 0;

        if (idev == NULL)
                return -ENOTSUP;

        inl_dev = idev->nix_inl_dev;
        if (inl_dev == NULL)
                return -ENOTSUP;

        plt_info("Performing nix inl self test");

        /* Save the current callback and switch to the test callback */
        save_cb = inl_dev->work_cb;
        save_cb_args = inl_dev->cb_args;
        inl_dev->work_cb = nix_inl_selftest_work_cb;
        inl_dev->cb_args = work_arr;

        plt_atomic_thread_fence(__ATOMIC_ACQ_REL);

#define WORK_MAGIC1 0x335577ff0
#define WORK_MAGIC2 0xdeadbeef0

        /* Add two test works with tags 0x0 and 0x1 */
        add_work0 = ((uint64_t)(SSO_TT_ORDERED) << 32) | 0x0;
        roc_store_pair(add_work0, WORK_MAGIC1, inl_dev->sso_base);
        add_work0 = ((uint64_t)(SSO_TT_ORDERED) << 32) | 0x1;
        roc_store_pair(add_work0, WORK_MAGIC2, inl_dev->sso_base);

        /* Wait for the work IRQ handler to deliver both works */
        plt_delay_ms(10000);

        /* Check if we got expected work */
        if (work_arr[0] != WORK_MAGIC1 || work_arr[1] != WORK_MAGIC2) {
                plt_err("Failed to get expected work, [0]=%p [1]=%p",
                        (void *)work_arr[0], (void *)work_arr[1]);
                rc = -EFAULT;
                goto exit;
        }

        plt_info("Work, [0]=%p [1]=%p", (void *)work_arr[0],
                 (void *)work_arr[1]);

exit:
        /* Restore state */
        inl_dev->work_cb = save_cb;
        inl_dev->cb_args = save_cb_args;
        return rc;
}

static int
nix_inl_cpt_ctx_cache_sync(struct nix_inl_dev *inl_dev)
{
        struct mbox *mbox = inl_dev->dev.mbox;
        struct msg_req *req;

        req = mbox_alloc_msg_cpt_ctx_cache_sync(mbox);
        if (req == NULL)
                return -ENOSPC;

        return mbox_process(mbox);
}
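
/*
 * A note on the sync above (inferred from its call site in
 * roc_nix_inl_dev_fini(), not a hardware statement): CPT caches SA
 * contexts, so the cache is flushed at teardown to ensure no stale
 * inbound CTX entries survive the device shutdown.
 */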

static int
nix_inl_nix_ipsec_cfg(struct nix_inl_dev *inl_dev, bool ena)
{
        struct nix_inline_ipsec_lf_cfg *lf_cfg;
        struct mbox *mbox = inl_dev->dev.mbox;
        uint32_t sa_w;

        lf_cfg = mbox_alloc_msg_nix_inline_ipsec_lf_cfg(mbox);
        if (lf_cfg == NULL)
                return -ENOSPC;

        if (ena) {
                /* SA index width is log2 of the SPI range, rounded up to
                 * the next power of two.
                 */
                sa_w = plt_align32pow2(inl_dev->ipsec_in_max_spi + 1);
                sa_w = plt_log2_u32(sa_w);

                lf_cfg->enable = 1;
                lf_cfg->sa_base_addr = (uintptr_t)inl_dev->inb_sa_base;
                lf_cfg->ipsec_cfg1.sa_idx_w = sa_w;
                /* CN9K SA size is different */
                if (roc_model_is_cn9k())
                        lf_cfg->ipsec_cfg0.lenm1_max = NIX_CN9K_MAX_HW_FRS - 1;
                else
                        lf_cfg->ipsec_cfg0.lenm1_max = NIX_RPM_MAX_HW_FRS - 1;
                lf_cfg->ipsec_cfg1.sa_idx_max = inl_dev->ipsec_in_max_spi;
                lf_cfg->ipsec_cfg0.sa_pow2_size =
                        plt_log2_u32(inl_dev->inb_sa_sz);

                lf_cfg->ipsec_cfg0.tag_const = 0;
                lf_cfg->ipsec_cfg0.tt = SSO_TT_ORDERED;
        } else {
                lf_cfg->enable = 0;
        }

        return mbox_process(mbox);
}
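
/*
 * Worked example of the sa_idx_w math (illustrative values): with
 * ipsec_in_max_spi = 4095, plt_align32pow2(4096) = 4096 and
 * plt_log2_u32(4096) = 12, so the HW indexes the SA table with a 12-bit
 * SA index while sa_idx_max caps lookups at 4095.
 */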

static int
nix_inl_cpt_setup(struct nix_inl_dev *inl_dev)
{
        struct roc_cpt_lf *lf = &inl_dev->cpt_lf;
        struct dev *dev = &inl_dev->dev;
        uint8_t eng_grpmask;
        int rc;

        if (!inl_dev->attach_cptlf)
                return 0;

        /* Alloc CPT LF */
        eng_grpmask = (1ULL << ROC_CPT_DFLT_ENG_GRP_SE |
                       1ULL << ROC_CPT_DFLT_ENG_GRP_SE_IE |
                       1ULL << ROC_CPT_DFLT_ENG_GRP_AE);
        rc = cpt_lfs_alloc(dev, eng_grpmask, RVU_BLOCK_ADDR_CPT0, false);
        if (rc) {
                plt_err("Failed to alloc CPT LF resources, rc=%d", rc);
                return rc;
        }

        /* Setup CPT LF for submitting control opcode */
        lf->lf_id = 0;
        lf->nb_desc = 0; /* Set to default */
        lf->dev = &inl_dev->dev;
        lf->msixoff = inl_dev->cpt_msixoff;
        lf->pci_dev = inl_dev->pci_dev;

        rc = cpt_lf_init(lf);
        if (rc) {
                plt_err("Failed to initialize CPT LF, rc=%d", rc);
                goto lf_free;
        }

        roc_cpt_iq_enable(lf);
        return 0;
lf_free:
        rc |= cpt_lfs_free(dev);
        return rc;
}
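
/*
 * The engine-group mask above requests the symmetric (SE), symmetric +
 * IPsec (SE_IE) and asymmetric (AE) default groups. Assuming the usual
 * roc_cpt.h numbering of SE=0, SE_IE=1, AE=2 (an assumption; check the
 * header), the mask evaluates to 0x7.
 */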

static int
nix_inl_cpt_release(struct nix_inl_dev *inl_dev)
{
        struct roc_cpt_lf *lf = &inl_dev->cpt_lf;
        struct dev *dev = &inl_dev->dev;
        int rc, ret = 0;

        if (!inl_dev->attach_cptlf)
                return 0;

        /* Cleanup CPT LF queue */
        cpt_lf_fini(lf);

        /* Free LF resources */
        rc = cpt_lfs_free(dev);
        if (rc)
                plt_err("Failed to free CPT LF resources, rc=%d", rc);
        ret |= rc;

        /* Detach LF */
        rc = cpt_lfs_detach(dev);
        if (rc)
                plt_err("Failed to detach CPT LF, rc=%d", rc);
        ret |= rc;

        return ret;
}
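
/*
 * Note the error-aggregation idiom above: negative errnos are ORed into
 * ret, so a nonzero return means "at least one teardown step failed"
 * rather than a specific errno. Both steps still run even if the first
 * fails.
 */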

static int
nix_inl_sso_setup(struct nix_inl_dev *inl_dev)
{
        struct sso_lf_alloc_rsp *sso_rsp;
        struct dev *dev = &inl_dev->dev;
        uint16_t hwgrp[1] = {0};
        uint32_t xae_cnt;
        int rc;

        /* Alloc SSOW LF */
        rc = sso_lf_alloc(dev, SSO_LF_TYPE_HWS, 1, NULL);
        if (rc) {
                plt_err("Failed to alloc SSO HWS, rc=%d", rc);
                return rc;
        }

        /* Alloc HWGRP LF */
        rc = sso_lf_alloc(dev, SSO_LF_TYPE_HWGRP, 1, (void **)&sso_rsp);
        if (rc) {
                plt_err("Failed to alloc SSO HWGRP, rc=%d", rc);
                goto free_ssow;
        }

        inl_dev->xaq_buf_size = sso_rsp->xaq_buf_size;
        inl_dev->xae_waes = sso_rsp->xaq_wq_entries;
        inl_dev->iue = sso_rsp->in_unit_entries;

        /* Create XAQ pool sized for the in-unit entries */
        xae_cnt = inl_dev->iue;
        rc = sso_hwgrp_init_xaq_aura(dev, &inl_dev->xaq, xae_cnt,
                                     inl_dev->xae_waes, inl_dev->xaq_buf_size,
                                     1);
        if (rc) {
                plt_err("Failed to alloc SSO XAQ aura, rc=%d", rc);
                goto free_sso;
        }

        /* Setup xaq for hwgrps */
        rc = sso_hwgrp_alloc_xaq(dev, inl_dev->xaq.aura_handle, 1);
        if (rc) {
                plt_err("Failed to setup hwgrp xaq aura, rc=%d", rc);
                goto destroy_pool;
        }

        /* Register SSO, SSOW error and work IRQs */
        rc = nix_inl_sso_register_irqs(inl_dev);
        if (rc) {
                plt_err("Failed to register SSO IRQs, rc=%d", rc);
                goto release_xaq;
        }

        /* Setup hwgrp->hws link */
        sso_hws_link_modify(0, inl_dev->ssow_base, NULL, hwgrp, 1, true);

        /* Enable HWGRP */
        plt_write64(0x1, inl_dev->sso_base + SSO_LF_GGRP_QCTL);

        return 0;

release_xaq:
        sso_hwgrp_release_xaq(&inl_dev->dev, 1);
destroy_pool:
        sso_hwgrp_free_xaq_aura(dev, &inl_dev->xaq, 0);
free_sso:
        sso_lf_free(dev, SSO_LF_TYPE_HWGRP, 1);
free_ssow:
        sso_lf_free(dev, SSO_LF_TYPE_HWS, 1);
        return rc;
}
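
/*
 * Rough XAQ sizing picture (a sketch from the parameters above, not the
 * exact aura math): the pool must hold xae_cnt work entries and each
 * xaq_buf_size buffer holds xae_waes of them, so on the order of
 * xae_cnt / xae_waes buffers are needed; sso_hwgrp_init_xaq_aura() does
 * the precise accounting.
 */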

static int
nix_inl_sso_release(struct nix_inl_dev *inl_dev)
{
        uint16_t hwgrp[1] = {0};

        /* Disable HWGRP */
        plt_write64(0, inl_dev->sso_base + SSO_LF_GGRP_QCTL);

        /* Unregister SSO/SSOW IRQs */
        nix_inl_sso_unregister_irqs(inl_dev);

        /* Unlink hws */
        sso_hws_link_modify(0, inl_dev->ssow_base, NULL, hwgrp, 1, false);

        /* Release XAQ aura */
        sso_hwgrp_release_xaq(&inl_dev->dev, 1);

        /* Free SSO, SSOW LFs */
        sso_lf_free(&inl_dev->dev, SSO_LF_TYPE_HWS, 1);
        sso_lf_free(&inl_dev->dev, SSO_LF_TYPE_HWGRP, 1);

        /* Free the XAQ aura */
        sso_hwgrp_free_xaq_aura(&inl_dev->dev, &inl_dev->xaq, 0);

        return 0;
}
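
/*
 * The teardown above runs the setup steps roughly in reverse: quiesce the
 * HWGRP first so no new work is scheduled, then drop IRQs and links, and
 * only then return the XAQ buffers and LFs to the AF.
 */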

static int
nix_inl_nix_setup(struct nix_inl_dev *inl_dev)
{
        uint16_t ipsec_in_max_spi = inl_dev->ipsec_in_max_spi;
        struct dev *dev = &inl_dev->dev;
        struct mbox *mbox = dev->mbox;
        struct nix_lf_alloc_rsp *rsp;
        struct nix_lf_alloc_req *req;
        struct nix_hw_info *hw_info;
        size_t inb_sa_sz;
        int i, rc = -ENOSPC;
        void *sa;

        /* Alloc NIX LF needed for single RQ */
        req = mbox_alloc_msg_nix_lf_alloc(mbox);
        if (req == NULL)
                return rc;
        req->rq_cnt = 1;
        req->sq_cnt = 1;
        req->cq_cnt = 1;
        /* XQESZ is W16 */
        req->xqe_sz = NIX_XQESZ_W16;
        /* RSS size does not matter as this RQ is only for UCAST_IPSEC action */
        req->rss_sz = ROC_NIX_RSS_RETA_SZ_64;
        req->rss_grps = ROC_NIX_RSS_GRPS;
        req->npa_func = idev_npa_pffunc_get();
        req->sso_func = dev->pf_func;
        req->rx_cfg = NIX_INL_LF_RX_CFG;
        req->flags = NIX_LF_RSS_TAG_LSB_AS_ADDER;

        if (roc_model_is_cn10ka_a0() || roc_model_is_cnf10ka_a0() ||
            roc_model_is_cnf10kb_a0())
                req->rx_cfg &= ~ROC_NIX_LF_RX_CFG_DROP_RE;

        rc = mbox_process_msg(mbox, (void *)&rsp);
        if (rc) {
                plt_err("Failed to alloc lf, rc=%d", rc);
                return rc;
        }

        inl_dev->lf_tx_stats = rsp->lf_tx_stats;
        inl_dev->lf_rx_stats = rsp->lf_rx_stats;
        inl_dev->qints = rsp->qints;
        inl_dev->cints = rsp->cints;

        /* Get VWQE info if supported */
        if (roc_model_is_cn10k()) {
                mbox_alloc_msg_nix_get_hw_info(mbox);
                rc = mbox_process_msg(mbox, (void *)&hw_info);
                if (rc) {
                        plt_err("Failed to get HW info, rc=%d", rc);
                        goto lf_free;
                }
                inl_dev->vwqe_interval = hw_info->vwqe_delay;
        }

        /* Register NIX interrupts */
        rc = nix_inl_nix_register_irqs(inl_dev);
        if (rc) {
                plt_err("Failed to register NIX IRQs, rc=%d", rc);
                goto lf_free;
        }

        /* CN9K SA is different */
        if (roc_model_is_cn9k())
                inb_sa_sz = ROC_NIX_INL_ONF_IPSEC_INB_SA_SZ;
        else
                inb_sa_sz = ROC_NIX_INL_OT_IPSEC_INB_SA_SZ;

        /* Alloc contiguous memory for Inbound SAs */
        inl_dev->inb_sa_sz = inb_sa_sz;
        inl_dev->inb_sa_base = plt_zmalloc(inb_sa_sz * ipsec_in_max_spi,
                                           ROC_NIX_INL_SA_BASE_ALIGN);
        if (!inl_dev->inb_sa_base) {
                plt_err("Failed to allocate memory for Inbound SA");
                rc = -ENOMEM;
                goto unregister_irqs;
        }

        if (roc_model_is_cn10k()) {
                for (i = 0; i < ipsec_in_max_spi; i++) {
                        sa = ((uint8_t *)inl_dev->inb_sa_base) +
                             (i * inb_sa_sz);
                        roc_nix_inl_inb_sa_init(sa);
                }
        }
        /* Setup device specific inb SA table */
        rc = nix_inl_nix_ipsec_cfg(inl_dev, true);
        if (rc) {
                plt_err("Failed to setup NIX Inbound SA conf, rc=%d", rc);
                goto free_mem;
        }

        return 0;
free_mem:
        plt_free(inl_dev->inb_sa_base);
        inl_dev->inb_sa_base = NULL;
unregister_irqs:
        nix_inl_nix_unregister_irqs(inl_dev);
lf_free:
        mbox_alloc_msg_nix_lf_free(mbox);
        rc |= mbox_process(mbox);
        return rc;
}
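
/*
 * Back-of-envelope for the SA table allocation above (illustrative
 * numbers; the real per-SA sizes come from the ROC_NIX_INL_*_SA_SZ
 * macros): assuming a 1 KB per-SA context and ipsec_in_max_spi = 4096,
 * the contiguous table is 4 MB, aligned to ROC_NIX_INL_SA_BASE_ALIGN so
 * the HW can locate entry i at inb_sa_base + (i << sa_pow2_size).
 */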

static int
nix_inl_nix_release(struct nix_inl_dev *inl_dev)
{
        struct dev *dev = &inl_dev->dev;
        struct mbox *mbox = dev->mbox;
        struct nix_lf_free_req *req;
        struct ndc_sync_op *ndc_req;
        int rc;

        /* Disable Inbound processing */
        rc = nix_inl_nix_ipsec_cfg(inl_dev, false);
        if (rc)
                plt_err("Failed to disable Inbound IPSec, rc=%d", rc);

        /* Sync NDC-NIX for LF */
        ndc_req = mbox_alloc_msg_ndc_sync_op(mbox);
        if (ndc_req == NULL)
                return -ENOSPC;
        ndc_req->nix_lf_rx_sync = 1;
        rc = mbox_process(mbox);
        if (rc)
                plt_err("Error on NDC-NIX-RX LF sync, rc %d", rc);

        /* Unregister IRQs */
        nix_inl_nix_unregister_irqs(inl_dev);

        /* By default all associated mcam rules are deleted */
        req = mbox_alloc_msg_nix_lf_free(mbox);
        if (req == NULL)
                return -ENOSPC;

        return mbox_process(mbox);
}

static int
nix_inl_lf_attach(struct nix_inl_dev *inl_dev)
{
        struct msix_offset_rsp *msix_rsp;
        struct dev *dev = &inl_dev->dev;
        struct mbox *mbox = dev->mbox;
        struct rsrc_attach_req *req;
        uint64_t nix_blkaddr;
        int rc = -ENOSPC;

        req = mbox_alloc_msg_attach_resources(mbox);
        if (req == NULL)
                return rc;
        req->modify = true;
        /* Attach 1 NIX LF, 1 SSO HWS and 1 SSO HWGRP */
        req->nixlf = true;
        req->ssow = 1;
        req->sso = 1;
        if (inl_dev->attach_cptlf) {
                req->cptlfs = 1;
                req->cpt_blkaddr = RVU_BLOCK_ADDR_CPT0;
        }

        rc = mbox_process(dev->mbox);
        if (rc)
                return rc;

        /* Get MSIX vector offsets */
        mbox_alloc_msg_msix_offset(mbox);
        rc = mbox_process_msg(dev->mbox, (void **)&msix_rsp);
        if (rc)
                return rc;

        inl_dev->nix_msixoff = msix_rsp->nix_msixoff;
        inl_dev->ssow_msixoff = msix_rsp->ssow_msixoff[0];
        inl_dev->sso_msixoff = msix_rsp->sso_msixoff[0];
        inl_dev->cpt_msixoff = msix_rsp->cptlf_msixoff[0];

        nix_blkaddr = nix_get_blkaddr(dev);
        inl_dev->is_nix1 = (nix_blkaddr == RVU_BLOCK_ADDR_NIX1);

        /* Update base addresses for LFs */
        inl_dev->nix_base = dev->bar2 + (nix_blkaddr << 20);
        inl_dev->ssow_base = dev->bar2 + (RVU_BLOCK_ADDR_SSOW << 20);
        inl_dev->sso_base = dev->bar2 + (RVU_BLOCK_ADDR_SSO << 20);
        inl_dev->cpt_base = dev->bar2 + (RVU_BLOCK_ADDR_CPT0 << 20);

        return 0;
}
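
/*
 * BAR2 layout note (from the arithmetic above): each RVU block's LF
 * region occupies its own 1 MiB window, so block address N maps to
 * dev->bar2 + (N << 20), i.e. dev->bar2 + N * 0x100000.
 */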

static int
nix_inl_lf_detach(struct nix_inl_dev *inl_dev)
{
        struct dev *dev = &inl_dev->dev;
        struct mbox *mbox = dev->mbox;
        struct rsrc_detach_req *req;
        int rc = -ENOSPC;

        req = mbox_alloc_msg_detach_resources(mbox);
        if (req == NULL)
                return rc;
        req->partial = true;
        req->nixlf = true;
        req->ssow = true;
        req->sso = true;
        req->cptlfs = !!inl_dev->attach_cptlf;

        return mbox_process(dev->mbox);
}

int
roc_nix_inl_dev_init(struct roc_nix_inl_dev *roc_inl_dev)
{
        struct plt_pci_device *pci_dev;
        struct nix_inl_dev *inl_dev;
        struct idev_cfg *idev;
        int rc;

        pci_dev = roc_inl_dev->pci_dev;

        /* Skip probe if already done */
        idev = idev_get_cfg();
        if (idev == NULL)
                return -ENOTSUP;

        if (idev->nix_inl_dev) {
                plt_info("Skipping device %s, inline device already probed",
                         pci_dev->name);
                return -EEXIST;
        }

        PLT_STATIC_ASSERT(sizeof(struct nix_inl_dev) <= ROC_NIX_INL_MEM_SZ);

        inl_dev = (struct nix_inl_dev *)roc_inl_dev->reserved;
        memset(inl_dev, 0, sizeof(*inl_dev));

        inl_dev->pci_dev = pci_dev;
        inl_dev->ipsec_in_max_spi = roc_inl_dev->ipsec_in_max_spi;
        inl_dev->selftest = roc_inl_dev->selftest;
        inl_dev->is_multi_channel = roc_inl_dev->is_multi_channel;
        inl_dev->channel = roc_inl_dev->channel;
        inl_dev->chan_mask = roc_inl_dev->chan_mask;
        inl_dev->attach_cptlf = roc_inl_dev->attach_cptlf;

        /* Initialize base device */
        rc = dev_init(&inl_dev->dev, pci_dev);
        if (rc) {
                plt_err("Failed to init roc device");
                goto error;
        }

        /* Attach LF resources */
        rc = nix_inl_lf_attach(inl_dev);
        if (rc) {
                plt_err("Failed to attach LF resources, rc=%d", rc);
                goto dev_cleanup;
        }

        /* Setup NIX LF */
        rc = nix_inl_nix_setup(inl_dev);
        if (rc)
                goto lf_detach;

        /* Setup SSO LF */
        rc = nix_inl_sso_setup(inl_dev);
        if (rc)
                goto nix_release;

        /* Setup CPT LF */
        rc = nix_inl_cpt_setup(inl_dev);
        if (rc)
                goto sso_release;

        /* Perform selftest if asked for */
        if (inl_dev->selftest) {
                rc = nix_inl_selftest();
                if (rc)
                        goto cpt_release;
        }

        idev->nix_inl_dev = inl_dev;

        return 0;
cpt_release:
        rc |= nix_inl_cpt_release(inl_dev);
sso_release:
        rc |= nix_inl_sso_release(inl_dev);
nix_release:
        rc |= nix_inl_nix_release(inl_dev);
lf_detach:
        rc |= nix_inl_lf_detach(inl_dev);
dev_cleanup:
        rc |= dev_fini(&inl_dev->dev, pci_dev);
error:
        return rc;
}
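
/*
 * Minimal caller sketch (hypothetical; the real flow lives in the cnxk
 * PMD's inline-device probe path). Fields read by the init above are
 * filled in before the call:
 *
 *      struct roc_nix_inl_dev inl_dev = {0};
 *
 *      inl_dev.pci_dev = pci_dev;           // device being probed
 *      inl_dev.ipsec_in_max_spi = 4096;     // illustrative SPI range
 *      inl_dev.attach_cptlf = true;         // also take a CPT LF
 *      inl_dev.selftest = false;
 *      if (roc_nix_inl_dev_init(&inl_dev))
 *              return -1;                   // resources rolled back
 *      ...
 *      roc_nix_inl_dev_fini(&inl_dev);      // see below
 */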

int
roc_nix_inl_dev_fini(struct roc_nix_inl_dev *roc_inl_dev)
{
        struct plt_pci_device *pci_dev;
        struct nix_inl_dev *inl_dev;
        struct idev_cfg *idev;
        int rc;

        idev = idev_get_cfg();
        if (idev == NULL)
                return 0;

        /* Bail if this is not the registered inline device */
        if (!idev->nix_inl_dev ||
            PLT_PTR_DIFF(roc_inl_dev->reserved, idev->nix_inl_dev))
                return 0;

        inl_dev = idev->nix_inl_dev;
        pci_dev = inl_dev->pci_dev;

        /* Flush Inbound CTX cache entries */
        nix_inl_cpt_ctx_cache_sync(inl_dev);

        /* Release CPT */
        rc = nix_inl_cpt_release(inl_dev);

        /* Release SSO */
        rc |= nix_inl_sso_release(inl_dev);

        /* Release NIX */
        rc |= nix_inl_nix_release(inl_dev);

        /* Detach LFs */
        rc |= nix_inl_lf_detach(inl_dev);

        /* Cleanup mbox */
        rc |= dev_fini(&inl_dev->dev, pci_dev);
        if (rc)
                return rc;

        idev->nix_inl_dev = NULL;
        return 0;
}