drivers/common/cnxk/roc_nix_inl_dev.c
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(C) 2021 Marvell.
 */

#include "roc_api.h"
#include "roc_priv.h"

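/* Default drop threshold, as a percentage of the aura limit, applied to the
 * inline device's SPB/LPB packet pool auras when the caller does not override
 * it (see roc_nix_inl_dev_init()).
 */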
#define NIX_AURA_DROP_PC_DFLT 40

/* Default Rx Config for Inline NIX LF */
#define NIX_INL_LF_RX_CFG                                                      \
        (ROC_NIX_LF_RX_CFG_DROP_RE | ROC_NIX_LF_RX_CFG_L2_LEN_ERR |            \
         ROC_NIX_LF_RX_CFG_IP6_UDP_OPT | ROC_NIX_LF_RX_CFG_DIS_APAD |          \
         ROC_NIX_LF_RX_CFG_CSUM_IL4 | ROC_NIX_LF_RX_CFG_CSUM_OL4 |             \
         ROC_NIX_LF_RX_CFG_LEN_IL4 | ROC_NIX_LF_RX_CFG_LEN_IL3 |               \
         ROC_NIX_LF_RX_CFG_LEN_OL4 | ROC_NIX_LF_RX_CFG_LEN_OL3)

uint16_t
nix_inl_dev_pffunc_get(void)
{
        struct idev_cfg *idev = idev_get_cfg();
        struct nix_inl_dev *inl_dev;

        if (idev != NULL) {
                inl_dev = idev->nix_inl_dev;
                if (inl_dev)
                        return inl_dev->dev.pf_func;
        }
        return 0;
}

uint16_t
roc_nix_inl_dev_pffunc_get(void)
{
        return nix_inl_dev_pffunc_get();
}

static void
nix_inl_selftest_work_cb(uint64_t *gw, void *args)
{
        uintptr_t work = gw[1];

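        /* The low bit of the tag in word0 selects the slot, so the two test
         * works enqueued with tags 0x0 and 0x1 land in distinct entries of
         * the caller's work_arr.
         */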
        *((uintptr_t *)args + (gw[0] & 0x1)) = work;

        plt_atomic_thread_fence(__ATOMIC_ACQ_REL);
}

static int
nix_inl_selftest(void)
{
        struct idev_cfg *idev = idev_get_cfg();
        roc_nix_inl_sso_work_cb_t save_cb;
        static uintptr_t work_arr[2];
        struct nix_inl_dev *inl_dev;
        void *save_cb_args;
        uint64_t add_work0;
        int rc = 0;

        if (idev == NULL)
                return -ENOTSUP;

        inl_dev = idev->nix_inl_dev;
        if (inl_dev == NULL)
                return -ENOTSUP;

        plt_info("Performing nix inl self test");

        /* Save and update cb to test cb */
        save_cb = inl_dev->work_cb;
        save_cb_args = inl_dev->cb_args;
        inl_dev->work_cb = nix_inl_selftest_work_cb;
        inl_dev->cb_args = work_arr;

        plt_atomic_thread_fence(__ATOMIC_ACQ_REL);

#define WORK_MAGIC1 0x335577ff0
#define WORK_MAGIC2 0xdeadbeef0

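        /* Each roc_store_pair() below writes the two add-work words as formed
         * here: word0 carries the tag type (SSO_TT_ORDERED) in its upper word
         * and the tag value in the lower bits; word1 is the magic "work"
         * pointer the callback should hand back.
         */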
        /* Add work */
        add_work0 = ((uint64_t)(SSO_TT_ORDERED) << 32) | 0x0;
        roc_store_pair(add_work0, WORK_MAGIC1, inl_dev->sso_base);
        add_work0 = ((uint64_t)(SSO_TT_ORDERED) << 32) | 0x1;
        roc_store_pair(add_work0, WORK_MAGIC2, inl_dev->sso_base);

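        /* Give the SSO ample time to schedule both works through the test
         * callback before checking the results.
         */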
        plt_delay_ms(10000);

        /* Check if we got expected work */
        if (work_arr[0] != WORK_MAGIC1 || work_arr[1] != WORK_MAGIC2) {
                plt_err("Failed to get expected work, [0]=%p [1]=%p",
                        (void *)work_arr[0], (void *)work_arr[1]);
                rc = -EFAULT;
                goto exit;
        }

        plt_info("Work, [0]=%p [1]=%p", (void *)work_arr[0],
                 (void *)work_arr[1]);

exit:
        /* Restore state */
        inl_dev->work_cb = save_cb;
        inl_dev->cb_args = save_cb_args;
        return rc;
}

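/* Flush the CPT context cache so any cached inbound SA context entries are
 * written back/invalidated before the SA memory is reused or freed.
 */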
static int
nix_inl_cpt_ctx_cache_sync(struct nix_inl_dev *inl_dev)
{
        struct mbox *mbox = (&inl_dev->dev)->mbox;
        struct msg_req *req;

        req = mbox_alloc_msg_cpt_ctx_cache_sync(mbox);
        if (req == NULL)
                return -ENOSPC;

        return mbox_process(mbox);
}

static int
nix_inl_nix_ipsec_cfg(struct nix_inl_dev *inl_dev, bool ena)
{
        struct nix_inline_ipsec_lf_cfg *lf_cfg;
        struct mbox *mbox = (&inl_dev->dev)->mbox;
        uint64_t max_sa;
        uint32_t sa_w;

        lf_cfg = mbox_alloc_msg_nix_inline_ipsec_lf_cfg(mbox);
        if (lf_cfg == NULL)
                return -ENOSPC;

        if (ena) {
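                /* The SA table size is a power of two (inb_spi_mask + 1);
                 * sa_idx_w is its log2 so hardware can derive the SA index
                 * by masking the inbound SPI.
                 */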
                max_sa = inl_dev->inb_spi_mask + 1;
                sa_w = plt_log2_u32(max_sa);

                lf_cfg->enable = 1;
                lf_cfg->sa_base_addr = (uintptr_t)inl_dev->inb_sa_base;
                lf_cfg->ipsec_cfg1.sa_idx_w = sa_w;
                /* CN9K and CN10K have different max HW frame sizes */
                if (roc_model_is_cn9k())
                        lf_cfg->ipsec_cfg0.lenm1_max = NIX_CN9K_MAX_HW_FRS - 1;
                else
                        lf_cfg->ipsec_cfg0.lenm1_max = NIX_RPM_MAX_HW_FRS - 1;
                lf_cfg->ipsec_cfg1.sa_idx_max = max_sa - 1;
                lf_cfg->ipsec_cfg0.sa_pow2_size =
                        plt_log2_u32(inl_dev->inb_sa_sz);

                lf_cfg->ipsec_cfg0.tag_const = 0;
                lf_cfg->ipsec_cfg0.tt = SSO_TT_ORDERED;
        } else {
                lf_cfg->enable = 0;
        }

        return mbox_process(mbox);
}

static int
nix_inl_cpt_setup(struct nix_inl_dev *inl_dev)
{
        struct roc_cpt_lf *lf = &inl_dev->cpt_lf;
        struct dev *dev = &inl_dev->dev;
        uint8_t eng_grpmask;
        int rc;

        if (!inl_dev->attach_cptlf)
                return 0;

        /* Alloc CPT LF */
        eng_grpmask = (1ULL << ROC_CPT_DFLT_ENG_GRP_SE |
                       1ULL << ROC_CPT_DFLT_ENG_GRP_SE_IE |
                       1ULL << ROC_CPT_DFLT_ENG_GRP_AE);
        rc = cpt_lfs_alloc(dev, eng_grpmask, RVU_BLOCK_ADDR_CPT0, false);
        if (rc) {
                plt_err("Failed to alloc CPT LF resources, rc=%d", rc);
                return rc;
        }

        /* Setup CPT LF for submitting control opcode */
        lf = &inl_dev->cpt_lf;
        lf->lf_id = 0;
        lf->nb_desc = 0; /* Set to default */
        lf->dev = &inl_dev->dev;
        lf->msixoff = inl_dev->cpt_msixoff;
        lf->pci_dev = inl_dev->pci_dev;

        rc = cpt_lf_init(lf);
        if (rc) {
                plt_err("Failed to initialize CPT LF, rc=%d", rc);
                goto lf_free;
        }

        roc_cpt_iq_enable(lf);
        return 0;
lf_free:
        rc |= cpt_lfs_free(dev);
        return rc;
}

static int
nix_inl_cpt_release(struct nix_inl_dev *inl_dev)
{
        struct roc_cpt_lf *lf = &inl_dev->cpt_lf;
        struct dev *dev = &inl_dev->dev;
        int rc, ret = 0;

        if (!inl_dev->attach_cptlf)
                return 0;

        /* Cleanup CPT LF queue */
        cpt_lf_fini(lf);

        /* Free LF resources */
        rc = cpt_lfs_free(dev);
        if (rc)
                plt_err("Failed to free CPT LF resources, rc=%d", rc);
        ret |= rc;

        /* Detach LF */
        rc = cpt_lfs_detach(dev);
        if (rc)
                plt_err("Failed to detach CPT LF, rc=%d", rc);
        ret |= rc;

        return ret;
}

static int
nix_inl_sso_setup(struct nix_inl_dev *inl_dev)
{
        struct sso_lf_alloc_rsp *sso_rsp;
        struct dev *dev = &inl_dev->dev;
        uint16_t hwgrp[1] = {0};
        int rc;

        /* Alloc SSOW LF */
        rc = sso_lf_alloc(dev, SSO_LF_TYPE_HWS, 1, NULL);
        if (rc) {
                plt_err("Failed to alloc SSO HWS, rc=%d", rc);
                return rc;
        }

        /* Alloc HWGRP LF */
        rc = sso_lf_alloc(dev, SSO_LF_TYPE_HWGRP, 1, (void **)&sso_rsp);
        if (rc) {
                plt_err("Failed to alloc SSO HWGRP, rc=%d", rc);
                goto free_ssow;
        }

        inl_dev->xaq_buf_size = sso_rsp->xaq_buf_size;
        inl_dev->xae_waes = sso_rsp->xaq_wq_entries;
        inl_dev->iue = sso_rsp->in_unit_entries;

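        /* Size the XAQ for the in-unit entries to start with; it is regrown
         * as packet pools are registered (see roc_nix_inl_dev_xaq_realloc()).
         */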
        inl_dev->nb_xae = inl_dev->iue;
        rc = sso_hwgrp_init_xaq_aura(dev, &inl_dev->xaq, inl_dev->nb_xae,
                                     inl_dev->xae_waes, inl_dev->xaq_buf_size,
                                     1);
        if (rc) {
                plt_err("Failed to alloc SSO XAQ aura, rc=%d", rc);
                goto free_sso;
        }

        /* Setup xaq for hwgrps */
        rc = sso_hwgrp_alloc_xaq(dev, inl_dev->xaq.aura_handle, 1);
        if (rc) {
                plt_err("Failed to setup hwgrp xaq aura, rc=%d", rc);
                goto destroy_pool;
        }

        /* Register SSO, SSOW error and work IRQs */
        rc = nix_inl_sso_register_irqs(inl_dev);
        if (rc) {
                plt_err("Failed to register SSO IRQs, rc=%d", rc);
                goto release_xaq;
        }

        /* Setup hwgrp->hws link */
        sso_hws_link_modify(0, inl_dev->ssow_base, NULL, hwgrp, 1, true);

        /* Enable HWGRP */
        plt_write64(0x1, inl_dev->sso_base + SSO_LF_GGRP_QCTL);

        return 0;

release_xaq:
        sso_hwgrp_release_xaq(&inl_dev->dev, 1);
destroy_pool:
        sso_hwgrp_free_xaq_aura(dev, &inl_dev->xaq, 0);
free_sso:
        sso_lf_free(dev, SSO_LF_TYPE_HWGRP, 1);
free_ssow:
        sso_lf_free(dev, SSO_LF_TYPE_HWS, 1);
        return rc;
}

static int
nix_inl_sso_release(struct nix_inl_dev *inl_dev)
{
        uint16_t hwgrp[1] = {0};

        /* Disable HWGRP */
        plt_write64(0, inl_dev->sso_base + SSO_LF_GGRP_QCTL);

        /* Unregister SSO/SSOW IRQs */
        nix_inl_sso_unregister_irqs(inl_dev);

        /* Unlink hws */
        sso_hws_link_modify(0, inl_dev->ssow_base, NULL, hwgrp, 1, false);

        /* Release XAQ aura */
        sso_hwgrp_release_xaq(&inl_dev->dev, 1);

        /* Free SSO, SSOW LFs */
        sso_lf_free(&inl_dev->dev, SSO_LF_TYPE_HWS, 1);
        sso_lf_free(&inl_dev->dev, SSO_LF_TYPE_HWGRP, 1);

        /* Free the XAQ aura */
        sso_hwgrp_free_xaq_aura(&inl_dev->dev, &inl_dev->xaq, 0);

        return 0;
}

static int
nix_inl_nix_setup(struct nix_inl_dev *inl_dev)
{
        uint32_t ipsec_in_min_spi = inl_dev->ipsec_in_min_spi;
        uint32_t ipsec_in_max_spi = inl_dev->ipsec_in_max_spi;
        struct dev *dev = &inl_dev->dev;
        struct mbox *mbox = dev->mbox;
        struct nix_lf_alloc_rsp *rsp;
        struct nix_lf_alloc_req *req;
        struct nix_hw_info *hw_info;
        uint64_t max_sa, i;
        size_t inb_sa_sz;
        int rc = -ENOSPC;
        void *sa;

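        /* Round the SPI range up to a power of two so an SPI can be mapped
         * to an SA index with a simple mask (inb_spi_mask below).
         */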
        max_sa = plt_align32pow2(ipsec_in_max_spi - ipsec_in_min_spi + 1);

        /* Alloc NIX LF needed for single RQ */
        req = mbox_alloc_msg_nix_lf_alloc(mbox);
        if (req == NULL)
                return rc;
        req->rq_cnt = 1;
        req->sq_cnt = 1;
        req->cq_cnt = 1;
        /* XQESZ is W16 */
        req->xqe_sz = NIX_XQESZ_W16;
        /* RSS size does not matter as this RQ is only for UCAST_IPSEC action */
        req->rss_sz = ROC_NIX_RSS_RETA_SZ_64;
        req->rss_grps = ROC_NIX_RSS_GRPS;
        req->npa_func = idev_npa_pffunc_get();
        req->sso_func = dev->pf_func;
        req->rx_cfg = NIX_INL_LF_RX_CFG;
        req->flags = NIX_LF_RSS_TAG_LSB_AS_ADDER;

        if (roc_model_is_cn10ka_a0() || roc_model_is_cnf10ka_a0() ||
            roc_model_is_cnf10kb_a0())
                req->rx_cfg &= ~ROC_NIX_LF_RX_CFG_DROP_RE;

        rc = mbox_process_msg(mbox, (void *)&rsp);
        if (rc) {
                plt_err("Failed to alloc lf, rc=%d", rc);
                return rc;
        }

        inl_dev->lf_tx_stats = rsp->lf_tx_stats;
        inl_dev->lf_rx_stats = rsp->lf_rx_stats;
        inl_dev->qints = rsp->qints;
        inl_dev->cints = rsp->cints;

        /* Get VWQE info if supported */
        if (roc_model_is_cn10k()) {
                mbox_alloc_msg_nix_get_hw_info(mbox);
                rc = mbox_process_msg(mbox, (void *)&hw_info);
                if (rc) {
                        plt_err("Failed to get HW info, rc=%d", rc);
                        goto lf_free;
                }
                inl_dev->vwqe_interval = hw_info->vwqe_delay;
        }

        /* Register NIX interrupts */
        rc = nix_inl_nix_register_irqs(inl_dev);
        if (rc) {
                plt_err("Failed to register NIX IRQs, rc=%d", rc);
                goto lf_free;
        }

        /* CN9K SA is different */
        if (roc_model_is_cn9k())
                inb_sa_sz = ROC_NIX_INL_ONF_IPSEC_INB_SA_SZ;
        else
                inb_sa_sz = ROC_NIX_INL_OT_IPSEC_INB_SA_SZ;

        /* Alloc contiguous memory for Inbound SAs */
        inl_dev->inb_sa_sz = inb_sa_sz;
        inl_dev->inb_spi_mask = max_sa - 1;
        inl_dev->inb_sa_base = plt_zmalloc(inb_sa_sz * max_sa,
                                           ROC_NIX_INL_SA_BASE_ALIGN);
        if (!inl_dev->inb_sa_base) {
                plt_err("Failed to allocate memory for Inbound SA");
                rc = -ENOMEM;
                goto unregister_irqs;
        }

        if (roc_model_is_cn10k()) {
                for (i = 0; i < max_sa; i++) {
                        sa = ((uint8_t *)inl_dev->inb_sa_base) +
                             (i * inb_sa_sz);
                        roc_ot_ipsec_inb_sa_init(sa, true);
                }
        }
        /* Setup device specific inb SA table */
        rc = nix_inl_nix_ipsec_cfg(inl_dev, true);
        if (rc) {
                plt_err("Failed to setup NIX Inbound SA conf, rc=%d", rc);
                goto free_mem;
        }

        return 0;
free_mem:
        plt_free(inl_dev->inb_sa_base);
        inl_dev->inb_sa_base = NULL;
unregister_irqs:
        nix_inl_nix_unregister_irqs(inl_dev);
lf_free:
        mbox_alloc_msg_nix_lf_free(mbox);
        rc |= mbox_process(mbox);
        return rc;
}

static int
nix_inl_nix_release(struct nix_inl_dev *inl_dev)
{
        struct dev *dev = &inl_dev->dev;
        struct mbox *mbox = dev->mbox;
        struct nix_lf_free_req *req;
        struct ndc_sync_op *ndc_req;
        int rc;

        /* Disable Inbound processing */
        rc = nix_inl_nix_ipsec_cfg(inl_dev, false);
        if (rc)
                plt_err("Failed to disable Inbound IPSec, rc=%d", rc);

        /* Sync NDC-NIX for LF */
        ndc_req = mbox_alloc_msg_ndc_sync_op(mbox);
        if (ndc_req == NULL)
                return -ENOSPC;
        ndc_req->nix_lf_rx_sync = 1;
        rc = mbox_process(mbox);
        if (rc)
                plt_err("Error on NDC-NIX-RX LF sync, rc %d", rc);

        /* Unregister IRQs */
        nix_inl_nix_unregister_irqs(inl_dev);

        /* By default all associated mcam rules are deleted */
        req = mbox_alloc_msg_nix_lf_free(mbox);
        if (req == NULL)
                return -ENOSPC;

        return mbox_process(mbox);
}

static int
nix_inl_lf_attach(struct nix_inl_dev *inl_dev)
{
        struct msix_offset_rsp *msix_rsp;
        struct dev *dev = &inl_dev->dev;
        struct mbox *mbox = dev->mbox;
        struct rsrc_attach_req *req;
        uint64_t nix_blkaddr;
        int rc = -ENOSPC;

        req = mbox_alloc_msg_attach_resources(mbox);
        if (req == NULL)
                return rc;
        req->modify = true;
        /* Attach 1 NIXLF, SSO HWS and SSO HWGRP */
        req->nixlf = true;
        req->ssow = 1;
        req->sso = 1;
        if (inl_dev->attach_cptlf) {
                req->cptlfs = 1;
                req->cpt_blkaddr = RVU_BLOCK_ADDR_CPT0;
        }

        rc = mbox_process(dev->mbox);
        if (rc)
                return rc;

        /* Get MSIX vector offsets */
        mbox_alloc_msg_msix_offset(mbox);
        rc = mbox_process_msg(dev->mbox, (void **)&msix_rsp);
        if (rc)
                return rc;

        inl_dev->nix_msixoff = msix_rsp->nix_msixoff;
        inl_dev->ssow_msixoff = msix_rsp->ssow_msixoff[0];
        inl_dev->sso_msixoff = msix_rsp->sso_msixoff[0];
        inl_dev->cpt_msixoff = msix_rsp->cptlf_msixoff[0];

        nix_blkaddr = nix_get_blkaddr(dev);
        inl_dev->is_nix1 = (nix_blkaddr == RVU_BLOCK_ADDR_NIX1);

        /* Update base addresses for LFs */
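        /* Per the RVU BAR2 layout used throughout this driver, each block's
         * LF region sits at (block address << 20) from the BAR2 base.
         */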
        inl_dev->nix_base = dev->bar2 + (nix_blkaddr << 20);
        inl_dev->ssow_base = dev->bar2 + (RVU_BLOCK_ADDR_SSOW << 20);
        inl_dev->sso_base = dev->bar2 + (RVU_BLOCK_ADDR_SSO << 20);
        inl_dev->cpt_base = dev->bar2 + (RVU_BLOCK_ADDR_CPT0 << 20);

        return 0;
}

static int
nix_inl_lf_detach(struct nix_inl_dev *inl_dev)
{
        struct dev *dev = &inl_dev->dev;
        struct mbox *mbox = dev->mbox;
        struct rsrc_detach_req *req;
        int rc = -ENOSPC;

        req = mbox_alloc_msg_detach_resources(mbox);
        if (req == NULL)
                return rc;
        req->partial = true;
        req->nixlf = true;
        req->ssow = true;
        req->sso = true;
        req->cptlfs = !!inl_dev->attach_cptlf;

        return mbox_process(dev->mbox);
}

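/* Poll until the HWGRP has no pending work, i.e. both the XAQ count and the
 * admission queue count read zero, giving up after ~3 seconds.
 */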
static int
nix_inl_dev_wait_for_sso_empty(struct nix_inl_dev *inl_dev)
{
        uintptr_t sso_base = inl_dev->sso_base;
        int wait_ms = 3000;

        while (wait_ms > 0) {
                /* Break when empty */
                if (!plt_read64(sso_base + SSO_LF_GGRP_XAQ_CNT) &&
                    !plt_read64(sso_base + SSO_LF_GGRP_AQ_CNT))
                        return 0;

                plt_delay_us(1000);
                wait_ms -= 1;
        }

        return -ETIMEDOUT;
}

int
roc_nix_inl_dev_xaq_realloc(uint64_t aura_handle)
{
        struct idev_cfg *idev = idev_get_cfg();
        struct nix_inl_dev *inl_dev;
        int rc, i;

        if (idev == NULL)
                return 0;

        inl_dev = idev->nix_inl_dev;
        /* Nothing to do if no inline device */
        if (!inl_dev)
                return 0;

        if (!aura_handle) {
                inl_dev->nb_xae = inl_dev->iue;
                goto no_pool;
        }

        /* Check if aura is already considered */
        for (i = 0; i < inl_dev->pkt_pools_cnt; i++) {
                if (inl_dev->pkt_pools[i] == aura_handle)
                        return 0;
        }

no_pool:
        /* Disable RQ if enabled */
        if (inl_dev->rq_refs) {
                rc = nix_rq_ena_dis(&inl_dev->dev, &inl_dev->rq, false);
                if (rc) {
                        plt_err("Failed to disable inline dev RQ, rc=%d", rc);
                        return rc;
                }
        }

        /* Wait for events to be removed */
        rc = nix_inl_dev_wait_for_sso_empty(inl_dev);
        if (rc) {
                plt_err("Timeout waiting for inline device event cleanup");
                goto exit;
        }

        /* Disable HWGRP */
        plt_write64(0, inl_dev->sso_base + SSO_LF_GGRP_QCTL);

        inl_dev->pkt_pools_cnt++;
        inl_dev->pkt_pools =
                plt_realloc(inl_dev->pkt_pools,
                            sizeof(uint64_t) * inl_dev->pkt_pools_cnt, 0);
        if (!inl_dev->pkt_pools)
                inl_dev->pkt_pools_cnt = 0;
        else
                inl_dev->pkt_pools[inl_dev->pkt_pools_cnt - 1] = aura_handle;
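        /* Budget one XAE per buffer the new pool can hold so the XAQ can
         * absorb every packet from this aura as a work entry.
         */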
        inl_dev->nb_xae += roc_npa_aura_op_limit_get(aura_handle);

        /* Realloc XAQ aura */
        rc = sso_hwgrp_init_xaq_aura(&inl_dev->dev, &inl_dev->xaq,
                                     inl_dev->nb_xae, inl_dev->xae_waes,
                                     inl_dev->xaq_buf_size, 1);
        if (rc) {
                plt_err("Failed to reinitialize xaq aura, rc=%d", rc);
                return rc;
        }

        /* Setup xaq for hwgrps */
        rc = sso_hwgrp_alloc_xaq(&inl_dev->dev, inl_dev->xaq.aura_handle, 1);
        if (rc) {
                plt_err("Failed to setup hwgrp xaq aura, rc=%d", rc);
                return rc;
        }

        /* Enable HWGRP */
        plt_write64(0x1, inl_dev->sso_base + SSO_LF_GGRP_QCTL);

exit:
        /* Re-enable RQ */
        if (inl_dev->rq_refs) {
                rc = nix_rq_ena_dis(&inl_dev->dev, &inl_dev->rq, true);
                if (rc)
                        plt_err("Failed to enable inline dev RQ, rc=%d", rc);
        }

        return rc;
}

int
roc_nix_inl_dev_init(struct roc_nix_inl_dev *roc_inl_dev)
{
        struct plt_pci_device *pci_dev;
        struct nix_inl_dev *inl_dev;
        struct idev_cfg *idev;
        int rc;

        pci_dev = roc_inl_dev->pci_dev;

        /* Skip probe if already done */
        idev = idev_get_cfg();
        if (idev == NULL)
                return -ENOTSUP;

        if (idev->nix_inl_dev) {
                plt_info("Skipping device %s, inline device already probed",
                         pci_dev->name);
                return -EEXIST;
        }

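        /* The private inline dev state is carved out of the roc struct's
         * reserved area; the static assert guards against it outgrowing
         * that space.
         */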
        PLT_STATIC_ASSERT(sizeof(struct nix_inl_dev) <= ROC_NIX_INL_MEM_SZ);

        inl_dev = (struct nix_inl_dev *)roc_inl_dev->reserved;
        memset(inl_dev, 0, sizeof(*inl_dev));

        inl_dev->pci_dev = pci_dev;
        inl_dev->ipsec_in_min_spi = roc_inl_dev->ipsec_in_min_spi;
        inl_dev->ipsec_in_max_spi = roc_inl_dev->ipsec_in_max_spi;
        inl_dev->selftest = roc_inl_dev->selftest;
        inl_dev->is_multi_channel = roc_inl_dev->is_multi_channel;
        inl_dev->channel = roc_inl_dev->channel;
        inl_dev->chan_mask = roc_inl_dev->chan_mask;
        inl_dev->attach_cptlf = roc_inl_dev->attach_cptlf;
        inl_dev->wqe_skip = roc_inl_dev->wqe_skip;
        inl_dev->spb_drop_pc = NIX_AURA_DROP_PC_DFLT;
        inl_dev->lpb_drop_pc = NIX_AURA_DROP_PC_DFLT;

        if (roc_inl_dev->spb_drop_pc)
                inl_dev->spb_drop_pc = roc_inl_dev->spb_drop_pc;
        if (roc_inl_dev->lpb_drop_pc)
                inl_dev->lpb_drop_pc = roc_inl_dev->lpb_drop_pc;

        /* Initialize base device */
        rc = dev_init(&inl_dev->dev, pci_dev);
        if (rc) {
                plt_err("Failed to init roc device");
                goto error;
        }

        /* Attach LF resources */
        rc = nix_inl_lf_attach(inl_dev);
        if (rc) {
                plt_err("Failed to attach LF resources, rc=%d", rc);
                goto dev_cleanup;
        }

        /* Setup NIX LF */
        rc = nix_inl_nix_setup(inl_dev);
        if (rc)
                goto lf_detach;

        /* Setup SSO LF */
        rc = nix_inl_sso_setup(inl_dev);
        if (rc)
                goto nix_release;

        /* Setup CPT LF */
        rc = nix_inl_cpt_setup(inl_dev);
        if (rc)
                goto sso_release;

        /* Perform selftest if asked for */
        if (inl_dev->selftest) {
                rc = nix_inl_selftest();
                if (rc)
                        goto cpt_release;
        }

        idev->nix_inl_dev = inl_dev;

        return 0;
cpt_release:
        rc |= nix_inl_cpt_release(inl_dev);
sso_release:
        rc |= nix_inl_sso_release(inl_dev);
nix_release:
        rc |= nix_inl_nix_release(inl_dev);
lf_detach:
        rc |= nix_inl_lf_detach(inl_dev);
dev_cleanup:
        rc |= dev_fini(&inl_dev->dev, pci_dev);
error:
        return rc;
}

int
roc_nix_inl_dev_fini(struct roc_nix_inl_dev *roc_inl_dev)
{
        struct plt_pci_device *pci_dev;
        struct nix_inl_dev *inl_dev;
        struct idev_cfg *idev;
        int rc;

        idev = idev_get_cfg();
        if (idev == NULL)
                return 0;

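        /* Only tear down if this roc device is the registered inline device,
         * i.e. the idev pointer aliases this device's reserved area.
         */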
        if (!idev->nix_inl_dev ||
            PLT_PTR_DIFF(roc_inl_dev->reserved, idev->nix_inl_dev))
                return 0;

        inl_dev = idev->nix_inl_dev;
        pci_dev = inl_dev->pci_dev;

        /* Flush Inbound CTX cache entries */
        nix_inl_cpt_ctx_cache_sync(inl_dev);

        /* Release SSO */
        rc = nix_inl_sso_release(inl_dev);

        /* Release NIX */
        rc |= nix_inl_nix_release(inl_dev);

        /* Detach LFs */
        rc |= nix_inl_lf_detach(inl_dev);

        /* Cleanup mbox */
        rc |= dev_fini(&inl_dev->dev, pci_dev);
        if (rc)
                return rc;

        idev->nix_inl_dev = NULL;
        return 0;
}