common/cnxk: fix SQ flush sequence
dpdk.git: drivers/common/cnxk/roc_nix_inl_dev.c
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(C) 2021 Marvell.
 */

#include <unistd.h>

#include "roc_api.h"
#include "roc_priv.h"

#define NIX_AURA_DROP_PC_DFLT 40

/* Default Rx Config for Inline NIX LF */
#define NIX_INL_LF_RX_CFG                                                      \
        (ROC_NIX_LF_RX_CFG_DROP_RE | ROC_NIX_LF_RX_CFG_L2_LEN_ERR |            \
         ROC_NIX_LF_RX_CFG_IP6_UDP_OPT | ROC_NIX_LF_RX_CFG_DIS_APAD |          \
         ROC_NIX_LF_RX_CFG_CSUM_IL4 | ROC_NIX_LF_RX_CFG_CSUM_OL4 |             \
         ROC_NIX_LF_RX_CFG_LEN_IL4 | ROC_NIX_LF_RX_CFG_LEN_IL3 |               \
         ROC_NIX_LF_RX_CFG_LEN_OL4 | ROC_NIX_LF_RX_CFG_LEN_OL3)

extern uint32_t soft_exp_consumer_cnt;
static bool soft_exp_poll_thread_exit = true;

uint16_t
nix_inl_dev_pffunc_get(void)
{
        struct idev_cfg *idev = idev_get_cfg();
        struct nix_inl_dev *inl_dev;

        if (idev != NULL) {
                inl_dev = idev->nix_inl_dev;
                if (inl_dev)
                        return inl_dev->dev.pf_func;
        }
        return 0;
}

uint16_t
roc_nix_inl_dev_pffunc_get(void)
{
        return nix_inl_dev_pffunc_get();
}

static void
nix_inl_selftest_work_cb(uint64_t *gw, void *args, uint32_t soft_exp_event)
{
        uintptr_t work = gw[1];

        (void)soft_exp_event;
        *((uintptr_t *)args + (gw[0] & 0x1)) = work;

        plt_atomic_thread_fence(__ATOMIC_ACQ_REL);
}

static int
nix_inl_selftest(void)
{
        struct idev_cfg *idev = idev_get_cfg();
        roc_nix_inl_sso_work_cb_t save_cb;
        static uintptr_t work_arr[2];
        struct nix_inl_dev *inl_dev;
        void *save_cb_args;
        uint64_t add_work0;
        int rc = 0;

        if (idev == NULL)
                return -ENOTSUP;

        inl_dev = idev->nix_inl_dev;
        if (inl_dev == NULL)
                return -ENOTSUP;

        plt_info("Performing nix inl self test");

        /* Save and update cb to test cb */
        save_cb = inl_dev->work_cb;
        save_cb_args = inl_dev->cb_args;
        inl_dev->work_cb = nix_inl_selftest_work_cb;
        inl_dev->cb_args = work_arr;

        plt_atomic_thread_fence(__ATOMIC_ACQ_REL);

#define WORK_MAGIC1 0x335577ff0
#define WORK_MAGIC2 0xdeadbeef0

        /* Add work */
        add_work0 = ((uint64_t)(SSO_TT_ORDERED) << 32) | 0x0;
        roc_store_pair(add_work0, WORK_MAGIC1, inl_dev->sso_base);
        add_work0 = ((uint64_t)(SSO_TT_ORDERED) << 32) | 0x1;
        roc_store_pair(add_work0, WORK_MAGIC2, inl_dev->sso_base);

        plt_delay_ms(10000);

        /* Check if we got expected work */
        if (work_arr[0] != WORK_MAGIC1 || work_arr[1] != WORK_MAGIC2) {
                plt_err("Failed to get expected work, [0]=%p [1]=%p",
                        (void *)work_arr[0], (void *)work_arr[1]);
                rc = -EFAULT;
                goto exit;
        }

        plt_info("Work, [0]=%p [1]=%p", (void *)work_arr[0],
                 (void *)work_arr[1]);

exit:
        /* Restore state */
        inl_dev->work_cb = save_cb;
        inl_dev->cb_args = save_cb_args;
        return rc;
}

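/* Ask CPT to synchronize its SA context cache via mailbox; used during
 * teardown so that stale inbound CTX entries are flushed before the
 * device is released.
 */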
static int
nix_inl_cpt_ctx_cache_sync(struct nix_inl_dev *inl_dev)
{
        struct mbox *mbox = (&inl_dev->dev)->mbox;
        struct msg_req *req;

        req = mbox_alloc_msg_cpt_ctx_cache_sync(mbox);
        if (req == NULL)
                return -ENOSPC;

        return mbox_process(mbox);
}

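/* Enable or disable inline inbound IPsec on the NIX LF. On enable, the
 * SA table base, SA index width (derived from the SPI mask), maximum SA
 * index, per-model SA size and maximum frame length are programmed via
 * the nix_inline_ipsec_lf_cfg mailbox message.
 */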
static int
nix_inl_nix_ipsec_cfg(struct nix_inl_dev *inl_dev, bool ena)
{
        struct nix_inline_ipsec_lf_cfg *lf_cfg;
        struct mbox *mbox = (&inl_dev->dev)->mbox;
        uint64_t max_sa;
        uint32_t sa_w;

        lf_cfg = mbox_alloc_msg_nix_inline_ipsec_lf_cfg(mbox);
        if (lf_cfg == NULL)
                return -ENOSPC;

        if (ena) {
                max_sa = inl_dev->inb_spi_mask + 1;
                sa_w = plt_log2_u32(max_sa);

                lf_cfg->enable = 1;
                lf_cfg->sa_base_addr = (uintptr_t)inl_dev->inb_sa_base;
                lf_cfg->ipsec_cfg1.sa_idx_w = sa_w;
                /* CN9K SA size is different */
                if (roc_model_is_cn9k())
                        lf_cfg->ipsec_cfg0.lenm1_max = NIX_CN9K_MAX_HW_FRS - 1;
                else
                        lf_cfg->ipsec_cfg0.lenm1_max = NIX_RPM_MAX_HW_FRS - 1;
                lf_cfg->ipsec_cfg1.sa_idx_max = max_sa - 1;
                lf_cfg->ipsec_cfg0.sa_pow2_size =
                        plt_log2_u32(inl_dev->inb_sa_sz);

                lf_cfg->ipsec_cfg0.tag_const = 0;
                lf_cfg->ipsec_cfg0.tt = SSO_TT_ORDERED;
        } else {
                lf_cfg->enable = 0;
        }

        return mbox_process(mbox);
}

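/* Allocate and initialize the single CPT LF attached to the inline
 * device (only when attach_cptlf is set). This queue is used for
 * submitting control opcodes rather than datapath traffic.
 */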
static int
nix_inl_cpt_setup(struct nix_inl_dev *inl_dev)
{
        struct roc_cpt_lf *lf = &inl_dev->cpt_lf;
        struct dev *dev = &inl_dev->dev;
        uint8_t eng_grpmask;
        int rc;

        if (!inl_dev->attach_cptlf)
                return 0;

        /* Alloc CPT LF */
        eng_grpmask = (1ULL << ROC_CPT_DFLT_ENG_GRP_SE |
                       1ULL << ROC_CPT_DFLT_ENG_GRP_SE_IE |
                       1ULL << ROC_CPT_DFLT_ENG_GRP_AE);
        rc = cpt_lfs_alloc(dev, eng_grpmask, RVU_BLOCK_ADDR_CPT0, false);
        if (rc) {
                plt_err("Failed to alloc CPT LF resources, rc=%d", rc);
                return rc;
        }

        /* Setup CPT LF for submitting control opcode */
        lf = &inl_dev->cpt_lf;
        lf->lf_id = 0;
        lf->nb_desc = 0; /* Set to default */
        lf->dev = &inl_dev->dev;
        lf->msixoff = inl_dev->cpt_msixoff;
        lf->pci_dev = inl_dev->pci_dev;

        rc = cpt_lf_init(lf);
        if (rc) {
                plt_err("Failed to initialize CPT LF, rc=%d", rc);
                goto lf_free;
        }

        roc_cpt_iq_enable(lf);
        return 0;
lf_free:
        rc |= cpt_lfs_free(dev);
        return rc;
}

static int
nix_inl_cpt_release(struct nix_inl_dev *inl_dev)
{
        struct roc_cpt_lf *lf = &inl_dev->cpt_lf;
        struct dev *dev = &inl_dev->dev;
        int rc, ret = 0;

        if (!inl_dev->attach_cptlf)
                return 0;

        /* Cleanup CPT LF queue */
        cpt_lf_fini(lf);

        /* Free LF resources */
        rc = cpt_lfs_free(dev);
        if (rc)
                plt_err("Failed to free CPT LF resources, rc=%d", rc);
        ret |= rc;

        /* Detach LF */
        rc = cpt_lfs_detach(dev);
        if (rc)
                plt_err("Failed to detach CPT LF, rc=%d", rc);
        ret |= rc;

        return ret;
}

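/* Bring up the SSO side of the inline device: allocate one HWS and one
 * HWGRP LF, size the XAQ aura from the in-unit entry count, register
 * SSO/SSOW error and work interrupts, link the HWGRP to the HWS and
 * finally enable the HWGRP.
 */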
static int
nix_inl_sso_setup(struct nix_inl_dev *inl_dev)
{
        struct sso_lf_alloc_rsp *sso_rsp;
        struct dev *dev = &inl_dev->dev;
        uint16_t hwgrp[1] = {0};
        int rc;

        /* Alloc SSOW LF */
        rc = sso_lf_alloc(dev, SSO_LF_TYPE_HWS, 1, NULL);
        if (rc) {
                plt_err("Failed to alloc SSO HWS, rc=%d", rc);
                return rc;
        }

        /* Alloc HWGRP LF */
        rc = sso_lf_alloc(dev, SSO_LF_TYPE_HWGRP, 1, (void **)&sso_rsp);
        if (rc) {
                plt_err("Failed to alloc SSO HWGRP, rc=%d", rc);
                goto free_ssow;
        }

        inl_dev->xaq_buf_size = sso_rsp->xaq_buf_size;
        inl_dev->xae_waes = sso_rsp->xaq_wq_entries;
        inl_dev->iue = sso_rsp->in_unit_entries;

        inl_dev->nb_xae = inl_dev->iue;
        rc = sso_hwgrp_init_xaq_aura(dev, &inl_dev->xaq, inl_dev->nb_xae,
                                     inl_dev->xae_waes, inl_dev->xaq_buf_size,
                                     1);
        if (rc) {
                plt_err("Failed to alloc SSO XAQ aura, rc=%d", rc);
                goto free_sso;
        }

        /* Setup xaq for hwgrps */
        rc = sso_hwgrp_alloc_xaq(dev, inl_dev->xaq.aura_handle, 1);
        if (rc) {
                plt_err("Failed to setup hwgrp xaq aura, rc=%d", rc);
                goto destroy_pool;
        }

        /* Register SSO, SSOW error and work irq's */
        rc = nix_inl_sso_register_irqs(inl_dev);
        if (rc) {
                plt_err("Failed to register sso irq's, rc=%d", rc);
                goto release_xaq;
        }

        /* Setup hwgrp->hws link */
        sso_hws_link_modify(0, inl_dev->ssow_base, NULL, hwgrp, 1, true);

        /* Enable HWGRP */
        plt_write64(0x1, inl_dev->sso_base + SSO_LF_GGRP_QCTL);

        return 0;

release_xaq:
        sso_hwgrp_release_xaq(&inl_dev->dev, 1);
destroy_pool:
        sso_hwgrp_free_xaq_aura(dev, &inl_dev->xaq, 0);
free_sso:
        sso_lf_free(dev, SSO_LF_TYPE_HWGRP, 1);
free_ssow:
        sso_lf_free(dev, SSO_LF_TYPE_HWS, 1);
        return rc;
}

static int
nix_inl_sso_release(struct nix_inl_dev *inl_dev)
{
        uint16_t hwgrp[1] = {0};

        /* Disable HWGRP */
        plt_write64(0, inl_dev->sso_base + SSO_LF_GGRP_QCTL);

        /* Unregister SSO/SSOW IRQ's */
        nix_inl_sso_unregister_irqs(inl_dev);

        /* Unlink hws */
        sso_hws_link_modify(0, inl_dev->ssow_base, NULL, hwgrp, 1, false);

        /* Release XAQ aura */
        sso_hwgrp_release_xaq(&inl_dev->dev, 1);

        /* Free SSO, SSOW LF's */
        sso_lf_free(&inl_dev->dev, SSO_LF_TYPE_HWS, 1);
        sso_lf_free(&inl_dev->dev, SSO_LF_TYPE_HWGRP, 1);

        /* Free the XAQ aura */
        sso_hwgrp_free_xaq_aura(&inl_dev->dev, &inl_dev->xaq, 0);

        return 0;
}

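/* Bring up the NIX LF used by the inline device: a single RQ/SQ/CQ is
 * allocated, interrupts are registered and a contiguous inbound SA
 * table is carved out. The table holds a power-of-two number of entries
 * covering the configured SPI range, with per-model SA size (ONF SA on
 * CN9K, OT SA otherwise), and is then advertised to hardware through
 * nix_inl_nix_ipsec_cfg().
 */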
static int
nix_inl_nix_setup(struct nix_inl_dev *inl_dev)
{
        uint32_t ipsec_in_min_spi = inl_dev->ipsec_in_min_spi;
        uint32_t ipsec_in_max_spi = inl_dev->ipsec_in_max_spi;
        struct dev *dev = &inl_dev->dev;
        struct mbox *mbox = dev->mbox;
        struct nix_lf_alloc_rsp *rsp;
        struct nix_lf_alloc_req *req;
        struct nix_hw_info *hw_info;
        uint64_t max_sa, i;
        size_t inb_sa_sz;
        int rc = -ENOSPC;
        void *sa;

        max_sa = plt_align32pow2(ipsec_in_max_spi - ipsec_in_min_spi + 1);

        /* Alloc NIX LF needed for single RQ */
        req = mbox_alloc_msg_nix_lf_alloc(mbox);
        if (req == NULL)
                return rc;
        req->rq_cnt = 1;
        req->sq_cnt = 1;
        req->cq_cnt = 1;
        /* XQESZ is W16 */
        req->xqe_sz = NIX_XQESZ_W16;
        /* RSS size does not matter as this RQ is only for UCAST_IPSEC action */
        req->rss_sz = ROC_NIX_RSS_RETA_SZ_64;
        req->rss_grps = ROC_NIX_RSS_GRPS;
        req->npa_func = idev_npa_pffunc_get();
        req->sso_func = dev->pf_func;
        req->rx_cfg = NIX_INL_LF_RX_CFG;
        req->flags = NIX_LF_RSS_TAG_LSB_AS_ADDER;

        if (roc_errata_nix_has_no_drop_re())
                req->rx_cfg &= ~ROC_NIX_LF_RX_CFG_DROP_RE;

        rc = mbox_process_msg(mbox, (void *)&rsp);
        if (rc) {
                plt_err("Failed to alloc lf, rc=%d", rc);
                return rc;
        }

        inl_dev->lf_tx_stats = rsp->lf_tx_stats;
        inl_dev->lf_rx_stats = rsp->lf_rx_stats;
        inl_dev->qints = rsp->qints;
        inl_dev->cints = rsp->cints;

        /* Get VWQE info if supported */
        if (roc_model_is_cn10k()) {
                mbox_alloc_msg_nix_get_hw_info(mbox);
                rc = mbox_process_msg(mbox, (void *)&hw_info);
                if (rc) {
                        plt_err("Failed to get HW info, rc=%d", rc);
                        goto lf_free;
                }
                inl_dev->vwqe_interval = hw_info->vwqe_delay;
        }

        /* Register nix interrupts */
        rc = nix_inl_nix_register_irqs(inl_dev);
        if (rc) {
                plt_err("Failed to register nix irq's, rc=%d", rc);
                goto lf_free;
        }

        /* CN9K SA is different */
        if (roc_model_is_cn9k())
                inb_sa_sz = ROC_NIX_INL_ONF_IPSEC_INB_SA_SZ;
        else
                inb_sa_sz = ROC_NIX_INL_OT_IPSEC_INB_SA_SZ;

        /* Alloc contiguous memory for Inbound SA's */
        inl_dev->inb_sa_sz = inb_sa_sz;
        inl_dev->inb_spi_mask = max_sa - 1;
        inl_dev->inb_sa_base = plt_zmalloc(inb_sa_sz * max_sa,
                                           ROC_NIX_INL_SA_BASE_ALIGN);
        if (!inl_dev->inb_sa_base) {
                plt_err("Failed to allocate memory for Inbound SA");
                rc = -ENOMEM;
                goto unregister_irqs;
        }

        if (roc_model_is_cn10k()) {
                for (i = 0; i < max_sa; i++) {
                        sa = ((uint8_t *)inl_dev->inb_sa_base) +
                             (i * inb_sa_sz);
                        roc_ot_ipsec_inb_sa_init(sa, true);
                }
        }
        /* Setup device specific inb SA table */
        rc = nix_inl_nix_ipsec_cfg(inl_dev, true);
        if (rc) {
                plt_err("Failed to setup NIX Inbound SA conf, rc=%d", rc);
                goto free_mem;
        }

        return 0;
free_mem:
        plt_free(inl_dev->inb_sa_base);
        inl_dev->inb_sa_base = NULL;
unregister_irqs:
        nix_inl_nix_unregister_irqs(inl_dev);
lf_free:
        mbox_alloc_msg_nix_lf_free(mbox);
        rc |= mbox_process(mbox);
        return rc;
}

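/* Tear down the NIX LF: disable inline inbound processing, sync the
 * NDC-NIX Rx cache for the LF, unregister interrupts and free the LF
 * (which also removes any associated MCAM rules).
 */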
static int
nix_inl_nix_release(struct nix_inl_dev *inl_dev)
{
        struct dev *dev = &inl_dev->dev;
        struct mbox *mbox = dev->mbox;
        struct nix_lf_free_req *req;
        struct ndc_sync_op *ndc_req;
        int rc = -ENOSPC;

        /* Disable Inbound processing */
        rc = nix_inl_nix_ipsec_cfg(inl_dev, false);
        if (rc)
                plt_err("Failed to disable Inbound IPSec, rc=%d", rc);

        /* Sync NDC-NIX for LF */
        ndc_req = mbox_alloc_msg_ndc_sync_op(mbox);
        if (ndc_req == NULL)
                return rc;
        ndc_req->nix_lf_rx_sync = 1;
        rc = mbox_process(mbox);
        if (rc)
                plt_err("Error on NDC-NIX-RX LF sync, rc %d", rc);

        /* Unregister IRQs */
        nix_inl_nix_unregister_irqs(inl_dev);

        /* By default all associated mcam rules are deleted */
        req = mbox_alloc_msg_nix_lf_free(mbox);
        if (req == NULL)
                return -ENOSPC;

        return mbox_process(mbox);
}

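/* Attach the NIX, SSO, SSOW (and optionally CPT) LFs to this PF/VF,
 * fetch their MSIX vector offsets and compute the BAR2 base address of
 * each block from its RVU block address.
 */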
static int
nix_inl_lf_attach(struct nix_inl_dev *inl_dev)
{
        struct msix_offset_rsp *msix_rsp;
        struct dev *dev = &inl_dev->dev;
        struct mbox *mbox = dev->mbox;
        struct rsrc_attach_req *req;
        uint64_t nix_blkaddr;
        int rc = -ENOSPC;

        req = mbox_alloc_msg_attach_resources(mbox);
        if (req == NULL)
                return rc;
        req->modify = true;
        /* Attach 1 NIXLF, SSO HWS and SSO HWGRP */
        req->nixlf = true;
        req->ssow = 1;
        req->sso = 1;
        if (inl_dev->attach_cptlf) {
                req->cptlfs = 1;
                req->cpt_blkaddr = RVU_BLOCK_ADDR_CPT0;
        }

        rc = mbox_process(dev->mbox);
        if (rc)
                return rc;

        /* Get MSIX vector offsets */
        mbox_alloc_msg_msix_offset(mbox);
        rc = mbox_process_msg(dev->mbox, (void **)&msix_rsp);
        if (rc)
                return rc;

        inl_dev->nix_msixoff = msix_rsp->nix_msixoff;
        inl_dev->ssow_msixoff = msix_rsp->ssow_msixoff[0];
        inl_dev->sso_msixoff = msix_rsp->sso_msixoff[0];
        inl_dev->cpt_msixoff = msix_rsp->cptlf_msixoff[0];

        nix_blkaddr = nix_get_blkaddr(dev);
        inl_dev->is_nix1 = (nix_blkaddr == RVU_BLOCK_ADDR_NIX1);

        /* Update base addresses for LF's */
        inl_dev->nix_base = dev->bar2 + (nix_blkaddr << 20);
        inl_dev->ssow_base = dev->bar2 + (RVU_BLOCK_ADDR_SSOW << 20);
        inl_dev->sso_base = dev->bar2 + (RVU_BLOCK_ADDR_SSO << 20);
        inl_dev->cpt_base = dev->bar2 + (RVU_BLOCK_ADDR_CPT0 << 20);

        return 0;
}

static int
nix_inl_lf_detach(struct nix_inl_dev *inl_dev)
{
        struct dev *dev = &inl_dev->dev;
        struct mbox *mbox = dev->mbox;
        struct rsrc_detach_req *req;
        int rc = -ENOSPC;

        req = mbox_alloc_msg_detach_resources(mbox);
        if (req == NULL)
                return rc;
        req->partial = true;
        req->nixlf = true;
        req->ssow = true;
        req->sso = true;
        req->cptlfs = !!inl_dev->attach_cptlf;

        return mbox_process(dev->mbox);
}

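/* Poll the HWGRP XAQ and admission-queue counters until the SSO has
 * drained all pending work, giving up after roughly 3 seconds.
 */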
static int
nix_inl_dev_wait_for_sso_empty(struct nix_inl_dev *inl_dev)
{
        uintptr_t sso_base = inl_dev->sso_base;
        int wait_ms = 3000;

        while (wait_ms > 0) {
                /* Break when empty */
                if (!plt_read64(sso_base + SSO_LF_GGRP_XAQ_CNT) &&
                    !plt_read64(sso_base + SSO_LF_GGRP_AQ_CNT))
                        return 0;

                plt_delay_us(1000);
                wait_ms -= 1;
        }

        return -ETIMEDOUT;
}

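/* Grow the SSO XAQ aura when a new packet pool starts using the inline
 * device. The RQ is quiesced and the SSO drained first, then the XAQ
 * aura is re-initialized with room for the new aura's buffers before
 * the HWGRP and RQ are re-enabled. Passing aura_handle == 0 resets the
 * XAQ sizing back to the in-unit entry count.
 */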
int
roc_nix_inl_dev_xaq_realloc(uint64_t aura_handle)
{
        struct idev_cfg *idev = idev_get_cfg();
        struct nix_inl_dev *inl_dev;
        int rc, i;

        if (idev == NULL)
                return 0;

        inl_dev = idev->nix_inl_dev;
        /* Nothing to do if no inline device */
        if (!inl_dev)
                return 0;

        if (!aura_handle) {
                inl_dev->nb_xae = inl_dev->iue;
                goto no_pool;
        }

        /* Check if aura is already considered */
        for (i = 0; i < inl_dev->pkt_pools_cnt; i++) {
                if (inl_dev->pkt_pools[i] == aura_handle)
                        return 0;
        }

no_pool:
        /* Disable RQ if enabled */
        if (inl_dev->rq_refs) {
                rc = nix_rq_ena_dis(&inl_dev->dev, &inl_dev->rq, false);
                if (rc) {
                        plt_err("Failed to disable inline dev RQ, rc=%d", rc);
                        return rc;
                }
        }

        /* Wait for events to be removed */
        rc = nix_inl_dev_wait_for_sso_empty(inl_dev);
        if (rc) {
                plt_err("Timeout waiting for inline device event cleanup");
                goto exit;
        }

        /* Disable HWGRP */
        plt_write64(0, inl_dev->sso_base + SSO_LF_GGRP_QCTL);

        inl_dev->pkt_pools_cnt++;
        inl_dev->pkt_pools =
                plt_realloc(inl_dev->pkt_pools,
                            sizeof(uint64_t) * inl_dev->pkt_pools_cnt, 0);
        if (!inl_dev->pkt_pools)
                inl_dev->pkt_pools_cnt = 0;
        else
                inl_dev->pkt_pools[inl_dev->pkt_pools_cnt - 1] = aura_handle;
        inl_dev->nb_xae += roc_npa_aura_op_limit_get(aura_handle);

        /* Realloc XAQ aura */
        rc = sso_hwgrp_init_xaq_aura(&inl_dev->dev, &inl_dev->xaq,
                                     inl_dev->nb_xae, inl_dev->xae_waes,
                                     inl_dev->xaq_buf_size, 1);
        if (rc) {
                plt_err("Failed to reinitialize xaq aura, rc=%d", rc);
                return rc;
        }

        /* Setup xaq for hwgrps */
        rc = sso_hwgrp_alloc_xaq(&inl_dev->dev, inl_dev->xaq.aura_handle, 1);
        if (rc) {
                plt_err("Failed to setup hwgrp xaq aura, rc=%d", rc);
                return rc;
        }

        /* Enable HWGRP */
        plt_write64(0x1, inl_dev->sso_base + SSO_LF_GGRP_QCTL);

exit:
        /* Re-enable RQ */
        if (inl_dev->rq_refs) {
                rc = nix_rq_ena_dis(&inl_dev->dev, &inl_dev->rq, true);
                if (rc)
                        plt_err("Failed to enable inline dev RQ, rc=%d", rc);
        }

        return rc;
}

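/* Drain one outbound soft-expiry error ring. Word 0 of the ring holds
 * the head/tail indices; each subsequent word is a big-endian entry
 * encoding the address of an expired SA. For every valid entry the
 * registered work callback is invoked with a soft-expiry event tagged
 * with the ring's port id, the entry is cleared and the consumer (tail)
 * count in word 0 is advanced.
 */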
static void
inl_outb_soft_exp_poll(struct nix_inl_dev *inl_dev, uint32_t ring_idx)
{
        union roc_ot_ipsec_err_ring_head head;
        struct roc_ot_ipsec_outb_sa *sa;
        uint16_t head_l, tail_l;
        uint64_t *ring_base;
        uint32_t port_id;

        port_id = ring_idx / ROC_NIX_SOFT_EXP_PER_PORT_MAX_RINGS;
        ring_base = PLT_PTR_CAST(inl_dev->sa_soft_exp_ring[ring_idx]);
        if (!ring_base) {
                plt_err("Invalid soft exp ring base");
                return;
        }

        head.u64 = __atomic_load_n(ring_base, __ATOMIC_ACQUIRE);
        head_l = head.s.head_pos;
        tail_l = head.s.tail_pos;

        while (tail_l != head_l) {
                union roc_ot_ipsec_err_ring_entry entry;
                int poll_counter = 0;

                while (poll_counter++ <
                       ROC_NIX_INL_SA_SOFT_EXP_ERR_MAX_POLL_COUNT) {
                        plt_delay_us(20);
                        entry.u64 = __atomic_load_n(ring_base + tail_l + 1,
                                                    __ATOMIC_ACQUIRE);
                        if (likely(entry.u64))
                                break;
                }

                entry.u64 = plt_be_to_cpu_64(entry.u64);
                sa = (struct roc_ot_ipsec_outb_sa *)(((uint64_t)entry.s.data1
                                                      << 51) |
                                                     (entry.s.data0 << 7));

                if (sa != NULL) {
                        uint64_t tmp = ~(uint32_t)0x0;

                        inl_dev->work_cb(&tmp, sa, (port_id << 8) | 0x1);
                        __atomic_store_n(ring_base + tail_l + 1, 0ULL,
                                         __ATOMIC_RELAXED);
                        __atomic_add_fetch((uint32_t *)ring_base, 1,
                                           __ATOMIC_ACQ_REL);
                } else {
                        plt_err("Invalid SA");
                }

                tail_l++;
        }
}

static void *
nix_inl_outb_poll_thread(void *args)
{
        struct nix_inl_dev *inl_dev = args;
        uint32_t poll_freq;
        uint32_t i;
        bool bit;

        poll_freq = inl_dev->soft_exp_poll_freq;

        while (!soft_exp_poll_thread_exit) {
                if (soft_exp_consumer_cnt) {
                        for (i = 0; i < ROC_NIX_INL_MAX_SOFT_EXP_RNGS; i++) {
                                bit = plt_bitmap_get(
                                        inl_dev->soft_exp_ring_bmap, i);
                                if (bit)
                                        inl_outb_soft_exp_poll(inl_dev, i);
                        }
                }
                usleep(poll_freq);
        }

        return 0;
}

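/* Create the control thread that services the outbound soft-expiry
 * rings. A bitmap tracks which of the ROC_NIX_INL_MAX_SOFT_EXP_RNGS
 * rings are live; the thread scans it every soft_exp_poll_freq
 * microseconds while at least one consumer is registered.
 */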
static int
nix_inl_outb_poll_thread_setup(struct nix_inl_dev *inl_dev)
{
        struct plt_bitmap *bmap;
        size_t bmap_sz;
        uint32_t i;
        void *mem;
        int rc;

        /* Allocate a bitmap that the poll thread uses to get the port_id
         * corresponding to each inl_outb_soft_exp_ring.
         */
        bmap_sz =
                plt_bitmap_get_memory_footprint(ROC_NIX_INL_MAX_SOFT_EXP_RNGS);
        mem = plt_zmalloc(bmap_sz, PLT_CACHE_LINE_SIZE);
        if (mem == NULL) {
                plt_err("soft expiry ring bmap alloc failed");
                rc = -ENOMEM;
                goto exit;
        }

        bmap = plt_bitmap_init(ROC_NIX_INL_MAX_SOFT_EXP_RNGS, mem, bmap_sz);
        if (!bmap) {
                plt_err("soft expiry ring bmap init failed");
                plt_free(mem);
                rc = -ENOMEM;
                goto exit;
        }

        inl_dev->soft_exp_ring_bmap_mem = mem;
        inl_dev->soft_exp_ring_bmap = bmap;
        inl_dev->sa_soft_exp_ring = plt_zmalloc(
                ROC_NIX_INL_MAX_SOFT_EXP_RNGS * sizeof(uint64_t), 0);
        if (!inl_dev->sa_soft_exp_ring) {
                plt_err("soft expiry ring pointer array alloc failed");
                plt_free(mem);
                rc = -ENOMEM;
                goto exit;
        }

        for (i = 0; i < ROC_NIX_INL_MAX_SOFT_EXP_RNGS; i++)
                plt_bitmap_clear(inl_dev->soft_exp_ring_bmap, i);

        soft_exp_consumer_cnt = 0;
        soft_exp_poll_thread_exit = false;
        inl_dev->soft_exp_poll_freq = 100;
        rc = plt_ctrl_thread_create(&inl_dev->soft_exp_poll_thread,
                                    "OUTB_SOFT_EXP_POLL_THREAD", NULL,
                                    nix_inl_outb_poll_thread, inl_dev);
        if (rc) {
                plt_bitmap_free(inl_dev->soft_exp_ring_bmap);
                plt_free(inl_dev->soft_exp_ring_bmap_mem);
        }

exit:
        return rc;
}

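/*
 * Probe the inline device: initialize the base device, attach LFs and
 * bring up NIX, SSO and CPT in that order, optionally start the soft
 * expiry poll thread and run the SSO self test. Each step unwinds via
 * the error labels below on failure.
 *
 * A minimal usage sketch (illustrative only; the caller owns the PCI
 * device and the roc_nix_inl_dev structure, and the field values shown
 * are examples):
 *
 *	struct roc_nix_inl_dev inl_dev;
 *
 *	memset(&inl_dev, 0, sizeof(inl_dev));
 *	inl_dev.pci_dev = pci_dev;
 *	inl_dev.attach_cptlf = true;
 *	inl_dev.ipsec_in_max_spi = 1023;
 *	rc = roc_nix_inl_dev_init(&inl_dev);
 *	if (rc)
 *		return rc;
 *	...
 *	rc = roc_nix_inl_dev_fini(&inl_dev);
 */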
int
roc_nix_inl_dev_init(struct roc_nix_inl_dev *roc_inl_dev)
{
        struct plt_pci_device *pci_dev;
        struct nix_inl_dev *inl_dev;
        struct idev_cfg *idev;
        int rc;

        pci_dev = roc_inl_dev->pci_dev;

        /* Skip probe if already done */
        idev = idev_get_cfg();
        if (idev == NULL)
                return -ENOTSUP;

        if (idev->nix_inl_dev) {
                plt_info("Skipping device %s, inline device already probed",
                         pci_dev->name);
                return -EEXIST;
        }

        PLT_STATIC_ASSERT(sizeof(struct nix_inl_dev) <= ROC_NIX_INL_MEM_SZ);

        inl_dev = (struct nix_inl_dev *)roc_inl_dev->reserved;
        memset(inl_dev, 0, sizeof(*inl_dev));

        inl_dev->pci_dev = pci_dev;
        inl_dev->ipsec_in_min_spi = roc_inl_dev->ipsec_in_min_spi;
        inl_dev->ipsec_in_max_spi = roc_inl_dev->ipsec_in_max_spi;
        inl_dev->selftest = roc_inl_dev->selftest;
        inl_dev->is_multi_channel = roc_inl_dev->is_multi_channel;
        inl_dev->channel = roc_inl_dev->channel;
        inl_dev->chan_mask = roc_inl_dev->chan_mask;
        inl_dev->attach_cptlf = roc_inl_dev->attach_cptlf;
        inl_dev->wqe_skip = roc_inl_dev->wqe_skip;
        inl_dev->spb_drop_pc = NIX_AURA_DROP_PC_DFLT;
        inl_dev->lpb_drop_pc = NIX_AURA_DROP_PC_DFLT;

        if (roc_inl_dev->spb_drop_pc)
                inl_dev->spb_drop_pc = roc_inl_dev->spb_drop_pc;
        if (roc_inl_dev->lpb_drop_pc)
                inl_dev->lpb_drop_pc = roc_inl_dev->lpb_drop_pc;

        /* Initialize base device */
        rc = dev_init(&inl_dev->dev, pci_dev);
        if (rc) {
                plt_err("Failed to init roc device");
                goto error;
        }

        /* Attach LF resources */
        rc = nix_inl_lf_attach(inl_dev);
        if (rc) {
                plt_err("Failed to attach LF resources, rc=%d", rc);
                goto dev_cleanup;
        }

        /* Setup NIX LF */
        rc = nix_inl_nix_setup(inl_dev);
        if (rc)
                goto lf_detach;

        /* Setup SSO LF */
        rc = nix_inl_sso_setup(inl_dev);
        if (rc)
                goto nix_release;

        /* Setup CPT LF */
        rc = nix_inl_cpt_setup(inl_dev);
        if (rc)
                goto sso_release;

        if (roc_inl_dev->set_soft_exp_poll) {
                rc = nix_inl_outb_poll_thread_setup(inl_dev);
                if (rc)
                        goto cpt_release;
        }

        /* Perform selftest if asked for */
        if (inl_dev->selftest) {
                rc = nix_inl_selftest();
                if (rc)
                        goto cpt_release;
        }

        idev->nix_inl_dev = inl_dev;

        return 0;
cpt_release:
        rc |= nix_inl_cpt_release(inl_dev);
sso_release:
        rc |= nix_inl_sso_release(inl_dev);
nix_release:
        rc |= nix_inl_nix_release(inl_dev);
lf_detach:
        rc |= nix_inl_lf_detach(inl_dev);
dev_cleanup:
        rc |= dev_fini(&inl_dev->dev, pci_dev);
error:
        return rc;
}

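/* Undo roc_nix_inl_dev_init(): stop the soft expiry poll thread if it
 * was started, flush the CPT CTX cache, then release SSO and NIX
 * resources, detach the LFs and finalize the base device. Returns 0 if
 * this roc_nix_inl_dev does not own the probed inline device.
 */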
int
roc_nix_inl_dev_fini(struct roc_nix_inl_dev *roc_inl_dev)
{
        struct plt_pci_device *pci_dev;
        struct nix_inl_dev *inl_dev;
        struct idev_cfg *idev;
        int rc;

        idev = idev_get_cfg();
        if (idev == NULL)
                return 0;

        if (!idev->nix_inl_dev ||
            PLT_PTR_DIFF(roc_inl_dev->reserved, idev->nix_inl_dev))
                return 0;

        inl_dev = idev->nix_inl_dev;
        pci_dev = inl_dev->pci_dev;

        if (roc_inl_dev->set_soft_exp_poll) {
                soft_exp_poll_thread_exit = true;
                pthread_join(inl_dev->soft_exp_poll_thread, NULL);
                plt_bitmap_free(inl_dev->soft_exp_ring_bmap);
                plt_free(inl_dev->soft_exp_ring_bmap_mem);
                plt_free(inl_dev->sa_soft_exp_ring);
        }

        /* Flush Inbound CTX cache entries */
        nix_inl_cpt_ctx_cache_sync(inl_dev);

        /* Release SSO */
        rc = nix_inl_sso_release(inl_dev);

        /* Release NIX */
        rc |= nix_inl_nix_release(inl_dev);

        /* Detach LF's */
        rc |= nix_inl_lf_detach(inl_dev);

        /* Cleanup mbox */
        rc |= dev_fini(&inl_dev->dev, pci_dev);
        if (rc)
                return rc;

        idev->nix_inl_dev = NULL;
        return 0;
}