/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(C) 2021 Marvell.
 */

#include "roc_api.h"
#include "roc_priv.h"

#include <unistd.h>

#define NIX_AURA_DROP_PC_DFLT 40

/* Default Rx Config for Inline NIX LF */
#define NIX_INL_LF_RX_CFG                                                      \
	(ROC_NIX_LF_RX_CFG_DROP_RE | ROC_NIX_LF_RX_CFG_L2_LEN_ERR |            \
	 ROC_NIX_LF_RX_CFG_IP6_UDP_OPT | ROC_NIX_LF_RX_CFG_DIS_APAD |          \
	 ROC_NIX_LF_RX_CFG_CSUM_IL4 | ROC_NIX_LF_RX_CFG_CSUM_OL4 |             \
	 ROC_NIX_LF_RX_CFG_LEN_IL4 | ROC_NIX_LF_RX_CFG_LEN_IL3 |               \
	 ROC_NIX_LF_RX_CFG_LEN_OL4 | ROC_NIX_LF_RX_CFG_LEN_OL3)

extern uint32_t soft_exp_consumer_cnt;
static bool soft_exp_poll_thread_exit = true;

uint16_t
nix_inl_dev_pffunc_get(void)
{
	struct idev_cfg *idev = idev_get_cfg();
	struct nix_inl_dev *inl_dev;

	if (idev != NULL) {
		inl_dev = idev->nix_inl_dev;
		if (inl_dev)
			return inl_dev->dev.pf_func;
	}
	return 0;
}

uint16_t
roc_nix_inl_dev_pffunc_get(void)
{
	return nix_inl_dev_pffunc_get();
}

static void
nix_inl_selftest_work_cb(uint64_t *gw, void *args, uint32_t soft_exp_event)
{
	uintptr_t work = gw[1];

	(void)soft_exp_event;
	*((uintptr_t *)args + (gw[0] & 0x1)) = work;

	plt_atomic_thread_fence(__ATOMIC_ACQ_REL);
}

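/* Basic sanity test for the inline device SSO path: temporarily swap in the
 * test work callback above, inject two work items (tag bit 0 and 1) through
 * the SSO LF add-work region and verify that the callback delivered the
 * expected magic words into work_arr[].
 */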
static int
nix_inl_selftest(void)
{
	struct idev_cfg *idev = idev_get_cfg();
	roc_nix_inl_sso_work_cb_t save_cb;
	static uintptr_t work_arr[2];
	struct nix_inl_dev *inl_dev;
	void *save_cb_args;
	uint64_t add_work0;
	int rc = 0;

	if (idev == NULL)
		return -ENOTSUP;

	inl_dev = idev->nix_inl_dev;
	if (inl_dev == NULL)
		return -ENOTSUP;

	plt_info("Performing nix inl self test");

	/* Save and update cb to test cb */
	save_cb = inl_dev->work_cb;
	save_cb_args = inl_dev->cb_args;
	inl_dev->work_cb = nix_inl_selftest_work_cb;
	inl_dev->cb_args = work_arr;

	plt_atomic_thread_fence(__ATOMIC_ACQ_REL);

#define WORK_MAGIC1 0x335577ff0
#define WORK_MAGIC2 0xdeadbeef0

	/* Add work */
	add_work0 = ((uint64_t)(SSO_TT_ORDERED) << 32) | 0x0;
	roc_store_pair(add_work0, WORK_MAGIC1, inl_dev->sso_base);
	add_work0 = ((uint64_t)(SSO_TT_ORDERED) << 32) | 0x1;
	roc_store_pair(add_work0, WORK_MAGIC2, inl_dev->sso_base);

	plt_delay_ms(10000);

	/* Check if we got expected work */
	if (work_arr[0] != WORK_MAGIC1 || work_arr[1] != WORK_MAGIC2) {
		plt_err("Failed to get expected work, [0]=%p [1]=%p",
			(void *)work_arr[0], (void *)work_arr[1]);
		rc = -EFAULT;
		goto exit;
	}

	plt_info("Work, [0]=%p [1]=%p", (void *)work_arr[0],
		 (void *)work_arr[1]);

exit:
	/* Restore state */
	inl_dev->work_cb = save_cb;
	inl_dev->cb_args = save_cb_args;
	return rc;
}

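/* Issue a CPT CTX cache sync mailbox request so that any SA context cached
 * by CPT hardware is flushed before the SA memory is reused or released.
 */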
static int
nix_inl_cpt_ctx_cache_sync(struct nix_inl_dev *inl_dev)
{
	struct mbox *mbox = (&inl_dev->dev)->mbox;
	struct msg_req *req;

	req = mbox_alloc_msg_cpt_ctx_cache_sync(mbox);
	if (req == NULL)
		return -ENOSPC;

	return mbox_process(mbox);
}

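/* Program the NIX LF inline IPsec configuration via mailbox. When enabling,
 * pass the inbound SA table base, the SA index width/max derived from the
 * SPI mask, the per-SA size (as a power of two) and the tag type used for
 * inbound work; when disabling, only the enable bit is cleared.
 */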
static int
nix_inl_nix_ipsec_cfg(struct nix_inl_dev *inl_dev, bool ena)
{
	struct nix_inline_ipsec_lf_cfg *lf_cfg;
	struct mbox *mbox = (&inl_dev->dev)->mbox;
	uint64_t max_sa;
	uint32_t sa_w;

	lf_cfg = mbox_alloc_msg_nix_inline_ipsec_lf_cfg(mbox);
	if (lf_cfg == NULL)
		return -ENOSPC;

	if (ena) {
		max_sa = inl_dev->inb_spi_mask + 1;
		sa_w = plt_log2_u32(max_sa);

		lf_cfg->enable = 1;
		lf_cfg->sa_base_addr = (uintptr_t)inl_dev->inb_sa_base;
		lf_cfg->ipsec_cfg1.sa_idx_w = sa_w;
		/* CN9K SA size is different */
		if (roc_model_is_cn9k())
			lf_cfg->ipsec_cfg0.lenm1_max = NIX_CN9K_MAX_HW_FRS - 1;
		else
			lf_cfg->ipsec_cfg0.lenm1_max = NIX_RPM_MAX_HW_FRS - 1;
		lf_cfg->ipsec_cfg1.sa_idx_max = max_sa - 1;
		lf_cfg->ipsec_cfg0.sa_pow2_size =
			plt_log2_u32(inl_dev->inb_sa_sz);

		lf_cfg->ipsec_cfg0.tag_const = 0;
		lf_cfg->ipsec_cfg0.tt = SSO_TT_ORDERED;
	} else {
		lf_cfg->enable = 0;
	}

	return mbox_process(mbox);
}

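/* Allocate and initialize the single CPT LF used by the inline device for
 * submitting control opcodes. Skipped when CPT LF attach is not requested.
 */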
static int
nix_inl_cpt_setup(struct nix_inl_dev *inl_dev)
{
	struct roc_cpt_lf *lf = &inl_dev->cpt_lf;
	struct dev *dev = &inl_dev->dev;
	uint8_t eng_grpmask;
	int rc;

	if (!inl_dev->attach_cptlf)
		return 0;

	/* Alloc CPT LF */
	eng_grpmask = (1ULL << ROC_CPT_DFLT_ENG_GRP_SE |
		       1ULL << ROC_CPT_DFLT_ENG_GRP_SE_IE |
		       1ULL << ROC_CPT_DFLT_ENG_GRP_AE);
	rc = cpt_lfs_alloc(dev, eng_grpmask, RVU_BLOCK_ADDR_CPT0, false);
	if (rc) {
		plt_err("Failed to alloc CPT LF resources, rc=%d", rc);
		return rc;
	}

	/* Setup CPT LF for submitting control opcode */
	lf = &inl_dev->cpt_lf;
	lf->lf_id = 0;
	lf->nb_desc = 0; /* Set to default */
	lf->dev = &inl_dev->dev;
	lf->msixoff = inl_dev->cpt_msixoff;
	lf->pci_dev = inl_dev->pci_dev;

	rc = cpt_lf_init(lf);
	if (rc) {
		plt_err("Failed to initialize CPT LF, rc=%d", rc);
		goto lf_free;
	}

	roc_cpt_iq_enable(lf);
	return 0;
lf_free:
	rc |= cpt_lfs_free(dev);
	return rc;
}

static int
nix_inl_cpt_release(struct nix_inl_dev *inl_dev)
{
	struct roc_cpt_lf *lf = &inl_dev->cpt_lf;
	struct dev *dev = &inl_dev->dev;
	int rc, ret = 0;

	if (!inl_dev->attach_cptlf)
		return 0;

	/* Cleanup CPT LF queue */
	cpt_lf_fini(lf);

	/* Free LF resources */
	rc = cpt_lfs_free(dev);
	if (rc)
		plt_err("Failed to free CPT LF resources, rc=%d", rc);
	ret |= rc;

	/* Detach LF */
	rc = cpt_lfs_detach(dev);
	if (rc)
		plt_err("Failed to detach CPT LF, rc=%d", rc);
	ret |= rc;

	return ret;
}

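/* Bring up the SSO side of the inline device: allocate one HWS and one
 * HWGRP LF, size the XAQ aura from the in-unit entry count, register the
 * SSO/SSOW error and work IRQs, link the HWS to the HWGRP and enable the
 * group.
 */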
static int
nix_inl_sso_setup(struct nix_inl_dev *inl_dev)
{
	struct sso_lf_alloc_rsp *sso_rsp;
	struct dev *dev = &inl_dev->dev;
	uint16_t hwgrp[1] = {0};
	int rc;

	/* Alloc SSOW LF */
	rc = sso_lf_alloc(dev, SSO_LF_TYPE_HWS, 1, NULL);
	if (rc) {
		plt_err("Failed to alloc SSO HWS, rc=%d", rc);
		return rc;
	}

	/* Alloc HWGRP LF */
	rc = sso_lf_alloc(dev, SSO_LF_TYPE_HWGRP, 1, (void **)&sso_rsp);
	if (rc) {
		plt_err("Failed to alloc SSO HWGRP, rc=%d", rc);
		goto free_ssow;
	}

	inl_dev->xaq_buf_size = sso_rsp->xaq_buf_size;
	inl_dev->xae_waes = sso_rsp->xaq_wq_entries;
	inl_dev->iue = sso_rsp->in_unit_entries;

	inl_dev->nb_xae = inl_dev->iue;
	rc = sso_hwgrp_init_xaq_aura(dev, &inl_dev->xaq, inl_dev->nb_xae,
				     inl_dev->xae_waes, inl_dev->xaq_buf_size,
				     1);
	if (rc) {
		plt_err("Failed to alloc SSO XAQ aura, rc=%d", rc);
		goto free_sso;
	}

	/* Setup xaq for hwgrps */
	rc = sso_hwgrp_alloc_xaq(dev, inl_dev->xaq.aura_handle, 1);
	if (rc) {
		plt_err("Failed to setup hwgrp xaq aura, rc=%d", rc);
		goto destroy_pool;
	}

	/* Register SSO, SSOW error and work irq's */
	rc = nix_inl_sso_register_irqs(inl_dev);
	if (rc) {
		plt_err("Failed to register sso irq's, rc=%d", rc);
		goto release_xaq;
	}

	/* Setup hwgrp->hws link */
	sso_hws_link_modify(0, inl_dev->ssow_base, NULL, hwgrp, 1, true);

	/* Enable HWGRP */
	plt_write64(0x1, inl_dev->sso_base + SSO_LF_GGRP_QCTL);

	return 0;

release_xaq:
	sso_hwgrp_release_xaq(&inl_dev->dev, 1);
destroy_pool:
	sso_hwgrp_free_xaq_aura(dev, &inl_dev->xaq, 0);
free_sso:
	sso_lf_free(dev, SSO_LF_TYPE_HWGRP, 1);
free_ssow:
	sso_lf_free(dev, SSO_LF_TYPE_HWS, 1);
	return rc;
}

static int
nix_inl_sso_release(struct nix_inl_dev *inl_dev)
{
	uint16_t hwgrp[1] = {0};

	/* Disable HWGRP */
	plt_write64(0, inl_dev->sso_base + SSO_LF_GGRP_QCTL);

	/* Unregister SSO/SSOW IRQ's */
	nix_inl_sso_unregister_irqs(inl_dev);

	/* Unlink hws */
	sso_hws_link_modify(0, inl_dev->ssow_base, NULL, hwgrp, 1, false);

	/* Release XAQ aura */
	sso_hwgrp_release_xaq(&inl_dev->dev, 1);

	/* Free SSO, SSOW LF's */
	sso_lf_free(&inl_dev->dev, SSO_LF_TYPE_HWS, 1);
	sso_lf_free(&inl_dev->dev, SSO_LF_TYPE_HWGRP, 1);

	/* Free the XAQ aura */
	sso_hwgrp_free_xaq_aura(&inl_dev->dev, &inl_dev->xaq, 0);

	return 0;
}

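/* Bring up the NIX LF used by the inline device: allocate an LF with a
 * single RQ/SQ/CQ (the RQ only serves the UCAST_IPSEC action), register its
 * IRQs, allocate the contiguous inbound SA table sized to the SPI range,
 * initialize the SAs on CN10K and program the inbound IPsec configuration.
 */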
static int
nix_inl_nix_setup(struct nix_inl_dev *inl_dev)
{
	uint32_t ipsec_in_min_spi = inl_dev->ipsec_in_min_spi;
	uint32_t ipsec_in_max_spi = inl_dev->ipsec_in_max_spi;
	struct dev *dev = &inl_dev->dev;
	struct mbox *mbox = dev->mbox;
	struct nix_lf_alloc_rsp *rsp;
	struct nix_lf_alloc_req *req;
	struct nix_hw_info *hw_info;
	uint64_t max_sa, i;
	size_t inb_sa_sz;
	int rc = -ENOSPC;
	void *sa;

	max_sa = plt_align32pow2(ipsec_in_max_spi - ipsec_in_min_spi + 1);

	/* Alloc NIX LF needed for single RQ */
	req = mbox_alloc_msg_nix_lf_alloc(mbox);
	if (req == NULL)
		return rc;
	req->rq_cnt = 1;
	req->sq_cnt = 1;
	req->cq_cnt = 1;
	/* XQESZ is W16 */
	req->xqe_sz = NIX_XQESZ_W16;
	/* RSS size does not matter as this RQ is only for UCAST_IPSEC action */
	req->rss_sz = ROC_NIX_RSS_RETA_SZ_64;
	req->rss_grps = ROC_NIX_RSS_GRPS;
	req->npa_func = idev_npa_pffunc_get();
	req->sso_func = dev->pf_func;
	req->rx_cfg = NIX_INL_LF_RX_CFG;
	req->flags = NIX_LF_RSS_TAG_LSB_AS_ADDER;

	if (roc_model_is_cn10ka_a0() || roc_model_is_cnf10ka_a0() ||
	    roc_model_is_cnf10kb_a0())
		req->rx_cfg &= ~ROC_NIX_LF_RX_CFG_DROP_RE;

	rc = mbox_process_msg(mbox, (void *)&rsp);
	if (rc) {
		plt_err("Failed to alloc lf, rc=%d", rc);
		return rc;
	}

	inl_dev->lf_tx_stats = rsp->lf_tx_stats;
	inl_dev->lf_rx_stats = rsp->lf_rx_stats;
	inl_dev->qints = rsp->qints;
	inl_dev->cints = rsp->cints;

	/* Get VWQE info if supported */
	if (roc_model_is_cn10k()) {
		mbox_alloc_msg_nix_get_hw_info(mbox);
		rc = mbox_process_msg(mbox, (void *)&hw_info);
		if (rc) {
			plt_err("Failed to get HW info, rc=%d", rc);
			goto lf_free;
		}
		inl_dev->vwqe_interval = hw_info->vwqe_delay;
	}

	/* Register nix interrupts */
	rc = nix_inl_nix_register_irqs(inl_dev);
	if (rc) {
		plt_err("Failed to register nix irq's, rc=%d", rc);
		goto lf_free;
	}

	/* CN9K SA is different */
	if (roc_model_is_cn9k())
		inb_sa_sz = ROC_NIX_INL_ONF_IPSEC_INB_SA_SZ;
	else
		inb_sa_sz = ROC_NIX_INL_OT_IPSEC_INB_SA_SZ;

	/* Alloc contiguous memory for Inbound SA's */
	inl_dev->inb_sa_sz = inb_sa_sz;
	inl_dev->inb_spi_mask = max_sa - 1;
	inl_dev->inb_sa_base = plt_zmalloc(inb_sa_sz * max_sa,
					   ROC_NIX_INL_SA_BASE_ALIGN);
	if (!inl_dev->inb_sa_base) {
		plt_err("Failed to allocate memory for Inbound SA");
		rc = -ENOMEM;
		goto unregister_irqs;
	}

	if (roc_model_is_cn10k()) {
		for (i = 0; i < max_sa; i++) {
			sa = ((uint8_t *)inl_dev->inb_sa_base) +
			     (i * inb_sa_sz);
			roc_ot_ipsec_inb_sa_init(sa, true);
		}
	}
	/* Setup device specific inb SA table */
	rc = nix_inl_nix_ipsec_cfg(inl_dev, true);
	if (rc) {
		plt_err("Failed to setup NIX Inbound SA conf, rc=%d", rc);
		goto free_mem;
	}

	return 0;
free_mem:
	plt_free(inl_dev->inb_sa_base);
	inl_dev->inb_sa_base = NULL;
unregister_irqs:
	nix_inl_nix_unregister_irqs(inl_dev);
lf_free:
	mbox_alloc_msg_nix_lf_free(mbox);
	rc |= mbox_process(mbox);
	return rc;
}

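/* Tear down the inline device NIX LF: disable inbound IPsec processing,
 * sync the NIX RX NDC, unregister the IRQs and free the LF (which also
 * removes the associated MCAM rules).
 */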
static int
nix_inl_nix_release(struct nix_inl_dev *inl_dev)
{
	struct dev *dev = &inl_dev->dev;
	struct mbox *mbox = dev->mbox;
	struct nix_lf_free_req *req;
	struct ndc_sync_op *ndc_req;
	int rc = -ENOSPC;

	/* Disable Inbound processing */
	rc = nix_inl_nix_ipsec_cfg(inl_dev, false);
	if (rc)
		plt_err("Failed to disable Inbound IPSec, rc=%d", rc);

	/* Sync NDC-NIX for LF */
	ndc_req = mbox_alloc_msg_ndc_sync_op(mbox);
	if (ndc_req == NULL)
		return rc;
	ndc_req->nix_lf_rx_sync = 1;
	rc = mbox_process(mbox);
	if (rc)
		plt_err("Error on NDC-NIX-RX LF sync, rc %d", rc);

	/* Unregister IRQs */
	nix_inl_nix_unregister_irqs(inl_dev);

	/* By default all associated mcam rules are deleted */
	req = mbox_alloc_msg_nix_lf_free(mbox);
	if (req == NULL)
		return -ENOSPC;

	return mbox_process(mbox);
}

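/* Attach one NIX LF, SSO HWS, SSO HWGRP and optionally a CPT LF to this
 * device, fetch their MSIX vector offsets and derive the per-block BAR2
 * base addresses.
 */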
static int
nix_inl_lf_attach(struct nix_inl_dev *inl_dev)
{
	struct msix_offset_rsp *msix_rsp;
	struct dev *dev = &inl_dev->dev;
	struct mbox *mbox = dev->mbox;
	struct rsrc_attach_req *req;
	uint64_t nix_blkaddr;
	int rc = -ENOSPC;

	req = mbox_alloc_msg_attach_resources(mbox);
	if (req == NULL)
		return rc;
	req->modify = true;
	/* Attach 1 NIXLF, SSO HWS and SSO HWGRP */
	req->nixlf = true;
	req->ssow = 1;
	req->sso = 1;
	if (inl_dev->attach_cptlf) {
		req->cptlfs = 1;
		req->cpt_blkaddr = RVU_BLOCK_ADDR_CPT0;
	}

	rc = mbox_process(dev->mbox);
	if (rc)
		return rc;

	/* Get MSIX vector offsets */
	mbox_alloc_msg_msix_offset(mbox);
	rc = mbox_process_msg(dev->mbox, (void **)&msix_rsp);
	if (rc)
		return rc;

	inl_dev->nix_msixoff = msix_rsp->nix_msixoff;
	inl_dev->ssow_msixoff = msix_rsp->ssow_msixoff[0];
	inl_dev->sso_msixoff = msix_rsp->sso_msixoff[0];
	inl_dev->cpt_msixoff = msix_rsp->cptlf_msixoff[0];

	nix_blkaddr = nix_get_blkaddr(dev);
	inl_dev->is_nix1 = (nix_blkaddr == RVU_BLOCK_ADDR_NIX1);

	/* Update base addresses for LF's */
	inl_dev->nix_base = dev->bar2 + (nix_blkaddr << 20);
	inl_dev->ssow_base = dev->bar2 + (RVU_BLOCK_ADDR_SSOW << 20);
	inl_dev->sso_base = dev->bar2 + (RVU_BLOCK_ADDR_SSO << 20);
	inl_dev->cpt_base = dev->bar2 + (RVU_BLOCK_ADDR_CPT0 << 20);

	return 0;
}

static int
nix_inl_lf_detach(struct nix_inl_dev *inl_dev)
{
	struct dev *dev = &inl_dev->dev;
	struct mbox *mbox = dev->mbox;
	struct rsrc_detach_req *req;
	int rc = -ENOSPC;

	req = mbox_alloc_msg_detach_resources(mbox);
	if (req == NULL)
		return rc;
	req->partial = true;
	req->nixlf = true;
	req->ssow = true;
	req->sso = true;
	req->cptlfs = !!inl_dev->attach_cptlf;

	return mbox_process(dev->mbox);
}

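/* Poll the SSO XAQ and AQ counters for up to 3 seconds, waiting for all
 * in-flight work on the inline device to drain.
 */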
static int
nix_inl_dev_wait_for_sso_empty(struct nix_inl_dev *inl_dev)
{
	uintptr_t sso_base = inl_dev->sso_base;
	int wait_ms = 3000;

	while (wait_ms > 0) {
		/* Break when empty */
		if (!plt_read64(sso_base + SSO_LF_GGRP_XAQ_CNT) &&
		    !plt_read64(sso_base + SSO_LF_GGRP_AQ_CNT))
			return 0;

		plt_delay_us(1000);
		wait_ms -= 1;
	}

	return -ETIMEDOUT;
}

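/* Re-size the inline device XAQ aura when a new packet pool (aura) starts
 * feeding it: pause the RQ, drain the SSO, disable the HWGRP, re-initialize
 * the XAQ aura with an XAE count grown by the new aura's limit, then
 * re-enable the HWGRP and the RQ.
 */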
int
roc_nix_inl_dev_xaq_realloc(uint64_t aura_handle)
{
	struct idev_cfg *idev = idev_get_cfg();
	struct nix_inl_dev *inl_dev;
	int rc, i;

	if (idev == NULL)
		return 0;

	inl_dev = idev->nix_inl_dev;
	/* Nothing to do if no inline device */
	if (!inl_dev)
		return 0;

	if (!aura_handle) {
		inl_dev->nb_xae = inl_dev->iue;
		goto no_pool;
	}

	/* Check if aura is already considered */
	for (i = 0; i < inl_dev->pkt_pools_cnt; i++) {
		if (inl_dev->pkt_pools[i] == aura_handle)
			return 0;
	}

no_pool:
	/* Disable RQ if enabled */
	if (inl_dev->rq_refs) {
		rc = nix_rq_ena_dis(&inl_dev->dev, &inl_dev->rq, false);
		if (rc) {
			plt_err("Failed to disable inline dev RQ, rc=%d", rc);
			return rc;
		}
	}

	/* Wait for events to be removed */
	rc = nix_inl_dev_wait_for_sso_empty(inl_dev);
	if (rc) {
		plt_err("Timeout waiting for inline device event cleanup");
		goto exit;
	}

	/* Disable HWGRP */
	plt_write64(0, inl_dev->sso_base + SSO_LF_GGRP_QCTL);

	inl_dev->pkt_pools_cnt++;
	inl_dev->pkt_pools =
		plt_realloc(inl_dev->pkt_pools,
			    sizeof(uint64_t *) * inl_dev->pkt_pools_cnt, 0);
	if (!inl_dev->pkt_pools)
		inl_dev->pkt_pools_cnt = 0;
	else
		inl_dev->pkt_pools[inl_dev->pkt_pools_cnt - 1] = aura_handle;
	inl_dev->nb_xae += roc_npa_aura_op_limit_get(aura_handle);

	/* Realloc XAQ aura */
	rc = sso_hwgrp_init_xaq_aura(&inl_dev->dev, &inl_dev->xaq,
				     inl_dev->nb_xae, inl_dev->xae_waes,
				     inl_dev->xaq_buf_size, 1);
	if (rc) {
		plt_err("Failed to reinitialize xaq aura, rc=%d", rc);
		return rc;
	}

	/* Setup xaq for hwgrps */
	rc = sso_hwgrp_alloc_xaq(&inl_dev->dev, inl_dev->xaq.aura_handle, 1);
	if (rc) {
		plt_err("Failed to setup hwgrp xaq aura, rc=%d", rc);
		return rc;
	}

	/* Enable HWGRP */
	plt_write64(0x1, inl_dev->sso_base + SSO_LF_GGRP_QCTL);

exit:
	/* Re-enable RQ */
	if (inl_dev->rq_refs) {
		rc = nix_rq_ena_dis(&inl_dev->dev, &inl_dev->rq, true);
		if (rc)
			plt_err("Failed to enable inline dev RQ, rc=%d", rc);
	}

	return rc;
}

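/* Drain one outbound soft-expiry error ring: word 0 of the ring holds the
 * head/tail indexes and each following word is an entry filled by hardware.
 * For every pending entry, wait for it to become non-zero, rebuild the
 * outbound SA pointer from the entry fields, report the soft expiry through
 * work_cb, then clear the entry and bump the consumer count in word 0.
 */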
static void
inl_outb_soft_exp_poll(struct nix_inl_dev *inl_dev, uint32_t ring_idx)
{
	union roc_ot_ipsec_err_ring_head head;
	struct roc_ot_ipsec_outb_sa *sa;
	uint16_t head_l, tail_l;
	uint64_t *ring_base;
	uint32_t port_id;

	port_id = ring_idx / ROC_NIX_SOFT_EXP_PER_PORT_MAX_RINGS;
	ring_base = PLT_PTR_CAST(inl_dev->sa_soft_exp_ring[ring_idx]);
	if (!ring_base) {
		plt_err("Invalid soft exp ring base");
		return;
	}

	head.u64 = __atomic_load_n(ring_base, __ATOMIC_ACQUIRE);
	head_l = head.s.head_pos;
	tail_l = head.s.tail_pos;

	while (tail_l != head_l) {
		union roc_ot_ipsec_err_ring_entry entry;
		int poll_counter = 0;

		while (poll_counter++ <
		       ROC_NIX_INL_SA_SOFT_EXP_ERR_MAX_POLL_COUNT) {
			plt_delay_us(20);
			entry.u64 = __atomic_load_n(ring_base + tail_l + 1,
						    __ATOMIC_ACQUIRE);
			if (likely(entry.u64))
				break;
		}

		entry.u64 = plt_be_to_cpu_64(entry.u64);
		sa = (struct roc_ot_ipsec_outb_sa *)(((uint64_t)entry.s.data1
						      << 51) |
						     (entry.s.data0 << 7));

		if (sa != NULL) {
			uint64_t tmp = ~(uint32_t)0x0;

			inl_dev->work_cb(&tmp, sa, (port_id << 8) | 0x1);
			__atomic_store_n(ring_base + tail_l + 1, 0ULL,
					 __ATOMIC_RELAXED);
			__atomic_add_fetch((uint32_t *)ring_base, 1,
					   __ATOMIC_ACQ_REL);
		} else {
			plt_err("Invalid SA");
		}

		tail_l++;
	}
}

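/* Poll thread body: while there are registered consumers, walk the ring
 * bitmap and drain every active soft-expiry ring, then sleep for the
 * configured poll interval.
 */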
static void *
nix_inl_outb_poll_thread(void *args)
{
	struct nix_inl_dev *inl_dev = args;
	uint32_t poll_freq;
	uint32_t i;
	bool bit;

	poll_freq = inl_dev->soft_exp_poll_freq;

	while (!soft_exp_poll_thread_exit) {
		if (soft_exp_consumer_cnt) {
			for (i = 0; i < ROC_NIX_INL_MAX_SOFT_EXP_RNGS; i++) {
				bit = plt_bitmap_get(
					inl_dev->soft_exp_ring_bmap, i);
				if (bit)
					inl_outb_soft_exp_poll(inl_dev, i);
			}
		}
		usleep(poll_freq);
	}

	return 0;
}

static int
nix_inl_outb_poll_thread_setup(struct nix_inl_dev *inl_dev)
{
	struct plt_bitmap *bmap;
	size_t bmap_sz;
	uint32_t i;
	void *mem;
	int rc;

	/* Allocate a bitmap that the poll thread uses to find the active
	 * rings (and hence the port_id) corresponding to each
	 * inl_outb_soft_exp_ring entry.
	 */
	bmap_sz =
		plt_bitmap_get_memory_footprint(ROC_NIX_INL_MAX_SOFT_EXP_RNGS);
	mem = plt_zmalloc(bmap_sz, PLT_CACHE_LINE_SIZE);
	if (mem == NULL) {
		plt_err("soft expiry ring bmap alloc failed");
		rc = -ENOMEM;
		goto exit;
	}

	bmap = plt_bitmap_init(ROC_NIX_INL_MAX_SOFT_EXP_RNGS, mem, bmap_sz);
	if (!bmap) {
		plt_err("soft expiry ring bmap init failed");
		plt_free(mem);
		rc = -ENOMEM;
		goto exit;
	}

	inl_dev->soft_exp_ring_bmap_mem = mem;
	inl_dev->soft_exp_ring_bmap = bmap;
	inl_dev->sa_soft_exp_ring = plt_zmalloc(
		ROC_NIX_INL_MAX_SOFT_EXP_RNGS * sizeof(uint64_t), 0);
	if (!inl_dev->sa_soft_exp_ring) {
		plt_err("soft expiry ring pointer array alloc failed");
		plt_free(mem);
		rc = -ENOMEM;
		goto exit;
	}

	for (i = 0; i < ROC_NIX_INL_MAX_SOFT_EXP_RNGS; i++)
		plt_bitmap_clear(inl_dev->soft_exp_ring_bmap, i);

	soft_exp_consumer_cnt = 0;
	soft_exp_poll_thread_exit = false;
	inl_dev->soft_exp_poll_freq = 100;
	rc = plt_ctrl_thread_create(&inl_dev->soft_exp_poll_thread,
				    "OUTB_SOFT_EXP_POLL_THREAD", NULL,
				    nix_inl_outb_poll_thread, inl_dev);
	if (rc) {
		plt_bitmap_free(inl_dev->soft_exp_ring_bmap);
		plt_free(inl_dev->soft_exp_ring_bmap_mem);
	}

exit:
	return rc;
}

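/* roc_nix_inl_dev_init() below probes the inline device: it copies the user
 * supplied parameters, initializes the base device over the given PCI
 * device, attaches and sets up the NIX, SSO and (optionally) CPT LFs,
 * optionally starts the soft-expiry poll thread and runs the selftest.
 * A minimal usage sketch, assuming the caller has already obtained the PCI
 * device from its bus probe (field values below are example choices only):
 *
 *	struct roc_nix_inl_dev inl_dev;
 *	int rc;
 *
 *	memset(&inl_dev, 0, sizeof(inl_dev));
 *	inl_dev.pci_dev = pci_dev;
 *	inl_dev.ipsec_in_min_spi = 0;
 *	inl_dev.ipsec_in_max_spi = 4095;
 *	inl_dev.attach_cptlf = true;
 *	rc = roc_nix_inl_dev_init(&inl_dev);
 *	if (rc)
 *		return rc;
 *	...
 *	rc = roc_nix_inl_dev_fini(&inl_dev);
 */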
int
roc_nix_inl_dev_init(struct roc_nix_inl_dev *roc_inl_dev)
{
	struct plt_pci_device *pci_dev;
	struct nix_inl_dev *inl_dev;
	struct idev_cfg *idev;
	int rc;

	pci_dev = roc_inl_dev->pci_dev;

	/* Skip probe if already done */
	idev = idev_get_cfg();
	if (idev == NULL)
		return -ENOTSUP;

	if (idev->nix_inl_dev) {
		plt_info("Skipping device %s, inline device already probed",
			 pci_dev->name);
		return -EEXIST;
	}

	PLT_STATIC_ASSERT(sizeof(struct nix_inl_dev) <= ROC_NIX_INL_MEM_SZ);

	inl_dev = (struct nix_inl_dev *)roc_inl_dev->reserved;
	memset(inl_dev, 0, sizeof(*inl_dev));

	inl_dev->pci_dev = pci_dev;
	inl_dev->ipsec_in_min_spi = roc_inl_dev->ipsec_in_min_spi;
	inl_dev->ipsec_in_max_spi = roc_inl_dev->ipsec_in_max_spi;
	inl_dev->selftest = roc_inl_dev->selftest;
	inl_dev->is_multi_channel = roc_inl_dev->is_multi_channel;
	inl_dev->channel = roc_inl_dev->channel;
	inl_dev->chan_mask = roc_inl_dev->chan_mask;
	inl_dev->attach_cptlf = roc_inl_dev->attach_cptlf;
	inl_dev->wqe_skip = roc_inl_dev->wqe_skip;
	inl_dev->spb_drop_pc = NIX_AURA_DROP_PC_DFLT;
	inl_dev->lpb_drop_pc = NIX_AURA_DROP_PC_DFLT;

	if (roc_inl_dev->spb_drop_pc)
		inl_dev->spb_drop_pc = roc_inl_dev->spb_drop_pc;
	if (roc_inl_dev->lpb_drop_pc)
		inl_dev->lpb_drop_pc = roc_inl_dev->lpb_drop_pc;

	/* Initialize base device */
	rc = dev_init(&inl_dev->dev, pci_dev);
	if (rc) {
		plt_err("Failed to init roc device");
		goto error;
	}

	/* Attach LF resources */
	rc = nix_inl_lf_attach(inl_dev);
	if (rc) {
		plt_err("Failed to attach LF resources, rc=%d", rc);
		goto dev_cleanup;
	}

	/* Setup NIX LF */
	rc = nix_inl_nix_setup(inl_dev);
	if (rc)
		goto lf_detach;

	/* Setup SSO LF */
	rc = nix_inl_sso_setup(inl_dev);
	if (rc)
		goto nix_release;

	/* Setup CPT LF */
	rc = nix_inl_cpt_setup(inl_dev);
	if (rc)
		goto sso_release;

	if (roc_inl_dev->set_soft_exp_poll) {
		rc = nix_inl_outb_poll_thread_setup(inl_dev);
		if (rc)
			goto cpt_release;
	}

	/* Perform selftest if asked for */
	if (inl_dev->selftest) {
		rc = nix_inl_selftest();
		if (rc)
			goto cpt_release;
	}

	idev->nix_inl_dev = inl_dev;

	return 0;
cpt_release:
	rc |= nix_inl_cpt_release(inl_dev);
sso_release:
	rc |= nix_inl_sso_release(inl_dev);
nix_release:
	rc |= nix_inl_nix_release(inl_dev);
lf_detach:
	rc |= nix_inl_lf_detach(inl_dev);
dev_cleanup:
	rc |= dev_fini(&inl_dev->dev, pci_dev);
error:
	return rc;
}

int
roc_nix_inl_dev_fini(struct roc_nix_inl_dev *roc_inl_dev)
{
	struct plt_pci_device *pci_dev;
	struct nix_inl_dev *inl_dev;
	struct idev_cfg *idev;
	int rc;

	idev = idev_get_cfg();
	if (idev == NULL)
		return 0;

	if (!idev->nix_inl_dev ||
	    PLT_PTR_DIFF(roc_inl_dev->reserved, idev->nix_inl_dev))
		return 0;

	inl_dev = idev->nix_inl_dev;
	pci_dev = inl_dev->pci_dev;

	if (roc_inl_dev->set_soft_exp_poll) {
		soft_exp_poll_thread_exit = true;
		pthread_join(inl_dev->soft_exp_poll_thread, NULL);
		plt_bitmap_free(inl_dev->soft_exp_ring_bmap);
		plt_free(inl_dev->soft_exp_ring_bmap_mem);
		plt_free(inl_dev->sa_soft_exp_ring);
	}

	/* Flush Inbound CTX cache entries */
	nix_inl_cpt_ctx_cache_sync(inl_dev);

	/* Release SSO */
	rc = nix_inl_sso_release(inl_dev);

	/* Release NIX */
	rc |= nix_inl_nix_release(inl_dev);

	/* Detach LF's */
	rc |= nix_inl_lf_detach(inl_dev);

	/* Cleanup mbox */
	rc |= dev_fini(&inl_dev->dev, pci_dev);
	if (rc)
		return rc;

	idev->nix_inl_dev = NULL;
	return 0;
}