common/cnxk: add lower bound check for SSO resources
[dpdk.git] drivers/common/cnxk/roc_nix_inl_dev.c
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(C) 2021 Marvell.
 */

#include "roc_api.h"
#include "roc_priv.h"

#include <unistd.h>

#define NIX_AURA_DROP_PC_DFLT 40

/* Default Rx Config for Inline NIX LF */
#define NIX_INL_LF_RX_CFG                                                      \
        (ROC_NIX_LF_RX_CFG_DROP_RE | ROC_NIX_LF_RX_CFG_L2_LEN_ERR |            \
         ROC_NIX_LF_RX_CFG_IP6_UDP_OPT | ROC_NIX_LF_RX_CFG_DIS_APAD |          \
         ROC_NIX_LF_RX_CFG_CSUM_IL4 | ROC_NIX_LF_RX_CFG_CSUM_OL4 |             \
         ROC_NIX_LF_RX_CFG_LEN_IL4 | ROC_NIX_LF_RX_CFG_LEN_IL3 |               \
         ROC_NIX_LF_RX_CFG_LEN_OL4 | ROC_NIX_LF_RX_CFG_LEN_OL3)

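/* Soft-expiry poll state: soft_exp_consumer_cnt is defined elsewhere in the
 * ROC inline layer and the poll loop below only scans rings while it is
 * non-zero; the exit flag starts as true so the poll thread body stays idle
 * until nix_inl_outb_poll_thread_setup() clears it.
 */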
extern uint32_t soft_exp_consumer_cnt;
static bool soft_exp_poll_thread_exit = true;

uint16_t
nix_inl_dev_pffunc_get(void)
{
        struct idev_cfg *idev = idev_get_cfg();
        struct nix_inl_dev *inl_dev;

        if (idev != NULL) {
                inl_dev = idev->nix_inl_dev;
                if (inl_dev)
                        return inl_dev->dev.pf_func;
        }
        return 0;
}

uint16_t
roc_nix_inl_dev_pffunc_get(void)
{
        return nix_inl_dev_pffunc_get();
}

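/* Selftest work handler: gw[0] carries the tag word and gw[1] the work
 * pointer added via roc_store_pair() in the selftest below; the low tag bit
 * selects which slot of the caller's two-entry array receives the work.
 */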
static void
nix_inl_selftest_work_cb(uint64_t *gw, void *args, uint32_t soft_exp_event)
{
        uintptr_t work = gw[1];

        (void)soft_exp_event;
        *((uintptr_t *)args + (gw[0] & 0x1)) = work;

        plt_atomic_thread_fence(__ATOMIC_ACQ_REL);
}

static int
nix_inl_selftest(void)
{
        struct idev_cfg *idev = idev_get_cfg();
        roc_nix_inl_sso_work_cb_t save_cb;
        static uintptr_t work_arr[2];
        struct nix_inl_dev *inl_dev;
        void *save_cb_args;
        uint64_t add_work0;
        int rc = 0;

        if (idev == NULL)
                return -ENOTSUP;

        inl_dev = idev->nix_inl_dev;
        if (inl_dev == NULL)
                return -ENOTSUP;

        plt_info("Performing nix inl self test");

        /* Save and update cb to test cb */
        save_cb = inl_dev->work_cb;
        save_cb_args = inl_dev->cb_args;
        inl_dev->work_cb = nix_inl_selftest_work_cb;
        inl_dev->cb_args = work_arr;

        plt_atomic_thread_fence(__ATOMIC_ACQ_REL);

#define WORK_MAGIC1 0x335577ff0
#define WORK_MAGIC2 0xdeadbeef0

        /* Add work */
        add_work0 = ((uint64_t)(SSO_TT_ORDERED) << 32) | 0x0;
        roc_store_pair(add_work0, WORK_MAGIC1, inl_dev->sso_base);
        add_work0 = ((uint64_t)(SSO_TT_ORDERED) << 32) | 0x1;
        roc_store_pair(add_work0, WORK_MAGIC2, inl_dev->sso_base);

        plt_delay_ms(10000);

        /* Check if we got expected work */
        if (work_arr[0] != WORK_MAGIC1 || work_arr[1] != WORK_MAGIC2) {
                plt_err("Failed to get expected work, [0]=%p [1]=%p",
                        (void *)work_arr[0], (void *)work_arr[1]);
                rc = -EFAULT;
                goto exit;
        }

        plt_info("Work, [0]=%p [1]=%p", (void *)work_arr[0],
                 (void *)work_arr[1]);

exit:
        /* Restore state */
        inl_dev->work_cb = save_cb;
        inl_dev->cb_args = save_cb_args;
        return rc;
}

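/* Request a CPT context cache flush over mailbox so that no stale SA context
 * remains cached in hardware before the inbound SA memory is released.
 */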
static int
nix_inl_cpt_ctx_cache_sync(struct nix_inl_dev *inl_dev)
{
        struct mbox *mbox = (&inl_dev->dev)->mbox;
        struct msg_req *req;

        req = mbox_alloc_msg_cpt_ctx_cache_sync(mbox);
        if (req == NULL)
                return -ENOSPC;

        return mbox_process(mbox);
}

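/* Program (or disable) the inline IPsec configuration of the NIX LF: inbound
 * SA table base, per-SA size, SA index width/limit derived from the SPI
 * mask, and the tag type used for work generated towards SSO.
 */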
static int
nix_inl_nix_ipsec_cfg(struct nix_inl_dev *inl_dev, bool ena)
{
        struct nix_inline_ipsec_lf_cfg *lf_cfg;
        struct mbox *mbox = (&inl_dev->dev)->mbox;
        uint64_t max_sa;
        uint32_t sa_w;

        lf_cfg = mbox_alloc_msg_nix_inline_ipsec_lf_cfg(mbox);
        if (lf_cfg == NULL)
                return -ENOSPC;

        if (ena) {

                max_sa = inl_dev->inb_spi_mask + 1;
                sa_w = plt_log2_u32(max_sa);

                lf_cfg->enable = 1;
                lf_cfg->sa_base_addr = (uintptr_t)inl_dev->inb_sa_base;
                lf_cfg->ipsec_cfg1.sa_idx_w = sa_w;
                /* CN9K SA size is different */
                if (roc_model_is_cn9k())
                        lf_cfg->ipsec_cfg0.lenm1_max = NIX_CN9K_MAX_HW_FRS - 1;
                else
                        lf_cfg->ipsec_cfg0.lenm1_max = NIX_RPM_MAX_HW_FRS - 1;
                lf_cfg->ipsec_cfg1.sa_idx_max = max_sa - 1;
                lf_cfg->ipsec_cfg0.sa_pow2_size =
                        plt_log2_u32(inl_dev->inb_sa_sz);

                lf_cfg->ipsec_cfg0.tag_const = 0;
                lf_cfg->ipsec_cfg0.tt = SSO_TT_ORDERED;
        } else {
                lf_cfg->enable = 0;
        }

        return mbox_process(mbox);
}

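/* When attach_cptlf is set, allocate and initialize a single CPT LF that the
 * inline device uses to submit control opcodes, and enable its instruction
 * queue.
 */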
static int
nix_inl_cpt_setup(struct nix_inl_dev *inl_dev)
{
        struct roc_cpt_lf *lf = &inl_dev->cpt_lf;
        struct dev *dev = &inl_dev->dev;
        uint8_t eng_grpmask;
        int rc;

        if (!inl_dev->attach_cptlf)
                return 0;

        /* Alloc CPT LF */
        eng_grpmask = (1ULL << ROC_CPT_DFLT_ENG_GRP_SE |
                       1ULL << ROC_CPT_DFLT_ENG_GRP_SE_IE |
                       1ULL << ROC_CPT_DFLT_ENG_GRP_AE);
        rc = cpt_lfs_alloc(dev, eng_grpmask, RVU_BLOCK_ADDR_CPT0, false);
        if (rc) {
                plt_err("Failed to alloc CPT LF resources, rc=%d", rc);
                return rc;
        }

        /* Setup CPT LF for submitting control opcode */
        lf = &inl_dev->cpt_lf;
        lf->lf_id = 0;
        lf->nb_desc = 0; /* Set to default */
        lf->dev = &inl_dev->dev;
        lf->msixoff = inl_dev->cpt_msixoff;
        lf->pci_dev = inl_dev->pci_dev;

        rc = cpt_lf_init(lf);
        if (rc) {
                plt_err("Failed to initialize CPT LF, rc=%d", rc);
                goto lf_free;
        }

        roc_cpt_iq_enable(lf);
        return 0;
lf_free:
        rc |= cpt_lfs_free(dev);
        return rc;
}

static int
nix_inl_cpt_release(struct nix_inl_dev *inl_dev)
{
        struct roc_cpt_lf *lf = &inl_dev->cpt_lf;
        struct dev *dev = &inl_dev->dev;
        int rc, ret = 0;

        if (!inl_dev->attach_cptlf)
                return 0;

        /* Cleanup CPT LF queue */
        cpt_lf_fini(lf);

        /* Free LF resources */
        rc = cpt_lfs_free(dev);
        if (rc)
                plt_err("Failed to free CPT LF resources, rc=%d", rc);
        ret |= rc;

        /* Detach LF */
        rc = cpt_lfs_detach(dev);
        if (rc)
                plt_err("Failed to detach CPT LF, rc=%d", rc);
        ret |= rc;

        return ret;
}

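/* Set up the SSO resources backing the inline device: one HWS (work slot)
 * and one HWGRP, an XAQ aura sized from the reported in-unit entries, the
 * SSO error/work interrupts, the HWS<->HWGRP link, and finally HWGRP enable.
 */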
static int
nix_inl_sso_setup(struct nix_inl_dev *inl_dev)
{
        struct sso_lf_alloc_rsp *sso_rsp;
        struct dev *dev = &inl_dev->dev;
        uint16_t hwgrp[1] = {0};
        int rc;

        /* Alloc SSOW LF */
        rc = sso_lf_alloc(dev, SSO_LF_TYPE_HWS, 1, NULL);
        if (rc) {
                plt_err("Failed to alloc SSO HWS, rc=%d", rc);
                return rc;
        }

        /* Alloc HWGRP LF */
        rc = sso_lf_alloc(dev, SSO_LF_TYPE_HWGRP, 1, (void **)&sso_rsp);
        if (rc) {
                plt_err("Failed to alloc SSO HWGRP, rc=%d", rc);
                goto free_ssow;
        }

        inl_dev->xaq_buf_size = sso_rsp->xaq_buf_size;
        inl_dev->xae_waes = sso_rsp->xaq_wq_entries;
        inl_dev->iue = sso_rsp->in_unit_entries;

        inl_dev->nb_xae = inl_dev->iue;
        rc = sso_hwgrp_init_xaq_aura(dev, &inl_dev->xaq, inl_dev->nb_xae,
                                     inl_dev->xae_waes, inl_dev->xaq_buf_size,
                                     1);
        if (rc) {
                plt_err("Failed to alloc SSO XAQ aura, rc=%d", rc);
                goto free_sso;
        }

        /* Setup xaq for hwgrps */
        rc = sso_hwgrp_alloc_xaq(dev, inl_dev->xaq.aura_handle, 1);
        if (rc) {
                plt_err("Failed to setup hwgrp xaq aura, rc=%d", rc);
                goto destroy_pool;
        }

        /* Register SSO, SSOW error and work IRQs */
        rc = nix_inl_sso_register_irqs(inl_dev);
        if (rc) {
                plt_err("Failed to register sso irq's, rc=%d", rc);
                goto release_xaq;
        }

        /* Setup hwgrp->hws link */
        sso_hws_link_modify(0, inl_dev->ssow_base, NULL, hwgrp, 1, true);

        /* Enable HWGRP */
        plt_write64(0x1, inl_dev->sso_base + SSO_LF_GGRP_QCTL);

        return 0;

release_xaq:
        sso_hwgrp_release_xaq(&inl_dev->dev, 1);
destroy_pool:
        sso_hwgrp_free_xaq_aura(dev, &inl_dev->xaq, 0);
free_sso:
        sso_lf_free(dev, SSO_LF_TYPE_HWGRP, 1);
free_ssow:
        sso_lf_free(dev, SSO_LF_TYPE_HWS, 1);
        return rc;
}

static int
nix_inl_sso_release(struct nix_inl_dev *inl_dev)
{
        uint16_t hwgrp[1] = {0};

        /* Disable HWGRP */
        plt_write64(0, inl_dev->sso_base + SSO_LF_GGRP_QCTL);

        /* Unregister SSO/SSOW IRQs */
        nix_inl_sso_unregister_irqs(inl_dev);

        /* Unlink hws */
        sso_hws_link_modify(0, inl_dev->ssow_base, NULL, hwgrp, 1, false);

        /* Release XAQ aura */
        sso_hwgrp_release_xaq(&inl_dev->dev, 1);

        /* Free SSO, SSOW LFs */
        sso_lf_free(&inl_dev->dev, SSO_LF_TYPE_HWS, 1);
        sso_lf_free(&inl_dev->dev, SSO_LF_TYPE_HWGRP, 1);

        /* Free the XAQ aura */
        sso_hwgrp_free_xaq_aura(&inl_dev->dev, &inl_dev->xaq, 0);

        return 0;
}

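/* Set up the NIX LF of the inline device: allocate the LF with one RQ per
 * port (or a single RQ when channel masking is used), register its
 * interrupts, allocate a contiguous inbound SA table sized to the next power
 * of two of the SPI range, and point the LF's inline IPsec config at it.
 */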
static int
nix_inl_nix_setup(struct nix_inl_dev *inl_dev)
{
        uint32_t ipsec_in_min_spi = inl_dev->ipsec_in_min_spi;
        uint32_t ipsec_in_max_spi = inl_dev->ipsec_in_max_spi;
        struct dev *dev = &inl_dev->dev;
        struct mbox *mbox = dev->mbox;
        struct nix_lf_alloc_rsp *rsp;
        struct nix_lf_alloc_req *req;
        struct nix_hw_info *hw_info;
        struct roc_nix_rq *rqs;
        uint64_t max_sa, i;
        size_t inb_sa_sz;
        int rc = -ENOSPC;
        void *sa;

        max_sa = plt_align32pow2(ipsec_in_max_spi - ipsec_in_min_spi + 1);

        /* Alloc NIX LF needed for single RQ */
        req = mbox_alloc_msg_nix_lf_alloc(mbox);
        if (req == NULL)
                return rc;
        /* We will have per-port RQ if it is not with channel masking */
        req->rq_cnt = inl_dev->nb_rqs;
        req->sq_cnt = 1;
        req->cq_cnt = 1;
        /* XQESZ is W16 */
        req->xqe_sz = NIX_XQESZ_W16;
        /* RSS size does not matter as this RQ is only for UCAST_IPSEC action */
        req->rss_sz = ROC_NIX_RSS_RETA_SZ_64;
        req->rss_grps = ROC_NIX_RSS_GRPS;
        req->npa_func = idev_npa_pffunc_get();
        req->sso_func = dev->pf_func;
        req->rx_cfg = NIX_INL_LF_RX_CFG;
        req->flags = NIX_LF_RSS_TAG_LSB_AS_ADDER;

        if (roc_errata_nix_has_no_drop_re())
                req->rx_cfg &= ~ROC_NIX_LF_RX_CFG_DROP_RE;

        rc = mbox_process_msg(mbox, (void *)&rsp);
        if (rc) {
                plt_err("Failed to alloc lf, rc=%d", rc);
                return rc;
        }

        inl_dev->lf_tx_stats = rsp->lf_tx_stats;
        inl_dev->lf_rx_stats = rsp->lf_rx_stats;
        inl_dev->qints = rsp->qints;
        inl_dev->cints = rsp->cints;

        /* Get VWQE info if supported */
        if (roc_model_is_cn10k()) {
                mbox_alloc_msg_nix_get_hw_info(mbox);
                rc = mbox_process_msg(mbox, (void *)&hw_info);
                if (rc) {
                        plt_err("Failed to get HW info, rc=%d", rc);
                        goto lf_free;
                }
                inl_dev->vwqe_interval = hw_info->vwqe_delay;
        }

        /* Register nix interrupts */
        rc = nix_inl_nix_register_irqs(inl_dev);
        if (rc) {
                plt_err("Failed to register nix irq's, rc=%d", rc);
                goto lf_free;
        }

        /* CN9K SA is different */
        if (roc_model_is_cn9k())
                inb_sa_sz = ROC_NIX_INL_ONF_IPSEC_INB_SA_SZ;
        else
                inb_sa_sz = ROC_NIX_INL_OT_IPSEC_INB_SA_SZ;

        /* Alloc contiguous memory for Inbound SAs */
        inl_dev->inb_sa_sz = inb_sa_sz;
        inl_dev->inb_spi_mask = max_sa - 1;
        inl_dev->inb_sa_base = plt_zmalloc(inb_sa_sz * max_sa,
                                           ROC_NIX_INL_SA_BASE_ALIGN);
        if (!inl_dev->inb_sa_base) {
                plt_err("Failed to allocate memory for Inbound SA");
                rc = -ENOMEM;
                goto unregister_irqs;
        }

        if (roc_model_is_cn10k()) {
                for (i = 0; i < max_sa; i++) {
                        sa = ((uint8_t *)inl_dev->inb_sa_base) +
                             (i * inb_sa_sz);
                        roc_ot_ipsec_inb_sa_init(sa, true);
                }
        }
        /* Setup device specific inb SA table */
        rc = nix_inl_nix_ipsec_cfg(inl_dev, true);
        if (rc) {
                plt_err("Failed to setup NIX Inbound SA conf, rc=%d", rc);
                goto free_mem;
        }

        /* Allocate memory for RQs */
        rqs = plt_zmalloc(sizeof(struct roc_nix_rq) * PLT_MAX_ETHPORTS, 0);
        if (!rqs) {
                plt_err("Failed to allocate memory for RQ's");
                rc = -ENOMEM;
                goto free_mem;
        }
        inl_dev->rqs = rqs;

        return 0;
free_mem:
        plt_free(inl_dev->inb_sa_base);
        inl_dev->inb_sa_base = NULL;
unregister_irqs:
        nix_inl_nix_unregister_irqs(inl_dev);
lf_free:
        mbox_alloc_msg_nix_lf_free(mbox);
        rc |= mbox_process(mbox);
        return rc;
}

static int
nix_inl_nix_release(struct nix_inl_dev *inl_dev)
{
        struct dev *dev = &inl_dev->dev;
        struct mbox *mbox = dev->mbox;
        struct nix_lf_free_req *req;
        struct ndc_sync_op *ndc_req;
        int rc = -ENOSPC;

        /* Disable Inbound processing */
        rc = nix_inl_nix_ipsec_cfg(inl_dev, false);
        if (rc)
                plt_err("Failed to disable Inbound IPSec, rc=%d", rc);

        /* Sync NDC-NIX for LF */
        ndc_req = mbox_alloc_msg_ndc_sync_op(mbox);
        if (ndc_req == NULL)
                return rc;
        ndc_req->nix_lf_rx_sync = 1;
        rc = mbox_process(mbox);
        if (rc)
                plt_err("Error on NDC-NIX-RX LF sync, rc %d", rc);

        /* Unregister IRQs */
        nix_inl_nix_unregister_irqs(inl_dev);

        /* By default all associated mcam rules are deleted */
        req = mbox_alloc_msg_nix_lf_free(mbox);
        if (req == NULL)
                return -ENOSPC;

        rc = mbox_process(mbox);
        if (rc)
                return rc;

        plt_free(inl_dev->rqs);
        plt_free(inl_dev->inb_sa_base);
        inl_dev->rqs = NULL;
        inl_dev->inb_sa_base = NULL;
        return 0;
}

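/* Attach the NIX, SSO, SSOW and (optionally) CPT LFs to this device over
 * mailbox, then record their MSI-X offsets and compute the BAR2 base address
 * of each block for later register access.
 */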
static int
nix_inl_lf_attach(struct nix_inl_dev *inl_dev)
{
        struct msix_offset_rsp *msix_rsp;
        struct dev *dev = &inl_dev->dev;
        struct mbox *mbox = dev->mbox;
        struct rsrc_attach_req *req;
        uint64_t nix_blkaddr;
        int rc = -ENOSPC;

        req = mbox_alloc_msg_attach_resources(mbox);
        if (req == NULL)
                return rc;
        req->modify = true;
        /* Attach 1 NIXLF, SSO HWS and SSO HWGRP */
        req->nixlf = true;
        req->ssow = 1;
        req->sso = 1;
        if (inl_dev->attach_cptlf) {
                req->cptlfs = 1;
                req->cpt_blkaddr = RVU_BLOCK_ADDR_CPT0;
        }

        rc = mbox_process(dev->mbox);
        if (rc)
                return rc;

        /* Get MSIX vector offsets */
        mbox_alloc_msg_msix_offset(mbox);
        rc = mbox_process_msg(dev->mbox, (void **)&msix_rsp);
        if (rc)
                return rc;

        inl_dev->nix_msixoff = msix_rsp->nix_msixoff;
        inl_dev->ssow_msixoff = msix_rsp->ssow_msixoff[0];
        inl_dev->sso_msixoff = msix_rsp->sso_msixoff[0];
        inl_dev->cpt_msixoff = msix_rsp->cptlf_msixoff[0];

        nix_blkaddr = nix_get_blkaddr(dev);
        inl_dev->is_nix1 = (nix_blkaddr == RVU_BLOCK_ADDR_NIX1);

        /* Update base addresses for LFs */
        inl_dev->nix_base = dev->bar2 + (nix_blkaddr << 20);
        inl_dev->ssow_base = dev->bar2 + (RVU_BLOCK_ADDR_SSOW << 20);
        inl_dev->sso_base = dev->bar2 + (RVU_BLOCK_ADDR_SSO << 20);
        inl_dev->cpt_base = dev->bar2 + (RVU_BLOCK_ADDR_CPT0 << 20);

        return 0;
}

static int
nix_inl_lf_detach(struct nix_inl_dev *inl_dev)
{
        struct dev *dev = &inl_dev->dev;
        struct mbox *mbox = dev->mbox;
        struct rsrc_detach_req *req;
        int rc = -ENOSPC;

        req = mbox_alloc_msg_detach_resources(mbox);
        if (req == NULL)
                return rc;
        req->partial = true;
        req->nixlf = true;
        req->ssow = true;
        req->sso = true;
        req->cptlfs = !!inl_dev->attach_cptlf;

        return mbox_process(dev->mbox);
}

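/* Poll the HWGRP XAQ and AQ counters until both drop to zero, i.e. until all
 * pending work for the inline device has been consumed; give up after ~3s.
 */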
static int
nix_inl_dev_wait_for_sso_empty(struct nix_inl_dev *inl_dev)
{
        uintptr_t sso_base = inl_dev->sso_base;
        int wait_ms = 3000;

        while (wait_ms > 0) {
                /* Break when empty */
                if (!plt_read64(sso_base + SSO_LF_GGRP_XAQ_CNT) &&
                    !plt_read64(sso_base + SSO_LF_GGRP_AQ_CNT))
                        return 0;

                plt_delay_us(1000);
                wait_ms -= 1;
        }

        return -ETIMEDOUT;
}

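/* Grow the SSO XAQ aura of the inline device when a new packet pool starts
 * using it: quiesce the inline RQs, wait for SSO to drain, add the new
 * aura's limit to nb_xae, reinitialize the XAQ aura and re-enable
 * everything. A zero aura_handle resets nb_xae to the in-unit entry count.
 */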
int
roc_nix_inl_dev_xaq_realloc(uint64_t aura_handle)
{
        struct idev_cfg *idev = idev_get_cfg();
        struct nix_inl_dev *inl_dev;
        int rc, i;

        if (idev == NULL)
                return 0;

        inl_dev = idev->nix_inl_dev;
        /* Nothing to do if no inline device */
        if (!inl_dev)
                return 0;

        if (!aura_handle) {
                inl_dev->nb_xae = inl_dev->iue;
                goto no_pool;
        }

        /* Check if aura is already considered */
        for (i = 0; i < inl_dev->pkt_pools_cnt; i++) {
                if (inl_dev->pkt_pools[i] == aura_handle)
                        return 0;
        }

no_pool:
        /* Disable RQ if enabled */
        for (i = 0; i < inl_dev->nb_rqs; i++) {
                if (!inl_dev->rqs[i].inl_dev_refs)
                        continue;
                rc = nix_rq_ena_dis(&inl_dev->dev, &inl_dev->rqs[i], false);
                if (rc) {
                        plt_err("Failed to disable inline dev RQ %d, rc=%d", i,
                                rc);
                        return rc;
                }
        }

        /* Wait for events to be removed */
        rc = nix_inl_dev_wait_for_sso_empty(inl_dev);
        if (rc) {
                plt_err("Timeout waiting for inline device event cleanup");
                goto exit;
        }

        /* Disable HWGRP */
        plt_write64(0, inl_dev->sso_base + SSO_LF_GGRP_QCTL);

        inl_dev->pkt_pools_cnt++;
        inl_dev->pkt_pools =
                plt_realloc(inl_dev->pkt_pools,
                            sizeof(uint64_t) * inl_dev->pkt_pools_cnt, 0);
        if (!inl_dev->pkt_pools)
                inl_dev->pkt_pools_cnt = 0;
        else
                inl_dev->pkt_pools[inl_dev->pkt_pools_cnt - 1] = aura_handle;
        inl_dev->nb_xae += roc_npa_aura_op_limit_get(aura_handle);

        /* Realloc XAQ aura */
        rc = sso_hwgrp_init_xaq_aura(&inl_dev->dev, &inl_dev->xaq,
                                     inl_dev->nb_xae, inl_dev->xae_waes,
                                     inl_dev->xaq_buf_size, 1);
        if (rc) {
                plt_err("Failed to reinitialize xaq aura, rc=%d", rc);
                return rc;
        }

        /* Setup xaq for hwgrps */
        rc = sso_hwgrp_alloc_xaq(&inl_dev->dev, inl_dev->xaq.aura_handle, 1);
        if (rc) {
                plt_err("Failed to setup hwgrp xaq aura, rc=%d", rc);
                return rc;
        }

        /* Enable HWGRP */
        plt_write64(0x1, inl_dev->sso_base + SSO_LF_GGRP_QCTL);

exit:
        /* Re-enable RQs */
        for (i = 0; i < inl_dev->nb_rqs; i++) {
                if (!inl_dev->rqs[i].inl_dev_refs)
                        continue;

                rc = nix_rq_ena_dis(&inl_dev->dev, &inl_dev->rqs[i], true);
                if (rc)
                        plt_err("Failed to enable inline dev RQ %d, rc=%d", i,
                                rc);
        }

        return rc;
}

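/* Drain one outbound soft-expiry error ring. The first 64-bit word of the
 * ring holds the producer head and consumer tail positions; the following
 * entries each encode an outbound SA pointer. For every new entry the
 * registered work callback is invoked with a soft-expiry event carrying the
 * port id, the entry is cleared and the consumer count in the head word is
 * advanced.
 */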
static void
inl_outb_soft_exp_poll(struct nix_inl_dev *inl_dev, uint32_t ring_idx)
{
        union roc_ot_ipsec_err_ring_head head;
        struct roc_ot_ipsec_outb_sa *sa;
        uint16_t head_l, tail_l;
        uint64_t *ring_base;
        uint32_t port_id;

        port_id = ring_idx / ROC_NIX_SOFT_EXP_PER_PORT_MAX_RINGS;
        ring_base = PLT_PTR_CAST(inl_dev->sa_soft_exp_ring[ring_idx]);
        if (!ring_base) {
                plt_err("Invalid soft exp ring base");
                return;
        }

        head.u64 = __atomic_load_n(ring_base, __ATOMIC_ACQUIRE);
        head_l = head.s.head_pos;
        tail_l = head.s.tail_pos;

        while (tail_l != head_l) {
                union roc_ot_ipsec_err_ring_entry entry;
                int poll_counter = 0;

                while (poll_counter++ <
                       ROC_NIX_INL_SA_SOFT_EXP_ERR_MAX_POLL_COUNT) {
                        plt_delay_us(20);
                        entry.u64 = __atomic_load_n(ring_base + tail_l + 1,
                                                    __ATOMIC_ACQUIRE);
                        if (likely(entry.u64))
                                break;
                }

                entry.u64 = plt_be_to_cpu_64(entry.u64);
                sa = (struct roc_ot_ipsec_outb_sa *)(((uint64_t)entry.s.data1
                                                      << 51) |
                                                     (entry.s.data0 << 7));

                if (sa != NULL) {
                        uint64_t tmp = ~(uint32_t)0x0;
                        inl_dev->work_cb(&tmp, sa, (port_id << 8) | 0x1);
                        __atomic_store_n(ring_base + tail_l + 1, 0ULL,
                                         __ATOMIC_RELAXED);
                        __atomic_add_fetch((uint32_t *)ring_base, 1,
                                           __ATOMIC_ACQ_REL);
                } else
                        plt_err("Invalid SA");

                tail_l++;
        }
}

static void *
nix_inl_outb_poll_thread(void *args)
{
        struct nix_inl_dev *inl_dev = args;
        uint32_t poll_freq;
        uint32_t i;
        bool bit;

        poll_freq = inl_dev->soft_exp_poll_freq;

        while (!soft_exp_poll_thread_exit) {
                if (soft_exp_consumer_cnt) {
                        for (i = 0; i < ROC_NIX_INL_MAX_SOFT_EXP_RNGS; i++) {
                                bit = plt_bitmap_get(
                                        inl_dev->soft_exp_ring_bmap, i);
                                if (bit)
                                        inl_outb_soft_exp_poll(inl_dev, i);
                        }
                }
                usleep(poll_freq);
        }

        return 0;
}

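/* Create the control thread that periodically scans the soft-expiry rings.
 * A bitmap records which ring slots are in use so the poll loop only visits
 * rings that have actually been registered.
 */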
static int
nix_inl_outb_poll_thread_setup(struct nix_inl_dev *inl_dev)
{
        struct plt_bitmap *bmap;
        size_t bmap_sz;
        uint32_t i;
        void *mem;
        int rc;

        /* Allocate a bitmap that the poll thread uses to get the port_id
         * corresponding to the inl_outb_soft_exp_ring
         */
        bmap_sz =
                plt_bitmap_get_memory_footprint(ROC_NIX_INL_MAX_SOFT_EXP_RNGS);
        mem = plt_zmalloc(bmap_sz, PLT_CACHE_LINE_SIZE);
        if (mem == NULL) {
                plt_err("soft expiry ring bmap alloc failed");
                rc = -ENOMEM;
                goto exit;
        }

        bmap = plt_bitmap_init(ROC_NIX_INL_MAX_SOFT_EXP_RNGS, mem, bmap_sz);
        if (!bmap) {
                plt_err("soft expiry ring bmap init failed");
                plt_free(mem);
                rc = -ENOMEM;
                goto exit;
        }

        inl_dev->soft_exp_ring_bmap_mem = mem;
        inl_dev->soft_exp_ring_bmap = bmap;
        inl_dev->sa_soft_exp_ring = plt_zmalloc(
                ROC_NIX_INL_MAX_SOFT_EXP_RNGS * sizeof(uint64_t), 0);
        if (!inl_dev->sa_soft_exp_ring) {
                plt_err("soft expiry ring pointer array alloc failed");
                plt_free(mem);
                rc = -ENOMEM;
                goto exit;
        }

        for (i = 0; i < ROC_NIX_INL_MAX_SOFT_EXP_RNGS; i++)
                plt_bitmap_clear(inl_dev->soft_exp_ring_bmap, i);

        soft_exp_consumer_cnt = 0;
        soft_exp_poll_thread_exit = false;
        inl_dev->soft_exp_poll_freq = 100;
        rc = plt_ctrl_thread_create(&inl_dev->soft_exp_poll_thread,
                                    "OUTB_SOFT_EXP_POLL_THREAD", NULL,
                                    nix_inl_outb_poll_thread, inl_dev);
        if (rc) {
                plt_bitmap_free(inl_dev->soft_exp_ring_bmap);
                plt_free(inl_dev->soft_exp_ring_bmap_mem);
        }

exit:
        return rc;
}

int
roc_nix_inl_dev_init(struct roc_nix_inl_dev *roc_inl_dev)
{
        struct plt_pci_device *pci_dev;
        struct nix_inl_dev *inl_dev;
        struct idev_cfg *idev;
        int rc;

        pci_dev = roc_inl_dev->pci_dev;

        /* Skip probe if already done */
        idev = idev_get_cfg();
        if (idev == NULL)
                return -ENOTSUP;

        if (idev->nix_inl_dev) {
                plt_info("Skipping device %s, inline device already probed",
                         pci_dev->name);
                return -EEXIST;
        }

        PLT_STATIC_ASSERT(sizeof(struct nix_inl_dev) <= ROC_NIX_INL_MEM_SZ);

        inl_dev = (struct nix_inl_dev *)roc_inl_dev->reserved;
        memset(inl_dev, 0, sizeof(*inl_dev));

        inl_dev->pci_dev = pci_dev;
        inl_dev->ipsec_in_min_spi = roc_inl_dev->ipsec_in_min_spi;
        inl_dev->ipsec_in_max_spi = roc_inl_dev->ipsec_in_max_spi;
        inl_dev->selftest = roc_inl_dev->selftest;
        inl_dev->is_multi_channel = roc_inl_dev->is_multi_channel;
        inl_dev->channel = roc_inl_dev->channel;
        inl_dev->chan_mask = roc_inl_dev->chan_mask;
        inl_dev->attach_cptlf = roc_inl_dev->attach_cptlf;
        inl_dev->wqe_skip = roc_inl_dev->wqe_skip;
        inl_dev->spb_drop_pc = NIX_AURA_DROP_PC_DFLT;
        inl_dev->lpb_drop_pc = NIX_AURA_DROP_PC_DFLT;
        inl_dev->set_soft_exp_poll = roc_inl_dev->set_soft_exp_poll;
        inl_dev->nb_rqs = inl_dev->is_multi_channel ? 1 : PLT_MAX_ETHPORTS;

        if (roc_inl_dev->spb_drop_pc)
                inl_dev->spb_drop_pc = roc_inl_dev->spb_drop_pc;
        if (roc_inl_dev->lpb_drop_pc)
                inl_dev->lpb_drop_pc = roc_inl_dev->lpb_drop_pc;

        /* Initialize base device */
        rc = dev_init(&inl_dev->dev, pci_dev);
        if (rc) {
                plt_err("Failed to init roc device");
                goto error;
        }

        /* Attach LF resources */
        rc = nix_inl_lf_attach(inl_dev);
        if (rc) {
                plt_err("Failed to attach LF resources, rc=%d", rc);
                goto dev_cleanup;
        }

        /* Setup NIX LF */
        rc = nix_inl_nix_setup(inl_dev);
        if (rc)
                goto lf_detach;

        /* Setup SSO LF */
        rc = nix_inl_sso_setup(inl_dev);
        if (rc)
                goto nix_release;

        /* Setup CPT LF */
        rc = nix_inl_cpt_setup(inl_dev);
        if (rc)
                goto sso_release;

        if (inl_dev->set_soft_exp_poll) {
                rc = nix_inl_outb_poll_thread_setup(inl_dev);
                if (rc)
                        goto cpt_release;
        }

        /* Perform selftest if asked for */
        if (inl_dev->selftest) {
                rc = nix_inl_selftest();
                if (rc)
                        goto cpt_release;
        }

        idev->nix_inl_dev = inl_dev;

        return 0;
cpt_release:
        rc |= nix_inl_cpt_release(inl_dev);
sso_release:
        rc |= nix_inl_sso_release(inl_dev);
nix_release:
        rc |= nix_inl_nix_release(inl_dev);
lf_detach:
        rc |= nix_inl_lf_detach(inl_dev);
dev_cleanup:
        rc |= dev_fini(&inl_dev->dev, pci_dev);
error:
        return rc;
}

int
roc_nix_inl_dev_fini(struct roc_nix_inl_dev *roc_inl_dev)
{
        struct plt_pci_device *pci_dev;
        struct nix_inl_dev *inl_dev;
        struct idev_cfg *idev;
        int rc;

        idev = idev_get_cfg();
        if (idev == NULL)
                return 0;

        if (!idev->nix_inl_dev ||
            PLT_PTR_DIFF(roc_inl_dev->reserved, idev->nix_inl_dev))
                return 0;

        inl_dev = idev->nix_inl_dev;
        pci_dev = inl_dev->pci_dev;

        if (inl_dev->set_soft_exp_poll) {
                soft_exp_poll_thread_exit = true;
                pthread_join(inl_dev->soft_exp_poll_thread, NULL);
                plt_bitmap_free(inl_dev->soft_exp_ring_bmap);
                plt_free(inl_dev->soft_exp_ring_bmap_mem);
                plt_free(inl_dev->sa_soft_exp_ring);
        }

        /* Flush Inbound CTX cache entries */
        nix_inl_cpt_ctx_cache_sync(inl_dev);

        /* Release SSO */
        rc = nix_inl_sso_release(inl_dev);

        /* Release NIX */
        rc |= nix_inl_nix_release(inl_dev);

        /* Detach LFs */
        rc |= nix_inl_lf_detach(inl_dev);

        /* Cleanup mbox */
        rc |= dev_fini(&inl_dev->dev, pci_dev);
        if (rc)
                return rc;

        idev->nix_inl_dev = NULL;
        return 0;
}