1 /* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0)
2  *
3  * Copyright 2008-2016 Freescale Semiconductor Inc.
4  * Copyright 2017,2019 NXP
5  *
6  */
7
8 #include "qman.h"
9 #include <rte_branch_prediction.h>
10 #include <rte_dpaa_bus.h>
11 #include <rte_eventdev.h>
12 #include <rte_byteorder.h>
13
14 #include <dpaa_bits.h>
15
16 /* Compilation constants */
17 #define DQRR_MAXFILL    15
18 #define EQCR_ITHRESH    4       /* if EQCR congests, interrupt threshold */
19 #define IRQNAME         "QMan portal %d"
20 #define MAX_IRQNAME     16      /* big enough for "QMan portal %d" */
21 /* maximum number of DQRR entries to process in qman_poll() */
22 #define FSL_QMAN_POLL_LIMIT 8
23
24 /* Lock/unlock frame queues, subject to the "LOCKED" flag. This is about
25  * inter-processor locking only. Note, FQLOCK() is always called either under a
26  * local_irq_save() or from interrupt context - hence there's no need for irq
27  * protection (and indeed, attempting to nest irq-protection doesn't work, as
28  * the "irq en/disable" machinery isn't recursive...).
29  */
30 #define FQLOCK(fq) \
31         do { \
32                 struct qman_fq *__fq478 = (fq); \
33                 if (fq_isset(__fq478, QMAN_FQ_FLAG_LOCKED)) \
34                         spin_lock(&__fq478->fqlock); \
35         } while (0)
36 #define FQUNLOCK(fq) \
37         do { \
38                 struct qman_fq *__fq478 = (fq); \
39                 if (fq_isset(__fq478, QMAN_FQ_FLAG_LOCKED)) \
40                         spin_unlock(&__fq478->fqlock); \
41         } while (0)
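/* NB: both macros degenerate to no-ops for FQs created without
 * QMAN_FQ_FLAG_LOCKED, so unlocked FQs pay no spinlock cost.
 */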
42
43 static inline void fq_set(struct qman_fq *fq, u32 mask)
44 {
45         dpaa_set_bits(mask, &fq->flags);
46 }
47
48 static inline void fq_clear(struct qman_fq *fq, u32 mask)
49 {
50         dpaa_clear_bits(mask, &fq->flags);
51 }
52
53 static inline int fq_isset(struct qman_fq *fq, u32 mask)
54 {
55         return fq->flags & mask;
56 }
57
58 static inline int fq_isclear(struct qman_fq *fq, u32 mask)
59 {
60         return !(fq->flags & mask);
61 }
62
63 struct qman_portal {
64         struct qm_portal p;
65         /* PORTAL_BITS_*** - dynamic, strictly internal */
66         unsigned long bits;
67         /* interrupt sources processed by portal_isr(), configurable */
68         unsigned long irq_sources;
69         u32 use_eqcr_ci_stashing;
70         u32 slowpoll;   /* only used when interrupts are off */
71         /* only 1 volatile dequeue at a time */
72         struct qman_fq *vdqcr_owned;
73         u32 sdqcr;
74         int dqrr_disable_ref;
75         /* A portal-specific handler for DCP ERNs. If this is NULL, the global
76          * handler is called instead.
77          */
78         qman_cb_dc_ern cb_dc_ern;
79         /* When the cpu-affine portal is activated, this is non-NULL */
80         const struct qm_portal_config *config;
81         struct dpa_rbtree retire_table;
82         char irqname[MAX_IRQNAME];
83         /* 2-element array. cgrs[0] is mask, cgrs[1] is snapshot. */
84         struct qman_cgrs *cgrs;
85         /* linked-list of CSCN handlers. */
86         struct list_head cgr_cbs;
87         /* list lock */
88         spinlock_t cgr_lock;
89         /* track if memory was allocated by the driver */
90 #if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
91         /* Keep a shadow copy of the DQRR on LE systems, as the SW needs to
92          * byte-swap the read-only DQRR memory.  The first entry must be
93          * aligned to 2 ** 10 so that DQRR index calculations can be based
94          * on the shadow copy address (6 address-shift bits + 4 DQRR-size bits).
95          */
96         struct qm_dqrr_entry shadow_dqrr[QM_DQRR_SIZE]
97                     __attribute__((aligned(1024)));
98 #endif
99 };
100
101 /* Global handler for DCP ERNs. Used when the portal receiving the message does
102  * not have a portal-specific handler.
103  */
104 static qman_cb_dc_ern cb_dc_ern;
105
106 static cpumask_t affine_mask;
107 static DEFINE_SPINLOCK(affine_mask_lock);
108 static u16 affine_channels[NR_CPUS];
109 static RTE_DEFINE_PER_LCORE(struct qman_portal, qman_affine_portal);
110
111 static inline struct qman_portal *get_affine_portal(void)
112 {
113         return &RTE_PER_LCORE(qman_affine_portal);
114 }
115
116 /* This gives a FQID->FQ lookup to cover the fact that we can't directly demux
117  * retirement notifications (the fact they are sometimes h/w-consumed means that
118  * contextB isn't always a s/w demux - and as we can't know which case it is
119  * when looking at the notification, we have to use the slow lookup for all of
120  * them). NB, it's possible to have multiple FQ objects refer to the same FQID
121  * (though at most one of them should be the consumer), so this table isn't for
122  * all FQs - FQs are added when retirement commands are issued, and removed when
123  * they complete, which also massively reduces the size of this table.
124  */
125 IMPLEMENT_DPAA_RBTREE(fqtree, struct qman_fq, node, fqid);
126 /*
127  * This is what everything can wait on, even if it migrates to a different cpu
128  * to the one whose affine portal it is waiting on.
129  */
130 static DECLARE_WAIT_QUEUE_HEAD(affine_queue);
131
132 static inline int table_push_fq(struct qman_portal *p, struct qman_fq *fq)
133 {
134         int ret = fqtree_push(&p->retire_table, fq);
135
136         if (ret)
137                 pr_err("ERROR: double FQ-retirement %d\n", fq->fqid);
138         return ret;
139 }
140
141 static inline void table_del_fq(struct qman_portal *p, struct qman_fq *fq)
142 {
143         fqtree_del(&p->retire_table, fq);
144 }
145
146 static inline struct qman_fq *table_find_fq(struct qman_portal *p, u32 fqid)
147 {
148         return fqtree_find(&p->retire_table, fqid);
149 }
150
151 #ifdef CONFIG_FSL_QMAN_FQ_LOOKUP
152 static void **qman_fq_lookup_table;
153 static size_t qman_fq_lookup_table_size;
154
155 int qman_setup_fq_lookup_table(size_t num_entries)
156 {
157         num_entries++;
158         /* Allocate 1 more entry since the first entry is not used */
159         qman_fq_lookup_table = vmalloc((num_entries * sizeof(void *)));
160         if (!qman_fq_lookup_table) {
161                 pr_err("QMan: Could not allocate fq lookup table\n");
162                 return -ENOMEM;
163         }
164         memset(qman_fq_lookup_table, 0, num_entries * sizeof(void *));
165         qman_fq_lookup_table_size = num_entries;
166         pr_debug("QMan: Allocated lookup table at %p, entry count %lu\n",
167                 qman_fq_lookup_table,
168                         (unsigned long)qman_fq_lookup_table_size);
169         return 0;
170 }
171
172 void qman_set_fq_lookup_table(void **fq_table)
173 {
174         qman_fq_lookup_table = fq_table;
175 }
176
177 /* global structure that maintains fq object mapping */
178 static DEFINE_SPINLOCK(fq_hash_table_lock);
179
180 static int find_empty_fq_table_entry(u32 *entry, struct qman_fq *fq)
181 {
182         u32 i;
183
184         spin_lock(&fq_hash_table_lock);
185         /* Can't use index zero because this has special meaning
186          * in the context_b field.
187          */
188         for (i = 1; i < qman_fq_lookup_table_size; i++) {
189                 if (qman_fq_lookup_table[i] == NULL) {
190                         *entry = i;
191                         qman_fq_lookup_table[i] = fq;
192                         spin_unlock(&fq_hash_table_lock);
193                         return 0;
194                 }
195         }
196         spin_unlock(&fq_hash_table_lock);
197         return -ENOMEM;
198 }
199
200 static void clear_fq_table_entry(u32 entry)
201 {
202         spin_lock(&fq_hash_table_lock);
203         DPAA_BUG_ON(entry >= qman_fq_lookup_table_size);
204         qman_fq_lookup_table[entry] = NULL;
205         spin_unlock(&fq_hash_table_lock);
206 }
207
208 static inline struct qman_fq *get_fq_table_entry(u32 entry)
209 {
210         DPAA_BUG_ON(entry >= qman_fq_lookup_table_size);
211         return qman_fq_lookup_table[entry];
212 }
213 #endif
214
215 static inline void cpu_to_hw_fqd(struct qm_fqd *fqd)
216 {
217         /* Byteswap the FQD to HW format */
218         fqd->fq_ctrl = cpu_to_be16(fqd->fq_ctrl);
219         fqd->dest_wq = cpu_to_be16(fqd->dest_wq);
220         fqd->ics_cred = cpu_to_be16(fqd->ics_cred);
221         fqd->context_b = cpu_to_be32(fqd->context_b);
222         fqd->context_a.opaque = cpu_to_be64(fqd->context_a.opaque);
223         fqd->opaque_td = cpu_to_be16(fqd->opaque_td);
224 }
225
226 static inline void hw_fqd_to_cpu(struct qm_fqd *fqd)
227 {
228         /* Byteswap the FQD to CPU format */
229         fqd->fq_ctrl = be16_to_cpu(fqd->fq_ctrl);
230         fqd->dest_wq = be16_to_cpu(fqd->dest_wq);
231         fqd->ics_cred = be16_to_cpu(fqd->ics_cred);
232         fqd->context_b = be32_to_cpu(fqd->context_b);
233         fqd->context_a.opaque = be64_to_cpu(fqd->context_a.opaque);
234 }
235
236 static inline void cpu_to_hw_fd(struct qm_fd *fd)
237 {
238         fd->addr = cpu_to_be40(fd->addr);
239         fd->status = cpu_to_be32(fd->status);
240         fd->opaque = cpu_to_be32(fd->opaque);
241 }
242
243 static inline void hw_fd_to_cpu(struct qm_fd *fd)
244 {
245         fd->addr = be40_to_cpu(fd->addr);
246         fd->status = be32_to_cpu(fd->status);
247         fd->opaque = be32_to_cpu(fd->opaque);
248 }
249
250 /* In the case that slow- and fast-path handling are both done by qman_poll()
251  * (ie. because there is no interrupt handling), we ought to balance how often
252  * we do the fast-path poll versus the slow-path poll. We'll use two decrementer
253  * sources, so we call the fast poll 'n' times before calling the slow poll
254  * once. The idle decrementer constant is used when the last slow-poll detected
255  * no work to do, and the busy decrementer constant when the last slow-poll had
256  * work to do.
257  */
258 #define SLOW_POLL_IDLE   1000
259 #define SLOW_POLL_BUSY   10
260 static u32 __poll_portal_slow(struct qman_portal *p, u32 is);
261 static inline unsigned int __poll_portal_fast(struct qman_portal *p,
262                                               unsigned int poll_limit);
263
264 /* Portal interrupt handler */
265 static irqreturn_t portal_isr(__always_unused int irq, void *ptr)
266 {
267         struct qman_portal *p = ptr;
268         /*
269          * The CSCI/CCSCI source is cleared inside __poll_portal_slow(), because
270          * it could race against a Query Congestion State command also given
271          * as part of the handling of this interrupt source. We mustn't
272          * clear it a second time in this top-level function.
273          */
274         u32 clear = QM_DQAVAIL_MASK | (p->irq_sources &
275                 ~(QM_PIRQ_CSCI | QM_PIRQ_CCSCI));
276         u32 is = qm_isr_status_read(&p->p) & p->irq_sources;
277         /* DQRR-handling if it's interrupt-driven */
278         if (is & QM_PIRQ_DQRI)
279                 __poll_portal_fast(p, FSL_QMAN_POLL_LIMIT);
280         /* Handling of anything else that's interrupt-driven */
281         clear |= __poll_portal_slow(p, is);
282         qm_isr_status_clear(&p->p, clear);
283         return IRQ_HANDLED;
284 }
285
286 /* This inner version is used privately by qman_create_affine_portal(), as well
287  * as by the exported qman_stop_dequeues().
288  */
289 static inline void qman_stop_dequeues_ex(struct qman_portal *p)
290 {
291         if (!(p->dqrr_disable_ref++))
292                 qm_dqrr_set_maxfill(&p->p, 0);
293 }
294
295 static int drain_mr_fqrni(struct qm_portal *p)
296 {
297         const struct qm_mr_entry *msg;
298 loop:
299         msg = qm_mr_current(p);
300         if (!msg) {
301                 /*
302                  * if MR was full and h/w had other FQRNI entries to produce, we
303                  * need to allow it time to produce those entries once the
304                  * existing entries are consumed. A worst-case situation
305                  * (fully-loaded system) means h/w sequencers may have to do 3-4
306                  * other things before servicing the portal's MR pump, each of
307                  * which (if slow) may take ~50 qman cycles (which is ~200
308                  * processor cycles). So rounding up and then multiplying this
309                  * worst-case estimate by a factor of 10, just to be
310                  * ultra-paranoid, goes as high as 10,000 cycles. NB, we consume
311                  * one entry at a time, so h/w has an opportunity to produce new
312                  * entries well before the ring has been fully consumed, so
313                  * we're being *really* paranoid here.
314                  */
315                 u64 now, then = mfatb();
316
317                 do {
318                         now = mfatb();
319                 } while ((then + 10000) > now);
320                 msg = qm_mr_current(p);
321                 if (!msg)
322                         return 0;
323         }
324         if ((msg->ern.verb & QM_MR_VERB_TYPE_MASK) != QM_MR_VERB_FQRNI) {
325                 /* We aren't draining anything but FQRNIs */
326                 pr_err("Found verb 0x%x in MR\n", msg->ern.verb);
327                 return -1;
328         }
329         qm_mr_next(p);
330         qm_mr_cci_consume(p, 1);
331         goto loop;
332 }
333
334 static inline int qm_eqcr_init(struct qm_portal *portal,
335                                enum qm_eqcr_pmode pmode,
336                                unsigned int eq_stash_thresh,
337                                int eq_stash_prio)
338 {
339         /* This use of 'register', as well as all other occurrences, is because
340          * it has been observed to generate much faster code with gcc than is
341          * otherwise the case.
342          */
343         register struct qm_eqcr *eqcr = &portal->eqcr;
344         u32 cfg;
345         u8 pi;
346
347         eqcr->ring = portal->addr.ce + QM_CL_EQCR;
348         eqcr->ci = qm_in(EQCR_CI_CINH) & (QM_EQCR_SIZE - 1);
349         qm_cl_invalidate(EQCR_CI);
350         pi = qm_in(EQCR_PI_CINH) & (QM_EQCR_SIZE - 1);
351         eqcr->cursor = eqcr->ring + pi;
352         eqcr->vbit = (qm_in(EQCR_PI_CINH) & QM_EQCR_SIZE) ?
353                         QM_EQCR_VERB_VBIT : 0;
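        /* The ring can hold at most QM_EQCR_SIZE - 1 uncommitted entries;
         * qm_cyc_diff() returns how many entries are currently outstanding
         * between the consumer (ci) and producer (pi) indices, modulo the
         * ring size.
         */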
354         eqcr->available = QM_EQCR_SIZE - 1 -
355                         qm_cyc_diff(QM_EQCR_SIZE, eqcr->ci, pi);
356         eqcr->ithresh = qm_in(EQCR_ITR);
357 #ifdef RTE_LIBRTE_DPAA_HWDEBUG
358         eqcr->busy = 0;
359         eqcr->pmode = pmode;
360 #endif
361         cfg = (qm_in(CFG) & 0x00ffffff) |
362                 (eq_stash_thresh << 28) | /* QCSP_CFG: EST */
363                 (eq_stash_prio << 26)   | /* QCSP_CFG: EP */
364                 ((pmode & 0x3) << 24);  /* QCSP_CFG::EPM */
365         qm_out(CFG, cfg);
366         return 0;
367 }
368
369 static inline void qm_eqcr_finish(struct qm_portal *portal)
370 {
371         register struct qm_eqcr *eqcr = &portal->eqcr;
372         u8 pi, ci;
373         u32 cfg;
374
375         /*
376          * Disable EQCI stashing because the QMan only
377          * presents the value it previously stashed to
378          * maintain coherency.  Setting the stash threshold
379          * to 1 then 0 ensures that QMan has resynchronized
380          * its internal copy so that the portal is clean
381          * when it is reinitialized in the future
382          */
383         cfg = (qm_in(CFG) & 0x0fffffff) |
384                 (1 << 28); /* QCSP_CFG: EST */
385         qm_out(CFG, cfg);
386         cfg &= 0x0fffffff; /* stash threshold = 0 */
387         qm_out(CFG, cfg);
388
389         pi = qm_in(EQCR_PI_CINH) & (QM_EQCR_SIZE - 1);
390         ci = qm_in(EQCR_CI_CINH) & (QM_EQCR_SIZE - 1);
391
392         /* Refresh EQCR CI cache value */
393         qm_cl_invalidate(EQCR_CI);
394         eqcr->ci = qm_cl_in(EQCR_CI) & (QM_EQCR_SIZE - 1);
395
396 #ifdef RTE_LIBRTE_DPAA_HWDEBUG
397         DPAA_ASSERT(!eqcr->busy);
398 #endif
399         if (pi != EQCR_PTR2IDX(eqcr->cursor))
400                 pr_crit("losing uncommitted EQCR entries\n");
401         if (ci != eqcr->ci)
402                 pr_crit("missing existing EQCR completions\n");
403         if (eqcr->ci != EQCR_PTR2IDX(eqcr->cursor))
404                 pr_crit("EQCR destroyed unquiesced\n");
405 }
406
407 static inline int qm_dqrr_init(struct qm_portal *portal,
408                         __maybe_unused const struct qm_portal_config *config,
409                         enum qm_dqrr_dmode dmode,
410                         __maybe_unused enum qm_dqrr_pmode pmode,
411                         enum qm_dqrr_cmode cmode, u8 max_fill)
412 {
413         register struct qm_dqrr *dqrr = &portal->dqrr;
414         u32 cfg;
415
416         /* Make sure the DQRR will be idle when we enable */
417         qm_out(DQRR_SDQCR, 0);
418         qm_out(DQRR_VDQCR, 0);
419         qm_out(DQRR_PDQCR, 0);
420         dqrr->ring = portal->addr.ce + QM_CL_DQRR;
421         dqrr->pi = qm_in(DQRR_PI_CINH) & (QM_DQRR_SIZE - 1);
422         dqrr->ci = qm_in(DQRR_CI_CINH) & (QM_DQRR_SIZE - 1);
423         dqrr->cursor = dqrr->ring + dqrr->ci;
424         dqrr->fill = qm_cyc_diff(QM_DQRR_SIZE, dqrr->ci, dqrr->pi);
425         dqrr->vbit = (qm_in(DQRR_PI_CINH) & QM_DQRR_SIZE) ?
426                         QM_DQRR_VERB_VBIT : 0;
427         dqrr->ithresh = qm_in(DQRR_ITR);
428 #ifdef RTE_LIBRTE_DPAA_HWDEBUG
429         dqrr->dmode = dmode;
430         dqrr->pmode = pmode;
431         dqrr->cmode = cmode;
432 #endif
433         /* Invalidate every ring entry before beginning */
434         for (cfg = 0; cfg < QM_DQRR_SIZE; cfg++)
435                 dccivac(qm_cl(dqrr->ring, cfg));
436         cfg = (qm_in(CFG) & 0xff000f00) |
437                 ((max_fill & (QM_DQRR_SIZE - 1)) << 20) | /* DQRR_MF */
438                 ((dmode & 1) << 18) |                   /* DP */
439                 ((cmode & 3) << 16) |                   /* DCM */
440                 0xa0 |                                  /* RE+SE */
441                 (0 ? 0x40 : 0) |                        /* Ignore RP */
442                 (0 ? 0x10 : 0);                         /* Ignore SP */
443         qm_out(CFG, cfg);
444         qm_dqrr_set_maxfill(portal, max_fill);
445         return 0;
446 }
447
448 static inline void qm_dqrr_finish(struct qm_portal *portal)
449 {
450         __maybe_unused register struct qm_dqrr *dqrr = &portal->dqrr;
451 #ifdef RTE_LIBRTE_DPAA_HWDEBUG
452         if ((dqrr->cmode != qm_dqrr_cdc) &&
453             (dqrr->ci != DQRR_PTR2IDX(dqrr->cursor)))
454                 pr_crit("Ignoring completed DQRR entries\n");
455 #endif
456 }
457
458 static inline int qm_mr_init(struct qm_portal *portal,
459                              __maybe_unused enum qm_mr_pmode pmode,
460                              enum qm_mr_cmode cmode)
461 {
462         register struct qm_mr *mr = &portal->mr;
463         u32 cfg;
464
465         mr->ring = portal->addr.ce + QM_CL_MR;
466         mr->pi = qm_in(MR_PI_CINH) & (QM_MR_SIZE - 1);
467         mr->ci = qm_in(MR_CI_CINH) & (QM_MR_SIZE - 1);
468         mr->cursor = mr->ring + mr->ci;
469         mr->fill = qm_cyc_diff(QM_MR_SIZE, mr->ci, mr->pi);
470         mr->vbit = (qm_in(MR_PI_CINH) & QM_MR_SIZE) ? QM_MR_VERB_VBIT : 0;
471         mr->ithresh = qm_in(MR_ITR);
472 #ifdef RTE_LIBRTE_DPAA_HWDEBUG
473         mr->pmode = pmode;
474         mr->cmode = cmode;
475 #endif
476         cfg = (qm_in(CFG) & 0xfffff0ff) |
477                 ((cmode & 1) << 8);             /* QCSP_CFG:MM */
478         qm_out(CFG, cfg);
479         return 0;
480 }
481
482 static inline void qm_mr_pvb_update(struct qm_portal *portal)
483 {
484         register struct qm_mr *mr = &portal->mr;
485         const struct qm_mr_entry *res = qm_cl(mr->ring, mr->pi);
486
487 #ifdef RTE_LIBRTE_DPAA_HWDEBUG
488         DPAA_ASSERT(mr->pmode == qm_mr_pvb);
489 #endif
490         /* when accessing 'verb', use __raw_readb() to ensure that compiler
491          * inlining doesn't try to optimise out "excess reads".
492          */
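        /* Valid-bit tracking: the producer toggles QM_MR_VERB_VBIT each time
         * it wraps the MR ring, so an entry is new iff its vbit matches the
         * expected value; we flip our expectation whenever 'pi' wraps to 0.
         */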
493         if ((__raw_readb(&res->ern.verb) & QM_MR_VERB_VBIT) == mr->vbit) {
494                 mr->pi = (mr->pi + 1) & (QM_MR_SIZE - 1);
495                 if (!mr->pi)
496                         mr->vbit ^= QM_MR_VERB_VBIT;
497                 mr->fill++;
498                 res = MR_INC(res);
499         }
500         dcbit_ro(res);
501 }
502
503 struct qman_portal *
504 qman_init_portal(struct qman_portal *portal,
505                    const struct qm_portal_config *c,
506                    const struct qman_cgrs *cgrs)
507 {
508         struct qm_portal *p;
509         char buf[16];
510         int ret;
511         u32 isdr;
512
513         p = &portal->p;
514
515         if (!c)
516                 c = portal->config;
517
518         if (dpaa_svr_family == SVR_LS1043A_FAMILY)
519                 portal->use_eqcr_ci_stashing = 3;
520         else
521                 portal->use_eqcr_ci_stashing =
522                                         ((qman_ip_rev >= QMAN_REV30) ? 1 : 0);
523
524         /*
525          * prep the low-level portal struct with the mapped addresses from the
526          * config; everything that follows depends on it, and "config" is
527          * kept mainly for later (de)reference.
528          */
529         p->addr.ce = c->addr_virt[DPAA_PORTAL_CE];
530         p->addr.ci = c->addr_virt[DPAA_PORTAL_CI];
531         /*
532          * If CI-stashing is used, the current defaults use a threshold of 3,
533          * and stash with higher-than-DQRR priority.
534          */
535         if (qm_eqcr_init(p, qm_eqcr_pvb,
536                          portal->use_eqcr_ci_stashing, 1)) {
537                 pr_err("Qman EQCR initialisation failed\n");
538                 goto fail_eqcr;
539         }
540         if (qm_dqrr_init(p, c, qm_dqrr_dpush, qm_dqrr_pvb,
541                          qm_dqrr_cdc, DQRR_MAXFILL)) {
542                 pr_err("Qman DQRR initialisation failed\n");
543                 goto fail_dqrr;
544         }
545         if (qm_mr_init(p, qm_mr_pvb, qm_mr_cci)) {
546                 pr_err("Qman MR initialisation failed\n");
547                 goto fail_mr;
548         }
549         if (qm_mc_init(p)) {
550                 pr_err("Qman MC initialisation failed\n");
551                 goto fail_mc;
552         }
553
554         /* static interrupt-gating controls */
555         qm_dqrr_set_ithresh(p, 0);
556         qm_mr_set_ithresh(p, 0);
557         qm_isr_set_iperiod(p, 0);
558         portal->cgrs = kmalloc(2 * sizeof(*cgrs), GFP_KERNEL);
559         if (!portal->cgrs)
560                 goto fail_cgrs;
561         /* initial snapshot is no-depletion */
562         qman_cgrs_init(&portal->cgrs[1]);
563         if (cgrs)
564                 portal->cgrs[0] = *cgrs;
565         else
566                 /* if the given mask is NULL, assume all CGRs can be seen */
567                 qman_cgrs_fill(&portal->cgrs[0]);
568         INIT_LIST_HEAD(&portal->cgr_cbs);
569         spin_lock_init(&portal->cgr_lock);
570         portal->bits = 0;
571         portal->slowpoll = 0;
572         portal->sdqcr = QM_SDQCR_SOURCE_CHANNELS | QM_SDQCR_COUNT_UPTO3 |
573                         QM_SDQCR_DEDICATED_PRECEDENCE | QM_SDQCR_TYPE_PRIO_QOS |
574                         QM_SDQCR_TOKEN_SET(0xab) | QM_SDQCR_CHANNELS_DEDICATED;
575         portal->dqrr_disable_ref = 0;
576         portal->cb_dc_ern = NULL;
577         sprintf(buf, "qportal-%d", c->channel);
578         dpa_rbtree_init(&portal->retire_table);
579         isdr = 0xffffffff;
580         qm_isr_disable_write(p, isdr);
581         portal->irq_sources = 0;
582         qm_isr_enable_write(p, portal->irq_sources);
583         qm_isr_status_clear(p, 0xffffffff);
584         snprintf(portal->irqname, MAX_IRQNAME, IRQNAME, c->cpu);
585         if (request_irq(c->irq, portal_isr, 0, portal->irqname,
586                         portal)) {
587                 pr_err("request_irq() failed\n");
588                 goto fail_irq;
589         }
590
591         /* Need EQCR to be empty before continuing */
592         isdr &= ~QM_PIRQ_EQCI;
593         qm_isr_disable_write(p, isdr);
594         ret = qm_eqcr_get_fill(p);
595         if (ret) {
596                 pr_err("Qman EQCR unclean\n");
597                 goto fail_eqcr_empty;
598         }
599         isdr &= ~(QM_PIRQ_DQRI | QM_PIRQ_MRI);
600         qm_isr_disable_write(p, isdr);
601         if (qm_dqrr_current(p)) {
602                 pr_err("Qman DQRR unclean\n");
603                 qm_dqrr_cdc_consume_n(p, 0xffff);
604         }
605         if (qm_mr_current(p) && drain_mr_fqrni(p)) {
606                 /* special handling, drain just in case it's a few FQRNIs */
607                 if (drain_mr_fqrni(p))
608                         goto fail_dqrr_mr_empty;
609         }
610         /* Success */
611         portal->config = c;
612         qm_isr_disable_write(p, 0);
613         qm_isr_uninhibit(p);
614         /* Write a sane SDQCR */
615         qm_dqrr_sdqcr_set(p, portal->sdqcr);
616         return portal;
617 fail_dqrr_mr_empty:
618 fail_eqcr_empty:
619         free_irq(c->irq, portal);
620 fail_irq:
621         kfree(portal->cgrs);
622         spin_lock_destroy(&portal->cgr_lock);
623 fail_cgrs:
624         qm_mc_finish(p);
625 fail_mc:
626         qm_mr_finish(p);
627 fail_mr:
628         qm_dqrr_finish(p);
629 fail_dqrr:
630         qm_eqcr_finish(p);
631 fail_eqcr:
632         return NULL;
633 }
634
635 #define MAX_GLOBAL_PORTALS 8
636 static struct qman_portal global_portals[MAX_GLOBAL_PORTALS];
637 static rte_atomic16_t global_portals_used[MAX_GLOBAL_PORTALS];
638
639 struct qman_portal *
640 qman_alloc_global_portal(struct qm_portal_config *q_pcfg)
641 {
642         unsigned int i;
643
644         for (i = 0; i < MAX_GLOBAL_PORTALS; i++) {
645                 if (rte_atomic16_test_and_set(&global_portals_used[i])) {
646                         global_portals[i].config = q_pcfg;
647                         return &global_portals[i];
648                 }
649         }
650         pr_err("No portal available (%x)\n", MAX_GLOBAL_PORTALS);
651
652         return NULL;
653 }
654
655 int
656 qman_free_global_portal(struct qman_portal *portal)
657 {
658         unsigned int i;
659
660         for (i = 0; i < MAX_GLOBAL_PORTALS; i++) {
661                 if (&global_portals[i] == portal) {
662                         rte_atomic16_clear(&global_portals_used[i]);
663                         return 0;
664                 }
665         }
666         return -1;
667 }
668
669 void
670 qman_portal_uninhibit_isr(struct qman_portal *portal)
671 {
672         qm_isr_uninhibit(&portal->p);
673 }
674
675 struct qman_portal *qman_create_affine_portal(const struct qm_portal_config *c,
676                                               const struct qman_cgrs *cgrs)
677 {
678         struct qman_portal *res;
679         struct qman_portal *portal = get_affine_portal();
680
681         /* A criterion for calling this function (from qman_driver.c) is that
682          * we're already affine to the cpu and won't schedule onto another cpu.
683          */
684         res = qman_init_portal(portal, c, cgrs);
685         if (res) {
686                 spin_lock(&affine_mask_lock);
687                 CPU_SET(c->cpu, &affine_mask);
688                 affine_channels[c->cpu] =
689                         c->channel;
690                 spin_unlock(&affine_mask_lock);
691         }
692         return res;
693 }
694
695 static inline
696 void qman_destroy_portal(struct qman_portal *qm)
697 {
698         const struct qm_portal_config *pcfg;
699
700         /* Stop dequeues on the portal */
701         qm_dqrr_sdqcr_set(&qm->p, 0);
702
703         /*
704          * NB we do this to "quiesce" EQCR. If we add enqueue-completions or
705          * something related to QM_PIRQ_EQCI, this may need fixing.
706          * Also, due to the prefetching model used for CI updates in the enqueue
707          * path, this update will only invalidate the CI cacheline *after*
708          * working on it, so we need to call this twice to ensure a full update
709          * irrespective of where the enqueue processing was at when the teardown
710          * began.
711          */
712         qm_eqcr_cce_update(&qm->p);
713         qm_eqcr_cce_update(&qm->p);
714         pcfg = qm->config;
715
716         free_irq(pcfg->irq, qm);
717
718         kfree(qm->cgrs);
719         qm_mc_finish(&qm->p);
720         qm_mr_finish(&qm->p);
721         qm_dqrr_finish(&qm->p);
722         qm_eqcr_finish(&qm->p);
723
724         qm->config = NULL;
725
726         spin_lock_destroy(&qm->cgr_lock);
727 }
728
729 const struct qm_portal_config *
730 qman_destroy_affine_portal(struct qman_portal *qp)
731 {
732         /* We don't want to redirect if we're a slave, use "raw" */
733         struct qman_portal *qm;
734         const struct qm_portal_config *pcfg;
735         int cpu;
736
737         if (qp == NULL)
738                 qm = get_affine_portal();
739         else
740                 qm = qp;
741         pcfg = qm->config;
742         cpu = pcfg->cpu;
743
744         qman_destroy_portal(qm);
745
746         spin_lock(&affine_mask_lock);
747         CPU_CLR(cpu, &affine_mask);
748         spin_unlock(&affine_mask_lock);
749
750         qman_free_global_portal(qm);
751
752         return pcfg;
753 }
754
755 int qman_get_portal_index(void)
756 {
757         struct qman_portal *p = get_affine_portal();
758         return p->config->index;
759 }
760
761 /* Inline helper to reduce nesting in __poll_portal_slow() */
762 static inline void fq_state_change(struct qman_portal *p, struct qman_fq *fq,
763                                    const struct qm_mr_entry *msg, u8 verb)
764 {
765         FQLOCK(fq);
766         switch (verb) {
767         case QM_MR_VERB_FQRL:
768                 DPAA_ASSERT(fq_isset(fq, QMAN_FQ_STATE_ORL));
769                 fq_clear(fq, QMAN_FQ_STATE_ORL);
770                 table_del_fq(p, fq);
771                 break;
772         case QM_MR_VERB_FQRN:
773                 DPAA_ASSERT((fq->state == qman_fq_state_parked) ||
774                             (fq->state == qman_fq_state_sched));
775                 DPAA_ASSERT(fq_isset(fq, QMAN_FQ_STATE_CHANGING));
776                 fq_clear(fq, QMAN_FQ_STATE_CHANGING);
777                 if (msg->fq.fqs & QM_MR_FQS_NOTEMPTY)
778                         fq_set(fq, QMAN_FQ_STATE_NE);
779                 if (msg->fq.fqs & QM_MR_FQS_ORLPRESENT)
780                         fq_set(fq, QMAN_FQ_STATE_ORL);
781                 else
782                         table_del_fq(p, fq);
783                 fq->state = qman_fq_state_retired;
784                 break;
785         case QM_MR_VERB_FQPN:
786                 DPAA_ASSERT(fq->state == qman_fq_state_sched);
787                 DPAA_ASSERT(fq_isclear(fq, QMAN_FQ_STATE_CHANGING));
788                 fq->state = qman_fq_state_parked;
789         }
790         FQUNLOCK(fq);
791 }
792
793 static u32 __poll_portal_slow(struct qman_portal *p, u32 is)
794 {
795         const struct qm_mr_entry *msg;
796         struct qm_mr_entry swapped_msg;
797
798         if (is & QM_PIRQ_CSCI) {
799                 struct qman_cgrs rr, c;
800                 struct qm_mc_result *mcr;
801                 struct qman_cgr *cgr;
802
803                 spin_lock(&p->cgr_lock);
804                 /*
805                  * The CSCI bit must be cleared _before_ issuing the
806                  * Query Congestion State command, to ensure that a long
807                  * CGR State Change callback cannot miss an intervening
808                  * state change.
809                  */
810                 qm_isr_status_clear(&p->p, QM_PIRQ_CSCI);
811                 qm_mc_start(&p->p);
812                 qm_mc_commit(&p->p, QM_MCC_VERB_QUERYCONGESTION);
813                 while (!(mcr = qm_mc_result(&p->p)))
814                         cpu_relax();
815                 /* mask out the ones I'm not interested in */
816                 qman_cgrs_and(&rr, (const struct qman_cgrs *)
817                         &mcr->querycongestion.state, &p->cgrs[0]);
818                 /* check previous snapshot for delta, enter/exit congestion */
819                 qman_cgrs_xor(&c, &rr, &p->cgrs[1]);
820                 /* update snapshot */
821                 qman_cgrs_cp(&p->cgrs[1], &rr);
822                 /* Invoke callback */
823                 list_for_each_entry(cgr, &p->cgr_cbs, node)
824                         if (cgr->cb && qman_cgrs_get(&c, cgr->cgrid))
825                                 cgr->cb(p, cgr, qman_cgrs_get(&rr, cgr->cgrid));
826                 spin_unlock(&p->cgr_lock);
827         }
828
829         if (is & QM_PIRQ_EQRI) {
830                 qm_eqcr_cce_update(&p->p);
831                 qm_eqcr_set_ithresh(&p->p, 0);
832                 wake_up(&affine_queue);
833         }
834
835         if (is & QM_PIRQ_MRI) {
836                 struct qman_fq *fq;
837                 u8 verb, num = 0;
838 mr_loop:
839                 qm_mr_pvb_update(&p->p);
840                 msg = qm_mr_current(&p->p);
841                 if (!msg)
842                         goto mr_done;
843                 swapped_msg = *msg;
844                 hw_fd_to_cpu(&swapped_msg.ern.fd);
845                 verb = msg->ern.verb & QM_MR_VERB_TYPE_MASK;
846                 /* The message is a software ERN iff the 0x20 bit is clear */
847                 if (verb & 0x20) {
848                         switch (verb) {
849                         case QM_MR_VERB_FQRNI:
850                                 /* nada, we drop FQRNIs on the floor */
851                                 break;
852                         case QM_MR_VERB_FQRN:
853                         case QM_MR_VERB_FQRL:
854                                 /* Lookup in the retirement table */
855                                 fq = table_find_fq(p,
856                                                    be32_to_cpu(msg->fq.fqid));
857                                 DPAA_BUG_ON(!fq);
858                                 fq_state_change(p, fq, &swapped_msg, verb);
859                                 if (fq->cb.fqs)
860                                         fq->cb.fqs(p, fq, &swapped_msg);
861                                 break;
862                         case QM_MR_VERB_FQPN:
863                                 /* Parked */
864 #ifdef CONFIG_FSL_QMAN_FQ_LOOKUP
865                                 fq = get_fq_table_entry(msg->fq.contextB);
866 #else
867                                 fq = (void *)(uintptr_t)msg->fq.contextB;
868 #endif
869                                 fq_state_change(p, fq, msg, verb);
870                                 if (fq->cb.fqs)
871                                         fq->cb.fqs(p, fq, &swapped_msg);
872                                 break;
873                         case QM_MR_VERB_DC_ERN:
874                                 /* DCP ERN */
875                                 if (p->cb_dc_ern)
876                                         p->cb_dc_ern(p, msg);
877                                 else if (cb_dc_ern)
878                                         cb_dc_ern(p, msg);
879                                 else {
880                                         static int warn_once;
881
882                                         if (!warn_once) {
883                                                 pr_crit("Leaking DCP ERNs!\n");
884                                                 warn_once = 1;
885                                         }
886                                 }
887                                 break;
888                         default:
889                                 pr_crit("Invalid MR verb 0x%02x\n", verb);
890                         }
891                 } else {
892                         /* It's a software ERN */
893 #ifdef CONFIG_FSL_QMAN_FQ_LOOKUP
894                         fq = get_fq_table_entry(be32_to_cpu(msg->ern.tag));
895 #else
896                         fq = (void *)(uintptr_t)be32_to_cpu(msg->ern.tag);
897 #endif
898                         fq->cb.ern(p, fq, &swapped_msg);
899                 }
900                 num++;
901                 qm_mr_next(&p->p);
902                 goto mr_loop;
903 mr_done:
904                 qm_mr_cci_consume(&p->p, num);
905         }
906         /*
907          * QM_PIRQ_CSCI/CCSCI has already been cleared, as part of its specific
908          * processing. If that interrupt source has meanwhile been re-asserted,
909          * we mustn't clear it here (or in the top-level interrupt handler).
910          */
911         return is & (QM_PIRQ_EQCI | QM_PIRQ_EQRI | QM_PIRQ_MRI);
912 }
913
914 /*
915  * remove some slowish-path stuff from the "fast path" and make sure it isn't
916  * inlined.
917  */
918 static noinline void clear_vdqcr(struct qman_portal *p, struct qman_fq *fq)
919 {
920         p->vdqcr_owned = NULL;
921         FQLOCK(fq);
922         fq_clear(fq, QMAN_FQ_STATE_VDQCR);
923         FQUNLOCK(fq);
924         wake_up(&affine_queue);
925 }
926
927 /*
928  * The only states that would conflict with other things if they ran at the
929  * same time on the same cpu are:
930  *
931  *   (i) setting/clearing vdqcr_owned, and
932  *  (ii) clearing the NE (Not Empty) flag.
933  *
934  * Both are safe, because:
935  *
936  *   (i) this clearing can only occur after qman_set_vdq() has set the
937  *       vdqcr_owned field (which it does before setting VDQCR), and
938  *       qman_volatile_dequeue() blocks interrupts and preemption while this is
939  *       done so that we can't interfere.
940  *  (ii) the NE flag is only cleared after qman_retire_fq() has set it, and as
941  *       with (i) that API prevents us from interfering until it's safe.
942  *
943  * The good thing is that qman_set_vdq() and qman_retire_fq() run far
944  * less frequently (ie. per-FQ) than __poll_portal_fast() does, so the net
945  * advantage comes from this function not having to "lock" anything at all.
946  *
947  * Note also that the callbacks are invoked at points which are safe against the
948  * above potential conflicts, but that this function itself is not re-entrant
949  * (this is because the function tracks one end of each FIFO in the portal and
950  * we do *not* want to lock that). So the consequence is that it is safe for
951  * user callbacks to call into any QMan API.
952  */
953 static inline unsigned int __poll_portal_fast(struct qman_portal *p,
954                                               unsigned int poll_limit)
955 {
956         const struct qm_dqrr_entry *dq;
957         struct qman_fq *fq;
958         enum qman_cb_dqrr_result res;
959         unsigned int limit = 0;
960 #if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
961         struct qm_dqrr_entry *shadow;
962 #endif
963         do {
964                 qm_dqrr_pvb_update(&p->p);
965                 dq = qm_dqrr_current(&p->p);
966                 if (unlikely(!dq))
967                         break;
968 #if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
969         /* If running on an LE system the fields of the
970          * dequeue entry must be swapped.  Because the
971          * QMan HW will ignore writes, the DQRR entry is
972          * copied and the index stored within the copy.
973          */
974                 shadow = &p->shadow_dqrr[DQRR_PTR2IDX(dq)];
975                 *shadow = *dq;
976                 dq = shadow;
977                 shadow->fqid = be32_to_cpu(shadow->fqid);
978                 shadow->seqnum = be16_to_cpu(shadow->seqnum);
979                 hw_fd_to_cpu(&shadow->fd);
980 #endif
981
982                 if (dq->stat & QM_DQRR_STAT_UNSCHEDULED) {
983                         /*
984                          * VDQCR: don't trust context_b as the FQ may have
985                          * been configured for h/w consumption and we're
986                          * draining it post-retirement.
987                          */
988                         fq = p->vdqcr_owned;
989                         /*
990                          * We only set QMAN_FQ_STATE_NE when retiring, so we
991                          * only need to check for clearing it when doing
992                          * volatile dequeues.  It's one less thing to check
993                          * in the critical path (SDQCR).
994                          */
995                         if (dq->stat & QM_DQRR_STAT_FQ_EMPTY)
996                                 fq_clear(fq, QMAN_FQ_STATE_NE);
997                         /*
998                          * This is duplicated from the SDQCR code, but we
999                          * have stuff to do before *and* after this callback,
1000                          * and we don't want multiple if()s in the critical
1001                          * path (SDQCR).
1002                          */
1003                         res = fq->cb.dqrr(p, fq, dq);
1004                         if (res == qman_cb_dqrr_stop)
1005                                 break;
1006                         /* Check for VDQCR completion */
1007                         if (dq->stat & QM_DQRR_STAT_DQCR_EXPIRED)
1008                                 clear_vdqcr(p, fq);
1009                 } else {
1010                         /* SDQCR: context_b points to the FQ */
1011 #ifdef CONFIG_FSL_QMAN_FQ_LOOKUP
1012                         fq = get_fq_table_entry(dq->contextB);
1013 #else
1014                         fq = (void *)(uintptr_t)dq->contextB;
1015 #endif
1016                         /* Now let the callback do its stuff */
1017                         res = fq->cb.dqrr(p, fq, dq);
1018                         /*
1019                          * The callback can request that we exit without
1020                          * consuming this entry or advancing.
1021                          */
1022                         if (res == qman_cb_dqrr_stop)
1023                                 break;
1024                 }
1025                 /* Interpret 'dq' from a driver perspective. */
1026                 /*
1027                  * Parking isn't possible unless HELDACTIVE was set. NB,
1028                  * FORCEELIGIBLE implies HELDACTIVE, so we only need to
1029                  * check for HELDACTIVE to cover both.
1030                  */
1031                 DPAA_ASSERT((dq->stat & QM_DQRR_STAT_FQ_HELDACTIVE) ||
1032                             (res != qman_cb_dqrr_park));
1033                 /* just means "skip it, I'll consume it myself later on" */
1034                 if (res != qman_cb_dqrr_defer)
1035                         qm_dqrr_cdc_consume_1ptr(&p->p, dq,
1036                                                  res == qman_cb_dqrr_park);
1037                 /* Move forward */
1038                 qm_dqrr_next(&p->p);
1039                 /*
1040                  * Entry processed and consumed, increment our counter.  The
1041                  * callback can request that we exit after consuming the
1042                  * entry, and we also exit if we reach our processing limit,
1043                  * so loop back only if neither of these conditions is met.
1044                  */
1045         } while (++limit < poll_limit && res != qman_cb_dqrr_consume_stop);
1046
1047         return limit;
1048 }
1049
1050 int qman_irqsource_add(u32 bits)
1051 {
1052         struct qman_portal *p = get_affine_portal();
1053
1054         bits = bits & QM_PIRQ_VISIBLE;
1055
1056         /* Clear any previously remaining interrupt conditions in
1057          * QCSP_ISR. This prevents raising a false interrupt when
1058          * interrupt conditions are enabled in QCSP_IER.
1059          */
1060         qm_isr_status_clear(&p->p, bits);
1061         dpaa_set_bits(bits, &p->irq_sources);
1062         qm_isr_enable_write(&p->p, p->irq_sources);
1063
1064         return 0;
1065 }
1066
1067 int qman_fq_portal_irqsource_add(struct qman_portal *p, u32 bits)
1068 {
1069         bits = bits & QM_PIRQ_VISIBLE;
1070
1071         /* Clear any previously remaining interrupt conditions in
1072          * QCSP_ISR. This prevents raising a false interrupt when
1073          * interrupt conditions are enabled in QCSP_IER.
1074          */
1075         qm_isr_status_clear(&p->p, bits);
1076         dpaa_set_bits(bits, &p->irq_sources);
1077         qm_isr_enable_write(&p->p, p->irq_sources);
1078
1079         return 0;
1080 }
1081
1082 int qman_irqsource_remove(u32 bits)
1083 {
1084         struct qman_portal *p = get_affine_portal();
1085         u32 ier;
1086
1087         /* Our interrupt handler only processes+clears status register bits that
1088          * are in p->irq_sources. As we're trimming that mask, if one of them
1089          * were to assert in the status register just before we remove it from
1090          * the enable register, there would be an interrupt-storm when we
1091          * release the IRQ lock. So we wait for the enable register update to
1092          * take effect in h/w (by reading it back) and then clear all other bits
1093          * in the status register. Ie. we clear them from ISR once it's certain
1094          * IER won't allow them to reassert.
1095          */
1096
1097         bits &= QM_PIRQ_VISIBLE;
1098         dpaa_clear_bits(bits, &p->irq_sources);
1099         qm_isr_enable_write(&p->p, p->irq_sources);
1100         ier = qm_isr_enable_read(&p->p);
1101         /* Using "~ier" (rather than "bits" or "~p->irq_sources") creates a
1102          * data-dependency, ie. to protect against re-ordering.
1103          */
1104         qm_isr_status_clear(&p->p, ~ier);
1105         return 0;
1106 }
1107
1108 int qman_fq_portal_irqsource_remove(struct qman_portal *p, u32 bits)
1109 {
1110         u32 ier;
1111
1112         /* Our interrupt handler only processes+clears status register bits that
1113          * are in p->irq_sources. As we're trimming that mask, if one of them
1114          * were to assert in the status register just before we remove it from
1115          * the enable register, there would be an interrupt-storm when we
1116          * release the IRQ lock. So we wait for the enable register update to
1117          * take effect in h/w (by reading it back) and then clear all other bits
1118          * in the status register. Ie. we clear them from ISR once it's certain
1119          * IER won't allow them to reassert.
1120          */
1121
1122         bits &= QM_PIRQ_VISIBLE;
1123         dpaa_clear_bits(bits, &p->irq_sources);
1124         qm_isr_enable_write(&p->p, p->irq_sources);
1125         ier = qm_isr_enable_read(&p->p);
1126         /* Using "~ier" (rather than "bits" or "~p->irq_sources") creates a
1127          * data-dependency, ie. to protect against re-ordering.
1128          */
1129         qm_isr_status_clear(&p->p, ~ier);
1130         return 0;
1131 }
1132
1133 u16 qman_affine_channel(int cpu)
1134 {
1135         if (cpu < 0) {
1136                 struct qman_portal *portal = get_affine_portal();
1137
1138                 cpu = portal->config->cpu;
1139         }
1140         DPAA_BUG_ON(!CPU_ISSET(cpu, &affine_mask));
1141         return affine_channels[cpu];
1142 }
1143
1144 unsigned int qman_portal_poll_rx(unsigned int poll_limit,
1145                                  void **bufs,
1146                                  struct qman_portal *p)
1147 {
1148         struct qm_portal *portal = &p->p;
1149         register struct qm_dqrr *dqrr = &portal->dqrr;
1150         struct qm_dqrr_entry *dq[QM_DQRR_SIZE], *shadow[QM_DQRR_SIZE];
1151         struct qman_fq *fq;
1152         unsigned int limit = 0, rx_number = 0;
1153         uint32_t consume = 0;
1154
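        /* Batch up to poll_limit DQRR entries: each entry is recorded (and
         * byte-swapped into the shadow copy on LE systems), the whole burst is
         * handed to the pull-mode callback in one go, and everything is then
         * acknowledged with a single bit-mode DCAP write below.
         */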
1155         do {
1156                 qm_dqrr_pvb_update(&p->p);
1157                 if (!dqrr->fill)
1158                         break;
1159
1160                 dq[rx_number] = dqrr->cursor;
1161                 dqrr->cursor = DQRR_CARRYCLEAR(dqrr->cursor + 1);
1162                 /* Prefetch the next DQRR entry */
1163                 rte_prefetch0(dqrr->cursor);
1164
1165 #if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
1166                 /* If running on an LE system the fields of the
1167                  * dequeue entry must be swapped.  Because the
1168                  * QMan HW will ignore writes, the DQRR entry is
1169                  * copied and the index stored within the copy.
1170                  */
1171                 shadow[rx_number] =
1172                         &p->shadow_dqrr[DQRR_PTR2IDX(dq[rx_number])];
1173                 shadow[rx_number]->fd.opaque_addr =
1174                         dq[rx_number]->fd.opaque_addr;
1175                 shadow[rx_number]->fd.addr =
1176                         be40_to_cpu(dq[rx_number]->fd.addr);
1177                 shadow[rx_number]->fd.opaque =
1178                         be32_to_cpu(dq[rx_number]->fd.opaque);
1179 #else
1180                 shadow[rx_number] = dq[rx_number];
1181 #endif
1182
1183                 /* SDQCR: context_b points to the FQ */
1184 #ifdef CONFIG_FSL_QMAN_FQ_LOOKUP
1185                 fq = qman_fq_lookup_table[dq[rx_number]->contextB];
1186 #else
1187                 fq = (void *)dq[rx_number]->contextB;
1188 #endif
1189                 if (fq->cb.dqrr_prepare)
1190                         fq->cb.dqrr_prepare(shadow[rx_number],
1191                                             &bufs[rx_number]);
1192
1193                 consume |= (1 << (31 - DQRR_PTR2IDX(shadow[rx_number])));
1194                 rx_number++;
1195                 --dqrr->fill;
1196         } while (++limit < poll_limit);
1197
1198         if (rx_number)
1199                 fq->cb.dqrr_dpdk_pull_cb(&fq, shadow, bufs, rx_number);
1200
1201         /* Consume all the DQRR entries together */
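        /* Each set bit in 'consume' acknowledges one DQRR index recorded
         * above; the (1 << 8) flag requests consume-by-bitmask, mirroring
         * qm_dqrr_cdc_consume_n().
         */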
1202         qm_out(DQRR_DCAP, (1 << 8) | consume);
1203
1204         return rx_number;
1205 }
1206
1207 void qman_clear_irq(void)
1208 {
1209         struct qman_portal *p = get_affine_portal();
1210         u32 clear = QM_DQAVAIL_MASK | (p->irq_sources &
1211                 ~(QM_PIRQ_CSCI | QM_PIRQ_CCSCI));
1212         qm_isr_status_clear(&p->p, clear);
1213 }
1214
1215 u32 qman_portal_dequeue(struct rte_event ev[], unsigned int poll_limit,
1216                         void **bufs)
1217 {
1218         const struct qm_dqrr_entry *dq;
1219         struct qman_fq *fq;
1220         enum qman_cb_dqrr_result res;
1221         unsigned int limit = 0;
1222         struct qman_portal *p = get_affine_portal();
1223 #if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
1224         struct qm_dqrr_entry *shadow;
1225 #endif
1226         unsigned int rx_number = 0;
1227
1228         do {
1229                 qm_dqrr_pvb_update(&p->p);
1230                 dq = qm_dqrr_current(&p->p);
1231                 if (!dq)
1232                         break;
1233 #if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
1234                 /*
1235                  * If running on an LE system the fields of the
1236                  * dequeue entry must be swapped.  Because the
1237                  * QMan HW will ignore writes, the DQRR entry is
1238                  * copied and the index stored within the copy.
1239                  */
1240                 shadow = &p->shadow_dqrr[DQRR_PTR2IDX(dq)];
1241                 *shadow = *dq;
1242                 dq = shadow;
1243                 shadow->fqid = be32_to_cpu(shadow->fqid);
1244                 shadow->seqnum = be16_to_cpu(shadow->seqnum);
1245                 hw_fd_to_cpu(&shadow->fd);
1246 #endif
1247
1248                 /* SDQCR: context_b points to the FQ */
1249 #ifdef CONFIG_FSL_QMAN_FQ_LOOKUP
1250                 fq = get_fq_table_entry(dq->contextB);
1251 #else
1252                 fq = (void *)(uintptr_t)dq->contextB;
1253 #endif
1254                 /* Now let the callback do its stuff */
1255                 res = fq->cb.dqrr_dpdk_cb(&ev[rx_number], p, fq,
1256                                          dq, &bufs[rx_number]);
1257                 rx_number++;
1258                 /* Interpret 'dq' from a driver perspective. */
1259                 /*
1260                  * Parking isn't possible unless HELDACTIVE was set. NB,
1261                  * FORCEELIGIBLE implies HELDACTIVE, so we only need to
1262                  * check for HELDACTIVE to cover both.
1263                  */
1264                 DPAA_ASSERT((dq->stat & QM_DQRR_STAT_FQ_HELDACTIVE) ||
1265                             (res != qman_cb_dqrr_park));
1266                 if (res != qman_cb_dqrr_defer)
1267                         qm_dqrr_cdc_consume_1ptr(&p->p, dq,
1268                                                  res == qman_cb_dqrr_park);
1269                 /* Move forward */
1270                 qm_dqrr_next(&p->p);
1271                 /*
1272                  * Entry processed and consumed, increment our counter.  The
1273                  * callback can request that we exit after consuming the
1274                  * entry, and we also exit if we reach our processing limit,
1275                  * so loop back only if neither of these conditions is met.
1276                  */
1277         } while (++limit < poll_limit);
1278
1279         return limit;
1280 }
1281
1282 struct qm_dqrr_entry *qman_dequeue(struct qman_fq *fq)
1283 {
1284         struct qman_portal *p = get_affine_portal();
1285         const struct qm_dqrr_entry *dq;
1286 #if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
1287         struct qm_dqrr_entry *shadow;
1288 #endif
1289
1290         qm_dqrr_pvb_update(&p->p);
1291         dq = qm_dqrr_current(&p->p);
1292         if (!dq)
1293                 return NULL;
1294
1295         if (!(dq->stat & QM_DQRR_STAT_FD_VALID)) {
1296                 /* Invalid DQRR - put the portal and consume the DQRR.
1297                  * Return NULL to user as no packet is seen.
1298                  */
1299                 qman_dqrr_consume(fq, (struct qm_dqrr_entry *)dq);
1300                 return NULL;
1301         }
1302
1303 #if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
1304         shadow = &p->shadow_dqrr[DQRR_PTR2IDX(dq)];
1305         *shadow = *dq;
1306         dq = shadow;
1307         shadow->fqid = be32_to_cpu(shadow->fqid);
1308         shadow->seqnum = be16_to_cpu(shadow->seqnum);
1309         hw_fd_to_cpu(&shadow->fd);
1310 #endif
1311
1312         if (dq->stat & QM_DQRR_STAT_FQ_EMPTY)
1313                 fq_clear(fq, QMAN_FQ_STATE_NE);
1314
1315         return (struct qm_dqrr_entry *)dq;
1316 }
1317
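/* Counterpart to qman_dequeue(): consumes a DQRR entry previously returned by
 * it, clearing any pending VDQCR ownership if the volatile command expired,
 * then advancing the DQRR ring.
 */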
1318 void qman_dqrr_consume(struct qman_fq *fq,
1319                        struct qm_dqrr_entry *dq)
1320 {
1321         struct qman_portal *p = get_affine_portal();
1322
1323         if (dq->stat & QM_DQRR_STAT_DQCR_EXPIRED)
1324                 clear_vdqcr(p, fq);
1325
1326         qm_dqrr_cdc_consume_1ptr(&p->p, dq, 0);
1327         qm_dqrr_next(&p->p);
1328 }
1329
1330 int qman_poll_dqrr(unsigned int limit)
1331 {
1332         struct qman_portal *p = get_affine_portal();
1333         int ret;
1334
1335         ret = __poll_portal_fast(p, limit);
1336         return ret;
1337 }
1338
1339 void qman_poll(void)
1340 {
1341         struct qman_portal *p = get_affine_portal();
1342
1343         if ((~p->irq_sources) & QM_PIRQ_SLOW) {
1344                 if (!(p->slowpoll--)) {
1345                         u32 is = qm_isr_status_read(&p->p) & ~p->irq_sources;
1346                         u32 active = __poll_portal_slow(p, is);
1347
1348                         if (active) {
1349                                 qm_isr_status_clear(&p->p, active);
1350                                 p->slowpoll = SLOW_POLL_BUSY;
1351                         } else
1352                                 p->slowpoll = SLOW_POLL_IDLE;
1353                 }
1354         }
1355         if ((~p->irq_sources) & QM_PIRQ_DQRI)
1356                 __poll_portal_fast(p, FSL_QMAN_POLL_LIMIT);
1357 }
1358
1359 void qman_stop_dequeues(void)
1360 {
1361         struct qman_portal *p = get_affine_portal();
1362
1363         qman_stop_dequeues_ex(p);
1364 }
1365
1366 void qman_start_dequeues(void)
1367 {
1368         struct qman_portal *p = get_affine_portal();
1369
1370         DPAA_ASSERT(p->dqrr_disable_ref > 0);
1371         if (!(--p->dqrr_disable_ref))
1372                 qm_dqrr_set_maxfill(&p->p, DQRR_MAXFILL);
1373 }
1374
1375 void qman_static_dequeue_add(u32 pools, struct qman_portal *qp)
1376 {
1377         struct qman_portal *p = qp ? qp : get_affine_portal();
1378
1379         pools &= p->config->pools;
1380         p->sdqcr |= pools;
1381         qm_dqrr_sdqcr_set(&p->p, p->sdqcr);
1382 }
1383
1384 void qman_static_dequeue_del(u32 pools, struct qman_portal *qp)
1385 {
1386         struct qman_portal *p = qp ? qp : get_affine_portal();
1387
1388         pools &= p->config->pools;
1389         p->sdqcr &= ~pools;
1390         qm_dqrr_sdqcr_set(&p->p, p->sdqcr);
1391 }
1392
1393 u32 qman_static_dequeue_get(struct qman_portal *qp)
1394 {
1395         struct qman_portal *p = qp ? qp : get_affine_portal();
1396         return p->sdqcr;
1397 }
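
/*
 * Illustrative usage sketch (not compiled as part of the driver): enable and
 * later disable static dequeue for a caller-built mask of pool-channel bits
 * ("pool_mask" is hypothetical). Passing NULL selects the affine portal, and
 * the mask is silently restricted to the pools owned by that portal
 * (p->config->pools):
 *
 *	qman_static_dequeue_add(pool_mask, NULL);
 *	...
 *	qman_static_dequeue_del(pool_mask, NULL);
 */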
1398
1399 void qman_dca(const struct qm_dqrr_entry *dq, int park_request)
1400 {
1401         struct qman_portal *p = get_affine_portal();
1402
1403         qm_dqrr_cdc_consume_1ptr(&p->p, dq, park_request);
1404 }
1405
1406 void qman_dca_index(u8 index, int park_request)
1407 {
1408         struct qman_portal *p = get_affine_portal();
1409
1410         qm_dqrr_cdc_consume_1(&p->p, index, park_request);
1411 }
1412
1413 /* Frame queue API */
1414 static const char *mcr_result_str(u8 result)
1415 {
1416         switch (result) {
1417         case QM_MCR_RESULT_NULL:
1418                 return "QM_MCR_RESULT_NULL";
1419         case QM_MCR_RESULT_OK:
1420                 return "QM_MCR_RESULT_OK";
1421         case QM_MCR_RESULT_ERR_FQID:
1422                 return "QM_MCR_RESULT_ERR_FQID";
1423         case QM_MCR_RESULT_ERR_FQSTATE:
1424                 return "QM_MCR_RESULT_ERR_FQSTATE";
1425         case QM_MCR_RESULT_ERR_NOTEMPTY:
1426                 return "QM_MCR_RESULT_ERR_NOTEMPTY";
1427         case QM_MCR_RESULT_PENDING:
1428                 return "QM_MCR_RESULT_PENDING";
1429         case QM_MCR_RESULT_ERR_BADCOMMAND:
1430                 return "QM_MCR_RESULT_ERR_BADCOMMAND";
1431         }
1432         return "<unknown MCR result>";
1433 }
1434
1435 int qman_create_fq(u32 fqid, u32 flags, struct qman_fq *fq)
1436 {
1437         struct qm_fqd fqd;
1438         struct qm_mcr_queryfq_np np;
1439         struct qm_mc_command *mcc;
1440         struct qm_mc_result *mcr;
1441         struct qman_portal *p;
1442
1443         if (flags & QMAN_FQ_FLAG_DYNAMIC_FQID) {
1444                 int ret = qman_alloc_fqid(&fqid);
1445
1446                 if (ret)
1447                         return ret;
1448         }
1449         spin_lock_init(&fq->fqlock);
1450         fq->fqid = fqid;
1451         fq->fqid_le = cpu_to_be32(fqid);
1452         fq->flags = flags;
1453         fq->state = qman_fq_state_oos;
1454         fq->cgr_groupid = 0;
1455 #ifdef CONFIG_FSL_QMAN_FQ_LOOKUP
1456         if (unlikely(find_empty_fq_table_entry(&fq->key, fq))) {
1457                 pr_info("Failed to find an empty FQ lookup table entry\n");
1458                 return -ENOMEM;
1459         }
1460         fq->qman_fq_lookup_table = qman_fq_lookup_table;
1461 #endif
1462         if (!(flags & QMAN_FQ_FLAG_AS_IS) || (flags & QMAN_FQ_FLAG_NO_MODIFY))
1463                 return 0;
1464         /* Everything else is AS_IS support */
1465         p = get_affine_portal();
1466         mcc = qm_mc_start(&p->p);
1467         mcc->queryfq.fqid = cpu_to_be32(fqid);
1468         qm_mc_commit(&p->p, QM_MCC_VERB_QUERYFQ);
1469         while (!(mcr = qm_mc_result(&p->p)))
1470                 cpu_relax();
1471         DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCC_VERB_QUERYFQ);
1472         if (mcr->result != QM_MCR_RESULT_OK) {
1473                 pr_err("QUERYFQ failed: %s\n", mcr_result_str(mcr->result));
1474                 goto err;
1475         }
1476         fqd = mcr->queryfq.fqd;
1477         hw_fqd_to_cpu(&fqd);
1478         mcc = qm_mc_start(&p->p);
1479         mcc->queryfq_np.fqid = cpu_to_be32(fqid);
1480         qm_mc_commit(&p->p, QM_MCC_VERB_QUERYFQ_NP);
1481         while (!(mcr = qm_mc_result(&p->p)))
1482                 cpu_relax();
1483         DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCC_VERB_QUERYFQ_NP);
1484         if (mcr->result != QM_MCR_RESULT_OK) {
1485                 pr_err("QUERYFQ_NP failed: %s\n", mcr_result_str(mcr->result));
1486                 goto err;
1487         }
1488         np = mcr->queryfq_np;
1489         /* Phew, have queryfq and queryfq_np results, stitch together
1490          * the FQ object from those.
1491          */
1492         fq->cgr_groupid = fqd.cgid;
1493         switch (np.state & QM_MCR_NP_STATE_MASK) {
1494         case QM_MCR_NP_STATE_OOS:
1495                 break;
1496         case QM_MCR_NP_STATE_RETIRED:
1497                 fq->state = qman_fq_state_retired;
1498                 if (np.frm_cnt)
1499                         fq_set(fq, QMAN_FQ_STATE_NE);
1500                 break;
1501         case QM_MCR_NP_STATE_TEN_SCHED:
1502         case QM_MCR_NP_STATE_TRU_SCHED:
1503         case QM_MCR_NP_STATE_ACTIVE:
1504                 fq->state = qman_fq_state_sched;
1505                 if (np.state & QM_MCR_NP_STATE_R)
1506                         fq_set(fq, QMAN_FQ_STATE_CHANGING);
1507                 break;
1508         case QM_MCR_NP_STATE_PARKED:
1509                 fq->state = qman_fq_state_parked;
1510                 break;
1511         default:
1512                 DPAA_ASSERT(NULL == "invalid FQ state");
1513         }
1514         if (fqd.fq_ctrl & QM_FQCTRL_CGE)
1515                 fq->state |= QMAN_FQ_STATE_CGR_EN;
1516         return 0;
1517 err:
1518         if (flags & QMAN_FQ_FLAG_DYNAMIC_FQID)
1519                 qman_release_fqid(fqid);
1520         return -EIO;
1521 }
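
/*
 * Illustrative usage sketch (not compiled as part of the driver): create an FQ
 * object with a dynamically allocated FQID. "my_fq" is a hypothetical
 * caller-owned object:
 *
 *	struct qman_fq my_fq;
 *
 *	memset(&my_fq, 0, sizeof(my_fq));
 *	(fill in my_fq.cb callbacks here as required)
 *	if (qman_create_fq(0, QMAN_FQ_FLAG_DYNAMIC_FQID, &my_fq))
 *		(handle the error)
 *	(qman_fq_fqid(&my_fq) now returns the allocated FQID)
 */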
1522
1523 void qman_destroy_fq(struct qman_fq *fq, u32 flags __maybe_unused)
1524 {
1525         /*
1526          * We don't need to lock the FQ as it is a pre-condition that the FQ be
1527          * quiesced. Instead, run some checks.
1528          */
1529         switch (fq->state) {
1530         case qman_fq_state_parked:
1531                 DPAA_ASSERT(flags & QMAN_FQ_DESTROY_PARKED);
1532                 /* Fallthrough */
1533         case qman_fq_state_oos:
1534                 if (fq_isset(fq, QMAN_FQ_FLAG_DYNAMIC_FQID))
1535                         qman_release_fqid(fq->fqid);
1536 #ifdef CONFIG_FSL_QMAN_FQ_LOOKUP
1537                 clear_fq_table_entry(fq->key);
1538 #endif
1539                 return;
1540         default:
1541                 break;
1542         }
1543         DPAA_ASSERT(NULL == "qman_destroy_fq() on unquiesced FQ!");
1544 }
1545
1546 u32 qman_fq_fqid(struct qman_fq *fq)
1547 {
1548         return fq->fqid;
1549 }
1550
1551 void qman_fq_state(struct qman_fq *fq, enum qman_fq_state *state, u32 *flags)
1552 {
1553         if (state)
1554                 *state = fq->state;
1555         if (flags)
1556                 *flags = fq->flags;
1557 }
1558
1559 int qman_init_fq(struct qman_fq *fq, u32 flags, struct qm_mcc_initfq *opts)
1560 {
1561         struct qm_mc_command *mcc;
1562         struct qm_mc_result *mcr;
1563         struct qman_portal *p;
1564
1565         u8 res, myverb = (flags & QMAN_INITFQ_FLAG_SCHED) ?
1566                 QM_MCC_VERB_INITFQ_SCHED : QM_MCC_VERB_INITFQ_PARKED;
1567
1568         if ((fq->state != qman_fq_state_oos) &&
1569             (fq->state != qman_fq_state_parked))
1570                 return -EINVAL;
1571 #ifdef RTE_LIBRTE_DPAA_HWDEBUG
1572         if (unlikely(fq_isset(fq, QMAN_FQ_FLAG_NO_MODIFY)))
1573                 return -EINVAL;
1574 #endif
1575         if (opts && (opts->we_mask & QM_INITFQ_WE_OAC)) {
1576                 /* OAC can't be set at the same time as TDTHRESH */
1577                 if (opts->we_mask & QM_INITFQ_WE_TDTHRESH)
1578                         return -EINVAL;
1579         }
1580         /* Issue an INITFQ_[PARKED|SCHED] management command */
1581         p = get_affine_portal();
1582         FQLOCK(fq);
1583         if (unlikely((fq_isset(fq, QMAN_FQ_STATE_CHANGING)) ||
1584                      ((fq->state != qman_fq_state_oos) &&
1585                                 (fq->state != qman_fq_state_parked)))) {
1586                 FQUNLOCK(fq);
1587                 return -EBUSY;
1588         }
1589         mcc = qm_mc_start(&p->p);
1590         if (opts)
1591                 mcc->initfq = *opts;
1592         mcc->initfq.fqid = cpu_to_be32(fq->fqid);
1593         mcc->initfq.count = 0;
1594         /*
1595          * If the FQ does *not* have the TO_DCPORTAL flag, context_b is set as a
1596          * demux pointer. Otherwise, the caller-provided value is allowed to
1597          * stand, don't overwrite it.
1598          */
1599         if (fq_isclear(fq, QMAN_FQ_FLAG_TO_DCPORTAL)) {
1600                 dma_addr_t phys_fq;
1601
1602                 mcc->initfq.we_mask |= QM_INITFQ_WE_CONTEXTB;
1603 #ifdef CONFIG_FSL_QMAN_FQ_LOOKUP
1604                 mcc->initfq.fqd.context_b = cpu_to_be32(fq->key);
1605 #else
1606                 mcc->initfq.fqd.context_b = (u32)(uintptr_t)fq;
1607 #endif
1608                 /*
1609                  * ... and the physical address. NB: if the user wasn't trying
1610                  * to set CONTEXTA, clear the stashing settings.
1611                  */
1612                 if (!(mcc->initfq.we_mask & QM_INITFQ_WE_CONTEXTA)) {
1613                         mcc->initfq.we_mask |= QM_INITFQ_WE_CONTEXTA;
1614                         memset(&mcc->initfq.fqd.context_a, 0,
1615                                sizeof(mcc->initfq.fqd.context_a));
1616                 } else {
1617                         phys_fq = rte_mem_virt2iova(fq);
1618                         qm_fqd_stashing_set64(&mcc->initfq.fqd, phys_fq);
1619                 }
1620         }
1621         if (flags & QMAN_INITFQ_FLAG_LOCAL) {
1622                 mcc->initfq.fqd.dest.channel = p->config->channel;
1623                 if (!(mcc->initfq.we_mask & QM_INITFQ_WE_DESTWQ)) {
1624                         mcc->initfq.we_mask |= QM_INITFQ_WE_DESTWQ;
1625                         mcc->initfq.fqd.dest.wq = 4;
1626                 }
1627         }
1628         mcc->initfq.we_mask = cpu_to_be16(mcc->initfq.we_mask);
1629         cpu_to_hw_fqd(&mcc->initfq.fqd);
1630         qm_mc_commit(&p->p, myverb);
1631         while (!(mcr = qm_mc_result(&p->p)))
1632                 cpu_relax();
1633         DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == myverb);
1634         res = mcr->result;
1635         if (res != QM_MCR_RESULT_OK) {
1636                 FQUNLOCK(fq);
1637                 return -EIO;
1638         }
1639         if (opts) {
1640                 if (opts->we_mask & QM_INITFQ_WE_FQCTRL) {
1641                         if (opts->fqd.fq_ctrl & QM_FQCTRL_CGE)
1642                                 fq_set(fq, QMAN_FQ_STATE_CGR_EN);
1643                         else
1644                                 fq_clear(fq, QMAN_FQ_STATE_CGR_EN);
1645                 }
1646                 if (opts->we_mask & QM_INITFQ_WE_CGID)
1647                         fq->cgr_groupid = opts->fqd.cgid;
1648         }
1649         fq->state = (flags & QMAN_INITFQ_FLAG_SCHED) ?
1650                 qman_fq_state_sched : qman_fq_state_parked;
1651         FQUNLOCK(fq);
1652         return 0;
1653 }
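
/*
 * Illustrative usage sketch (not compiled as part of the driver), continuing
 * the hypothetical "my_fq" from the sketch above: initialise and schedule a
 * previously created FQ. With QMAN_INITFQ_FLAG_LOCAL and no DESTWQ bit in
 * opts.we_mask, the code above points the FQ at the affine portal's channel
 * and a default work queue:
 *
 *	struct qm_mcc_initfq opts;
 *
 *	memset(&opts, 0, sizeof(opts));
 *	if (qman_init_fq(&my_fq,
 *			 QMAN_INITFQ_FLAG_SCHED | QMAN_INITFQ_FLAG_LOCAL,
 *			 &opts))
 *		(handle the error)
 */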
1654
1655 int qman_schedule_fq(struct qman_fq *fq)
1656 {
1657         struct qm_mc_command *mcc;
1658         struct qm_mc_result *mcr;
1659         struct qman_portal *p;
1660
1661         int ret = 0;
1662         u8 res;
1663
1664         if (fq->state != qman_fq_state_parked)
1665                 return -EINVAL;
1666 #ifdef RTE_LIBRTE_DPAA_HWDEBUG
1667         if (unlikely(fq_isset(fq, QMAN_FQ_FLAG_NO_MODIFY)))
1668                 return -EINVAL;
1669 #endif
1670         /* Issue an ALTERFQ_SCHED management command */
1671         p = get_affine_portal();
1672
1673         FQLOCK(fq);
1674         if (unlikely((fq_isset(fq, QMAN_FQ_STATE_CHANGING)) ||
1675                      (fq->state != qman_fq_state_parked))) {
1676                 ret = -EBUSY;
1677                 goto out;
1678         }
1679         mcc = qm_mc_start(&p->p);
1680         mcc->alterfq.fqid = cpu_to_be32(fq->fqid);
1681         qm_mc_commit(&p->p, QM_MCC_VERB_ALTER_SCHED);
1682         while (!(mcr = qm_mc_result(&p->p)))
1683                 cpu_relax();
1684         DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCR_VERB_ALTER_SCHED);
1685         res = mcr->result;
1686         if (res != QM_MCR_RESULT_OK) {
1687                 ret = -EIO;
1688                 goto out;
1689         }
1690         fq->state = qman_fq_state_sched;
1691 out:
1692         FQUNLOCK(fq);
1693
1694         return ret;
1695 }
1696
1697 int qman_retire_fq(struct qman_fq *fq, u32 *flags)
1698 {
1699         struct qm_mc_command *mcc;
1700         struct qm_mc_result *mcr;
1701         struct qman_portal *p;
1702
1703         int rval;
1704         u8 res;
1705
1706         if ((fq->state != qman_fq_state_parked) &&
1707             (fq->state != qman_fq_state_sched))
1708                 return -EINVAL;
1709 #ifdef RTE_LIBRTE_DPAA_HWDEBUG
1710         if (unlikely(fq_isset(fq, QMAN_FQ_FLAG_NO_MODIFY)))
1711                 return -EINVAL;
1712 #endif
1713         p = get_affine_portal();
1714
1715         FQLOCK(fq);
1716         if (unlikely((fq_isset(fq, QMAN_FQ_STATE_CHANGING)) ||
1717                      (fq->state == qman_fq_state_retired) ||
1718                                 (fq->state == qman_fq_state_oos))) {
1719                 rval = -EBUSY;
1720                 goto out;
1721         }
1722         rval = table_push_fq(p, fq);
1723         if (rval)
1724                 goto out;
1725         mcc = qm_mc_start(&p->p);
1726         mcc->alterfq.fqid = cpu_to_be32(fq->fqid);
1727         qm_mc_commit(&p->p, QM_MCC_VERB_ALTER_RETIRE);
1728         while (!(mcr = qm_mc_result(&p->p)))
1729                 cpu_relax();
1730         DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCR_VERB_ALTER_RETIRE);
1731         res = mcr->result;
1732         /*
1733          * "Elegant" would be to treat OK/PENDING the same way; set CHANGING,
1734          * and defer the flags until FQRNI or FQRN (respectively) show up. But
1735          * "Friendly" is to process OK immediately, and not set CHANGING. We do
1736          * friendly, otherwise the caller doesn't necessarily have a fully
1737          * "retired" FQ on return even if the retirement was immediate. However
1738          * this does mean some code duplication between here and
1739          * fq_state_change().
1740          */
1741         if (likely(res == QM_MCR_RESULT_OK)) {
1742                 rval = 0;
1743                 /* Process 'fq' right away, we'll ignore FQRNI */
1744                 if (mcr->alterfq.fqs & QM_MCR_FQS_NOTEMPTY)
1745                         fq_set(fq, QMAN_FQ_STATE_NE);
1746                 if (mcr->alterfq.fqs & QM_MCR_FQS_ORLPRESENT)
1747                         fq_set(fq, QMAN_FQ_STATE_ORL);
1748                 else
1749                         table_del_fq(p, fq);
1750                 if (flags)
1751                         *flags = fq->flags;
1752                 fq->state = qman_fq_state_retired;
1753                 if (fq->cb.fqs) {
1754                         /*
1755                          * Another issue with supporting "immediate" retirement
1756                          * is that we're forced to drop FQRNIs, because by the
1757                          * time they're seen it may already be "too late" (the
1758                          * fq may have been OOS'd and free()'d already). But if
1759                          * the upper layer wants a callback whether it's
1760                          * immediate or not, we have to fake a "MR" entry to
1761                          * look like an FQRNI...
1762                          */
1763                         struct qm_mr_entry msg;
1764
1765                         msg.ern.verb = QM_MR_VERB_FQRNI;
1766                         msg.fq.fqs = mcr->alterfq.fqs;
1767                         msg.fq.fqid = fq->fqid;
1768 #ifdef CONFIG_FSL_QMAN_FQ_LOOKUP
1769                         msg.fq.contextB = fq->key;
1770 #else
1771                         msg.fq.contextB = (u32)(uintptr_t)fq;
1772 #endif
1773                         fq->cb.fqs(p, fq, &msg);
1774                 }
1775         } else if (res == QM_MCR_RESULT_PENDING) {
1776                 rval = 1;
1777                 fq_set(fq, QMAN_FQ_STATE_CHANGING);
1778         } else {
1779                 rval = -EIO;
1780                 table_del_fq(p, fq);
1781         }
1782 out:
1783         FQUNLOCK(fq);
1784         return rval;
1785 }
1786
1787 int qman_oos_fq(struct qman_fq *fq)
1788 {
1789         struct qm_mc_command *mcc;
1790         struct qm_mc_result *mcr;
1791         struct qman_portal *p;
1792
1793         int ret = 0;
1794         u8 res;
1795
1796         if (fq->state != qman_fq_state_retired)
1797                 return -EINVAL;
1798 #ifdef RTE_LIBRTE_DPAA_HWDEBUG
1799         if (unlikely(fq_isset(fq, QMAN_FQ_FLAG_NO_MODIFY)))
1800                 return -EINVAL;
1801 #endif
1802         p = get_affine_portal();
1803         FQLOCK(fq);
1804         if (unlikely((fq_isset(fq, QMAN_FQ_STATE_BLOCKOOS)) ||
1805                      (fq->state != qman_fq_state_retired))) {
1806                 ret = -EBUSY;
1807                 goto out;
1808         }
1809         mcc = qm_mc_start(&p->p);
1810         mcc->alterfq.fqid = cpu_to_be32(fq->fqid);
1811         qm_mc_commit(&p->p, QM_MCC_VERB_ALTER_OOS);
1812         while (!(mcr = qm_mc_result(&p->p)))
1813                 cpu_relax();
1814         DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCR_VERB_ALTER_OOS);
1815         res = mcr->result;
1816         if (res != QM_MCR_RESULT_OK) {
1817                 ret = -EIO;
1818                 goto out;
1819         }
1820         fq->state = qman_fq_state_oos;
1821 out:
1822         FQUNLOCK(fq);
1823         return ret;
1824 }
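
/*
 * Illustrative teardown sketch (not compiled as part of the driver): a
 * scheduled FQ must be retired and taken out-of-service before destruction.
 * Draining any remaining frames and waiting out a pending retirement
 * (qman_retire_fq() returning 1) are the caller's responsibility and are
 * elided here:
 *
 *	u32 flags;
 *
 *	if (qman_retire_fq(&my_fq, &flags) < 0)
 *		(handle the error)
 *	(drain/wait here if retirement is pending or the FQ is not empty)
 *	if (qman_oos_fq(&my_fq))
 *		(handle the error)
 *	qman_destroy_fq(&my_fq, 0);
 */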
1825
1826 int qman_fq_flow_control(struct qman_fq *fq, int xon)
1827 {
1828         struct qm_mc_command *mcc;
1829         struct qm_mc_result *mcr;
1830         struct qman_portal *p;
1831
1832         int ret = 0;
1833         u8 res;
1834         u8 myverb;
1835
1836         if ((fq->state == qman_fq_state_oos) ||
1837             (fq->state == qman_fq_state_retired) ||
1838                 (fq->state == qman_fq_state_parked))
1839                 return -EINVAL;
1840
1841 #ifdef RTE_LIBRTE_DPAA_HWDEBUG
1842         if (unlikely(fq_isset(fq, QMAN_FQ_FLAG_NO_MODIFY)))
1843                 return -EINVAL;
1844 #endif
1845         /* Issue an ALTER_FQXON or ALTER_FQXOFF management command */
1846         p = get_affine_portal();
1847         FQLOCK(fq);
1848         if (unlikely((fq_isset(fq, QMAN_FQ_STATE_CHANGING)) ||
1849                      (fq->state == qman_fq_state_parked) ||
1850                         (fq->state == qman_fq_state_oos) ||
1851                         (fq->state == qman_fq_state_retired))) {
1852                 ret = -EBUSY;
1853                 goto out;
1854         }
1855         mcc = qm_mc_start(&p->p);
1856         mcc->alterfq.fqid = cpu_to_be32(fq->fqid);
1857         mcc->alterfq.count = 0;
1858         myverb = xon ? QM_MCC_VERB_ALTER_FQXON : QM_MCC_VERB_ALTER_FQXOFF;
1859
1860         qm_mc_commit(&p->p, myverb);
1861         while (!(mcr = qm_mc_result(&p->p)))
1862                 cpu_relax();
1863         DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == myverb);
1864
1865         res = mcr->result;
1866         if (res != QM_MCR_RESULT_OK) {
1867                 ret = -EIO;
1868                 goto out;
1869         }
1870 out:
1871         FQUNLOCK(fq);
1872         return ret;
1873 }
1874
1875 int qman_query_fq(struct qman_fq *fq, struct qm_fqd *fqd)
1876 {
1877         struct qm_mc_command *mcc;
1878         struct qm_mc_result *mcr;
1879         struct qman_portal *p = get_affine_portal();
1880
1881         u8 res;
1882
1883         mcc = qm_mc_start(&p->p);
1884         mcc->queryfq.fqid = cpu_to_be32(fq->fqid);
1885         qm_mc_commit(&p->p, QM_MCC_VERB_QUERYFQ);
1886         while (!(mcr = qm_mc_result(&p->p)))
1887                 cpu_relax();
1888         DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCR_VERB_QUERYFQ);
1889         res = mcr->result;
1890         if (res != QM_MCR_RESULT_OK)
1891                 return -EIO;
1892         *fqd = mcr->queryfq.fqd;
1893         hw_fqd_to_cpu(fqd);
1894
1895         return 0;
1896 }
1897
1898 int qman_query_fq_has_pkts(struct qman_fq *fq)
1899 {
1900         struct qm_mc_command *mcc;
1901         struct qm_mc_result *mcr;
1902         struct qman_portal *p = get_affine_portal();
1903
1904         int ret = 0;
1905         u8 res;
1906
1907         mcc = qm_mc_start(&p->p);
1908         mcc->queryfq.fqid = cpu_to_be32(fq->fqid);
1909         qm_mc_commit(&p->p, QM_MCC_VERB_QUERYFQ_NP);
1910         while (!(mcr = qm_mc_result(&p->p)))
1911                 cpu_relax();
1912         res = mcr->result;
1913         if (res == QM_MCR_RESULT_OK)
1914                 ret = !!mcr->queryfq_np.frm_cnt;
1915         return ret;
1916 }
1917
1918 int qman_query_fq_np(struct qman_fq *fq, struct qm_mcr_queryfq_np *np)
1919 {
1920         struct qm_mc_command *mcc;
1921         struct qm_mc_result *mcr;
1922         struct qman_portal *p = get_affine_portal();
1923
1924         u8 res;
1925
1926         mcc = qm_mc_start(&p->p);
1927         mcc->queryfq.fqid = cpu_to_be32(fq->fqid);
1928         qm_mc_commit(&p->p, QM_MCC_VERB_QUERYFQ_NP);
1929         while (!(mcr = qm_mc_result(&p->p)))
1930                 cpu_relax();
1931         DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCR_VERB_QUERYFQ_NP);
1932         res = mcr->result;
1933         if (res == QM_MCR_RESULT_OK) {
1934                 *np = mcr->queryfq_np;
1935                 np->fqd_link = be24_to_cpu(np->fqd_link);
1936                 np->odp_seq = be16_to_cpu(np->odp_seq);
1937                 np->orp_nesn = be16_to_cpu(np->orp_nesn);
1938                 np->orp_ea_hseq  = be16_to_cpu(np->orp_ea_hseq);
1939                 np->orp_ea_tseq  = be16_to_cpu(np->orp_ea_tseq);
1940                 np->orp_ea_hptr = be24_to_cpu(np->orp_ea_hptr);
1941                 np->orp_ea_tptr = be24_to_cpu(np->orp_ea_tptr);
1942                 np->pfdr_hptr = be24_to_cpu(np->pfdr_hptr);
1943                 np->pfdr_tptr = be24_to_cpu(np->pfdr_tptr);
1944                 np->ics_surp = be16_to_cpu(np->ics_surp);
1945                 np->byte_cnt = be32_to_cpu(np->byte_cnt);
1946                 np->frm_cnt = be24_to_cpu(np->frm_cnt);
1947                 np->ra1_sfdr = be16_to_cpu(np->ra1_sfdr);
1948                 np->ra2_sfdr = be16_to_cpu(np->ra2_sfdr);
1949                 np->od1_sfdr = be16_to_cpu(np->od1_sfdr);
1950                 np->od2_sfdr = be16_to_cpu(np->od2_sfdr);
1951                 np->od3_sfdr = be16_to_cpu(np->od3_sfdr);
1952         }
1953         if (res == QM_MCR_RESULT_ERR_FQID)
1954                 return -ERANGE;
1955         else if (res != QM_MCR_RESULT_OK)
1956                 return -EIO;
1957         return 0;
1958 }
1959
1960 int qman_query_fq_frm_cnt(struct qman_fq *fq, u32 *frm_cnt)
1961 {
1962         struct qm_mc_command *mcc;
1963         struct qm_mc_result *mcr;
1964         struct qman_portal *p = get_affine_portal();
1965
1966         mcc = qm_mc_start(&p->p);
1967         mcc->queryfq.fqid = cpu_to_be32(fq->fqid);
1968         qm_mc_commit(&p->p, QM_MCC_VERB_QUERYFQ_NP);
1969         while (!(mcr = qm_mc_result(&p->p)))
1970                 cpu_relax();
1971         DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCR_VERB_QUERYFQ_NP);
1972
1973         if (mcr->result == QM_MCR_RESULT_OK)
1974                 *frm_cnt = be24_to_cpu(mcr->queryfq_np.frm_cnt);
1975         else if (mcr->result == QM_MCR_RESULT_ERR_FQID)
1976                 return -ERANGE;
1977                 else
1978                 return -EIO;
1979         return 0;
1980 }
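
/*
 * Illustrative usage sketch (not compiled as part of the driver): check
 * whether an FQ still holds frames, e.g. before tearing it down:
 *
 *	u32 cnt = 0;
 *
 *	if (!qman_query_fq_frm_cnt(&my_fq, &cnt) && cnt)
 *		(frames are still queued on this FQ)
 */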
1981
1982 int qman_query_wq(u8 query_dedicated, struct qm_mcr_querywq *wq)
1983 {
1984         struct qm_mc_command *mcc;
1985         struct qm_mc_result *mcr;
1986         struct qman_portal *p = get_affine_portal();
1987
1988         u8 res, myverb;
1989
1990         myverb = (query_dedicated) ? QM_MCR_VERB_QUERYWQ_DEDICATED :
1991                                  QM_MCR_VERB_QUERYWQ;
1992         mcc = qm_mc_start(&p->p);
1993         mcc->querywq.channel.id = cpu_to_be16(wq->channel.id);
1994         qm_mc_commit(&p->p, myverb);
1995         while (!(mcr = qm_mc_result(&p->p)))
1996                 cpu_relax();
1997         DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == myverb);
1998         res = mcr->result;
1999         if (res == QM_MCR_RESULT_OK) {
2000                 int i, array_len;
2001
2002                 wq->channel.id = be16_to_cpu(mcr->querywq.channel.id);
2003                 array_len = ARRAY_SIZE(mcr->querywq.wq_len);
2004                 for (i = 0; i < array_len; i++)
2005                         wq->wq_len[i] = be32_to_cpu(mcr->querywq.wq_len[i]);
2006         }
2007         if (res != QM_MCR_RESULT_OK) {
2008                 pr_err("QUERYWQ failed: %s\n", mcr_result_str(res));
2009                 return -EIO;
2010         }
2011         return 0;
2012 }
2013
2014 int qman_testwrite_cgr(struct qman_cgr *cgr, u64 i_bcnt,
2015                        struct qm_mcr_cgrtestwrite *result)
2016 {
2017         struct qm_mc_command *mcc;
2018         struct qm_mc_result *mcr;
2019         struct qman_portal *p = get_affine_portal();
2020
2021         u8 res;
2022
2023         mcc = qm_mc_start(&p->p);
2024         mcc->cgrtestwrite.cgid = cgr->cgrid;
2025         mcc->cgrtestwrite.i_bcnt_hi = (u8)(i_bcnt >> 32);
2026         mcc->cgrtestwrite.i_bcnt_lo = (u32)i_bcnt;
2027         qm_mc_commit(&p->p, QM_MCC_VERB_CGRTESTWRITE);
2028         while (!(mcr = qm_mc_result(&p->p)))
2029                 cpu_relax();
2030         DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCC_VERB_CGRTESTWRITE);
2031         res = mcr->result;
2032         if (res == QM_MCR_RESULT_OK)
2033                 *result = mcr->cgrtestwrite;
2034         if (res != QM_MCR_RESULT_OK) {
2035                 pr_err("CGR TEST WRITE failed: %s\n", mcr_result_str(res));
2036                 return -EIO;
2037         }
2038         return 0;
2039 }
2040
2041 int qman_query_cgr(struct qman_cgr *cgr, struct qm_mcr_querycgr *cgrd)
2042 {
2043         struct qm_mc_command *mcc;
2044         struct qm_mc_result *mcr;
2045         struct qman_portal *p = get_affine_portal();
2046         u8 res;
2047         unsigned int i;
2048
2049         mcc = qm_mc_start(&p->p);
2050         mcc->querycgr.cgid = cgr->cgrid;
2051         qm_mc_commit(&p->p, QM_MCC_VERB_QUERYCGR);
2052         while (!(mcr = qm_mc_result(&p->p)))
2053                 cpu_relax();
2054         DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCC_VERB_QUERYCGR);
2055         res = mcr->result;
2056         if (res == QM_MCR_RESULT_OK)
2057                 *cgrd = mcr->querycgr;
2058         if (res != QM_MCR_RESULT_OK) {
2059                 pr_err("QUERY_CGR failed: %s\n", mcr_result_str(res));
2060                 return -EIO;
2061         }
2062         cgrd->cgr.wr_parm_g.word =
2063                 be32_to_cpu(cgrd->cgr.wr_parm_g.word);
2064         cgrd->cgr.wr_parm_y.word =
2065                 be32_to_cpu(cgrd->cgr.wr_parm_y.word);
2066         cgrd->cgr.wr_parm_r.word =
2067                 be32_to_cpu(cgrd->cgr.wr_parm_r.word);
2068         cgrd->cgr.cscn_targ =  be32_to_cpu(cgrd->cgr.cscn_targ);
2069         cgrd->cgr.__cs_thres = be16_to_cpu(cgrd->cgr.__cs_thres);
2070         for (i = 0; i < ARRAY_SIZE(cgrd->cscn_targ_swp); i++)
2071                 cgrd->cscn_targ_swp[i] =
2072                         be32_to_cpu(cgrd->cscn_targ_swp[i]);
2073         return 0;
2074 }
2075
2076 int qman_query_congestion(struct qm_mcr_querycongestion *congestion)
2077 {
2078         struct qm_mc_result *mcr;
2079         struct qman_portal *p = get_affine_portal();
2080         u8 res;
2081         unsigned int i;
2082
2083         qm_mc_start(&p->p);
2084         qm_mc_commit(&p->p, QM_MCC_VERB_QUERYCONGESTION);
2085         while (!(mcr = qm_mc_result(&p->p)))
2086                 cpu_relax();
2087         DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) ==
2088                         QM_MCC_VERB_QUERYCONGESTION);
2089         res = mcr->result;
2090         if (res == QM_MCR_RESULT_OK)
2091                 *congestion = mcr->querycongestion;
2092         if (res != QM_MCR_RESULT_OK) {
2093                 pr_err("QUERY_CONGESTION failed: %s\n", mcr_result_str(res));
2094                 return -EIO;
2095         }
2096         for (i = 0; i < ARRAY_SIZE(congestion->state.state); i++)
2097                 congestion->state.state[i] =
2098                         be32_to_cpu(congestion->state.state[i]);
2099         return 0;
2100 }
2101
2102 int qman_set_vdq(struct qman_fq *fq, u16 num, uint32_t vdqcr_flags)
2103 {
2104         struct qman_portal *p = get_affine_portal();
2105         uint32_t vdqcr;
2106         int ret = -EBUSY;
2107
2108         vdqcr = vdqcr_flags;
2109         vdqcr |= QM_VDQCR_NUMFRAMES_SET(num);
2110
2111         if ((fq->state != qman_fq_state_parked) &&
2112             (fq->state != qman_fq_state_retired)) {
2113                 ret = -EINVAL;
2114                 goto out;
2115         }
2116         if (fq_isset(fq, QMAN_FQ_STATE_VDQCR)) {
2117                 ret = -EBUSY;
2118                 goto out;
2119         }
2120         vdqcr = (vdqcr & ~QM_VDQCR_FQID_MASK) | fq->fqid;
2121
2122         if (!p->vdqcr_owned) {
2123                 FQLOCK(fq);
2124                 if (fq_isset(fq, QMAN_FQ_STATE_VDQCR)) {
                        FQUNLOCK(fq);
                        goto escape;
                }
2126                 fq_set(fq, QMAN_FQ_STATE_VDQCR);
2127                 FQUNLOCK(fq);
2128                 p->vdqcr_owned = fq;
2129                 ret = 0;
2130         }
2131 escape:
2132         if (!ret)
2133                 qm_dqrr_vdqcr_set(&p->p, vdqcr);
2134
2135 out:
2136         return ret;
2137 }
2138
2139 int qman_volatile_dequeue(struct qman_fq *fq, u32 flags __maybe_unused,
2140                           u32 vdqcr)
2141 {
2142         struct qman_portal *p;
2143         int ret = -EBUSY;
2144
2145         if ((fq->state != qman_fq_state_parked) &&
2146             (fq->state != qman_fq_state_retired))
2147                 return -EINVAL;
2148         if (vdqcr & QM_VDQCR_FQID_MASK)
2149                 return -EINVAL;
2150         if (fq_isset(fq, QMAN_FQ_STATE_VDQCR))
2151                 return -EBUSY;
2152         vdqcr = (vdqcr & ~QM_VDQCR_FQID_MASK) | fq->fqid;
2153
2154         p = get_affine_portal();
2155
2156         if (!p->vdqcr_owned) {
2157                 FQLOCK(fq);
2158                 if (fq_isset(fq, QMAN_FQ_STATE_VDQCR)) {
                        FQUNLOCK(fq);
                        goto escape;
                }
2160                 fq_set(fq, QMAN_FQ_STATE_VDQCR);
2161                 FQUNLOCK(fq);
2162                 p->vdqcr_owned = fq;
2163                 ret = 0;
2164         }
2165 escape:
2166         if (ret)
2167                 return ret;
2168
2169         /* VDQCR is set */
2170         qm_dqrr_vdqcr_set(&p->p, vdqcr);
2171         return 0;
2172 }
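
/*
 * Illustrative usage sketch (not compiled as part of the driver): issue a
 * volatile dequeue for up to three frames from a parked or retired FQ, then
 * poll the affine portal's DQRR so the resulting entries are processed:
 *
 *	if (!qman_volatile_dequeue(&my_fq, 0, QM_VDQCR_NUMFRAMES_SET(3)))
 *		qman_poll_dqrr(FSL_QMAN_POLL_LIMIT);
 */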
2173
2174 static noinline void update_eqcr_ci(struct qman_portal *p, u8 avail)
2175 {
2176         if (avail)
2177                 qm_eqcr_cce_prefetch(&p->p);
2178         else
2179                 qm_eqcr_cce_update(&p->p);
2180 }
2181
2182 int qman_eqcr_is_empty(void)
2183 {
2184         struct qman_portal *p = get_affine_portal();
2185         u8 avail;
2186
2187         update_eqcr_ci(p, 0);
2188         avail = qm_eqcr_get_fill(&p->p);
2189         return (avail == 0);
2190 }
2191
2192 void qman_set_dc_ern(qman_cb_dc_ern handler, int affine)
2193 {
2194         if (affine) {
2195                 struct qman_portal *p = get_affine_portal();
2196
2197                 p->cb_dc_ern = handler;
2198         } else
2199                 cb_dc_ern = handler;
2200 }
2201
2202 static inline struct qm_eqcr_entry *try_p_eq_start(struct qman_portal *p,
2203                                         struct qman_fq *fq,
2204                                         const struct qm_fd *fd,
2205                                         u32 flags)
2206 {
2207         struct qm_eqcr_entry *eq;
2208         u8 avail;
2209
2210         if (p->use_eqcr_ci_stashing) {
2211                 /*
2212                  * The stashing case is easy, only update if we need to in
2213                  * order to try and liberate ring entries.
2214                  */
2215                 eq = qm_eqcr_start_stash(&p->p);
2216         } else {
2217                 /*
2218                  * The non-stashing case is harder, need to prefetch ahead of
2219                  * time.
2220                  */
2221                 avail = qm_eqcr_get_avail(&p->p);
2222                 if (avail < 2)
2223                         update_eqcr_ci(p, avail);
2224                 eq = qm_eqcr_start_no_stash(&p->p);
2225         }
2226
2227         if (unlikely(!eq))
2228                 return NULL;
2229
2230         if (flags & QMAN_ENQUEUE_FLAG_DCA)
2231                 eq->dca = QM_EQCR_DCA_ENABLE |
2232                         ((flags & QMAN_ENQUEUE_FLAG_DCA_PARK) ?
2233                                         QM_EQCR_DCA_PARK : 0) |
2234                         ((flags >> 8) & QM_EQCR_DCA_IDXMASK);
2235         eq->fqid = cpu_to_be32(fq->fqid);
2236 #ifdef CONFIG_FSL_QMAN_FQ_LOOKUP
2237         eq->tag = cpu_to_be32(fq->key);
2238 #else
2239         eq->tag = cpu_to_be32((u32)(uintptr_t)fq);
2240 #endif
2241         eq->fd = *fd;
2242         cpu_to_hw_fd(&eq->fd);
2243         return eq;
2244 }
2245
2246 int qman_enqueue(struct qman_fq *fq, const struct qm_fd *fd, u32 flags)
2247 {
2248         struct qman_portal *p = get_affine_portal();
2249         struct qm_eqcr_entry *eq;
2250
2251         eq = try_p_eq_start(p, fq, fd, flags);
2252         if (!eq)
2253                 return -EBUSY;
2254         /* Note: QM_EQCR_VERB_INTERRUPT == QMAN_ENQUEUE_FLAG_WAIT_SYNC */
2255         qm_eqcr_pvb_commit(&p->p, QM_EQCR_VERB_CMD_ENQUEUE |
2256                 (flags & (QM_EQCR_VERB_COLOUR_MASK | QM_EQCR_VERB_INTERRUPT)));
2257         /* Factor the below out, it's used from qman_enqueue_orp() too */
2258         return 0;
2259 }
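
/*
 * Illustrative usage sketch (not compiled as part of the driver): enqueue a
 * caller-prepared frame descriptor "fd", retrying while the enqueue command
 * ring is full:
 *
 *	while (qman_enqueue(&my_fq, &fd, 0) == -EBUSY)
 *		cpu_relax();
 */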
2260
2261 int qman_enqueue_multi(struct qman_fq *fq,
2262                        const struct qm_fd *fd, u32 *flags,
2263                 int frames_to_send)
2264 {
2265         struct qman_portal *p = get_affine_portal();
2266         struct qm_portal *portal = &p->p;
2267
2268         register struct qm_eqcr *eqcr = &portal->eqcr;
2269         struct qm_eqcr_entry *eq = eqcr->cursor, *prev_eq;
2270
2271         u8 i = 0, diff, old_ci, sent = 0;
2272
2273         /* Update the available entries if no entry is free */
2274         if (!eqcr->available) {
2275                 old_ci = eqcr->ci;
2276                 eqcr->ci = qm_cl_in(EQCR_CI) & (QM_EQCR_SIZE - 1);
2277                 diff = qm_cyc_diff(QM_EQCR_SIZE, old_ci, eqcr->ci);
2278                 eqcr->available += diff;
2279                 if (!diff)
2280                         return 0;
2281         }
2282
2283         /* try to send as many frames as possible */
2284         while (eqcr->available && frames_to_send--) {
2285                 eq->fqid = fq->fqid_le;
2286                 eq->fd.opaque_addr = fd->opaque_addr;
2287                 eq->fd.addr = cpu_to_be40(fd->addr);
2288                 eq->fd.status = cpu_to_be32(fd->status);
2289                 eq->fd.opaque = cpu_to_be32(fd->opaque);
2290                 if (flags && (flags[i] & QMAN_ENQUEUE_FLAG_DCA)) {
2291                         eq->dca = QM_EQCR_DCA_ENABLE |
2292                                 ((flags[i] >> 8) & QM_EQCR_DCA_IDXMASK);
2293                 }
2294                 i++;
2295                 eq = (void *)((unsigned long)(eq + 1) &
2296                         (~(unsigned long)(QM_EQCR_SIZE << 6)));
2297                 eqcr->available--;
2298                 sent++;
2299                 fd++;
2300         }
2301         lwsync();
2302
2303         /* In order for the flushes below to complete faster, the verb is
2304          * first recorded for all entries in a single pass.
2305          */
2306         eq = eqcr->cursor;
2307         for (i = 0; i < sent; i++) {
2308                 eq->__dont_write_directly__verb =
2309                         QM_EQCR_VERB_CMD_ENQUEUE | eqcr->vbit;
2310                 prev_eq = eq;
2311                 eq = (void *)((unsigned long)(eq + 1) &
2312                         (~(unsigned long)(QM_EQCR_SIZE << 6)));
2313                 if (unlikely((prev_eq + 1) != eq))
2314                         eqcr->vbit ^= QM_EQCR_VERB_VBIT;
2315         }
2316
2317         /* We need to flush all the lines, but without load/store operations
2318          * between them.
2319          */
2320         eq = eqcr->cursor;
2321         for (i = 0; i < sent; i++) {
2322                 dcbf(eq);
2323                 eq = (void *)((unsigned long)(eq + 1) &
2324                         (~(unsigned long)(QM_EQCR_SIZE << 6)));
2325         }
2326         /* Update cursor for the next call */
2327         eqcr->cursor = eq;
2328         return sent;
2329 }
2330
2331 int
2332 qman_enqueue_multi_fq(struct qman_fq *fq[], const struct qm_fd *fd,
2333                       u32 *flags, int frames_to_send)
2334 {
2335         struct qman_portal *p = get_affine_portal();
2336         struct qm_portal *portal = &p->p;
2337
2338         register struct qm_eqcr *eqcr = &portal->eqcr;
2339         struct qm_eqcr_entry *eq = eqcr->cursor, *prev_eq;
2340
2341         u8 i = 0, diff, old_ci, sent = 0;
2342
2343         /* Update the available entries if no entry is free */
2344         if (!eqcr->available) {
2345                 old_ci = eqcr->ci;
2346                 eqcr->ci = qm_cl_in(EQCR_CI) & (QM_EQCR_SIZE - 1);
2347                 diff = qm_cyc_diff(QM_EQCR_SIZE, old_ci, eqcr->ci);
2348                 eqcr->available += diff;
2349                 if (!diff)
2350                         return 0;
2351         }
2352
2353         /* try to send as many frames as possible */
2354         while (eqcr->available && frames_to_send--) {
2355                 eq->fqid = fq[sent]->fqid_le;
2356                 eq->fd.opaque_addr = fd->opaque_addr;
2357                 eq->fd.addr = cpu_to_be40(fd->addr);
2358                 eq->fd.status = cpu_to_be32(fd->status);
2359                 eq->fd.opaque = cpu_to_be32(fd->opaque);
2360                 if (flags && (flags[i] & QMAN_ENQUEUE_FLAG_DCA)) {
2361                         eq->dca = QM_EQCR_DCA_ENABLE |
2362                                 ((flags[i] >> 8) & QM_EQCR_DCA_IDXMASK);
2363                 }
2364                 i++;
2365
2366                 eq = (void *)((unsigned long)(eq + 1) &
2367                         (~(unsigned long)(QM_EQCR_SIZE << 6)));
2368                 eqcr->available--;
2369                 sent++;
2370                 fd++;
2371         }
2372         lwsync();
2373
2374         /* In order for the flushes below to complete faster, the verb is
2375          * first recorded for all entries in a single pass.
2376          */
2377         eq = eqcr->cursor;
2378         for (i = 0; i < sent; i++) {
2379                 eq->__dont_write_directly__verb =
2380                         QM_EQCR_VERB_CMD_ENQUEUE | eqcr->vbit;
2381                 prev_eq = eq;
2382                 eq = (void *)((unsigned long)(eq + 1) &
2383                         (~(unsigned long)(QM_EQCR_SIZE << 6)));
2384                 if (unlikely((prev_eq + 1) != eq))
2385                         eqcr->vbit ^= QM_EQCR_VERB_VBIT;
2386         }
2387
2388         /* We need to flush all the lines, but without load/store operations
2389          * between them.
2390          */
2391         eq = eqcr->cursor;
2392         for (i = 0; i < sent; i++) {
2393                 dcbf(eq);
2394                 eq = (void *)((unsigned long)(eq + 1) &
2395                         (~(unsigned long)(QM_EQCR_SIZE << 6)));
2396         }
2397         /* Update cursor for the next call */
2398         eqcr->cursor = eq;
2399         return sent;
2400 }
2401
2402 int qman_enqueue_orp(struct qman_fq *fq, const struct qm_fd *fd, u32 flags,
2403                      struct qman_fq *orp, u16 orp_seqnum)
2404 {
2405         struct qman_portal *p  = get_affine_portal();
2406         struct qm_eqcr_entry *eq;
2407
2408         eq = try_p_eq_start(p, fq, fd, flags);
2409         if (!eq)
2410                 return -EBUSY;
2411         /* Process ORP-specifics here */
2412         if (flags & QMAN_ENQUEUE_FLAG_NLIS)
2413                 orp_seqnum |= QM_EQCR_SEQNUM_NLIS;
2414         else {
2415                 orp_seqnum &= ~QM_EQCR_SEQNUM_NLIS;
2416                 if (flags & QMAN_ENQUEUE_FLAG_NESN)
2417                         orp_seqnum |= QM_EQCR_SEQNUM_NESN;
2418                 else
2419                         /* No need to check for QMAN_ENQUEUE_FLAG_HOLE */
2420                         orp_seqnum &= ~QM_EQCR_SEQNUM_NESN;
2421         }
2422         eq->seqnum = cpu_to_be16(orp_seqnum);
2423         eq->orp = cpu_to_be32(orp->fqid);
2424         /* Note: QM_EQCR_VERB_INTERRUPT == QMAN_ENQUEUE_FLAG_WAIT_SYNC */
2425         qm_eqcr_pvb_commit(&p->p, QM_EQCR_VERB_ORP |
2426                 ((flags & (QMAN_ENQUEUE_FLAG_HOLE | QMAN_ENQUEUE_FLAG_NESN)) ?
2427                                 0 : QM_EQCR_VERB_CMD_ENQUEUE) |
2428                 (flags & (QM_EQCR_VERB_COLOUR_MASK | QM_EQCR_VERB_INTERRUPT)));
2429
2430         return 0;
2431 }
2432
2433 int qman_modify_cgr(struct qman_cgr *cgr, u32 flags,
2434                     struct qm_mcc_initcgr *opts)
2435 {
2436         struct qm_mc_command *mcc;
2437         struct qm_mc_result *mcr;
2438         struct qman_portal *p = get_affine_portal();
2439
2440         u8 res;
2441         u8 verb = QM_MCC_VERB_MODIFYCGR;
2442
2443         mcc = qm_mc_start(&p->p);
2444         if (opts)
2445                 mcc->initcgr = *opts;
2446         mcc->initcgr.we_mask = cpu_to_be16(mcc->initcgr.we_mask);
2447         mcc->initcgr.cgr.wr_parm_g.word =
2448                 cpu_to_be32(mcc->initcgr.cgr.wr_parm_g.word);
2449         mcc->initcgr.cgr.wr_parm_y.word =
2450                 cpu_to_be32(mcc->initcgr.cgr.wr_parm_y.word);
2451         mcc->initcgr.cgr.wr_parm_r.word =
2452                 cpu_to_be32(mcc->initcgr.cgr.wr_parm_r.word);
2453         mcc->initcgr.cgr.cscn_targ =  cpu_to_be32(mcc->initcgr.cgr.cscn_targ);
2454         mcc->initcgr.cgr.__cs_thres = cpu_to_be16(mcc->initcgr.cgr.__cs_thres);
2455
2456         mcc->initcgr.cgid = cgr->cgrid;
2457         if (flags & QMAN_CGR_FLAG_USE_INIT)
2458                 verb = QM_MCC_VERB_INITCGR;
2459         qm_mc_commit(&p->p, verb);
2460         while (!(mcr = qm_mc_result(&p->p)))
2461                 cpu_relax();
2462
2463         DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == verb);
2464         res = mcr->result;
2465         return (res == QM_MCR_RESULT_OK) ? 0 : -EIO;
2466 }
2467
2468 #define TARG_MASK(n) (0x80000000 >> (n->config->channel - \
2469                                         QM_CHANNEL_SWPORTAL0))
2470 #define TARG_DCP_MASK(n) (0x80000000 >> (10 + n))
2471 #define PORTAL_IDX(n) (n->config->channel - QM_CHANNEL_SWPORTAL0)
2472
2473 int qman_create_cgr(struct qman_cgr *cgr, u32 flags,
2474                     struct qm_mcc_initcgr *opts)
2475 {
2476         struct qm_mcr_querycgr cgr_state;
2477         struct qm_mcc_initcgr local_opts;
2478         int ret;
2479         struct qman_portal *p;
2480
2481         /* We have to check that the provided CGRID is within the limits of the
2482          * data-structures, for obvious reasons. However we'll let h/w take
2483          * care of determining whether it's within the limits of what exists on
2484          * the SoC.
2485          */
2486         if (cgr->cgrid >= __CGR_NUM)
2487                 return -EINVAL;
2488
2489         p = get_affine_portal();
2490
2491         memset(&local_opts, 0, sizeof(struct qm_mcc_initcgr));
2492         cgr->chan = p->config->channel;
2493         spin_lock(&p->cgr_lock);
2494
2495         /* if no opts specified, just add it to the list */
2496         if (!opts)
2497                 goto add_list;
2498
2499         ret = qman_query_cgr(cgr, &cgr_state);
2500         if (ret)
2501                 goto release_lock;
2502         if (opts)
2503                 local_opts = *opts;
2504         if ((qman_ip_rev & 0xFF00) >= QMAN_REV30)
2505                 local_opts.cgr.cscn_targ_upd_ctrl =
2506                         QM_CGR_TARG_UDP_CTRL_WRITE_BIT | PORTAL_IDX(p);
2507         else
2508                 /* Overwrite TARG */
2509                 local_opts.cgr.cscn_targ = cgr_state.cgr.cscn_targ |
2510                                                         TARG_MASK(p);
2511         local_opts.we_mask |= QM_CGR_WE_CSCN_TARG;
2512
2513         /* send init if flags indicate so */
2514         if (opts && (flags & QMAN_CGR_FLAG_USE_INIT))
2515                 ret = qman_modify_cgr(cgr, QMAN_CGR_FLAG_USE_INIT, &local_opts);
2516         else
2517                 ret = qman_modify_cgr(cgr, 0, &local_opts);
2518         if (ret)
2519                 goto release_lock;
2520 add_list:
2521         list_add(&cgr->node, &p->cgr_cbs);
2522
2523         /* Determine if newly added object requires its callback to be called */
2524         ret = qman_query_cgr(cgr, &cgr_state);
2525         if (ret) {
2526                 /* we can't go back, so proceed and return success, but scream
2527                  * and wail to the log file.
2528                  */
2529                 pr_crit("CGR HW state partially modified\n");
2530                 ret = 0;
2531                 goto release_lock;
2532         }
2533         if (cgr->cb && cgr_state.cgr.cscn_en && qman_cgrs_get(&p->cgrs[1],
2534                                                               cgr->cgrid))
2535                 cgr->cb(p, cgr, 1);
2536 release_lock:
2537         spin_unlock(&p->cgr_lock);
2538         return ret;
2539 }
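
/*
 * Illustrative usage sketch (not compiled as part of the driver): register a
 * congestion group on the affine portal with a state-change-notification
 * callback; my_cscn_cb and the CGR id are hypothetical, and any congestion
 * thresholds would be filled into "opts" by the caller:
 *
 *	struct qman_cgr my_cgr = { .cgrid = 5, .cb = my_cscn_cb };
 *	struct qm_mcc_initcgr opts;
 *
 *	memset(&opts, 0, sizeof(opts));
 *	if (qman_create_cgr(&my_cgr, QMAN_CGR_FLAG_USE_INIT, &opts))
 *		(handle the error)
 */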
2540
2541 int qman_create_cgr_to_dcp(struct qman_cgr *cgr, u32 flags, u16 dcp_portal,
2542                            struct qm_mcc_initcgr *opts)
2543 {
2544         struct qm_mcc_initcgr local_opts;
2545         struct qm_mcr_querycgr cgr_state;
2546         int ret;
2547
2548         if ((qman_ip_rev & 0xFF00) < QMAN_REV30) {
2549                 pr_warn("QMan version doesn't support CSCN => DCP portal\n");
2550                 return -EINVAL;
2551         }
2552         /* We have to check that the provided CGRID is within the limits of the
2553          * data-structures, for obvious reasons. However we'll let h/w take
2554          * care of determining whether it's within the limits of what exists on
2555          * the SoC.
2556          */
2557         if (cgr->cgrid >= __CGR_NUM)
2558                 return -EINVAL;
2559
2560         ret = qman_query_cgr(cgr, &cgr_state);
2561         if (ret)
2562                 return ret;
2563
2564         memset(&local_opts, 0, sizeof(struct qm_mcc_initcgr));
2565         if (opts)
2566                 local_opts = *opts;
2567
2568         if ((qman_ip_rev & 0xFF00) >= QMAN_REV30)
2569                 local_opts.cgr.cscn_targ_upd_ctrl =
2570                                 QM_CGR_TARG_UDP_CTRL_WRITE_BIT |
2571                                 QM_CGR_TARG_UDP_CTRL_DCP | dcp_portal;
2572         else
2573                 local_opts.cgr.cscn_targ = cgr_state.cgr.cscn_targ |
2574                                         TARG_DCP_MASK(dcp_portal);
2575         local_opts.we_mask |= QM_CGR_WE_CSCN_TARG;
2576
2577         /* send init if flags indicate so */
2578         if (opts && (flags & QMAN_CGR_FLAG_USE_INIT))
2579                 ret = qman_modify_cgr(cgr, QMAN_CGR_FLAG_USE_INIT,
2580                                       &local_opts);
2581         else
2582                 ret = qman_modify_cgr(cgr, 0, &local_opts);
2583
2584         return ret;
2585 }
2586
2587 int qman_delete_cgr(struct qman_cgr *cgr)
2588 {
2589         struct qm_mcr_querycgr cgr_state;
2590         struct qm_mcc_initcgr local_opts;
2591         int ret = 0;
2592         struct qman_cgr *i;
2593         struct qman_portal *p = get_affine_portal();
2594
2595         if (cgr->chan != p->config->channel) {
2596                 pr_crit("Attempting to delete cgr from a different portal than"
2597                         " the one it was created on: create 0x%x, delete 0x%x\n",
2598                         cgr->chan, p->config->channel);
2599                 ret = -EINVAL;
2600                 goto put_portal;
2601         }
2602         memset(&local_opts, 0, sizeof(struct qm_mcc_initcgr));
2603         spin_lock(&p->cgr_lock);
2604         list_del(&cgr->node);
2605         /*
2606          * If there are no other CGR objects for this CGRID in the list,
2607          * update CSCN_TARG accordingly
2608          */
2609         list_for_each_entry(i, &p->cgr_cbs, node)
2610                 if ((i->cgrid == cgr->cgrid) && i->cb)
2611                         goto release_lock;
2612         ret = qman_query_cgr(cgr, &cgr_state);
2613         if (ret)  {
2614                 /* add back to the list */
2615                 list_add(&cgr->node, &p->cgr_cbs);
2616                 goto release_lock;
2617         }
2618         /* Overwrite TARG */
2619         local_opts.we_mask = QM_CGR_WE_CSCN_TARG;
2620         if ((qman_ip_rev & 0xFF00) >= QMAN_REV30)
2621                 local_opts.cgr.cscn_targ_upd_ctrl = PORTAL_IDX(p);
2622         else
2623                 local_opts.cgr.cscn_targ = cgr_state.cgr.cscn_targ &
2624                                                          ~(TARG_MASK(p));
2625         ret = qman_modify_cgr(cgr, 0, &local_opts);
2626         if (ret)
2627                 /* add back to the list */
2628                 list_add(&cgr->node, &p->cgr_cbs);
2629 release_lock:
2630         spin_unlock(&p->cgr_lock);
2631 put_portal:
2632         return ret;
2633 }
2634
2635 int qman_shutdown_fq(u32 fqid)
2636 {
2637         struct qman_portal *p;
2638         struct qm_portal *low_p;
2639         struct qm_mc_command *mcc;
2640         struct qm_mc_result *mcr;
2641         u8 state;
2642         int orl_empty, fq_empty, drain = 0;
2643         u32 result;
2644         u32 channel, wq;
2645         u16 dest_wq;
2646
2647         p = get_affine_portal();
2648         low_p = &p->p;
2649
2650         /* Determine the state of the FQID */
2651         mcc = qm_mc_start(low_p);
2652         mcc->queryfq_np.fqid = cpu_to_be32(fqid);
2653         qm_mc_commit(low_p, QM_MCC_VERB_QUERYFQ_NP);
2654         while (!(mcr = qm_mc_result(low_p)))
2655                 cpu_relax();
2656         DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCR_VERB_QUERYFQ_NP);
2657         state = mcr->queryfq_np.state & QM_MCR_NP_STATE_MASK;
2658         if (state == QM_MCR_NP_STATE_OOS)
2659                 return 0; /* Already OOS, no need to do anymore checks */
2660
2661         /* Query which channel the FQ is using */
2662         mcc = qm_mc_start(low_p);
2663         mcc->queryfq.fqid = cpu_to_be32(fqid);
2664         qm_mc_commit(low_p, QM_MCC_VERB_QUERYFQ);
2665         while (!(mcr = qm_mc_result(low_p)))
2666                 cpu_relax();
2667         DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCR_VERB_QUERYFQ);
2668
2669         /* Need to store these since the MCR gets reused */
2670         dest_wq = be16_to_cpu(mcr->queryfq.fqd.dest_wq);
2671         channel = dest_wq & 0x7;
2672         wq = dest_wq >> 3;
2673
2674         switch (state) {
2675         case QM_MCR_NP_STATE_TEN_SCHED:
2676         case QM_MCR_NP_STATE_TRU_SCHED:
2677         case QM_MCR_NP_STATE_ACTIVE:
2678         case QM_MCR_NP_STATE_PARKED:
2679                 orl_empty = 0;
2680                 mcc = qm_mc_start(low_p);
2681                 mcc->alterfq.fqid = cpu_to_be32(fqid);
2682                 qm_mc_commit(low_p, QM_MCC_VERB_ALTER_RETIRE);
2683                 while (!(mcr = qm_mc_result(low_p)))
2684                         cpu_relax();
2685                 DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) ==
2686                            QM_MCR_VERB_ALTER_RETIRE);
2687                 result = mcr->result; /* Make a copy as we reuse MCR below */
2688
2689                 if (result == QM_MCR_RESULT_PENDING) {
2690                         /* Need to wait for the FQRN in the message ring, which
2691                         /* Need to wait for the FQRN in the message ring, which
2692                          * will only occur once the FQ has been drained. In order
2693                          * for the FQ to drain, the portal needs to be set to
2694                          * dequeue from the channel the FQ is scheduled on.
2695                         const struct qm_mr_entry *msg;
2696                         const struct qm_dqrr_entry *dqrr = NULL;
2697                         int found_fqrn = 0;
2698                         __maybe_unused u16 dequeue_wq = 0;
2699
2700                         /* Flag that we need to drain FQ */
2701                         drain = 1;
2702
2703                         if (channel >= qm_channel_pool1 &&
2704                             channel < (u16)(qm_channel_pool1 + 15)) {
2705                                 /* Pool channel, enable the bit in the portal */
2706                                 dequeue_wq = (channel -
2707                                               qm_channel_pool1 + 1) << 4 | wq;
2708                         } else if (channel < qm_channel_pool1) {
2709                                 /* Dedicated channel */
2710                                 dequeue_wq = wq;
2711                         } else {
2712                                 pr_info("Cannot recover FQ 0x%x,"
2713                                         " it is scheduled on channel 0x%x\n",
2714                                         fqid, channel);
2715                                 return -EBUSY;
2716                         }
2717                         /* Set the sdqcr to drain this channel */
2718                         if (channel < qm_channel_pool1)
2719                                 qm_dqrr_sdqcr_set(low_p,
2720                                                   QM_SDQCR_TYPE_ACTIVE |
2721                                           QM_SDQCR_CHANNELS_DEDICATED);
2722                         else
2723                                 qm_dqrr_sdqcr_set(low_p,
2724                                                   QM_SDQCR_TYPE_ACTIVE |
2725                                                   QM_SDQCR_CHANNELS_POOL_CONV
2726                                                   (channel));
2727                         while (!found_fqrn) {
2728                                 /* Keep draining DQRR while checking the MR */
2729                                 qm_dqrr_pvb_update(low_p);
2730                                 dqrr = qm_dqrr_current(low_p);
2731                                 while (dqrr) {
2732                                         qm_dqrr_cdc_consume_1ptr(
2733                                                 low_p, dqrr, 0);
2734                                         qm_dqrr_pvb_update(low_p);
2735                                         qm_dqrr_next(low_p);
2736                                         dqrr = qm_dqrr_current(low_p);
2737                                 }
2738                                 /* Process message ring too */
2739                                 qm_mr_pvb_update(low_p);
2740                                 msg = qm_mr_current(low_p);
2741                                 while (msg) {
2742                                         if ((msg->ern.verb &
2743                                              QM_MR_VERB_TYPE_MASK)
2744                                             == QM_MR_VERB_FQRN)
2745                                                 found_fqrn = 1;
2746                                         qm_mr_next(low_p);
2747                                         qm_mr_cci_consume_to_current(low_p);
2748                                         qm_mr_pvb_update(low_p);
2749                                         msg = qm_mr_current(low_p);
2750                                 }
2751                                 cpu_relax();
2752                         }
2753                 }
2754                 if (result != QM_MCR_RESULT_OK &&
2755                     result !=  QM_MCR_RESULT_PENDING) {
2756                         /* error */
2757                         pr_err("qman_retire_fq failed on FQ 0x%x,"
2758                                " result=0x%x\n", fqid, result);
2759                         return -1;
2760                 }
                if (!(mcr->alterfq.fqs & QM_MCR_FQS_ORLPRESENT)) {
                        /* ORL had no entries, no need to wait until the
                         * ERNs come in.
                         */
                        orl_empty = 1;
                }
                /* Retirement succeeded, check to see if the FQ needs
                 * to be drained.
                 */
                if (drain || mcr->alterfq.fqs & QM_MCR_FQS_NOTEMPTY) {
                        /* FQ is not empty, drain it using volatile DQ commands */
                        fq_empty = 0;
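                        /* Volatile dequeues pull frames from this FQ directly
                         * (up to three per VDQCR command here) without it
                         * being scheduled; keep issuing them until a dequeue
                         * reports the FQ empty.
                         */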
                        do {
                                const struct qm_dqrr_entry *dqrr = NULL;
                                u32 vdqcr = fqid | QM_VDQCR_NUMFRAMES_SET(3);

                                qm_dqrr_vdqcr_set(low_p, vdqcr);

                                /* Wait for a dequeue to occur */
                                while (dqrr == NULL) {
                                        qm_dqrr_pvb_update(low_p);
                                        dqrr = qm_dqrr_current(low_p);
                                        if (!dqrr)
                                                cpu_relax();
                                }
                                /* Process the dequeues, making sure to
                                 * empty the ring completely.
                                 */
                                while (dqrr) {
                                        if (dqrr->fqid == fqid &&
                                            dqrr->stat & QM_DQRR_STAT_FQ_EMPTY)
                                                fq_empty = 1;
                                        qm_dqrr_cdc_consume_1ptr(low_p,
                                                                 dqrr, 0);
                                        qm_dqrr_pvb_update(low_p);
                                        qm_dqrr_next(low_p);
                                        dqrr = qm_dqrr_current(low_p);
                                }
                        } while (fq_empty == 0);
                }
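                /* Turn off any static dequeues used while draining the
                 * channel.
                 */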
                qm_dqrr_sdqcr_set(low_p, 0);

                /* Wait for the ORL to have been completely drained */
                while (orl_empty == 0) {
                        const struct qm_mr_entry *msg;

                        qm_mr_pvb_update(low_p);
                        msg = qm_mr_current(low_p);
                        while (msg) {
                                if ((msg->ern.verb & QM_MR_VERB_TYPE_MASK) ==
                                    QM_MR_VERB_FQRL)
                                        orl_empty = 1;
                                qm_mr_next(low_p);
                                qm_mr_cci_consume_to_current(low_p);
                                qm_mr_pvb_update(low_p);
                                msg = qm_mr_current(low_p);
                        }
                        cpu_relax();
                }
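                /* Retired and ORL drained: the FQ can now be taken OOS */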
                mcc = qm_mc_start(low_p);
                mcc->alterfq.fqid = cpu_to_be32(fqid);
                qm_mc_commit(low_p, QM_MCC_VERB_ALTER_OOS);
                while (!(mcr = qm_mc_result(low_p)))
                        cpu_relax();
                DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) ==
                            QM_MCR_VERB_ALTER_OOS);
                if (mcr->result != QM_MCR_RESULT_OK) {
                        pr_err("OOS after drain failed on FQID 0x%x, result 0x%x\n",
                               fqid, mcr->result);
                        return -1;
                }
                return 0;

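        /* The query showed the FQ already retired: no draining is needed,
         * just take it out of service.
         */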
        case QM_MCR_NP_STATE_RETIRED:
                /* Send OOS Command */
                mcc = qm_mc_start(low_p);
                mcc->alterfq.fqid = cpu_to_be32(fqid);
                qm_mc_commit(low_p, QM_MCC_VERB_ALTER_OOS);
                while (!(mcr = qm_mc_result(low_p)))
                        cpu_relax();
                DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) ==
                            QM_MCR_VERB_ALTER_OOS);
                if (mcr->result) {
                        pr_err("OOS failed on FQID 0x%x\n", fqid);
                        return -1;
                }
                return 0;

        }
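        /* FQ state not handled above, report failure */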
        return -1;
}