2 * This file is provided under a dual BSD/GPLv2 license. When using or
3 * redistributing this file, you may do so under either license.
7 * Copyright 2008-2016 Freescale Semiconductor Inc.
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions are met:
12 * * Redistributions of source code must retain the above copyright
13 * notice, this list of conditions and the following disclaimer.
14 * * Redistributions in binary form must reproduce the above copyright
15 * notice, this list of conditions and the following disclaimer in the
16 * documentation and/or other materials provided with the distribution.
17 * * Neither the name of the above-listed copyright holders nor the
18 * names of any contributors may be used to endorse or promote products
19 * derived from this software without specific prior written permission.
23 * ALTERNATIVELY, this software may be distributed under the terms of the
24 * GNU General Public License ("GPL") as published by the Free Software
25 * Foundation, either version 2 of that License or (at your option) any
26 * later version.
28 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
29 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
30 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
31 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
32 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
33 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
34 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
35 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
36 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
37 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
38 * POSSIBILITY OF SUCH DAMAGE.
42 #include <rte_branch_prediction.h>
44 /* Compilation constants */
45 #define DQRR_MAXFILL 15
46 #define EQCR_ITHRESH 4 /* if EQCR congests, interrupt threshold */
47 #define IRQNAME "QMan portal %d"
48 #define MAX_IRQNAME 16 /* big enough for "QMan portal %d" */
49 /* maximum number of DQRR entries to process in qman_poll() */
50 #define FSL_QMAN_POLL_LIMIT 8
52 /* Lock/unlock frame queues, subject to the "LOCKED" flag. This is about
53 * inter-processor locking only. Note, FQLOCK() is always called either under a
54 * local_irq_save() or from interrupt context - hence there's no need for irq
55 * protection (and indeed, attempting to nest irq-protection doesn't work, as
56 * the "irq en/disable" machinery isn't recursive...).
60 struct qman_fq *__fq478 = (fq); \
61 if (fq_isset(__fq478, QMAN_FQ_FLAG_LOCKED)) \
62 spin_lock(&__fq478->fqlock); \
64 #define FQUNLOCK(fq) \
66 struct qman_fq *__fq478 = (fq); \
67 if (fq_isset(__fq478, QMAN_FQ_FLAG_LOCKED)) \
68 spin_unlock(&__fq478->fqlock); \
71 static inline void fq_set(struct qman_fq *fq, u32 mask)
73 dpaa_set_bits(mask, &fq->flags);
76 static inline void fq_clear(struct qman_fq *fq, u32 mask)
78 dpaa_clear_bits(mask, &fq->flags);
81 static inline int fq_isset(struct qman_fq *fq, u32 mask)
83 return fq->flags & mask;
86 static inline int fq_isclear(struct qman_fq *fq, u32 mask)
88 return !(fq->flags & mask);
93 /* PORTAL_BITS_*** - dynamic, strictly internal */
95 /* interrupt sources processed by portal_isr(), configurable */
96 unsigned long irq_sources;
97 u32 use_eqcr_ci_stashing;
98 u32 slowpoll; /* only used when interrupts are off */
99 /* only 1 volatile dequeue at a time */
100 struct qman_fq *vdqcr_owned;
102 int dqrr_disable_ref;
103 /* A portal-specific handler for DCP ERNs. If this is NULL, the global
104 * handler is called instead.
106 qman_cb_dc_ern cb_dc_ern;
107 /* When the cpu-affine portal is activated, this is non-NULL */
108 const struct qm_portal_config *config;
109 struct dpa_rbtree retire_table;
110 char irqname[MAX_IRQNAME];
111 /* 2-element array. cgrs[0] is mask, cgrs[1] is snapshot. */
112 struct qman_cgrs *cgrs;
113 /* linked-list of CSCN handlers. */
114 struct list_head cgr_cbs;
117 /* track if memory was allocated by the driver */
118 #if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
119 /* Keep a shadow copy of the DQRR on LE systems, as the SW needs to
120 * byte-swap the read-only DQRR memory. The first entry must be aligned
121 * to 2 ** 10 so that DQRR index calculations can be based on the shadow
122 * copy address (6 bits for the address shift + 4 bits for the DQRR size).
124 struct qm_dqrr_entry shadow_dqrr[QM_DQRR_SIZE]
125 __attribute__((aligned(1024)));
129 /* Global handler for DCP ERNs. Used when the portal receiving the message does
130 * not have a portal-specific handler.
132 static qman_cb_dc_ern cb_dc_ern;
134 static cpumask_t affine_mask;
135 static DEFINE_SPINLOCK(affine_mask_lock);
136 static u16 affine_channels[NR_CPUS];
137 static RTE_DEFINE_PER_LCORE(struct qman_portal, qman_affine_portal);
139 static inline struct qman_portal *get_affine_portal(void)
141 return &RTE_PER_LCORE(qman_affine_portal);
144 /* This gives a FQID->FQ lookup to cover the fact that we can't directly demux
145 * retirement notifications (the fact that they are sometimes h/w-consumed means
146 * that contextB isn't always a s/w demux - and as we can't know which case it is
147 * when looking at the notification, we have to use the slow lookup for all of
148 * them). NB, it's possible to have multiple FQ objects referring to the same FQID
149 * (though at most one of them should be the consumer), so this table isn't for
150 * all FQs - FQs are added when retirement commands are issued and removed when
151 * they complete, which also massively reduces the size of this table.
153 IMPLEMENT_DPAA_RBTREE(fqtree, struct qman_fq, node, fqid);
155 * This is what everything can wait on, even if it migrates to a different cpu
156 * from the one whose affine portal it is waiting on.
158 static DECLARE_WAIT_QUEUE_HEAD(affine_queue);
160 static inline int table_push_fq(struct qman_portal *p, struct qman_fq *fq)
162 int ret = fqtree_push(&p->retire_table, fq);
165 pr_err("ERROR: double FQ-retirement %d\n", fq->fqid);
169 static inline void table_del_fq(struct qman_portal *p, struct qman_fq *fq)
171 fqtree_del(&p->retire_table, fq);
174 static inline struct qman_fq *table_find_fq(struct qman_portal *p, u32 fqid)
176 return fqtree_find(&p->retire_table, fqid);
179 static inline void cpu_to_hw_fqd(struct qm_fqd *fqd)
181 /* Byteswap the FQD to HW format */
182 fqd->fq_ctrl = cpu_to_be16(fqd->fq_ctrl);
183 fqd->dest_wq = cpu_to_be16(fqd->dest_wq);
184 fqd->ics_cred = cpu_to_be16(fqd->ics_cred);
185 fqd->context_b = cpu_to_be32(fqd->context_b);
186 fqd->context_a.opaque = cpu_to_be64(fqd->context_a.opaque);
187 fqd->opaque_td = cpu_to_be16(fqd->opaque_td);
190 static inline void hw_fqd_to_cpu(struct qm_fqd *fqd)
192 /* Byteswap the FQD to CPU format */
193 fqd->fq_ctrl = be16_to_cpu(fqd->fq_ctrl);
194 fqd->dest_wq = be16_to_cpu(fqd->dest_wq);
195 fqd->ics_cred = be16_to_cpu(fqd->ics_cred);
196 fqd->context_b = be32_to_cpu(fqd->context_b);
197 fqd->context_a.opaque = be64_to_cpu(fqd->context_a.opaque);
200 static inline void cpu_to_hw_fd(struct qm_fd *fd)
202 fd->addr = cpu_to_be40(fd->addr);
203 fd->status = cpu_to_be32(fd->status);
204 fd->opaque = cpu_to_be32(fd->opaque);
207 static inline void hw_fd_to_cpu(struct qm_fd *fd)
209 fd->addr = be40_to_cpu(fd->addr);
210 fd->status = be32_to_cpu(fd->status);
211 fd->opaque = be32_to_cpu(fd->opaque);
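/* The cpu_to_hw_*()/hw_*_to_cpu() helpers above exist because the QMan
 * hardware descriptors (FQD, FD) are big-endian: on little-endian CPUs every
 * field crossing the hardware boundary is byte-swapped, including the 40-bit
 * frame address handled by the be40 conversions.
 */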
214 /* In the case that slow- and fast-path handling are both done by qman_poll()
215 * (i.e. because there is no interrupt handling), we ought to balance how often
216 * we do the fast-path poll versus the slow-path poll. We'll use two decrementer
217 * sources, so we call the fast poll 'n' times before calling the slow poll
218 * once. The idle decrementer constant is used when the last slow-poll detected
219 * no work to do, and the busy decrementer constant when the last slow-poll had
220 * work to do.
221 */
222 #define SLOW_POLL_IDLE 1000
223 #define SLOW_POLL_BUSY 10
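/* Illustrative flow (see qman_poll() further down): the portal's 'slowpoll'
 * counter is decremented on each call, and only when it reaches zero is the
 * slow-path ISR-status poll performed; the counter is then reloaded with
 * SLOW_POLL_BUSY or SLOW_POLL_IDLE depending on whether that slow poll found
 * work to do.
 */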
224 static u32 __poll_portal_slow(struct qman_portal *p, u32 is);
225 static inline unsigned int __poll_portal_fast(struct qman_portal *p,
226 unsigned int poll_limit);
228 /* Portal interrupt handler */
229 static irqreturn_t portal_isr(__always_unused int irq, void *ptr)
231 struct qman_portal *p = ptr;
233 * The CSCI/CCSCI source is cleared inside __poll_portal_slow(), because
234 * it could race against a Query Congestion State command also given
235 * as part of the handling of this interrupt source. We mustn't
236 * clear it a second time in this top-level function.
238 u32 clear = QM_DQAVAIL_MASK | (p->irq_sources &
239 ~(QM_PIRQ_CSCI | QM_PIRQ_CCSCI));
240 u32 is = qm_isr_status_read(&p->p) & p->irq_sources;
241 /* DQRR-handling if it's interrupt-driven */
242 if (is & QM_PIRQ_DQRI)
243 __poll_portal_fast(p, FSL_QMAN_POLL_LIMIT);
244 /* Handling of anything else that's interrupt-driven */
245 clear |= __poll_portal_slow(p, is);
246 qm_isr_status_clear(&p->p, clear);
250 /* This inner version is used privately by qman_create_affine_portal(), as well
251 * as by the exported qman_stop_dequeues().
253 static inline void qman_stop_dequeues_ex(struct qman_portal *p)
255 if (!(p->dqrr_disable_ref++))
256 qm_dqrr_set_maxfill(&p->p, 0);
259 static int drain_mr_fqrni(struct qm_portal *p)
261 const struct qm_mr_entry *msg;
263 msg = qm_mr_current(p);
266 * if MR was full and h/w had other FQRNI entries to produce, we
267 * need to allow it time to produce those entries once the
268 * existing entries are consumed. A worst-case situation
269 * (fully-loaded system) means h/w sequencers may have to do 3-4
270 * other things before servicing the portal's MR pump, each of
271 * which (if slow) may take ~50 qman cycles (which is ~200
272 * processor cycles). So rounding up and then multiplying this
273 * worst-case estimate by a factor of 10, just to be
274 * ultra-paranoid, goes as high as 10,000 cycles. NB, we consume
275 * one entry at a time, so h/w has an opportunity to produce new
276 * entries well before the ring has been fully consumed, so
277 * we're being *really* paranoid here.
279 u64 now, then = mfatb();
283 } while ((then + 10000) > now);
284 msg = qm_mr_current(p);
288 if ((msg->verb & QM_MR_VERB_TYPE_MASK) != QM_MR_VERB_FQRNI) {
289 /* We aren't draining anything but FQRNIs */
290 pr_err("Found verb 0x%x in MR\n", msg->verb);
294 qm_mr_cci_consume(p, 1);
298 static inline int qm_eqcr_init(struct qm_portal *portal,
299 enum qm_eqcr_pmode pmode,
300 unsigned int eq_stash_thresh,
303 /* This use of 'register', as well as all other occurrences, is because
304 * it has been observed to generate much faster code with gcc than is
305 * otherwise the case.
307 register struct qm_eqcr *eqcr = &portal->eqcr;
311 eqcr->ring = portal->addr.ce + QM_CL_EQCR;
312 eqcr->ci = qm_in(EQCR_CI_CINH) & (QM_EQCR_SIZE - 1);
313 qm_cl_invalidate(EQCR_CI);
314 pi = qm_in(EQCR_PI_CINH) & (QM_EQCR_SIZE - 1);
315 eqcr->cursor = eqcr->ring + pi;
316 eqcr->vbit = (qm_in(EQCR_PI_CINH) & QM_EQCR_SIZE) ?
317 QM_EQCR_VERB_VBIT : 0;
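/* qm_cyc_diff() yields the current ring occupancy (pi minus ci, modulo the
 * ring size); 'available' is kept at one less than the ring size so that the
 * producer index can never fully catch up with the consumer index.
 */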
318 eqcr->available = QM_EQCR_SIZE - 1 -
319 qm_cyc_diff(QM_EQCR_SIZE, eqcr->ci, pi);
320 eqcr->ithresh = qm_in(EQCR_ITR);
321 #ifdef RTE_LIBRTE_DPAA_HWDEBUG
325 cfg = (qm_in(CFG) & 0x00ffffff) |
326 (eq_stash_thresh << 28) | /* QCSP_CFG: EST */
327 (eq_stash_prio << 26) | /* QCSP_CFG: EP */
328 ((pmode & 0x3) << 24); /* QCSP_CFG::EPM */
333 static inline void qm_eqcr_finish(struct qm_portal *portal)
335 register struct qm_eqcr *eqcr = &portal->eqcr;
340 * Disable EQCI stashing because the QMan only
341 * presents the value it previously stashed to
342 * maintain coherency. Setting the stash threshold
343 * to 1 then 0 ensures that QMan has resynchronized
344 * its internal copy so that the portal is clean
345 * when it is reinitialized in the future.
347 cfg = (qm_in(CFG) & 0x0fffffff) |
348 (1 << 28); /* QCSP_CFG: EST */
350 cfg &= 0x0fffffff; /* stash threshold = 0 */
353 pi = qm_in(EQCR_PI_CINH) & (QM_EQCR_SIZE - 1);
354 ci = qm_in(EQCR_CI_CINH) & (QM_EQCR_SIZE - 1);
356 /* Refresh EQCR CI cache value */
357 qm_cl_invalidate(EQCR_CI);
358 eqcr->ci = qm_cl_in(EQCR_CI) & (QM_EQCR_SIZE - 1);
360 DPAA_ASSERT(!eqcr->busy);
361 if (pi != EQCR_PTR2IDX(eqcr->cursor))
362 pr_crit("losing uncommitted EQCR entries\n");
364 pr_crit("missing existing EQCR completions\n");
365 if (eqcr->ci != EQCR_PTR2IDX(eqcr->cursor))
366 pr_crit("EQCR destroyed unquiesced\n");
369 static inline int qm_dqrr_init(struct qm_portal *portal,
370 __maybe_unused const struct qm_portal_config *config,
371 enum qm_dqrr_dmode dmode,
372 __maybe_unused enum qm_dqrr_pmode pmode,
373 enum qm_dqrr_cmode cmode, u8 max_fill)
375 register struct qm_dqrr *dqrr = &portal->dqrr;
378 /* Make sure the DQRR will be idle when we enable */
379 qm_out(DQRR_SDQCR, 0);
380 qm_out(DQRR_VDQCR, 0);
381 qm_out(DQRR_PDQCR, 0);
382 dqrr->ring = portal->addr.ce + QM_CL_DQRR;
383 dqrr->pi = qm_in(DQRR_PI_CINH) & (QM_DQRR_SIZE - 1);
384 dqrr->ci = qm_in(DQRR_CI_CINH) & (QM_DQRR_SIZE - 1);
385 dqrr->cursor = dqrr->ring + dqrr->ci;
386 dqrr->fill = qm_cyc_diff(QM_DQRR_SIZE, dqrr->ci, dqrr->pi);
387 dqrr->vbit = (qm_in(DQRR_PI_CINH) & QM_DQRR_SIZE) ?
388 QM_DQRR_VERB_VBIT : 0;
389 dqrr->ithresh = qm_in(DQRR_ITR);
390 #ifdef RTE_LIBRTE_DPAA_HWDEBUG
395 /* Invalidate every ring entry before beginning */
396 for (cfg = 0; cfg < QM_DQRR_SIZE; cfg++)
397 dccivac(qm_cl(dqrr->ring, cfg));
398 cfg = (qm_in(CFG) & 0xff000f00) |
399 ((max_fill & (QM_DQRR_SIZE - 1)) << 20) | /* DQRR_MF */
400 ((dmode & 1) << 18) | /* DP */
401 ((cmode & 3) << 16) | /* DCM */
403 (0 ? 0x40 : 0) | /* Ignore RP */
404 (0 ? 0x10 : 0); /* Ignore SP */
406 qm_dqrr_set_maxfill(portal, max_fill);
410 static inline void qm_dqrr_finish(struct qm_portal *portal)
412 __maybe_unused register struct qm_dqrr *dqrr = &portal->dqrr;
413 #ifdef RTE_LIBRTE_DPAA_HWDEBUG
414 if ((dqrr->cmode != qm_dqrr_cdc) &&
415 (dqrr->ci != DQRR_PTR2IDX(dqrr->cursor)))
416 pr_crit("Ignoring completed DQRR entries\n");
420 static inline int qm_mr_init(struct qm_portal *portal,
421 __maybe_unused enum qm_mr_pmode pmode,
422 enum qm_mr_cmode cmode)
424 register struct qm_mr *mr = &portal->mr;
427 mr->ring = portal->addr.ce + QM_CL_MR;
428 mr->pi = qm_in(MR_PI_CINH) & (QM_MR_SIZE - 1);
429 mr->ci = qm_in(MR_CI_CINH) & (QM_MR_SIZE - 1);
430 mr->cursor = mr->ring + mr->ci;
431 mr->fill = qm_cyc_diff(QM_MR_SIZE, mr->ci, mr->pi);
432 mr->vbit = (qm_in(MR_PI_CINH) & QM_MR_SIZE) ? QM_MR_VERB_VBIT : 0;
433 mr->ithresh = qm_in(MR_ITR);
434 #ifdef RTE_LIBRTE_DPAA_HWDEBUG
438 cfg = (qm_in(CFG) & 0xfffff0ff) |
439 ((cmode & 1) << 8); /* QCSP_CFG:MM */
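/* MR production is tracked with a valid-bit (PVB) scheme: hardware toggles
 * the VBIT it writes into each entry's verb every time it wraps the ring, so
 * software detects newly produced entries by comparing against the VBIT it
 * currently expects and flips that expectation at its own wrap point - see
 * qm_mr_pvb_update() below.
 */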
444 static inline void qm_mr_pvb_update(struct qm_portal *portal)
446 register struct qm_mr *mr = &portal->mr;
447 const struct qm_mr_entry *res = qm_cl(mr->ring, mr->pi);
449 DPAA_ASSERT(mr->pmode == qm_mr_pvb);
450 /* when accessing 'verb', use __raw_readb() to ensure that compiler
451 * inlining doesn't try to optimise out "excess reads".
453 if ((__raw_readb(&res->verb) & QM_MR_VERB_VBIT) == mr->vbit) {
454 mr->pi = (mr->pi + 1) & (QM_MR_SIZE - 1);
456 mr->vbit ^= QM_MR_VERB_VBIT;
464 struct qman_portal *qman_create_portal(
465 struct qman_portal *portal,
466 const struct qm_portal_config *c,
467 const struct qman_cgrs *cgrs)
476 portal->use_eqcr_ci_stashing = ((qman_ip_rev >= QMAN_REV30) ? 1 : 0);
478 * prep the low-level portal struct with the mapped addresses from the
479 * config, everything that follows depends on it and "config" is more
482 p->addr.ce = c->addr_virt[DPAA_PORTAL_CE];
483 p->addr.ci = c->addr_virt[DPAA_PORTAL_CI];
485 * If CI-stashing is used, the current defaults use a threshold of 3,
486 * and stash with higher-than-DQRR priority.
488 if (qm_eqcr_init(p, qm_eqcr_pvb,
489 portal->use_eqcr_ci_stashing ? 3 : 0, 1)) {
490 pr_err("Qman EQCR initialisation failed\n");
493 if (qm_dqrr_init(p, c, qm_dqrr_dpush, qm_dqrr_pvb,
494 qm_dqrr_cdc, DQRR_MAXFILL)) {
495 pr_err("Qman DQRR initialisation failed\n");
498 if (qm_mr_init(p, qm_mr_pvb, qm_mr_cci)) {
499 pr_err("Qman MR initialisation failed\n");
503 pr_err("Qman MC initialisation failed\n");
507 /* static interrupt-gating controls */
508 qm_dqrr_set_ithresh(p, 0);
509 qm_mr_set_ithresh(p, 0);
510 qm_isr_set_iperiod(p, 0);
511 portal->cgrs = kmalloc(2 * sizeof(*cgrs), GFP_KERNEL);
514 /* initial snapshot is no-depletion */
515 qman_cgrs_init(&portal->cgrs[1]);
517 portal->cgrs[0] = *cgrs;
519 /* if the given mask is NULL, assume all CGRs can be seen */
520 qman_cgrs_fill(&portal->cgrs[0]);
521 INIT_LIST_HEAD(&portal->cgr_cbs);
522 spin_lock_init(&portal->cgr_lock);
524 portal->slowpoll = 0;
525 portal->sdqcr = QM_SDQCR_SOURCE_CHANNELS | QM_SDQCR_COUNT_UPTO3 |
526 QM_SDQCR_DEDICATED_PRECEDENCE | QM_SDQCR_TYPE_PRIO_QOS |
527 QM_SDQCR_TOKEN_SET(0xab) | QM_SDQCR_CHANNELS_DEDICATED;
528 portal->dqrr_disable_ref = 0;
529 portal->cb_dc_ern = NULL;
530 sprintf(buf, "qportal-%d", c->channel);
531 dpa_rbtree_init(&portal->retire_table);
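/* Bring-up sequence: every interrupt source starts masked via ISDR and is
 * unmasked step by step below, only once the corresponding ring (EQCR, then
 * DQRR/MR) has been verified clean; ISDR is finally cleared to zero when the
 * portal is known to be quiescent.
 */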
533 qm_isr_disable_write(p, isdr);
534 portal->irq_sources = 0;
535 qm_isr_enable_write(p, portal->irq_sources);
536 qm_isr_status_clear(p, 0xffffffff);
537 snprintf(portal->irqname, MAX_IRQNAME, IRQNAME, c->cpu);
538 if (request_irq(c->irq, portal_isr, 0, portal->irqname,
540 pr_err("request_irq() failed\n");
544 /* Need EQCR to be empty before continuing */
545 isdr &= ~QM_PIRQ_EQCI;
546 qm_isr_disable_write(p, isdr);
547 ret = qm_eqcr_get_fill(p);
549 pr_err("Qman EQCR unclean\n");
550 goto fail_eqcr_empty;
552 isdr &= ~(QM_PIRQ_DQRI | QM_PIRQ_MRI);
553 qm_isr_disable_write(p, isdr);
554 if (qm_dqrr_current(p)) {
555 pr_err("Qman DQRR unclean\n");
556 qm_dqrr_cdc_consume_n(p, 0xffff);
558 if (qm_mr_current(p)) {
559 /* special handling, drain just in case it's a few FQRNIs */
560 if (drain_mr_fqrni(p))
561 goto fail_dqrr_mr_empty;
565 qm_isr_disable_write(p, 0);
567 /* Write a sane SDQCR */
568 qm_dqrr_sdqcr_set(p, portal->sdqcr);
572 free_irq(c->irq, portal);
575 spin_lock_destroy(&portal->cgr_lock);
588 struct qman_portal *qman_create_affine_portal(const struct qm_portal_config *c,
589 const struct qman_cgrs *cgrs)
591 struct qman_portal *res;
592 struct qman_portal *portal = get_affine_portal();
593 /* A criterion for calling this function (from qman_driver.c) is that
594 * we're already affine to the cpu and won't schedule onto another cpu.
597 res = qman_create_portal(portal, c, cgrs);
599 spin_lock(&affine_mask_lock);
600 CPU_SET(c->cpu, &affine_mask);
601 affine_channels[c->cpu] =
603 spin_unlock(&affine_mask_lock);
609 void qman_destroy_portal(struct qman_portal *qm)
611 const struct qm_portal_config *pcfg;
613 /* Stop dequeues on the portal */
614 qm_dqrr_sdqcr_set(&qm->p, 0);
617 * NB we do this to "quiesce" EQCR. If we add enqueue-completions or
618 * something related to QM_PIRQ_EQCI, this may need fixing.
619 * Also, due to the prefetching model used for CI updates in the enqueue
620 * path, this update will only invalidate the CI cacheline *after*
621 * working on it, so we need to call this twice to ensure a full update
622 * irrespective of where the enqueue processing was at when the teardown
623 * began.
624 */
625 qm_eqcr_cce_update(&qm->p);
626 qm_eqcr_cce_update(&qm->p);
629 free_irq(pcfg->irq, qm);
632 qm_mc_finish(&qm->p);
633 qm_mr_finish(&qm->p);
634 qm_dqrr_finish(&qm->p);
635 qm_eqcr_finish(&qm->p);
639 spin_lock_destroy(&qm->cgr_lock);
642 const struct qm_portal_config *qman_destroy_affine_portal(void)
644 /* We don't want to redirect if we're a slave, use "raw" */
645 struct qman_portal *qm = get_affine_portal();
646 const struct qm_portal_config *pcfg;
652 qman_destroy_portal(qm);
654 spin_lock(&affine_mask_lock);
655 CPU_CLR(cpu, &affine_mask);
656 spin_unlock(&affine_mask_lock);
660 int qman_get_portal_index(void)
662 struct qman_portal *p = get_affine_portal();
663 return p->config->index;
666 /* Inline helper to reduce nesting in __poll_portal_slow() */
667 static inline void fq_state_change(struct qman_portal *p, struct qman_fq *fq,
668 const struct qm_mr_entry *msg, u8 verb)
672 case QM_MR_VERB_FQRL:
673 DPAA_ASSERT(fq_isset(fq, QMAN_FQ_STATE_ORL));
674 fq_clear(fq, QMAN_FQ_STATE_ORL);
677 case QM_MR_VERB_FQRN:
678 DPAA_ASSERT((fq->state == qman_fq_state_parked) ||
679 (fq->state == qman_fq_state_sched));
680 DPAA_ASSERT(fq_isset(fq, QMAN_FQ_STATE_CHANGING));
681 fq_clear(fq, QMAN_FQ_STATE_CHANGING);
682 if (msg->fq.fqs & QM_MR_FQS_NOTEMPTY)
683 fq_set(fq, QMAN_FQ_STATE_NE);
684 if (msg->fq.fqs & QM_MR_FQS_ORLPRESENT)
685 fq_set(fq, QMAN_FQ_STATE_ORL);
688 fq->state = qman_fq_state_retired;
690 case QM_MR_VERB_FQPN:
691 DPAA_ASSERT(fq->state == qman_fq_state_sched);
692 DPAA_ASSERT(fq_isclear(fq, QMAN_FQ_STATE_CHANGING));
693 fq->state = qman_fq_state_parked;
698 static u32 __poll_portal_slow(struct qman_portal *p, u32 is)
700 const struct qm_mr_entry *msg;
701 struct qm_mr_entry swapped_msg;
703 if (is & QM_PIRQ_CSCI) {
704 struct qman_cgrs rr, c;
705 struct qm_mc_result *mcr;
706 struct qman_cgr *cgr;
708 spin_lock(&p->cgr_lock);
710 * The CSCI bit must be cleared _before_ issuing the
711 * Query Congestion State command, to ensure that a long
712 * CGR State Change callback cannot miss an intervening
713 * state change.
714 */
715 qm_isr_status_clear(&p->p, QM_PIRQ_CSCI);
717 qm_mc_commit(&p->p, QM_MCC_VERB_QUERYCONGESTION);
718 while (!(mcr = qm_mc_result(&p->p)))
720 /* mask out the ones I'm not interested in */
721 qman_cgrs_and(&rr, (const struct qman_cgrs *)
722 &mcr->querycongestion.state, &p->cgrs[0]);
723 /* check previous snapshot for delta, enter/exit congestion */
724 qman_cgrs_xor(&c, &rr, &p->cgrs[1]);
725 /* update snapshot */
726 qman_cgrs_cp(&p->cgrs[1], &rr);
727 /* Invoke callback */
728 list_for_each_entry(cgr, &p->cgr_cbs, node)
729 if (cgr->cb && qman_cgrs_get(&c, cgr->cgrid))
730 cgr->cb(p, cgr, qman_cgrs_get(&rr, cgr->cgrid));
731 spin_unlock(&p->cgr_lock);
734 if (is & QM_PIRQ_EQRI) {
735 qm_eqcr_cce_update(&p->p);
736 qm_eqcr_set_ithresh(&p->p, 0);
737 wake_up(&affine_queue);
740 if (is & QM_PIRQ_MRI) {
744 qm_mr_pvb_update(&p->p);
745 msg = qm_mr_current(&p->p);
749 hw_fd_to_cpu(&swapped_msg.ern.fd);
750 verb = msg->verb & QM_MR_VERB_TYPE_MASK;
751 /* The message is a software ERN iff the 0x20 bit is set */
754 case QM_MR_VERB_FQRNI:
755 /* nada, we drop FQRNIs on the floor */
757 case QM_MR_VERB_FQRN:
758 case QM_MR_VERB_FQRL:
759 /* Lookup in the retirement table */
760 fq = table_find_fq(p,
761 be32_to_cpu(msg->fq.fqid));
763 fq_state_change(p, fq, &swapped_msg, verb);
765 fq->cb.fqs(p, fq, &swapped_msg);
767 case QM_MR_VERB_FQPN:
769 fq = (void *)(uintptr_t)
770 be32_to_cpu(msg->fq.contextB);
771 fq_state_change(p, fq, msg, verb);
773 fq->cb.fqs(p, fq, &swapped_msg);
775 case QM_MR_VERB_DC_ERN:
778 p->cb_dc_ern(p, msg);
782 static int warn_once;
785 pr_crit("Leaking DCP ERNs!\n");
791 pr_crit("Invalid MR verb 0x%02x\n", verb);
794 /* It's a software ERN */
795 fq = (void *)(uintptr_t)be32_to_cpu(msg->ern.tag);
796 fq->cb.ern(p, fq, &swapped_msg);
802 qm_mr_cci_consume(&p->p, num);
805 * QM_PIRQ_CSCI/CCSCI has already been cleared, as part of its specific
806 * processing. If that interrupt source has meanwhile been re-asserted,
807 * we mustn't clear it here (or in the top-level interrupt handler).
809 return is & (QM_PIRQ_EQCI | QM_PIRQ_EQRI | QM_PIRQ_MRI);
813 * remove some slowish-path stuff from the "fast path" and make sure it isn't
814 * inlined.
815 */
816 static noinline void clear_vdqcr(struct qman_portal *p, struct qman_fq *fq)
818 p->vdqcr_owned = NULL;
820 fq_clear(fq, QMAN_FQ_STATE_VDQCR);
822 wake_up(&affine_queue);
826 * The only states that would conflict with other things if they ran at the
827 * same time on the same cpu are:
829 * (i) setting/clearing vdqcr_owned, and
830 * (ii) clearing the NE (Not Empty) flag.
832 * Both are safe because:
834 * (i) this clearing can only occur after qman_set_vdq() has set the
835 * vdqcr_owned field (which it does before setting VDQCR), and
836 * qman_volatile_dequeue() blocks interrupts and preemption while this is
837 * done so that we can't interfere.
838 * (ii) the NE flag is only cleared after qman_retire_fq() has set it, and as
839 * with (i) that API prevents us from interfering until it's safe.
841 * The good thing is that qman_set_vdq() and qman_retire_fq() run far
842 * less frequently (i.e. per-FQ) than __poll_portal_fast() does, so the net
843 * advantage comes from this function not having to "lock" anything at all.
845 * Note also that the callbacks are invoked at points which are safe against the
846 * above potential conflicts, but that this function itself is not re-entrant
847 * (this is because the function tracks one end of each FIFO in the portal and
848 * we do *not* want to lock that). So the consequence is that it is safe for
849 * user callbacks to call into any QMan API.
851 static inline unsigned int __poll_portal_fast(struct qman_portal *p,
852 unsigned int poll_limit)
854 const struct qm_dqrr_entry *dq;
856 enum qman_cb_dqrr_result res;
857 unsigned int limit = 0;
858 #if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
859 struct qm_dqrr_entry *shadow;
862 qm_dqrr_pvb_update(&p->p);
863 dq = qm_dqrr_current(&p->p);
866 #if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
867 /* If running on an LE system, the fields of the
868 * dequeue entry must be swapped. Because the
869 * QMan HW will ignore writes, the DQRR entry is
870 * copied and the index stored within the copy.
872 shadow = &p->shadow_dqrr[DQRR_PTR2IDX(dq)];
875 shadow->fqid = be32_to_cpu(shadow->fqid);
876 shadow->contextB = be32_to_cpu(shadow->contextB);
877 shadow->seqnum = be16_to_cpu(shadow->seqnum);
878 hw_fd_to_cpu(&shadow->fd);
881 if (dq->stat & QM_DQRR_STAT_UNSCHEDULED) {
883 * VDQCR: don't trust context_b as the FQ may have
884 * been configured for h/w consumption and we're
885 * draining it post-retirement.
889 * We only set QMAN_FQ_STATE_NE when retiring, so we
890 * only need to check for clearing it when doing
891 * volatile dequeues. It's one less thing to check
892 * in the critical path (SDQCR).
894 if (dq->stat & QM_DQRR_STAT_FQ_EMPTY)
895 fq_clear(fq, QMAN_FQ_STATE_NE);
897 * This is duplicated from the SDQCR code, but we
898 * have stuff to do before *and* after this callback,
899 * and we don't want multiple if()s in the critical
900 * path.
901 */
902 res = fq->cb.dqrr(p, fq, dq);
903 if (res == qman_cb_dqrr_stop)
905 /* Check for VDQCR completion */
906 if (dq->stat & QM_DQRR_STAT_DQCR_EXPIRED)
909 /* SDQCR: context_b points to the FQ */
910 fq = (void *)(uintptr_t)dq->contextB;
911 /* Now let the callback do its stuff */
912 res = fq->cb.dqrr(p, fq, dq);
914 * The callback can request that we exit without
915 * consuming this entry or advancing;
917 if (res == qman_cb_dqrr_stop)
920 /* Interpret 'dq' from a driver perspective. */
922 * Parking isn't possible unless HELDACTIVE was set. NB,
923 * FORCEELIGIBLE implies HELDACTIVE, so we only need to
924 * check for HELDACTIVE to cover both.
926 DPAA_ASSERT((dq->stat & QM_DQRR_STAT_FQ_HELDACTIVE) ||
927 (res != qman_cb_dqrr_park));
928 /* just means "skip it, I'll consume it myself later on" */
929 if (res != qman_cb_dqrr_defer)
930 qm_dqrr_cdc_consume_1ptr(&p->p, dq,
931 res == qman_cb_dqrr_park);
935 * Entry processed and consumed, increment our counter. The
936 * callback can request that we exit after consuming the
937 * entry, and we also exit if we reach our processing limit,
938 * so loop back only if neither of these conditions is met.
940 } while (++limit < poll_limit && res != qman_cb_dqrr_consume_stop);
945 u16 qman_affine_channel(int cpu)
948 struct qman_portal *portal = get_affine_portal();
950 cpu = portal->config->cpu;
952 DPAA_BUG_ON(!CPU_ISSET(cpu, &affine_mask));
953 return affine_channels[cpu];
956 struct qm_dqrr_entry *qman_dequeue(struct qman_fq *fq)
958 struct qman_portal *p = get_affine_portal();
959 const struct qm_dqrr_entry *dq;
960 #if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
961 struct qm_dqrr_entry *shadow;
964 qm_dqrr_pvb_update(&p->p);
965 dq = qm_dqrr_current(&p->p);
969 if (!(dq->stat & QM_DQRR_STAT_FD_VALID)) {
970 /* Invalid DQRR - put the portal and consume the DQRR.
971 * Return NULL to user as no packet is seen.
973 qman_dqrr_consume(fq, (struct qm_dqrr_entry *)dq);
977 #if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
978 shadow = &p->shadow_dqrr[DQRR_PTR2IDX(dq)];
981 shadow->fqid = be32_to_cpu(shadow->fqid);
982 shadow->contextB = be32_to_cpu(shadow->contextB);
983 shadow->seqnum = be16_to_cpu(shadow->seqnum);
984 hw_fd_to_cpu(&shadow->fd);
987 if (dq->stat & QM_DQRR_STAT_FQ_EMPTY)
988 fq_clear(fq, QMAN_FQ_STATE_NE);
990 return (struct qm_dqrr_entry *)dq;
993 void qman_dqrr_consume(struct qman_fq *fq,
994 struct qm_dqrr_entry *dq)
996 struct qman_portal *p = get_affine_portal();
998 if (dq->stat & QM_DQRR_STAT_DQCR_EXPIRED)
1001 qm_dqrr_cdc_consume_1ptr(&p->p, dq, 0);
1002 qm_dqrr_next(&p->p);
1005 int qman_poll_dqrr(unsigned int limit)
1007 struct qman_portal *p = get_affine_portal();
1010 ret = __poll_portal_fast(p, limit);
1014 void qman_poll(void)
1016 struct qman_portal *p = get_affine_portal();
1018 if ((~p->irq_sources) & QM_PIRQ_SLOW) {
1019 if (!(p->slowpoll--)) {
1020 u32 is = qm_isr_status_read(&p->p) & ~p->irq_sources;
1021 u32 active = __poll_portal_slow(p, is);
1024 qm_isr_status_clear(&p->p, active);
1025 p->slowpoll = SLOW_POLL_BUSY;
1027 p->slowpoll = SLOW_POLL_IDLE;
1030 if ((~p->irq_sources) & QM_PIRQ_DQRI)
1031 __poll_portal_fast(p, FSL_QMAN_POLL_LIMIT);
1034 void qman_stop_dequeues(void)
1036 struct qman_portal *p = get_affine_portal();
1038 qman_stop_dequeues_ex(p);
1041 void qman_start_dequeues(void)
1043 struct qman_portal *p = get_affine_portal();
1045 DPAA_ASSERT(p->dqrr_disable_ref > 0);
1046 if (!(--p->dqrr_disable_ref))
1047 qm_dqrr_set_maxfill(&p->p, DQRR_MAXFILL);
1050 void qman_static_dequeue_add(u32 pools)
1052 struct qman_portal *p = get_affine_portal();
1054 pools &= p->config->pools;
1056 qm_dqrr_sdqcr_set(&p->p, p->sdqcr);
1059 void qman_static_dequeue_del(u32 pools)
1061 struct qman_portal *p = get_affine_portal();
1063 pools &= p->config->pools;
1065 qm_dqrr_sdqcr_set(&p->p, p->sdqcr);
1068 u32 qman_static_dequeue_get(void)
1070 struct qman_portal *p = get_affine_portal();
1074 void qman_dca(struct qm_dqrr_entry *dq, int park_request)
1076 struct qman_portal *p = get_affine_portal();
1078 qm_dqrr_cdc_consume_1ptr(&p->p, dq, park_request);
1081 /* Frame queue API */
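/* A typical lifecycle of an FQ object, sketched from the calls defined in
 * this file (error handling omitted; 'my_dqrr_cb', 'opts' and 'flags' are
 * hypothetical caller-side names):
 *
 *	struct qman_fq fq = { .cb.dqrr = my_dqrr_cb };
 *	qman_create_fq(fqid, 0, &fq);
 *	qman_init_fq(&fq, QMAN_INITFQ_FLAG_SCHED, &opts);  // FQ now scheduled
 *	... frames flow, dequeues are delivered via fq.cb.dqrr ...
 *	qman_retire_fq(&fq, &flags);
 *	qman_oos_fq(&fq);
 *	qman_destroy_fq(&fq, 0);
 */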
1082 static const char *mcr_result_str(u8 result)
1085 case QM_MCR_RESULT_NULL:
1086 return "QM_MCR_RESULT_NULL";
1087 case QM_MCR_RESULT_OK:
1088 return "QM_MCR_RESULT_OK";
1089 case QM_MCR_RESULT_ERR_FQID:
1090 return "QM_MCR_RESULT_ERR_FQID";
1091 case QM_MCR_RESULT_ERR_FQSTATE:
1092 return "QM_MCR_RESULT_ERR_FQSTATE";
1093 case QM_MCR_RESULT_ERR_NOTEMPTY:
1094 return "QM_MCR_RESULT_ERR_NOTEMPTY";
1095 case QM_MCR_RESULT_PENDING:
1096 return "QM_MCR_RESULT_PENDING";
1097 case QM_MCR_RESULT_ERR_BADCOMMAND:
1098 return "QM_MCR_RESULT_ERR_BADCOMMAND";
1100 return "<unknown MCR result>";
1103 int qman_create_fq(u32 fqid, u32 flags, struct qman_fq *fq)
1106 struct qm_mcr_queryfq_np np;
1107 struct qm_mc_command *mcc;
1108 struct qm_mc_result *mcr;
1109 struct qman_portal *p;
1111 if (flags & QMAN_FQ_FLAG_DYNAMIC_FQID) {
1112 int ret = qman_alloc_fqid(&fqid);
1117 spin_lock_init(&fq->fqlock);
1120 fq->state = qman_fq_state_oos;
1121 fq->cgr_groupid = 0;
1123 if (!(flags & QMAN_FQ_FLAG_AS_IS) || (flags & QMAN_FQ_FLAG_NO_MODIFY))
1125 /* Everything else is AS_IS support */
1126 p = get_affine_portal();
1127 mcc = qm_mc_start(&p->p);
1128 mcc->queryfq.fqid = cpu_to_be32(fqid);
1129 qm_mc_commit(&p->p, QM_MCC_VERB_QUERYFQ);
1130 while (!(mcr = qm_mc_result(&p->p)))
1132 DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCC_VERB_QUERYFQ);
1133 if (mcr->result != QM_MCR_RESULT_OK) {
1134 pr_err("QUERYFQ failed: %s\n", mcr_result_str(mcr->result));
1137 fqd = mcr->queryfq.fqd;
1138 hw_fqd_to_cpu(&fqd);
1139 mcc = qm_mc_start(&p->p);
1140 mcc->queryfq_np.fqid = cpu_to_be32(fqid);
1141 qm_mc_commit(&p->p, QM_MCC_VERB_QUERYFQ_NP);
1142 while (!(mcr = qm_mc_result(&p->p)))
1144 DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCC_VERB_QUERYFQ_NP);
1145 if (mcr->result != QM_MCR_RESULT_OK) {
1146 pr_err("QUERYFQ_NP failed: %s\n", mcr_result_str(mcr->result));
1149 np = mcr->queryfq_np;
1150 /* Phew, have queryfq and queryfq_np results, stitch together
1151 * the FQ object from those.
1153 fq->cgr_groupid = fqd.cgid;
1154 switch (np.state & QM_MCR_NP_STATE_MASK) {
1155 case QM_MCR_NP_STATE_OOS:
1157 case QM_MCR_NP_STATE_RETIRED:
1158 fq->state = qman_fq_state_retired;
1160 fq_set(fq, QMAN_FQ_STATE_NE);
1162 case QM_MCR_NP_STATE_TEN_SCHED:
1163 case QM_MCR_NP_STATE_TRU_SCHED:
1164 case QM_MCR_NP_STATE_ACTIVE:
1165 fq->state = qman_fq_state_sched;
1166 if (np.state & QM_MCR_NP_STATE_R)
1167 fq_set(fq, QMAN_FQ_STATE_CHANGING);
1169 case QM_MCR_NP_STATE_PARKED:
1170 fq->state = qman_fq_state_parked;
1173 DPAA_ASSERT(NULL == "invalid FQ state");
1175 if (fqd.fq_ctrl & QM_FQCTRL_CGE)
1176 fq->state |= QMAN_FQ_STATE_CGR_EN;
1179 if (flags & QMAN_FQ_FLAG_DYNAMIC_FQID)
1180 qman_release_fqid(fqid);
1184 void qman_destroy_fq(struct qman_fq *fq, u32 flags __maybe_unused)
1187 * We don't need to lock the FQ as it is a pre-condition that the FQ be
1188 * quiesced. Instead, run some checks.
1190 switch (fq->state) {
1191 case qman_fq_state_parked:
1192 DPAA_ASSERT(flags & QMAN_FQ_DESTROY_PARKED);
1193 case qman_fq_state_oos:
1194 if (fq_isset(fq, QMAN_FQ_FLAG_DYNAMIC_FQID))
1195 qman_release_fqid(fq->fqid);
1201 DPAA_ASSERT(NULL == "qman_free_fq() on unquiesced FQ!");
1204 u32 qman_fq_fqid(struct qman_fq *fq)
1209 void qman_fq_state(struct qman_fq *fq, enum qman_fq_state *state, u32 *flags)
1217 int qman_init_fq(struct qman_fq *fq, u32 flags, struct qm_mcc_initfq *opts)
1219 struct qm_mc_command *mcc;
1220 struct qm_mc_result *mcr;
1221 struct qman_portal *p;
1223 u8 res, myverb = (flags & QMAN_INITFQ_FLAG_SCHED) ?
1224 QM_MCC_VERB_INITFQ_SCHED : QM_MCC_VERB_INITFQ_PARKED;
1226 if ((fq->state != qman_fq_state_oos) &&
1227 (fq->state != qman_fq_state_parked))
1229 #ifdef RTE_LIBRTE_DPAA_HWDEBUG
1230 if (unlikely(fq_isset(fq, QMAN_FQ_FLAG_NO_MODIFY)))
1233 if (opts && (opts->we_mask & QM_INITFQ_WE_OAC)) {
1234 /* And can't be set at the same time as TDTHRESH */
1235 if (opts->we_mask & QM_INITFQ_WE_TDTHRESH)
1238 /* Issue an INITFQ_[PARKED|SCHED] management command */
1239 p = get_affine_portal();
1241 if (unlikely((fq_isset(fq, QMAN_FQ_STATE_CHANGING)) ||
1242 ((fq->state != qman_fq_state_oos) &&
1243 (fq->state != qman_fq_state_parked)))) {
1247 mcc = qm_mc_start(&p->p);
1249 mcc->initfq = *opts;
1250 mcc->initfq.fqid = cpu_to_be32(fq->fqid);
1251 mcc->initfq.count = 0;
1253 * If the FQ does *not* have the TO_DCPORTAL flag, context_b is set as a
1254 * demux pointer. Otherwise, the caller-provided value is allowed to
1255 * stand, don't overwrite it.
1257 if (fq_isclear(fq, QMAN_FQ_FLAG_TO_DCPORTAL)) {
1260 mcc->initfq.we_mask |= QM_INITFQ_WE_CONTEXTB;
1261 mcc->initfq.fqd.context_b = (u32)(uintptr_t)fq;
1263 * and the physical address - NB, if the user wasn't trying to
1264 * set CONTEXTA, clear the stashing settings.
1266 if (!(mcc->initfq.we_mask & QM_INITFQ_WE_CONTEXTA)) {
1267 mcc->initfq.we_mask |= QM_INITFQ_WE_CONTEXTA;
1268 memset(&mcc->initfq.fqd.context_a, 0,
1269 sizeof(mcc->initfq.fqd.context_a));
1271 phys_fq = rte_mem_virt2phy(fq);
1272 qm_fqd_stashing_set64(&mcc->initfq.fqd, phys_fq);
1275 if (flags & QMAN_INITFQ_FLAG_LOCAL) {
1276 mcc->initfq.fqd.dest.channel = p->config->channel;
1277 if (!(mcc->initfq.we_mask & QM_INITFQ_WE_DESTWQ)) {
1278 mcc->initfq.we_mask |= QM_INITFQ_WE_DESTWQ;
1279 mcc->initfq.fqd.dest.wq = 4;
1282 mcc->initfq.we_mask = cpu_to_be16(mcc->initfq.we_mask);
1283 cpu_to_hw_fqd(&mcc->initfq.fqd);
1284 qm_mc_commit(&p->p, myverb);
1285 while (!(mcr = qm_mc_result(&p->p)))
1287 DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == myverb);
1289 if (res != QM_MCR_RESULT_OK) {
1294 if (opts->we_mask & QM_INITFQ_WE_FQCTRL) {
1295 if (opts->fqd.fq_ctrl & QM_FQCTRL_CGE)
1296 fq_set(fq, QMAN_FQ_STATE_CGR_EN);
1298 fq_clear(fq, QMAN_FQ_STATE_CGR_EN);
1300 if (opts->we_mask & QM_INITFQ_WE_CGID)
1301 fq->cgr_groupid = opts->fqd.cgid;
1303 fq->state = (flags & QMAN_INITFQ_FLAG_SCHED) ?
1304 qman_fq_state_sched : qman_fq_state_parked;
1309 int qman_schedule_fq(struct qman_fq *fq)
1311 struct qm_mc_command *mcc;
1312 struct qm_mc_result *mcr;
1313 struct qman_portal *p;
1318 if (fq->state != qman_fq_state_parked)
1320 #ifdef RTE_LIBRTE_DPAA_HWDEBUG
1321 if (unlikely(fq_isset(fq, QMAN_FQ_FLAG_NO_MODIFY)))
1324 /* Issue an ALTERFQ_SCHED management command */
1325 p = get_affine_portal();
1328 if (unlikely((fq_isset(fq, QMAN_FQ_STATE_CHANGING)) ||
1329 (fq->state != qman_fq_state_parked))) {
1333 mcc = qm_mc_start(&p->p);
1334 mcc->alterfq.fqid = cpu_to_be32(fq->fqid);
1335 qm_mc_commit(&p->p, QM_MCC_VERB_ALTER_SCHED);
1336 while (!(mcr = qm_mc_result(&p->p)))
1338 DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCR_VERB_ALTER_SCHED);
1340 if (res != QM_MCR_RESULT_OK) {
1344 fq->state = qman_fq_state_sched;
1351 int qman_retire_fq(struct qman_fq *fq, u32 *flags)
1353 struct qm_mc_command *mcc;
1354 struct qm_mc_result *mcr;
1355 struct qman_portal *p;
1360 if ((fq->state != qman_fq_state_parked) &&
1361 (fq->state != qman_fq_state_sched))
1363 #ifdef RTE_LIBRTE_DPAA_HWDEBUG
1364 if (unlikely(fq_isset(fq, QMAN_FQ_FLAG_NO_MODIFY)))
1367 p = get_affine_portal();
1370 if (unlikely((fq_isset(fq, QMAN_FQ_STATE_CHANGING)) ||
1371 (fq->state == qman_fq_state_retired) ||
1372 (fq->state == qman_fq_state_oos))) {
1376 rval = table_push_fq(p, fq);
1379 mcc = qm_mc_start(&p->p);
1380 mcc->alterfq.fqid = cpu_to_be32(fq->fqid);
1381 qm_mc_commit(&p->p, QM_MCC_VERB_ALTER_RETIRE);
1382 while (!(mcr = qm_mc_result(&p->p)))
1384 DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCR_VERB_ALTER_RETIRE);
1387 * "Elegant" would be to treat OK/PENDING the same way; set CHANGING,
1388 * and defer the flags until FQRNI or FQRN (respectively) show up. But
1389 * "Friendly" is to process OK immediately, and not set CHANGING. We do
1390 * friendly, otherwise the caller doesn't necessarily have a fully
1391 * "retired" FQ on return even if the retirement was immediate. However
1392 * this does mean some code duplication between here and
1393 * fq_state_change().
1395 if (likely(res == QM_MCR_RESULT_OK)) {
1397 /* Process 'fq' right away, we'll ignore FQRNI */
1398 if (mcr->alterfq.fqs & QM_MCR_FQS_NOTEMPTY)
1399 fq_set(fq, QMAN_FQ_STATE_NE);
1400 if (mcr->alterfq.fqs & QM_MCR_FQS_ORLPRESENT)
1401 fq_set(fq, QMAN_FQ_STATE_ORL);
1403 table_del_fq(p, fq);
1406 fq->state = qman_fq_state_retired;
1409 * Another issue with supporting "immediate" retirement
1410 * is that we're forced to drop FQRNIs, because by the
1411 * time they're seen it may already be "too late" (the
1412 * fq may have been OOS'd and free()'d already). But if
1413 * the upper layer wants a callback whether it's
1414 * immediate or not, we have to fake a "MR" entry to
1415 * look like an FQRNI...
1417 struct qm_mr_entry msg;
1419 msg.verb = QM_MR_VERB_FQRNI;
1420 msg.fq.fqs = mcr->alterfq.fqs;
1421 msg.fq.fqid = fq->fqid;
1422 msg.fq.contextB = (u32)(uintptr_t)fq;
1423 fq->cb.fqs(p, fq, &msg);
1425 } else if (res == QM_MCR_RESULT_PENDING) {
1427 fq_set(fq, QMAN_FQ_STATE_CHANGING);
1430 table_del_fq(p, fq);
1437 int qman_oos_fq(struct qman_fq *fq)
1439 struct qm_mc_command *mcc;
1440 struct qm_mc_result *mcr;
1441 struct qman_portal *p;
1446 if (fq->state != qman_fq_state_retired)
1448 #ifdef RTE_LIBRTE_DPAA_HWDEBUG
1449 if (unlikely(fq_isset(fq, QMAN_FQ_FLAG_NO_MODIFY)))
1452 p = get_affine_portal();
1454 if (unlikely((fq_isset(fq, QMAN_FQ_STATE_BLOCKOOS)) ||
1455 (fq->state != qman_fq_state_retired))) {
1459 mcc = qm_mc_start(&p->p);
1460 mcc->alterfq.fqid = cpu_to_be32(fq->fqid);
1461 qm_mc_commit(&p->p, QM_MCC_VERB_ALTER_OOS);
1462 while (!(mcr = qm_mc_result(&p->p)))
1464 DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCR_VERB_ALTER_OOS);
1466 if (res != QM_MCR_RESULT_OK) {
1470 fq->state = qman_fq_state_oos;
1476 int qman_fq_flow_control(struct qman_fq *fq, int xon)
1478 struct qm_mc_command *mcc;
1479 struct qm_mc_result *mcr;
1480 struct qman_portal *p;
1486 if ((fq->state == qman_fq_state_oos) ||
1487 (fq->state == qman_fq_state_retired) ||
1488 (fq->state == qman_fq_state_parked))
1491 #ifdef RTE_LIBRTE_DPAA_HWDEBUG
1492 if (unlikely(fq_isset(fq, QMAN_FQ_FLAG_NO_MODIFY)))
1495 /* Issue an ALTER_FQXON or ALTER_FQXOFF management command */
1496 p = get_affine_portal();
1498 if (unlikely((fq_isset(fq, QMAN_FQ_STATE_CHANGING)) ||
1499 (fq->state == qman_fq_state_parked) ||
1500 (fq->state == qman_fq_state_oos) ||
1501 (fq->state == qman_fq_state_retired))) {
1505 mcc = qm_mc_start(&p->p);
1506 mcc->alterfq.fqid = fq->fqid;
1507 mcc->alterfq.count = 0;
1508 myverb = xon ? QM_MCC_VERB_ALTER_FQXON : QM_MCC_VERB_ALTER_FQXOFF;
1510 qm_mc_commit(&p->p, myverb);
1511 while (!(mcr = qm_mc_result(&p->p)))
1513 DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == myverb);
1516 if (res != QM_MCR_RESULT_OK) {
1525 int qman_query_fq(struct qman_fq *fq, struct qm_fqd *fqd)
1527 struct qm_mc_command *mcc;
1528 struct qm_mc_result *mcr;
1529 struct qman_portal *p = get_affine_portal();
1533 mcc = qm_mc_start(&p->p);
1534 mcc->queryfq.fqid = cpu_to_be32(fq->fqid);
1535 qm_mc_commit(&p->p, QM_MCC_VERB_QUERYFQ);
1536 while (!(mcr = qm_mc_result(&p->p)))
1538 DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCR_VERB_QUERYFQ);
1540 if (res == QM_MCR_RESULT_OK)
1541 *fqd = mcr->queryfq.fqd;
1543 if (res != QM_MCR_RESULT_OK)
1548 int qman_query_fq_has_pkts(struct qman_fq *fq)
1550 struct qm_mc_command *mcc;
1551 struct qm_mc_result *mcr;
1552 struct qman_portal *p = get_affine_portal();
1557 mcc = qm_mc_start(&p->p);
1558 mcc->queryfq.fqid = cpu_to_be32(fq->fqid);
1559 qm_mc_commit(&p->p, QM_MCC_VERB_QUERYFQ_NP);
1560 while (!(mcr = qm_mc_result(&p->p)))
1563 if (res == QM_MCR_RESULT_OK)
1564 ret = !!mcr->queryfq_np.frm_cnt;
1568 int qman_query_fq_np(struct qman_fq *fq, struct qm_mcr_queryfq_np *np)
1570 struct qm_mc_command *mcc;
1571 struct qm_mc_result *mcr;
1572 struct qman_portal *p = get_affine_portal();
1576 mcc = qm_mc_start(&p->p);
1577 mcc->queryfq.fqid = cpu_to_be32(fq->fqid);
1578 qm_mc_commit(&p->p, QM_MCC_VERB_QUERYFQ_NP);
1579 while (!(mcr = qm_mc_result(&p->p)))
1581 DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCR_VERB_QUERYFQ_NP);
1583 if (res == QM_MCR_RESULT_OK) {
1584 *np = mcr->queryfq_np;
1585 np->fqd_link = be24_to_cpu(np->fqd_link);
1586 np->odp_seq = be16_to_cpu(np->odp_seq);
1587 np->orp_nesn = be16_to_cpu(np->orp_nesn);
1588 np->orp_ea_hseq = be16_to_cpu(np->orp_ea_hseq);
1589 np->orp_ea_tseq = be16_to_cpu(np->orp_ea_tseq);
1590 np->orp_ea_hptr = be24_to_cpu(np->orp_ea_hptr);
1591 np->orp_ea_tptr = be24_to_cpu(np->orp_ea_tptr);
1592 np->pfdr_hptr = be24_to_cpu(np->pfdr_hptr);
1593 np->pfdr_tptr = be24_to_cpu(np->pfdr_tptr);
1594 np->ics_surp = be16_to_cpu(np->ics_surp);
1595 np->byte_cnt = be32_to_cpu(np->byte_cnt);
1596 np->frm_cnt = be24_to_cpu(np->frm_cnt);
1597 np->ra1_sfdr = be16_to_cpu(np->ra1_sfdr);
1598 np->ra2_sfdr = be16_to_cpu(np->ra2_sfdr);
1599 np->od1_sfdr = be16_to_cpu(np->od1_sfdr);
1600 np->od2_sfdr = be16_to_cpu(np->od2_sfdr);
1601 np->od3_sfdr = be16_to_cpu(np->od3_sfdr);
1603 if (res == QM_MCR_RESULT_ERR_FQID)
1605 else if (res != QM_MCR_RESULT_OK)
1610 int qman_query_wq(u8 query_dedicated, struct qm_mcr_querywq *wq)
1612 struct qm_mc_command *mcc;
1613 struct qm_mc_result *mcr;
1614 struct qman_portal *p = get_affine_portal();
1618 myverb = (query_dedicated) ? QM_MCR_VERB_QUERYWQ_DEDICATED :
1619 QM_MCR_VERB_QUERYWQ;
1620 mcc = qm_mc_start(&p->p);
1621 mcc->querywq.channel.id = cpu_to_be16(wq->channel.id);
1622 qm_mc_commit(&p->p, myverb);
1623 while (!(mcr = qm_mc_result(&p->p)))
1625 DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == myverb);
1627 if (res == QM_MCR_RESULT_OK) {
1630 wq->channel.id = be16_to_cpu(mcr->querywq.channel.id);
1631 array_len = ARRAY_SIZE(mcr->querywq.wq_len);
1632 for (i = 0; i < array_len; i++)
1633 wq->wq_len[i] = be32_to_cpu(mcr->querywq.wq_len[i]);
1635 if (res != QM_MCR_RESULT_OK) {
1636 pr_err("QUERYWQ failed: %s\n", mcr_result_str(res));
1642 int qman_testwrite_cgr(struct qman_cgr *cgr, u64 i_bcnt,
1643 struct qm_mcr_cgrtestwrite *result)
1645 struct qm_mc_command *mcc;
1646 struct qm_mc_result *mcr;
1647 struct qman_portal *p = get_affine_portal();
1651 mcc = qm_mc_start(&p->p);
1652 mcc->cgrtestwrite.cgid = cgr->cgrid;
1653 mcc->cgrtestwrite.i_bcnt_hi = (u8)(i_bcnt >> 32);
1654 mcc->cgrtestwrite.i_bcnt_lo = (u32)i_bcnt;
1655 qm_mc_commit(&p->p, QM_MCC_VERB_CGRTESTWRITE);
1656 while (!(mcr = qm_mc_result(&p->p)))
1658 DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCC_VERB_CGRTESTWRITE);
1660 if (res == QM_MCR_RESULT_OK)
1661 *result = mcr->cgrtestwrite;
1662 if (res != QM_MCR_RESULT_OK) {
1663 pr_err("CGR TEST WRITE failed: %s\n", mcr_result_str(res));
1669 int qman_query_cgr(struct qman_cgr *cgr, struct qm_mcr_querycgr *cgrd)
1671 struct qm_mc_command *mcc;
1672 struct qm_mc_result *mcr;
1673 struct qman_portal *p = get_affine_portal();
1677 mcc = qm_mc_start(&p->p);
1678 mcc->querycgr.cgid = cgr->cgrid;
1679 qm_mc_commit(&p->p, QM_MCC_VERB_QUERYCGR);
1680 while (!(mcr = qm_mc_result(&p->p)))
1682 DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCC_VERB_QUERYCGR);
1684 if (res == QM_MCR_RESULT_OK)
1685 *cgrd = mcr->querycgr;
1686 if (res != QM_MCR_RESULT_OK) {
1687 pr_err("QUERY_CGR failed: %s\n", mcr_result_str(res));
1690 cgrd->cgr.wr_parm_g.word =
1691 be32_to_cpu(cgrd->cgr.wr_parm_g.word);
1692 cgrd->cgr.wr_parm_y.word =
1693 be32_to_cpu(cgrd->cgr.wr_parm_y.word);
1694 cgrd->cgr.wr_parm_r.word =
1695 be32_to_cpu(cgrd->cgr.wr_parm_r.word);
1696 cgrd->cgr.cscn_targ = be32_to_cpu(cgrd->cgr.cscn_targ);
1697 cgrd->cgr.__cs_thres = be16_to_cpu(cgrd->cgr.__cs_thres);
1698 for (i = 0; i < ARRAY_SIZE(cgrd->cscn_targ_swp); i++)
1699 cgrd->cscn_targ_swp[i] =
1700 be32_to_cpu(cgrd->cscn_targ_swp[i]);
1704 int qman_query_congestion(struct qm_mcr_querycongestion *congestion)
1706 struct qm_mc_result *mcr;
1707 struct qman_portal *p = get_affine_portal();
1712 qm_mc_commit(&p->p, QM_MCC_VERB_QUERYCONGESTION);
1713 while (!(mcr = qm_mc_result(&p->p)))
1715 DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) ==
1716 QM_MCC_VERB_QUERYCONGESTION);
1718 if (res == QM_MCR_RESULT_OK)
1719 *congestion = mcr->querycongestion;
1720 if (res != QM_MCR_RESULT_OK) {
1721 pr_err("QUERY_CONGESTION failed: %s\n", mcr_result_str(res));
1724 for (i = 0; i < ARRAY_SIZE(congestion->state.state); i++)
1725 congestion->state.state[i] =
1726 be32_to_cpu(congestion->state.state[i]);
1730 int qman_set_vdq(struct qman_fq *fq, u16 num)
1732 struct qman_portal *p = get_affine_portal();
1736 vdqcr = QM_VDQCR_EXACT;
1737 vdqcr |= QM_VDQCR_NUMFRAMES_SET(num);
1739 if ((fq->state != qman_fq_state_parked) &&
1740 (fq->state != qman_fq_state_retired)) {
1744 if (fq_isset(fq, QMAN_FQ_STATE_VDQCR)) {
1748 vdqcr = (vdqcr & ~QM_VDQCR_FQID_MASK) | fq->fqid;
1750 if (!p->vdqcr_owned) {
1752 if (fq_isset(fq, QMAN_FQ_STATE_VDQCR))
1754 fq_set(fq, QMAN_FQ_STATE_VDQCR);
1756 p->vdqcr_owned = fq;
1761 qm_dqrr_vdqcr_set(&p->p, vdqcr);
1767 int qman_volatile_dequeue(struct qman_fq *fq, u32 flags __maybe_unused,
1770 struct qman_portal *p;
1773 if ((fq->state != qman_fq_state_parked) &&
1774 (fq->state != qman_fq_state_retired))
1776 if (vdqcr & QM_VDQCR_FQID_MASK)
1778 if (fq_isset(fq, QMAN_FQ_STATE_VDQCR))
1780 vdqcr = (vdqcr & ~QM_VDQCR_FQID_MASK) | fq->fqid;
1782 p = get_affine_portal();
1784 if (!p->vdqcr_owned) {
1786 if (fq_isset(fq, QMAN_FQ_STATE_VDQCR))
1788 fq_set(fq, QMAN_FQ_STATE_VDQCR);
1790 p->vdqcr_owned = fq;
1798 qm_dqrr_vdqcr_set(&p->p, vdqcr);
1802 static noinline void update_eqcr_ci(struct qman_portal *p, u8 avail)
1805 qm_eqcr_cce_prefetch(&p->p);
1807 qm_eqcr_cce_update(&p->p);
1810 int qman_eqcr_is_empty(void)
1812 struct qman_portal *p = get_affine_portal();
1815 update_eqcr_ci(p, 0);
1816 avail = qm_eqcr_get_fill(&p->p);
1817 return (avail == 0);
1820 void qman_set_dc_ern(qman_cb_dc_ern handler, int affine)
1823 struct qman_portal *p = get_affine_portal();
1825 p->cb_dc_ern = handler;
1827 cb_dc_ern = handler;
1830 static inline struct qm_eqcr_entry *try_p_eq_start(struct qman_portal *p,
1832 const struct qm_fd *fd,
1835 struct qm_eqcr_entry *eq;
1838 if (p->use_eqcr_ci_stashing) {
1840 * The stashing case is easy, only update if we need to in
1841 * order to try and liberate ring entries.
1843 eq = qm_eqcr_start_stash(&p->p);
1846 * The non-stashing case is harder, need to prefetch ahead of
1847 * time.
1848 */
1849 avail = qm_eqcr_get_avail(&p->p);
1851 update_eqcr_ci(p, avail);
1852 eq = qm_eqcr_start_no_stash(&p->p);
1858 if (flags & QMAN_ENQUEUE_FLAG_DCA)
1859 eq->dca = QM_EQCR_DCA_ENABLE |
1860 ((flags & QMAN_ENQUEUE_FLAG_DCA_PARK) ?
1861 QM_EQCR_DCA_PARK : 0) |
1862 ((flags >> 8) & QM_EQCR_DCA_IDXMASK);
1863 eq->fqid = cpu_to_be32(fq->fqid);
1864 eq->tag = cpu_to_be32((u32)(uintptr_t)fq);
1866 cpu_to_hw_fd(&eq->fd);
1870 int qman_enqueue(struct qman_fq *fq, const struct qm_fd *fd, u32 flags)
1872 struct qman_portal *p = get_affine_portal();
1873 struct qm_eqcr_entry *eq;
1875 eq = try_p_eq_start(p, fq, fd, flags);
1878 /* Note: QM_EQCR_VERB_INTERRUPT == QMAN_ENQUEUE_FLAG_WAIT_SYNC */
1879 qm_eqcr_pvb_commit(&p->p, QM_EQCR_VERB_CMD_ENQUEUE |
1880 (flags & (QM_EQCR_VERB_COLOUR_MASK | QM_EQCR_VERB_INTERRUPT)));
1881 /* Factor the below out, it's used from qman_enqueue_orp() too */
1885 int qman_enqueue_multi(struct qman_fq *fq,
1886 const struct qm_fd *fd,
1889 struct qman_portal *p = get_affine_portal();
1890 struct qm_portal *portal = &p->p;
1892 register struct qm_eqcr *eqcr = &portal->eqcr;
1893 struct qm_eqcr_entry *eq = eqcr->cursor, *prev_eq;
1895 u8 i, diff, old_ci, sent = 0;
1897 /* Update the available entries if no entry is free */
1898 if (!eqcr->available) {
1900 eqcr->ci = qm_cl_in(EQCR_CI) & (QM_EQCR_SIZE - 1);
1901 diff = qm_cyc_diff(QM_EQCR_SIZE, old_ci, eqcr->ci);
1902 eqcr->available += diff;
1907 /* try to send as many frames as possible */
1908 while (eqcr->available && frames_to_send--) {
1909 eq->fqid = cpu_to_be32(fq->fqid);
1910 eq->tag = cpu_to_be32((u32)(uintptr_t)fq);
1911 eq->fd.opaque_addr = fd->opaque_addr;
1912 eq->fd.addr = cpu_to_be40(fd->addr);
1913 eq->fd.status = cpu_to_be32(fd->status);
1914 eq->fd.opaque = cpu_to_be32(fd->opaque);
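/* Each EQCR entry is a 64-byte cacheline, so the ring spans
 * QM_EQCR_SIZE << 6 bytes; advancing the cursor and masking off that bit
 * wraps it back to the start of the (size-aligned) ring when it runs past
 * the end.
 */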
1916 eq = (void *)((unsigned long)(eq + 1) &
1917 (~(unsigned long)(QM_EQCR_SIZE << 6)));
1924 /* In order for flushes to complete faster, all lines are recorded in
1928 for (i = 0; i < sent; i++) {
1929 eq->__dont_write_directly__verb =
1930 QM_EQCR_VERB_CMD_ENQUEUE | eqcr->vbit;
1932 eq = (void *)((unsigned long)(eq + 1) &
1933 (~(unsigned long)(QM_EQCR_SIZE << 6)));
1934 if (unlikely((prev_eq + 1) != eq))
1935 eqcr->vbit ^= QM_EQCR_VERB_VBIT;
1938 /* We need to flush all the lines but without load/store operations
1939 * between them.
1940 */
1942 for (i = 0; i < sent; i++) {
1944 eq = (void *)((unsigned long)(eq + 1) &
1945 (~(unsigned long)(QM_EQCR_SIZE << 6)));
1947 /* Update cursor for the next call */
1952 int qman_enqueue_orp(struct qman_fq *fq, const struct qm_fd *fd, u32 flags,
1953 struct qman_fq *orp, u16 orp_seqnum)
1955 struct qman_portal *p = get_affine_portal();
1956 struct qm_eqcr_entry *eq;
1958 eq = try_p_eq_start(p, fq, fd, flags);
1961 /* Process ORP-specifics here */
1962 if (flags & QMAN_ENQUEUE_FLAG_NLIS)
1963 orp_seqnum |= QM_EQCR_SEQNUM_NLIS;
1965 orp_seqnum &= ~QM_EQCR_SEQNUM_NLIS;
1966 if (flags & QMAN_ENQUEUE_FLAG_NESN)
1967 orp_seqnum |= QM_EQCR_SEQNUM_NESN;
1969 /* No need to check for QMAN_ENQUEUE_FLAG_HOLE */
1970 orp_seqnum &= ~QM_EQCR_SEQNUM_NESN;
1972 eq->seqnum = cpu_to_be16(orp_seqnum);
1973 eq->orp = cpu_to_be32(orp->fqid);
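/* When QMAN_ENQUEUE_FLAG_HOLE or QMAN_ENQUEUE_FLAG_NESN is set, the ENQUEUE
 * command bit is dropped from the verb and only the ORP update (plus the
 * colour/interrupt flags) is committed below.
 */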
1974 /* Note: QM_EQCR_VERB_INTERRUPT == QMAN_ENQUEUE_FLAG_WAIT_SYNC */
1975 qm_eqcr_pvb_commit(&p->p, QM_EQCR_VERB_ORP |
1976 ((flags & (QMAN_ENQUEUE_FLAG_HOLE | QMAN_ENQUEUE_FLAG_NESN)) ?
1977 0 : QM_EQCR_VERB_CMD_ENQUEUE) |
1978 (flags & (QM_EQCR_VERB_COLOUR_MASK | QM_EQCR_VERB_INTERRUPT)));
1983 int qman_modify_cgr(struct qman_cgr *cgr, u32 flags,
1984 struct qm_mcc_initcgr *opts)
1986 struct qm_mc_command *mcc;
1987 struct qm_mc_result *mcr;
1988 struct qman_portal *p = get_affine_portal();
1991 u8 verb = QM_MCC_VERB_MODIFYCGR;
1993 mcc = qm_mc_start(&p->p);
1995 mcc->initcgr = *opts;
1996 mcc->initcgr.we_mask = cpu_to_be16(mcc->initcgr.we_mask);
1997 mcc->initcgr.cgr.wr_parm_g.word =
1998 cpu_to_be32(mcc->initcgr.cgr.wr_parm_g.word);
1999 mcc->initcgr.cgr.wr_parm_y.word =
2000 cpu_to_be32(mcc->initcgr.cgr.wr_parm_y.word);
2001 mcc->initcgr.cgr.wr_parm_r.word =
2002 cpu_to_be32(mcc->initcgr.cgr.wr_parm_r.word);
2003 mcc->initcgr.cgr.cscn_targ = cpu_to_be32(mcc->initcgr.cgr.cscn_targ);
2004 mcc->initcgr.cgr.__cs_thres = cpu_to_be16(mcc->initcgr.cgr.__cs_thres);
2006 mcc->initcgr.cgid = cgr->cgrid;
2007 if (flags & QMAN_CGR_FLAG_USE_INIT)
2008 verb = QM_MCC_VERB_INITCGR;
2009 qm_mc_commit(&p->p, verb);
2010 while (!(mcr = qm_mc_result(&p->p)))
2013 DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == verb);
2015 return (res == QM_MCR_RESULT_OK) ? 0 : -EIO;
2018 #define TARG_MASK(n) (0x80000000 >> (n->config->channel - \
2019 QM_CHANNEL_SWPORTAL0))
2020 #define TARG_DCP_MASK(n) (0x80000000 >> (10 + n))
2021 #define PORTAL_IDX(n) (n->config->channel - QM_CHANNEL_SWPORTAL0)
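/* CSCN_TARG is a 32-bit target bitmask: bit 31 corresponds to software
 * portal 0, each further software portal sits one bit lower, and DCP portals
 * start another 10 bits down - which is what TARG_MASK()/TARG_DCP_MASK()
 * encode. PORTAL_IDX() recovers a software portal's index from its channel.
 */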
2023 int qman_create_cgr(struct qman_cgr *cgr, u32 flags,
2024 struct qm_mcc_initcgr *opts)
2026 struct qm_mcr_querycgr cgr_state;
2027 struct qm_mcc_initcgr local_opts;
2029 struct qman_portal *p;
2031 /* We have to check that the provided CGRID is within the limits of the
2032 * data-structures, for obvious reasons. However we'll let h/w take
2033 * care of determining whether it's within the limits of what exists on
2034 * the SoC.
2035 */
2036 if (cgr->cgrid >= __CGR_NUM)
2039 p = get_affine_portal();
2041 memset(&local_opts, 0, sizeof(struct qm_mcc_initcgr));
2042 cgr->chan = p->config->channel;
2043 spin_lock(&p->cgr_lock);
2045 /* if no opts specified, just add it to the list */
2049 ret = qman_query_cgr(cgr, &cgr_state);
2054 if ((qman_ip_rev & 0xFF00) >= QMAN_REV30)
2055 local_opts.cgr.cscn_targ_upd_ctrl =
2056 QM_CGR_TARG_UDP_CTRL_WRITE_BIT | PORTAL_IDX(p);
2058 /* Overwrite TARG */
2059 local_opts.cgr.cscn_targ = cgr_state.cgr.cscn_targ |
2061 local_opts.we_mask |= QM_CGR_WE_CSCN_TARG;
2063 /* send init if flags indicate so */
2064 if (opts && (flags & QMAN_CGR_FLAG_USE_INIT))
2065 ret = qman_modify_cgr(cgr, QMAN_CGR_FLAG_USE_INIT, &local_opts);
2067 ret = qman_modify_cgr(cgr, 0, &local_opts);
2071 list_add(&cgr->node, &p->cgr_cbs);
2073 /* Determine if newly added object requires its callback to be called */
2074 ret = qman_query_cgr(cgr, &cgr_state);
2076 /* we can't go back, so proceed and return success, but screen
2077 * and wail to the log file.
2079 pr_crit("CGR HW state partially modified\n");
2083 if (cgr->cb && cgr_state.cgr.cscn_en && qman_cgrs_get(&p->cgrs[1],
2087 spin_unlock(&p->cgr_lock);
2091 int qman_create_cgr_to_dcp(struct qman_cgr *cgr, u32 flags, u16 dcp_portal,
2092 struct qm_mcc_initcgr *opts)
2094 struct qm_mcc_initcgr local_opts;
2095 struct qm_mcr_querycgr cgr_state;
2098 if ((qman_ip_rev & 0xFF00) < QMAN_REV30) {
2099 pr_warn("QMan version doesn't support CSCN => DCP portal\n");
2102 /* We have to check that the provided CGRID is within the limits of the
2103 * data-structures, for obvious reasons. However we'll let h/w take
2104 * care of determining whether it's within the limits of what exists on
2105 * the SoC.
2106 */
2107 if (cgr->cgrid >= __CGR_NUM)
2110 ret = qman_query_cgr(cgr, &cgr_state);
2114 memset(&local_opts, 0, sizeof(struct qm_mcc_initcgr));
2118 if ((qman_ip_rev & 0xFF00) >= QMAN_REV30)
2119 local_opts.cgr.cscn_targ_upd_ctrl =
2120 QM_CGR_TARG_UDP_CTRL_WRITE_BIT |
2121 QM_CGR_TARG_UDP_CTRL_DCP | dcp_portal;
2123 local_opts.cgr.cscn_targ = cgr_state.cgr.cscn_targ |
2124 TARG_DCP_MASK(dcp_portal);
2125 local_opts.we_mask |= QM_CGR_WE_CSCN_TARG;
2127 /* send init if flags indicate so */
2128 if (opts && (flags & QMAN_CGR_FLAG_USE_INIT))
2129 ret = qman_modify_cgr(cgr, QMAN_CGR_FLAG_USE_INIT,
2132 ret = qman_modify_cgr(cgr, 0, &local_opts);
2137 int qman_delete_cgr(struct qman_cgr *cgr)
2139 struct qm_mcr_querycgr cgr_state;
2140 struct qm_mcc_initcgr local_opts;
2143 struct qman_portal *p = get_affine_portal();
2145 if (cgr->chan != p->config->channel) {
2146 pr_crit("Attempting to delete cgr from different portal than"
2147 " it was create: create 0x%x, delete 0x%x\n",
2148 cgr->chan, p->config->channel);
2152 memset(&local_opts, 0, sizeof(struct qm_mcc_initcgr));
2153 spin_lock(&p->cgr_lock);
2154 list_del(&cgr->node);
2156 * If there are no other CGR objects for this CGRID in the list,
2157 * update CSCN_TARG accordingly
2159 list_for_each_entry(i, &p->cgr_cbs, node)
2160 if ((i->cgrid == cgr->cgrid) && i->cb)
2162 ret = qman_query_cgr(cgr, &cgr_state);
2164 /* add back to the list */
2165 list_add(&cgr->node, &p->cgr_cbs);
2168 /* Overwrite TARG */
2169 local_opts.we_mask = QM_CGR_WE_CSCN_TARG;
2170 if ((qman_ip_rev & 0xFF00) >= QMAN_REV30)
2171 local_opts.cgr.cscn_targ_upd_ctrl = PORTAL_IDX(p);
2173 local_opts.cgr.cscn_targ = cgr_state.cgr.cscn_targ &
2175 ret = qman_modify_cgr(cgr, 0, &local_opts);
2177 /* add back to the list */
2178 list_add(&cgr->node, &p->cgr_cbs);
2180 spin_unlock(&p->cgr_lock);
2185 int qman_shutdown_fq(u32 fqid)
2187 struct qman_portal *p;
2188 struct qm_portal *low_p;
2189 struct qm_mc_command *mcc;
2190 struct qm_mc_result *mcr;
2192 int orl_empty, fq_empty, drain = 0;
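/* Overall flow: query the FQ's state; unless it is already OOS, retire it
 * (draining via SDQCR/VDQCR if retirement is reported pending or the FQ is
 * not empty), wait for the ORL to drain, and finally issue ALTER_OOS.
 */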
2197 p = get_affine_portal();
2200 /* Determine the state of the FQID */
2201 mcc = qm_mc_start(low_p);
2202 mcc->queryfq_np.fqid = cpu_to_be32(fqid);
2203 qm_mc_commit(low_p, QM_MCC_VERB_QUERYFQ_NP);
2204 while (!(mcr = qm_mc_result(low_p)))
2206 DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCR_VERB_QUERYFQ_NP);
2207 state = mcr->queryfq_np.state & QM_MCR_NP_STATE_MASK;
2208 if (state == QM_MCR_NP_STATE_OOS)
2209 return 0; /* Already OOS, no need to do any more checks */
2211 /* Query which channel the FQ is using */
2212 mcc = qm_mc_start(low_p);
2213 mcc->queryfq.fqid = cpu_to_be32(fqid);
2214 qm_mc_commit(low_p, QM_MCC_VERB_QUERYFQ);
2215 while (!(mcr = qm_mc_result(low_p)))
2217 DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCR_VERB_QUERYFQ);
2219 /* Need to store these since the MCR gets reused */
2220 dest_wq = be16_to_cpu(mcr->queryfq.fqd.dest_wq);
2221 channel = dest_wq & 0x7;
2225 case QM_MCR_NP_STATE_TEN_SCHED:
2226 case QM_MCR_NP_STATE_TRU_SCHED:
2227 case QM_MCR_NP_STATE_ACTIVE:
2228 case QM_MCR_NP_STATE_PARKED:
2230 mcc = qm_mc_start(low_p);
2231 mcc->alterfq.fqid = cpu_to_be32(fqid);
2232 qm_mc_commit(low_p, QM_MCC_VERB_ALTER_RETIRE);
2233 while (!(mcr = qm_mc_result(low_p)))
2235 DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) ==
2236 QM_MCR_VERB_ALTER_RETIRE);
2237 result = mcr->result; /* Make a copy as we reuse MCR below */
2239 if (result == QM_MCR_RESULT_PENDING) {
2240 /* Need to wait for the FQRN in the message ring, which
2241 * will only occur once the FQ has been drained. In
2242 * order for the FQ to drain the portal needs to be set
2243 * to dequeue from the channel the FQ is scheduled on
2245 const struct qm_mr_entry *msg;
2246 const struct qm_dqrr_entry *dqrr = NULL;
2248 __maybe_unused u16 dequeue_wq = 0;
2250 /* Flag that we need to drain FQ */
2253 if (channel >= qm_channel_pool1 &&
2254 channel < (u16)(qm_channel_pool1 + 15)) {
2255 /* Pool channel, enable the bit in the portal */
2256 dequeue_wq = (channel -
2257 qm_channel_pool1 + 1) << 4 | wq;
2258 } else if (channel < qm_channel_pool1) {
2259 /* Dedicated channel */
2262 pr_info("Cannot recover FQ 0x%x,"
2263 " it is scheduled on channel 0x%x",
2267 /* Set the sdqcr to drain this channel */
2268 if (channel < qm_channel_pool1)
2269 qm_dqrr_sdqcr_set(low_p,
2270 QM_SDQCR_TYPE_ACTIVE |
2271 QM_SDQCR_CHANNELS_DEDICATED);
2273 qm_dqrr_sdqcr_set(low_p,
2274 QM_SDQCR_TYPE_ACTIVE |
2275 QM_SDQCR_CHANNELS_POOL_CONV
2277 while (!found_fqrn) {
2278 /* Keep draining DQRR while checking the MR */
2279 qm_dqrr_pvb_update(low_p);
2280 dqrr = qm_dqrr_current(low_p);
2282 qm_dqrr_cdc_consume_1ptr(
2284 qm_dqrr_pvb_update(low_p);
2285 qm_dqrr_next(low_p);
2286 dqrr = qm_dqrr_current(low_p);
2288 /* Process message ring too */
2289 qm_mr_pvb_update(low_p);
2290 msg = qm_mr_current(low_p);
2293 QM_MR_VERB_TYPE_MASK)
2297 qm_mr_cci_consume_to_current(low_p);
2298 qm_mr_pvb_update(low_p);
2299 msg = qm_mr_current(low_p);
2304 if (result != QM_MCR_RESULT_OK &&
2305 result != QM_MCR_RESULT_PENDING) {
2307 pr_err("qman_retire_fq failed on FQ 0x%x,"
2308 " result=0x%x\n", fqid, result);
2311 if (!(mcr->alterfq.fqs & QM_MCR_FQS_ORLPRESENT)) {
2312 /* ORL had no entries, no need to wait until the
2317 /* Retirement succeeded, check to see if FQ needs
2320 if (drain || mcr->alterfq.fqs & QM_MCR_FQS_NOTEMPTY) {
2321 /* FQ is Not Empty, drain using volatile DQ commands */
2324 const struct qm_dqrr_entry *dqrr = NULL;
2325 u32 vdqcr = fqid | QM_VDQCR_NUMFRAMES_SET(3);
2327 qm_dqrr_vdqcr_set(low_p, vdqcr);
2329 /* Wait for a dequeue to occur */
2330 while (dqrr == NULL) {
2331 qm_dqrr_pvb_update(low_p);
2332 dqrr = qm_dqrr_current(low_p);
2336 /* Process the dequeues, making sure to
2337 * empty the ring completely.
2340 if (dqrr->fqid == fqid &&
2341 dqrr->stat & QM_DQRR_STAT_FQ_EMPTY)
2343 qm_dqrr_cdc_consume_1ptr(low_p,
2345 qm_dqrr_pvb_update(low_p);
2346 qm_dqrr_next(low_p);
2347 dqrr = qm_dqrr_current(low_p);
2349 } while (fq_empty == 0);
2351 qm_dqrr_sdqcr_set(low_p, 0);
2353 /* Wait for the ORL to have been completely drained */
2354 while (orl_empty == 0) {
2355 const struct qm_mr_entry *msg;
2357 qm_mr_pvb_update(low_p);
2358 msg = qm_mr_current(low_p);
2360 if ((msg->verb & QM_MR_VERB_TYPE_MASK) ==
2364 qm_mr_cci_consume_to_current(low_p);
2365 qm_mr_pvb_update(low_p);
2366 msg = qm_mr_current(low_p);
2370 mcc = qm_mc_start(low_p);
2371 mcc->alterfq.fqid = cpu_to_be32(fqid);
2372 qm_mc_commit(low_p, QM_MCC_VERB_ALTER_OOS);
2373 while (!(mcr = qm_mc_result(low_p)))
2375 DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) ==
2376 QM_MCR_VERB_ALTER_OOS);
2377 if (mcr->result != QM_MCR_RESULT_OK) {
2379 "OOS after drain Failed on FQID 0x%x, result 0x%x\n",
2385 case QM_MCR_NP_STATE_RETIRED:
2386 /* Send OOS Command */
2387 mcc = qm_mc_start(low_p);
2388 mcc->alterfq.fqid = cpu_to_be32(fqid);
2389 qm_mc_commit(low_p, QM_MCC_VERB_ALTER_OOS);
2390 while (!(mcr = qm_mc_result(low_p)))
2392 DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) ==
2393 QM_MCR_VERB_ALTER_OOS);
2395 pr_err("OOS Failed on FQID 0x%x\n", fqid);