2 * This file is provided under a dual BSD/GPLv2 license. When using or
3 * redistributing this file, you may do so under either license.
7 * Copyright 2008-2016 Freescale Semiconductor Inc.
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions are met:
12 * * Redistributions of source code must retain the above copyright
13 * notice, this list of conditions and the following disclaimer.
14 * * Redistributions in binary form must reproduce the above copyright
15 * notice, this list of conditions and the following disclaimer in the
16 * documentation and/or other materials provided with the distribution.
17 * * Neither the name of the above-listed copyright holders nor the
18 * names of any contributors may be used to endorse or promote products
19 * derived from this software without specific prior written permission.
23 * ALTERNATIVELY, this software may be distributed under the terms of the
24 * GNU General Public License ("GPL") as published by the Free Software
25 * Foundation, either version 2 of that License or (at your option) any
28 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
29 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
30 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
31 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
32 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
33 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
34 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
35 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
36 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
37 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
38 * POSSIBILITY OF SUCH DAMAGE.
42 #include <rte_branch_prediction.h>
44 /* Compilation constants */
45 #define DQRR_MAXFILL 15
46 #define EQCR_ITHRESH 4 /* if EQCR congests, interrupt threshold */
47 #define IRQNAME "QMan portal %d"
48 #define MAX_IRQNAME 16 /* big enough for "QMan portal %d" */
49 /* maximum number of DQRR entries to process in qman_poll() */
50 #define FSL_QMAN_POLL_LIMIT 8
52 /* Lock/unlock frame queues, subject to the "LOCKED" flag. This is about
53 * inter-processor locking only. Note, FQLOCK() is always called either under a
54 * local_irq_save() or from interrupt context - hence there's no need for irq
55 * protection (and indeed, attempting to nest irq-protection doesn't work, as
56 * the "irq en/disable" machinery isn't recursive...).
60 struct qman_fq *__fq478 = (fq); \
61 if (fq_isset(__fq478, QMAN_FQ_FLAG_LOCKED)) \
62 spin_lock(&__fq478->fqlock); \
64 #define FQUNLOCK(fq) \
66 struct qman_fq *__fq478 = (fq); \
67 if (fq_isset(__fq478, QMAN_FQ_FLAG_LOCKED)) \
68 spin_unlock(&__fq478->fqlock); \
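/* A minimal usage sketch (an assumption, not taken from this file): callers
 * that can contend from other CPUs create the FQ with QMAN_FQ_FLAG_LOCKED and
 * bracket their portal work with FQLOCK(fq)/FQUNLOCK(fq); for FQs without the
 * flag both macros reduce to the flag test and no spinlock is ever touched.
 */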
71 static inline void fq_set(struct qman_fq *fq, u32 mask)
73 dpaa_set_bits(mask, &fq->flags);
76 static inline void fq_clear(struct qman_fq *fq, u32 mask)
78 dpaa_clear_bits(mask, &fq->flags);
81 static inline int fq_isset(struct qman_fq *fq, u32 mask)
83 return fq->flags & mask;
86 static inline int fq_isclear(struct qman_fq *fq, u32 mask)
88 return !(fq->flags & mask);
93 /* PORTAL_BITS_*** - dynamic, strictly internal */
95 /* interrupt sources processed by portal_isr(), configurable */
96 unsigned long irq_sources;
97 u32 use_eqcr_ci_stashing;
98 u32 slowpoll; /* only used when interrupts are off */
99 /* only 1 volatile dequeue at a time */
100 struct qman_fq *vdqcr_owned;
102 int dqrr_disable_ref;
103 /* A portal-specific handler for DCP ERNs. If this is NULL, the global
104 * handler is called instead.
106 qman_cb_dc_ern cb_dc_ern;
107 /* When the cpu-affine portal is activated, this is non-NULL */
108 const struct qm_portal_config *config;
109 struct dpa_rbtree retire_table;
110 char irqname[MAX_IRQNAME];
111 /* 2-element array. cgrs[0] is mask, cgrs[1] is snapshot. */
112 struct qman_cgrs *cgrs;
113 /* linked-list of CSCN handlers. */
114 struct list_head cgr_cbs;
117 /* track if memory was allocated by the driver */
118 #if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
119 /* Keep a shadow copy of the DQRR on LE systems as the SW needs to
120 * do byte swaps of DQRR read only memory. First entry must be aligned
121 * to 2 ** 10 so that DQRR index calculations can be based on the shadow
122 * copy address (6 bits for address shift + 4 bits for the DQRR size).
124 struct qm_dqrr_entry shadow_dqrr[QM_DQRR_SIZE]
125 __attribute__((aligned(1024)));
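/* i.e. 2 ** 10 = (2 ** 6 bytes per DQRR entry) * (2 ** 4 entries), so with the
 * shadow ring aligned to 1 KiB the entry index can simply be taken as
 * (address >> 6) & (QM_DQRR_SIZE - 1).
 */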
129 /* Global handler for DCP ERNs. Used when the portal receiving the message does
130 * not have a portal-specific handler.
132 static qman_cb_dc_ern cb_dc_ern;
134 static cpumask_t affine_mask;
135 static DEFINE_SPINLOCK(affine_mask_lock);
136 static u16 affine_channels[NR_CPUS];
137 static RTE_DEFINE_PER_LCORE(struct qman_portal, qman_affine_portal);
139 static inline struct qman_portal *get_affine_portal(void)
141 return &RTE_PER_LCORE(qman_affine_portal);
144 /* This gives a FQID->FQ lookup to cover the fact that we can't directly demux
145 * retirement notifications (the fact they are sometimes h/w-consumed means that
146 * contextB isn't always a s/w demux - and as we can't know which case it is
147 * when looking at the notification, we have to use the slow lookup for all of
148 * them). NB, it's possible to have multiple FQ objects refer to the same FQID
149 * (though at most one of them should be the consumer), so this table isn't for
150 * all FQs - FQs are added when retirement commands are issued, and removed when
151 * they complete, which also massively reduces the size of this table.
153 IMPLEMENT_DPAA_RBTREE(fqtree, struct qman_fq, node, fqid);
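/* IMPLEMENT_DPAA_RBTREE() presumably expands to the fqtree_push()/fqtree_del()/
 * fqtree_find() helpers used by table_push_fq(), table_del_fq() and
 * table_find_fq() below, keyed on 'fqid' via the FQ's 'node' member.
 */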
155 * This is what everything can wait on, even if it migrates to a different cpu
156 * to the one whose affine portal it is waiting on.
158 static DECLARE_WAIT_QUEUE_HEAD(affine_queue);
160 static inline int table_push_fq(struct qman_portal *p, struct qman_fq *fq)
162 int ret = fqtree_push(&p->retire_table, fq);
165 pr_err("ERROR: double FQ-retirement %d\n", fq->fqid);
169 static inline void table_del_fq(struct qman_portal *p, struct qman_fq *fq)
171 fqtree_del(&p->retire_table, fq);
174 static inline struct qman_fq *table_find_fq(struct qman_portal *p, u32 fqid)
176 return fqtree_find(&p->retire_table, fqid);
179 #ifdef CONFIG_FSL_QMAN_FQ_LOOKUP
180 static void **qman_fq_lookup_table;
181 static size_t qman_fq_lookup_table_size;
183 int qman_setup_fq_lookup_table(size_t num_entries)
186 /* Allocate 1 more entry since the first entry is not used */
187 qman_fq_lookup_table = vmalloc((num_entries * sizeof(void *)));
188 if (!qman_fq_lookup_table) {
189 pr_err("QMan: Could not allocate fq lookup table\n");
192 memset(qman_fq_lookup_table, 0, num_entries * sizeof(void *));
193 qman_fq_lookup_table_size = num_entries;
194 pr_debug("QMan: Allocated lookup table at %p, entry count %lu\n",
195 qman_fq_lookup_table,
196 (unsigned long)qman_fq_lookup_table_size);
200 /* global structure that maintains fq object mapping */
201 static DEFINE_SPINLOCK(fq_hash_table_lock);
203 static int find_empty_fq_table_entry(u32 *entry, struct qman_fq *fq)
207 spin_lock(&fq_hash_table_lock);
208 /* Can't use index zero because this has special meaning
209 * in context_b field.
211 for (i = 1; i < qman_fq_lookup_table_size; i++) {
212 if (qman_fq_lookup_table[i] == NULL) {
214 qman_fq_lookup_table[i] = fq;
215 spin_unlock(&fq_hash_table_lock);
219 spin_unlock(&fq_hash_table_lock);
223 static void clear_fq_table_entry(u32 entry)
225 spin_lock(&fq_hash_table_lock);
226 DPAA_BUG_ON(entry >= qman_fq_lookup_table_size);
227 qman_fq_lookup_table[entry] = NULL;
228 spin_unlock(&fq_hash_table_lock);
231 static inline struct qman_fq *get_fq_table_entry(u32 entry)
233 DPAA_BUG_ON(entry >= qman_fq_lookup_table_size);
234 return qman_fq_lookup_table[entry];
238 static inline void cpu_to_hw_fqd(struct qm_fqd *fqd)
240 /* Byteswap the FQD to HW format */
241 fqd->fq_ctrl = cpu_to_be16(fqd->fq_ctrl);
242 fqd->dest_wq = cpu_to_be16(fqd->dest_wq);
243 fqd->ics_cred = cpu_to_be16(fqd->ics_cred);
244 fqd->context_b = cpu_to_be32(fqd->context_b);
245 fqd->context_a.opaque = cpu_to_be64(fqd->context_a.opaque);
246 fqd->opaque_td = cpu_to_be16(fqd->opaque_td);
249 static inline void hw_fqd_to_cpu(struct qm_fqd *fqd)
251 /* Byteswap the FQD to CPU format */
252 fqd->fq_ctrl = be16_to_cpu(fqd->fq_ctrl);
253 fqd->dest_wq = be16_to_cpu(fqd->dest_wq);
254 fqd->ics_cred = be16_to_cpu(fqd->ics_cred);
255 fqd->context_b = be32_to_cpu(fqd->context_b);
256 fqd->context_a.opaque = be64_to_cpu(fqd->context_a.opaque);
259 static inline void cpu_to_hw_fd(struct qm_fd *fd)
261 fd->addr = cpu_to_be40(fd->addr);
262 fd->status = cpu_to_be32(fd->status);
263 fd->opaque = cpu_to_be32(fd->opaque);
266 static inline void hw_fd_to_cpu(struct qm_fd *fd)
268 fd->addr = be40_to_cpu(fd->addr);
269 fd->status = be32_to_cpu(fd->status);
270 fd->opaque = be32_to_cpu(fd->opaque);
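/* Convention for the helpers above: the portal/MC interface presents FQDs and
 * FDs big-endian, so cpu_to_hw_*() is applied just before a command is
 * committed (or a frame enqueued) and hw_*_to_cpu() just after a result or
 * dequeue entry is read; on big-endian hosts these byteswaps compile to no-ops.
 */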
273 /* In the case that slow- and fast-path handling are both done by qman_poll()
274 * (ie. because there is no interrupt handling), we ought to balance how often
275 * we do the fast-path poll versus the slow-path poll. We'll use two decrementer
276 * sources, so we call the fast poll 'n' times before calling the slow poll
277 * once. The idle decrementer constant is used when the last slow-poll detected
278 * no work to do, and the busy decrementer constant when the last slow-poll found work to do.
281 #define SLOW_POLL_IDLE 1000
282 #define SLOW_POLL_BUSY 10
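/* In other words (see qman_poll() below): when the QM_PIRQ_SLOW sources are not
 * interrupt-driven, roughly SLOW_POLL_IDLE fast polls separate consecutive slow
 * polls while the portal is idle, dropping to SLOW_POLL_BUSY once a slow poll
 * finds something to do.
 */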
283 static u32 __poll_portal_slow(struct qman_portal *p, u32 is);
284 static inline unsigned int __poll_portal_fast(struct qman_portal *p,
285 unsigned int poll_limit);
287 /* Portal interrupt handler */
288 static irqreturn_t portal_isr(__always_unused int irq, void *ptr)
290 struct qman_portal *p = ptr;
292 * The CSCI/CCSCI source is cleared inside __poll_portal_slow(), because
293 * it could race against a Query Congestion State command also given
294 * as part of the handling of this interrupt source. We mustn't
295 * clear it a second time in this top-level function.
297 u32 clear = QM_DQAVAIL_MASK | (p->irq_sources &
298 ~(QM_PIRQ_CSCI | QM_PIRQ_CCSCI));
299 u32 is = qm_isr_status_read(&p->p) & p->irq_sources;
300 /* DQRR-handling if it's interrupt-driven */
301 if (is & QM_PIRQ_DQRI)
302 __poll_portal_fast(p, FSL_QMAN_POLL_LIMIT);
303 /* Handling of anything else that's interrupt-driven */
304 clear |= __poll_portal_slow(p, is);
305 qm_isr_status_clear(&p->p, clear);
309 /* This inner version is used privately by qman_create_affine_portal(), as well
310 * as by the exported qman_stop_dequeues().
312 static inline void qman_stop_dequeues_ex(struct qman_portal *p)
314 if (!(p->dqrr_disable_ref++))
315 qm_dqrr_set_maxfill(&p->p, 0);
318 static int drain_mr_fqrni(struct qm_portal *p)
320 const struct qm_mr_entry *msg;
322 msg = qm_mr_current(p);
325 * if MR was full and h/w had other FQRNI entries to produce, we
326 * need to allow it time to produce those entries once the
327 * existing entries are consumed. A worst-case situation
328 * (fully-loaded system) means h/w sequencers may have to do 3-4
329 * other things before servicing the portal's MR pump, each of
330 * which (if slow) may take ~50 qman cycles (which is ~200
331 * processor cycles). So rounding up and then multiplying this
332 * worst-case estimate by a factor of 10, just to be
333 * ultra-paranoid, goes as high as 10,000 cycles. NB, we consume
334 * one entry at a time, so h/w has an opportunity to produce new
335 * entries well before the ring has been fully consumed, so
336 * we're being *really* paranoid here.
338 u64 now, then = mfatb();
342 } while ((then + 10000) > now);
343 msg = qm_mr_current(p);
347 if ((msg->verb & QM_MR_VERB_TYPE_MASK) != QM_MR_VERB_FQRNI) {
348 /* We aren't draining anything but FQRNIs */
349 pr_err("Found verb 0x%x in MR\n", msg->verb);
353 qm_mr_cci_consume(p, 1);
357 static inline int qm_eqcr_init(struct qm_portal *portal,
358 enum qm_eqcr_pmode pmode,
359 unsigned int eq_stash_thresh,
362 /* This use of 'register', as well as all other occurrences, is because
363 * it has been observed to generate much faster code with gcc than is
364 * otherwise the case.
366 register struct qm_eqcr *eqcr = &portal->eqcr;
370 eqcr->ring = portal->addr.ce + QM_CL_EQCR;
371 eqcr->ci = qm_in(EQCR_CI_CINH) & (QM_EQCR_SIZE - 1);
372 qm_cl_invalidate(EQCR_CI);
373 pi = qm_in(EQCR_PI_CINH) & (QM_EQCR_SIZE - 1);
374 eqcr->cursor = eqcr->ring + pi;
375 eqcr->vbit = (qm_in(EQCR_PI_CINH) & QM_EQCR_SIZE) ?
376 QM_EQCR_VERB_VBIT : 0;
377 eqcr->available = QM_EQCR_SIZE - 1 -
378 qm_cyc_diff(QM_EQCR_SIZE, eqcr->ci, pi);
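/* Note: the assignment above treats at most QM_EQCR_SIZE - 1 entries as
 * available, presumably so a completely full ring stays distinguishable from an
 * empty one when only the PI/CI indices are compared.
 */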
379 eqcr->ithresh = qm_in(EQCR_ITR);
380 #ifdef RTE_LIBRTE_DPAA_HWDEBUG
384 cfg = (qm_in(CFG) & 0x00ffffff) |
385 (eq_stash_thresh << 28) | /* QCSP_CFG: EST */
386 (eq_stash_prio << 26) | /* QCSP_CFG: EP */
387 ((pmode & 0x3) << 24); /* QCSP_CFG::EPM */
392 static inline void qm_eqcr_finish(struct qm_portal *portal)
394 register struct qm_eqcr *eqcr = &portal->eqcr;
399 * Disable EQCI stashing because the QMan only
400 * presents the value it previously stashed to
401 * maintain coherency. Setting the stash threshold
402 * to 1 then 0 ensures that QMan has resynchronized
403 * its internal copy so that the portal is clean
404 * when it is reinitialized in the future
406 cfg = (qm_in(CFG) & 0x0fffffff) |
407 (1 << 28); /* QCSP_CFG: EST */
409 cfg &= 0x0fffffff; /* stash threshold = 0 */
412 pi = qm_in(EQCR_PI_CINH) & (QM_EQCR_SIZE - 1);
413 ci = qm_in(EQCR_CI_CINH) & (QM_EQCR_SIZE - 1);
415 /* Refresh EQCR CI cache value */
416 qm_cl_invalidate(EQCR_CI);
417 eqcr->ci = qm_cl_in(EQCR_CI) & (QM_EQCR_SIZE - 1);
419 DPAA_ASSERT(!eqcr->busy);
420 if (pi != EQCR_PTR2IDX(eqcr->cursor))
421 pr_crit("losing uncommitted EQCR entries\n");
423 pr_crit("missing existing EQCR completions\n");
424 if (eqcr->ci != EQCR_PTR2IDX(eqcr->cursor))
425 pr_crit("EQCR destroyed unquiesced\n");
428 static inline int qm_dqrr_init(struct qm_portal *portal,
429 __maybe_unused const struct qm_portal_config *config,
430 enum qm_dqrr_dmode dmode,
431 __maybe_unused enum qm_dqrr_pmode pmode,
432 enum qm_dqrr_cmode cmode, u8 max_fill)
434 register struct qm_dqrr *dqrr = &portal->dqrr;
437 /* Make sure the DQRR will be idle when we enable */
438 qm_out(DQRR_SDQCR, 0);
439 qm_out(DQRR_VDQCR, 0);
440 qm_out(DQRR_PDQCR, 0);
441 dqrr->ring = portal->addr.ce + QM_CL_DQRR;
442 dqrr->pi = qm_in(DQRR_PI_CINH) & (QM_DQRR_SIZE - 1);
443 dqrr->ci = qm_in(DQRR_CI_CINH) & (QM_DQRR_SIZE - 1);
444 dqrr->cursor = dqrr->ring + dqrr->ci;
445 dqrr->fill = qm_cyc_diff(QM_DQRR_SIZE, dqrr->ci, dqrr->pi);
446 dqrr->vbit = (qm_in(DQRR_PI_CINH) & QM_DQRR_SIZE) ?
447 QM_DQRR_VERB_VBIT : 0;
448 dqrr->ithresh = qm_in(DQRR_ITR);
449 #ifdef RTE_LIBRTE_DPAA_HWDEBUG
454 /* Invalidate every ring entry before beginning */
455 for (cfg = 0; cfg < QM_DQRR_SIZE; cfg++)
456 dccivac(qm_cl(dqrr->ring, cfg));
457 cfg = (qm_in(CFG) & 0xff000f00) |
458 ((max_fill & (QM_DQRR_SIZE - 1)) << 20) | /* DQRR_MF */
459 ((dmode & 1) << 18) | /* DP */
460 ((cmode & 3) << 16) | /* DCM */
462 (0 ? 0x40 : 0) | /* Ignore RP */
463 (0 ? 0x10 : 0); /* Ignore SP */
465 qm_dqrr_set_maxfill(portal, max_fill);
469 static inline void qm_dqrr_finish(struct qm_portal *portal)
471 __maybe_unused register struct qm_dqrr *dqrr = &portal->dqrr;
472 #ifdef RTE_LIBRTE_DPAA_HWDEBUG
473 if ((dqrr->cmode != qm_dqrr_cdc) &&
474 (dqrr->ci != DQRR_PTR2IDX(dqrr->cursor)))
475 pr_crit("Ignoring completed DQRR entries\n");
479 static inline int qm_mr_init(struct qm_portal *portal,
480 __maybe_unused enum qm_mr_pmode pmode,
481 enum qm_mr_cmode cmode)
483 register struct qm_mr *mr = &portal->mr;
486 mr->ring = portal->addr.ce + QM_CL_MR;
487 mr->pi = qm_in(MR_PI_CINH) & (QM_MR_SIZE - 1);
488 mr->ci = qm_in(MR_CI_CINH) & (QM_MR_SIZE - 1);
489 mr->cursor = mr->ring + mr->ci;
490 mr->fill = qm_cyc_diff(QM_MR_SIZE, mr->ci, mr->pi);
491 mr->vbit = (qm_in(MR_PI_CINH) & QM_MR_SIZE) ? QM_MR_VERB_VBIT : 0;
492 mr->ithresh = qm_in(MR_ITR);
493 #ifdef RTE_LIBRTE_DPAA_HWDEBUG
497 cfg = (qm_in(CFG) & 0xfffff0ff) |
498 ((cmode & 1) << 8); /* QCSP_CFG:MM */
503 static inline void qm_mr_pvb_update(struct qm_portal *portal)
505 register struct qm_mr *mr = &portal->mr;
506 const struct qm_mr_entry *res = qm_cl(mr->ring, mr->pi);
508 DPAA_ASSERT(mr->pmode == qm_mr_pvb);
509 /* when accessing 'verb', use __raw_readb() to ensure that compiler
510 * inlining doesn't try to optimise out "excess reads".
512 if ((__raw_readb(&res->verb) & QM_MR_VERB_VBIT) == mr->vbit) {
513 mr->pi = (mr->pi + 1) & (QM_MR_SIZE - 1);
515 mr->vbit ^= QM_MR_VERB_VBIT;
523 struct qman_portal *qman_create_portal(
524 struct qman_portal *portal,
525 const struct qm_portal_config *c,
526 const struct qman_cgrs *cgrs)
535 portal->use_eqcr_ci_stashing = ((qman_ip_rev >= QMAN_REV30) ? 1 : 0);
537 * prep the low-level portal struct with the mapped addresses from the
538 * config; everything that follows depends on it, and "config" is more
541 p->addr.ce = c->addr_virt[DPAA_PORTAL_CE];
542 p->addr.ci = c->addr_virt[DPAA_PORTAL_CI];
544 * If CI-stashing is used, the current defaults use a threshold of 3,
545 * and stash with higher-than-DQRR priority.
547 if (qm_eqcr_init(p, qm_eqcr_pvb,
548 portal->use_eqcr_ci_stashing ? 3 : 0, 1)) {
549 pr_err("Qman EQCR initialisation failed\n");
552 if (qm_dqrr_init(p, c, qm_dqrr_dpush, qm_dqrr_pvb,
553 qm_dqrr_cdc, DQRR_MAXFILL)) {
554 pr_err("Qman DQRR initialisation failed\n");
557 if (qm_mr_init(p, qm_mr_pvb, qm_mr_cci)) {
558 pr_err("Qman MR initialisation failed\n");
562 pr_err("Qman MC initialisation failed\n");
566 /* static interrupt-gating controls */
567 qm_dqrr_set_ithresh(p, 0);
568 qm_mr_set_ithresh(p, 0);
569 qm_isr_set_iperiod(p, 0);
570 portal->cgrs = kmalloc(2 * sizeof(*cgrs), GFP_KERNEL);
573 /* initial snapshot is no-depletion */
574 qman_cgrs_init(&portal->cgrs[1]);
576 portal->cgrs[0] = *cgrs;
578 /* if the given mask is NULL, assume all CGRs can be seen */
579 qman_cgrs_fill(&portal->cgrs[0]);
580 INIT_LIST_HEAD(&portal->cgr_cbs);
581 spin_lock_init(&portal->cgr_lock);
583 portal->slowpoll = 0;
584 portal->sdqcr = QM_SDQCR_SOURCE_CHANNELS | QM_SDQCR_COUNT_UPTO3 |
585 QM_SDQCR_DEDICATED_PRECEDENCE | QM_SDQCR_TYPE_PRIO_QOS |
586 QM_SDQCR_TOKEN_SET(0xab) | QM_SDQCR_CHANNELS_DEDICATED;
587 portal->dqrr_disable_ref = 0;
588 portal->cb_dc_ern = NULL;
589 sprintf(buf, "qportal-%d", c->channel);
590 dpa_rbtree_init(&portal->retire_table);
592 qm_isr_disable_write(p, isdr);
593 portal->irq_sources = 0;
594 qm_isr_enable_write(p, portal->irq_sources);
595 qm_isr_status_clear(p, 0xffffffff);
596 snprintf(portal->irqname, MAX_IRQNAME, IRQNAME, c->cpu);
597 if (request_irq(c->irq, portal_isr, 0, portal->irqname,
599 pr_err("request_irq() failed\n");
603 /* Need EQCR to be empty before continuing */
604 isdr &= ~QM_PIRQ_EQCI;
605 qm_isr_disable_write(p, isdr);
606 ret = qm_eqcr_get_fill(p);
608 pr_err("Qman EQCR unclean\n");
609 goto fail_eqcr_empty;
611 isdr &= ~(QM_PIRQ_DQRI | QM_PIRQ_MRI);
612 qm_isr_disable_write(p, isdr);
613 if (qm_dqrr_current(p)) {
614 pr_err("Qman DQRR unclean\n");
615 qm_dqrr_cdc_consume_n(p, 0xffff);
617 if (qm_mr_current(p) && drain_mr_fqrni(p)) {
618 /* special handling, drain just in case it's a few FQRNIs */
619 if (drain_mr_fqrni(p))
620 goto fail_dqrr_mr_empty;
624 qm_isr_disable_write(p, 0);
626 /* Write a sane SDQCR */
627 qm_dqrr_sdqcr_set(p, portal->sdqcr);
631 free_irq(c->irq, portal);
634 spin_lock_destroy(&portal->cgr_lock);
647 struct qman_portal *qman_create_affine_portal(const struct qm_portal_config *c,
648 const struct qman_cgrs *cgrs)
650 struct qman_portal *res;
651 struct qman_portal *portal = get_affine_portal();
652 /* A criterion for calling this function (from qman_driver.c) is that
653 * we're already affine to the cpu and won't schedule onto another cpu.
656 res = qman_create_portal(portal, c, cgrs);
658 spin_lock(&affine_mask_lock);
659 CPU_SET(c->cpu, &affine_mask);
660 affine_channels[c->cpu] =
662 spin_unlock(&affine_mask_lock);
668 void qman_destroy_portal(struct qman_portal *qm)
670 const struct qm_portal_config *pcfg;
672 /* Stop dequeues on the portal */
673 qm_dqrr_sdqcr_set(&qm->p, 0);
676 * NB we do this to "quiesce" EQCR. If we add enqueue-completions or
677 * something related to QM_PIRQ_EQCI, this may need fixing.
678 * Also, due to the prefetching model used for CI updates in the enqueue
679 * path, this update will only invalidate the CI cacheline *after*
680 * working on it, so we need to call this twice to ensure a full update
681 * irrespective of where the enqueue processing was at when the teardown
684 qm_eqcr_cce_update(&qm->p);
685 qm_eqcr_cce_update(&qm->p);
688 free_irq(pcfg->irq, qm);
691 qm_mc_finish(&qm->p);
692 qm_mr_finish(&qm->p);
693 qm_dqrr_finish(&qm->p);
694 qm_eqcr_finish(&qm->p);
698 spin_lock_destroy(&qm->cgr_lock);
701 const struct qm_portal_config *qman_destroy_affine_portal(void)
703 /* We don't want to redirect if we're a slave, use "raw" */
704 struct qman_portal *qm = get_affine_portal();
705 const struct qm_portal_config *pcfg;
711 qman_destroy_portal(qm);
713 spin_lock(&affine_mask_lock);
714 CPU_CLR(cpu, &affine_mask);
715 spin_unlock(&affine_mask_lock);
719 int qman_get_portal_index(void)
721 struct qman_portal *p = get_affine_portal();
722 return p->config->index;
725 /* Inline helper to reduce nesting in __poll_portal_slow() */
726 static inline void fq_state_change(struct qman_portal *p, struct qman_fq *fq,
727 const struct qm_mr_entry *msg, u8 verb)
731 case QM_MR_VERB_FQRL:
732 DPAA_ASSERT(fq_isset(fq, QMAN_FQ_STATE_ORL));
733 fq_clear(fq, QMAN_FQ_STATE_ORL);
736 case QM_MR_VERB_FQRN:
737 DPAA_ASSERT((fq->state == qman_fq_state_parked) ||
738 (fq->state == qman_fq_state_sched));
739 DPAA_ASSERT(fq_isset(fq, QMAN_FQ_STATE_CHANGING));
740 fq_clear(fq, QMAN_FQ_STATE_CHANGING);
741 if (msg->fq.fqs & QM_MR_FQS_NOTEMPTY)
742 fq_set(fq, QMAN_FQ_STATE_NE);
743 if (msg->fq.fqs & QM_MR_FQS_ORLPRESENT)
744 fq_set(fq, QMAN_FQ_STATE_ORL);
747 fq->state = qman_fq_state_retired;
749 case QM_MR_VERB_FQPN:
750 DPAA_ASSERT(fq->state == qman_fq_state_sched);
751 DPAA_ASSERT(fq_isclear(fq, QMAN_FQ_STATE_CHANGING));
752 fq->state = qman_fq_state_parked;
757 static u32 __poll_portal_slow(struct qman_portal *p, u32 is)
759 const struct qm_mr_entry *msg;
760 struct qm_mr_entry swapped_msg;
762 if (is & QM_PIRQ_CSCI) {
763 struct qman_cgrs rr, c;
764 struct qm_mc_result *mcr;
765 struct qman_cgr *cgr;
767 spin_lock(&p->cgr_lock);
769 * The CSCI bit must be cleared _before_ issuing the
770 * Query Congestion State command, to ensure that a long
771 * CGR State Change callback cannot miss an intervening
774 qm_isr_status_clear(&p->p, QM_PIRQ_CSCI);
776 qm_mc_commit(&p->p, QM_MCC_VERB_QUERYCONGESTION);
777 while (!(mcr = qm_mc_result(&p->p)))
779 /* mask out the ones I'm not interested in */
780 qman_cgrs_and(&rr, (const struct qman_cgrs *)
781 &mcr->querycongestion.state, &p->cgrs[0]);
782 /* check previous snapshot for delta, enter/exit congestion */
783 qman_cgrs_xor(&c, &rr, &p->cgrs[1]);
784 /* update snapshot */
785 qman_cgrs_cp(&p->cgrs[1], &rr);
786 /* Invoke callback */
787 list_for_each_entry(cgr, &p->cgr_cbs, node)
788 if (cgr->cb && qman_cgrs_get(&c, cgr->cgrid))
789 cgr->cb(p, cgr, qman_cgrs_get(&rr, cgr->cgrid));
790 spin_unlock(&p->cgr_lock);
793 if (is & QM_PIRQ_EQRI) {
794 qm_eqcr_cce_update(&p->p);
795 qm_eqcr_set_ithresh(&p->p, 0);
796 wake_up(&affine_queue);
799 if (is & QM_PIRQ_MRI) {
803 qm_mr_pvb_update(&p->p);
804 msg = qm_mr_current(&p->p);
808 hw_fd_to_cpu(&swapped_msg.ern.fd);
809 verb = msg->verb & QM_MR_VERB_TYPE_MASK;
810 /* The message is a software ERN iff the 0x20 bit is set */
813 case QM_MR_VERB_FQRNI:
814 /* nada, we drop FQRNIs on the floor */
816 case QM_MR_VERB_FQRN:
817 case QM_MR_VERB_FQRL:
818 /* Lookup in the retirement table */
819 fq = table_find_fq(p,
820 be32_to_cpu(msg->fq.fqid));
822 fq_state_change(p, fq, &swapped_msg, verb);
824 fq->cb.fqs(p, fq, &swapped_msg);
826 case QM_MR_VERB_FQPN:
828 #ifdef CONFIG_FSL_QMAN_FQ_LOOKUP
829 fq = get_fq_table_entry(
830 be32_to_cpu(msg->fq.contextB));
832 fq = (void *)(uintptr_t)
833 be32_to_cpu(msg->fq.contextB);
835 fq_state_change(p, fq, msg, verb);
837 fq->cb.fqs(p, fq, &swapped_msg);
839 case QM_MR_VERB_DC_ERN:
842 p->cb_dc_ern(p, msg);
846 static int warn_once;
849 pr_crit("Leaking DCP ERNs!\n");
855 pr_crit("Invalid MR verb 0x%02x\n", verb);
858 /* It's a software ERN */
859 #ifdef CONFIG_FSL_QMAN_FQ_LOOKUP
860 fq = get_fq_table_entry(be32_to_cpu(msg->ern.tag));
862 fq = (void *)(uintptr_t)be32_to_cpu(msg->ern.tag);
864 fq->cb.ern(p, fq, &swapped_msg);
870 qm_mr_cci_consume(&p->p, num);
873 * QM_PIRQ_CSCI/CCSCI has already been cleared, as part of its specific
874 * processing. If that interrupt source has meanwhile been re-asserted,
875 * we mustn't clear it here (or in the top-level interrupt handler).
877 return is & (QM_PIRQ_EQCI | QM_PIRQ_EQRI | QM_PIRQ_MRI);
881 * remove some slowish-path stuff from the "fast path" and make sure it isn't
884 static noinline void clear_vdqcr(struct qman_portal *p, struct qman_fq *fq)
886 p->vdqcr_owned = NULL;
888 fq_clear(fq, QMAN_FQ_STATE_VDQCR);
890 wake_up(&affine_queue);
894 * The only states that would conflict with other things if they ran at the
895 * same time on the same cpu are:
897 * (i) setting/clearing vdqcr_owned, and
898 * (ii) clearing the NE (Not Empty) flag.
900 * Both are safe because:
902 * (i) this clearing can only occur after qman_set_vdq() has set the
903 * vdqcr_owned field (which it does before setting VDQCR), and
904 * qman_volatile_dequeue() blocks interrupts and preemption while this is
905 * done so that we can't interfere.
906 * (ii) the NE flag is only cleared after qman_retire_fq() has set it, and as
907 * with (i) that API prevents us from interfering until it's safe.
909 * The good thing is that qman_set_vdq() and qman_retire_fq() run far
910 * less frequently (ie. per-FQ) than __poll_portal_fast() does, so the net
911 * advantage comes from this function not having to "lock" anything at all.
913 * Note also that the callbacks are invoked at points which are safe against the
914 * above potential conflicts, but that this function itself is not re-entrant
915 * (this is because the function tracks one end of each FIFO in the portal and
916 * we do *not* want to lock that). So the consequence is that it is safe for
917 * user callbacks to call into any QMan API.
919 static inline unsigned int __poll_portal_fast(struct qman_portal *p,
920 unsigned int poll_limit)
922 const struct qm_dqrr_entry *dq;
924 enum qman_cb_dqrr_result res;
925 unsigned int limit = 0;
926 #if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
927 struct qm_dqrr_entry *shadow;
930 qm_dqrr_pvb_update(&p->p);
931 dq = qm_dqrr_current(&p->p);
934 #if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
935 /* If running on an LE system, the fields of the
936 * dequeue entry must be byte-swapped. Because the
937 * QMan HW will ignore writes, the DQRR entry is
938 * copied and the index is stored within the copy
940 shadow = &p->shadow_dqrr[DQRR_PTR2IDX(dq)];
943 shadow->fqid = be32_to_cpu(shadow->fqid);
944 shadow->contextB = be32_to_cpu(shadow->contextB);
945 shadow->seqnum = be16_to_cpu(shadow->seqnum);
946 hw_fd_to_cpu(&shadow->fd);
949 if (dq->stat & QM_DQRR_STAT_UNSCHEDULED) {
951 * VDQCR: don't trust context_b as the FQ may have
952 * been configured for h/w consumption and we're
953 * draining it post-retirement.
957 * We only set QMAN_FQ_STATE_NE when retiring, so we
958 * only need to check for clearing it when doing
959 * volatile dequeues. It's one less thing to check
960 * in the critical path (SDQCR).
962 if (dq->stat & QM_DQRR_STAT_FQ_EMPTY)
963 fq_clear(fq, QMAN_FQ_STATE_NE);
965 * This is duplicated from the SDQCR code, but we
966 * have stuff to do before *and* after this callback,
967 * and we don't want multiple if()s in the critical
970 res = fq->cb.dqrr(p, fq, dq);
971 if (res == qman_cb_dqrr_stop)
973 /* Check for VDQCR completion */
974 if (dq->stat & QM_DQRR_STAT_DQCR_EXPIRED)
977 /* SDQCR: context_b points to the FQ */
978 #ifdef CONFIG_FSL_QMAN_FQ_LOOKUP
979 fq = get_fq_table_entry(dq->contextB);
981 fq = (void *)(uintptr_t)dq->contextB;
983 /* Now let the callback do its stuff */
984 res = fq->cb.dqrr(p, fq, dq);
986 * The callback can request that we exit without
987 * consuming this entry or advancing;
989 if (res == qman_cb_dqrr_stop)
992 /* Interpret 'dq' from a driver perspective. */
994 * Parking isn't possible unless HELDACTIVE was set. NB,
995 * FORCEELIGIBLE implies HELDACTIVE, so we only need to
996 * check for HELDACTIVE to cover both.
998 DPAA_ASSERT((dq->stat & QM_DQRR_STAT_FQ_HELDACTIVE) ||
999 (res != qman_cb_dqrr_park));
1000 /* just means "skip it, I'll consume it myself later on" */
1001 if (res != qman_cb_dqrr_defer)
1002 qm_dqrr_cdc_consume_1ptr(&p->p, dq,
1003 res == qman_cb_dqrr_park);
1005 qm_dqrr_next(&p->p);
1007 * Entry processed and consumed, increment our counter. The
1008 * callback can request that we exit after consuming the
1009 * entry, and we also exit if we reach our processing limit,
1010 * so loop back only if neither of these conditions is met.
1012 } while (++limit < poll_limit && res != qman_cb_dqrr_consume_stop);
1017 u16 qman_affine_channel(int cpu)
1020 struct qman_portal *portal = get_affine_portal();
1022 cpu = portal->config->cpu;
1024 DPAA_BUG_ON(!CPU_ISSET(cpu, &affine_mask));
1025 return affine_channels[cpu];
1028 struct qm_dqrr_entry *qman_dequeue(struct qman_fq *fq)
1030 struct qman_portal *p = get_affine_portal();
1031 const struct qm_dqrr_entry *dq;
1032 #if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
1033 struct qm_dqrr_entry *shadow;
1036 qm_dqrr_pvb_update(&p->p);
1037 dq = qm_dqrr_current(&p->p);
1041 if (!(dq->stat & QM_DQRR_STAT_FD_VALID)) {
1042 /* Invalid DQRR - put the portal and consume the DQRR.
1043 * Return NULL to user as no packet is seen.
1045 qman_dqrr_consume(fq, (struct qm_dqrr_entry *)dq);
1049 #if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
1050 shadow = &p->shadow_dqrr[DQRR_PTR2IDX(dq)];
1053 shadow->fqid = be32_to_cpu(shadow->fqid);
1054 shadow->contextB = be32_to_cpu(shadow->contextB);
1055 shadow->seqnum = be16_to_cpu(shadow->seqnum);
1056 hw_fd_to_cpu(&shadow->fd);
1059 if (dq->stat & QM_DQRR_STAT_FQ_EMPTY)
1060 fq_clear(fq, QMAN_FQ_STATE_NE);
1062 return (struct qm_dqrr_entry *)dq;
1065 void qman_dqrr_consume(struct qman_fq *fq,
1066 struct qm_dqrr_entry *dq)
1068 struct qman_portal *p = get_affine_portal();
1070 if (dq->stat & QM_DQRR_STAT_DQCR_EXPIRED)
1073 qm_dqrr_cdc_consume_1ptr(&p->p, dq, 0);
1074 qm_dqrr_next(&p->p);
1077 int qman_poll_dqrr(unsigned int limit)
1079 struct qman_portal *p = get_affine_portal();
1082 ret = __poll_portal_fast(p, limit);
1086 void qman_poll(void)
1088 struct qman_portal *p = get_affine_portal();
1090 if ((~p->irq_sources) & QM_PIRQ_SLOW) {
1091 if (!(p->slowpoll--)) {
1092 u32 is = qm_isr_status_read(&p->p) & ~p->irq_sources;
1093 u32 active = __poll_portal_slow(p, is);
1096 qm_isr_status_clear(&p->p, active);
1097 p->slowpoll = SLOW_POLL_BUSY;
1099 p->slowpoll = SLOW_POLL_IDLE;
1102 if ((~p->irq_sources) & QM_PIRQ_DQRI)
1103 __poll_portal_fast(p, FSL_QMAN_POLL_LIMIT);
1106 void qman_stop_dequeues(void)
1108 struct qman_portal *p = get_affine_portal();
1110 qman_stop_dequeues_ex(p);
1113 void qman_start_dequeues(void)
1115 struct qman_portal *p = get_affine_portal();
1117 DPAA_ASSERT(p->dqrr_disable_ref > 0);
1118 if (!(--p->dqrr_disable_ref))
1119 qm_dqrr_set_maxfill(&p->p, DQRR_MAXFILL);
1122 void qman_static_dequeue_add(u32 pools)
1124 struct qman_portal *p = get_affine_portal();
1126 pools &= p->config->pools;
1128 qm_dqrr_sdqcr_set(&p->p, p->sdqcr);
1131 void qman_static_dequeue_del(u32 pools)
1133 struct qman_portal *p = get_affine_portal();
1135 pools &= p->config->pools;
1137 qm_dqrr_sdqcr_set(&p->p, p->sdqcr);
1140 u32 qman_static_dequeue_get(void)
1142 struct qman_portal *p = get_affine_portal();
1146 void qman_dca(struct qm_dqrr_entry *dq, int park_request)
1148 struct qman_portal *p = get_affine_portal();
1150 qm_dqrr_cdc_consume_1ptr(&p->p, dq, park_request);
1153 /* Frame queue API */
1154 static const char *mcr_result_str(u8 result)
1157 case QM_MCR_RESULT_NULL:
1158 return "QM_MCR_RESULT_NULL";
1159 case QM_MCR_RESULT_OK:
1160 return "QM_MCR_RESULT_OK";
1161 case QM_MCR_RESULT_ERR_FQID:
1162 return "QM_MCR_RESULT_ERR_FQID";
1163 case QM_MCR_RESULT_ERR_FQSTATE:
1164 return "QM_MCR_RESULT_ERR_FQSTATE";
1165 case QM_MCR_RESULT_ERR_NOTEMPTY:
1166 return "QM_MCR_RESULT_ERR_NOTEMPTY";
1167 case QM_MCR_RESULT_PENDING:
1168 return "QM_MCR_RESULT_PENDING";
1169 case QM_MCR_RESULT_ERR_BADCOMMAND:
1170 return "QM_MCR_RESULT_ERR_BADCOMMAND";
1172 return "<unknown MCR result>";
1175 int qman_create_fq(u32 fqid, u32 flags, struct qman_fq *fq)
1178 struct qm_mcr_queryfq_np np;
1179 struct qm_mc_command *mcc;
1180 struct qm_mc_result *mcr;
1181 struct qman_portal *p;
1183 if (flags & QMAN_FQ_FLAG_DYNAMIC_FQID) {
1184 int ret = qman_alloc_fqid(&fqid);
1189 spin_lock_init(&fq->fqlock);
1192 fq->state = qman_fq_state_oos;
1193 fq->cgr_groupid = 0;
1194 #ifdef CONFIG_FSL_QMAN_FQ_LOOKUP
1195 if (unlikely(find_empty_fq_table_entry(&fq->key, fq))) {
1196 pr_info("Find empty table entry failed\n");
1200 if (!(flags & QMAN_FQ_FLAG_AS_IS) || (flags & QMAN_FQ_FLAG_NO_MODIFY))
1202 /* Everything else is AS_IS support */
1203 p = get_affine_portal();
1204 mcc = qm_mc_start(&p->p);
1205 mcc->queryfq.fqid = cpu_to_be32(fqid);
1206 qm_mc_commit(&p->p, QM_MCC_VERB_QUERYFQ);
1207 while (!(mcr = qm_mc_result(&p->p)))
1209 DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCC_VERB_QUERYFQ);
1210 if (mcr->result != QM_MCR_RESULT_OK) {
1211 pr_err("QUERYFQ failed: %s\n", mcr_result_str(mcr->result));
1214 fqd = mcr->queryfq.fqd;
1215 hw_fqd_to_cpu(&fqd);
1216 mcc = qm_mc_start(&p->p);
1217 mcc->queryfq_np.fqid = cpu_to_be32(fqid);
1218 qm_mc_commit(&p->p, QM_MCC_VERB_QUERYFQ_NP);
1219 while (!(mcr = qm_mc_result(&p->p)))
1221 DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCC_VERB_QUERYFQ_NP);
1222 if (mcr->result != QM_MCR_RESULT_OK) {
1223 pr_err("QUERYFQ_NP failed: %s\n", mcr_result_str(mcr->result));
1226 np = mcr->queryfq_np;
1227 /* Phew, have queryfq and queryfq_np results, stitch together
1228 * the FQ object from those.
1230 fq->cgr_groupid = fqd.cgid;
1231 switch (np.state & QM_MCR_NP_STATE_MASK) {
1232 case QM_MCR_NP_STATE_OOS:
1234 case QM_MCR_NP_STATE_RETIRED:
1235 fq->state = qman_fq_state_retired;
1237 fq_set(fq, QMAN_FQ_STATE_NE);
1239 case QM_MCR_NP_STATE_TEN_SCHED:
1240 case QM_MCR_NP_STATE_TRU_SCHED:
1241 case QM_MCR_NP_STATE_ACTIVE:
1242 fq->state = qman_fq_state_sched;
1243 if (np.state & QM_MCR_NP_STATE_R)
1244 fq_set(fq, QMAN_FQ_STATE_CHANGING);
1246 case QM_MCR_NP_STATE_PARKED:
1247 fq->state = qman_fq_state_parked;
1250 DPAA_ASSERT(NULL == "invalid FQ state");
1252 if (fqd.fq_ctrl & QM_FQCTRL_CGE)
1253 fq->state |= QMAN_FQ_STATE_CGR_EN;
1256 if (flags & QMAN_FQ_FLAG_DYNAMIC_FQID)
1257 qman_release_fqid(fqid);
1261 void qman_destroy_fq(struct qman_fq *fq, u32 flags __maybe_unused)
1264 * We don't need to lock the FQ as it is a pre-condition that the FQ be
1265 * quiesced. Instead, run some checks.
1267 switch (fq->state) {
1268 case qman_fq_state_parked:
1269 DPAA_ASSERT(flags & QMAN_FQ_DESTROY_PARKED);
1270 case qman_fq_state_oos:
1271 if (fq_isset(fq, QMAN_FQ_FLAG_DYNAMIC_FQID))
1272 qman_release_fqid(fq->fqid);
1273 #ifdef CONFIG_FSL_QMAN_FQ_LOOKUP
1274 clear_fq_table_entry(fq->key);
1280 DPAA_ASSERT(NULL == "qman_free_fq() on unquiesced FQ!");
1283 u32 qman_fq_fqid(struct qman_fq *fq)
1288 void qman_fq_state(struct qman_fq *fq, enum qman_fq_state *state, u32 *flags)
1296 int qman_init_fq(struct qman_fq *fq, u32 flags, struct qm_mcc_initfq *opts)
1298 struct qm_mc_command *mcc;
1299 struct qm_mc_result *mcr;
1300 struct qman_portal *p;
1302 u8 res, myverb = (flags & QMAN_INITFQ_FLAG_SCHED) ?
1303 QM_MCC_VERB_INITFQ_SCHED : QM_MCC_VERB_INITFQ_PARKED;
1305 if ((fq->state != qman_fq_state_oos) &&
1306 (fq->state != qman_fq_state_parked))
1308 #ifdef RTE_LIBRTE_DPAA_HWDEBUG
1309 if (unlikely(fq_isset(fq, QMAN_FQ_FLAG_NO_MODIFY)))
1312 if (opts && (opts->we_mask & QM_INITFQ_WE_OAC)) {
1313 /* And can't be set at the same time as TDTHRESH */
1314 if (opts->we_mask & QM_INITFQ_WE_TDTHRESH)
1317 /* Issue an INITFQ_[PARKED|SCHED] management command */
1318 p = get_affine_portal();
1320 if (unlikely((fq_isset(fq, QMAN_FQ_STATE_CHANGING)) ||
1321 ((fq->state != qman_fq_state_oos) &&
1322 (fq->state != qman_fq_state_parked)))) {
1326 mcc = qm_mc_start(&p->p);
1328 mcc->initfq = *opts;
1329 mcc->initfq.fqid = cpu_to_be32(fq->fqid);
1330 mcc->initfq.count = 0;
1332 * If the FQ does *not* have the TO_DCPORTAL flag, context_b is set as a
1333 * demux pointer. Otherwise, the caller-provided value is allowed to
1334 * stand, don't overwrite it.
1336 if (fq_isclear(fq, QMAN_FQ_FLAG_TO_DCPORTAL)) {
1339 mcc->initfq.we_mask |= QM_INITFQ_WE_CONTEXTB;
1340 #ifdef CONFIG_FSL_QMAN_FQ_LOOKUP
1341 mcc->initfq.fqd.context_b = fq->key;
1343 mcc->initfq.fqd.context_b = (u32)(uintptr_t)fq;
1346 * and the physical address - NB, if the user wasn't trying to
1347 * set CONTEXTA, clear the stashing settings.
1349 if (!(mcc->initfq.we_mask & QM_INITFQ_WE_CONTEXTA)) {
1350 mcc->initfq.we_mask |= QM_INITFQ_WE_CONTEXTA;
1351 memset(&mcc->initfq.fqd.context_a, 0,
1352 sizeof(mcc->initfq.fqd.context_a));
1354 phys_fq = rte_mem_virt2phy(fq);
1355 qm_fqd_stashing_set64(&mcc->initfq.fqd, phys_fq);
1358 if (flags & QMAN_INITFQ_FLAG_LOCAL) {
1359 mcc->initfq.fqd.dest.channel = p->config->channel;
1360 if (!(mcc->initfq.we_mask & QM_INITFQ_WE_DESTWQ)) {
1361 mcc->initfq.we_mask |= QM_INITFQ_WE_DESTWQ;
1362 mcc->initfq.fqd.dest.wq = 4;
1365 mcc->initfq.we_mask = cpu_to_be16(mcc->initfq.we_mask);
1366 cpu_to_hw_fqd(&mcc->initfq.fqd);
1367 qm_mc_commit(&p->p, myverb);
1368 while (!(mcr = qm_mc_result(&p->p)))
1370 DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == myverb);
1372 if (res != QM_MCR_RESULT_OK) {
1377 if (opts->we_mask & QM_INITFQ_WE_FQCTRL) {
1378 if (opts->fqd.fq_ctrl & QM_FQCTRL_CGE)
1379 fq_set(fq, QMAN_FQ_STATE_CGR_EN);
1381 fq_clear(fq, QMAN_FQ_STATE_CGR_EN);
1383 if (opts->we_mask & QM_INITFQ_WE_CGID)
1384 fq->cgr_groupid = opts->fqd.cgid;
1386 fq->state = (flags & QMAN_INITFQ_FLAG_SCHED) ?
1387 qman_fq_state_sched : qman_fq_state_parked;
1392 int qman_schedule_fq(struct qman_fq *fq)
1394 struct qm_mc_command *mcc;
1395 struct qm_mc_result *mcr;
1396 struct qman_portal *p;
1401 if (fq->state != qman_fq_state_parked)
1403 #ifdef RTE_LIBRTE_DPAA_HWDEBUG
1404 if (unlikely(fq_isset(fq, QMAN_FQ_FLAG_NO_MODIFY)))
1407 /* Issue a ALTERFQ_SCHED management command */
1408 p = get_affine_portal();
1411 if (unlikely((fq_isset(fq, QMAN_FQ_STATE_CHANGING)) ||
1412 (fq->state != qman_fq_state_parked))) {
1416 mcc = qm_mc_start(&p->p);
1417 mcc->alterfq.fqid = cpu_to_be32(fq->fqid);
1418 qm_mc_commit(&p->p, QM_MCC_VERB_ALTER_SCHED);
1419 while (!(mcr = qm_mc_result(&p->p)))
1421 DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCR_VERB_ALTER_SCHED);
1423 if (res != QM_MCR_RESULT_OK) {
1427 fq->state = qman_fq_state_sched;
1434 int qman_retire_fq(struct qman_fq *fq, u32 *flags)
1436 struct qm_mc_command *mcc;
1437 struct qm_mc_result *mcr;
1438 struct qman_portal *p;
1443 if ((fq->state != qman_fq_state_parked) &&
1444 (fq->state != qman_fq_state_sched))
1446 #ifdef RTE_LIBRTE_DPAA_HWDEBUG
1447 if (unlikely(fq_isset(fq, QMAN_FQ_FLAG_NO_MODIFY)))
1450 p = get_affine_portal();
1453 if (unlikely((fq_isset(fq, QMAN_FQ_STATE_CHANGING)) ||
1454 (fq->state == qman_fq_state_retired) ||
1455 (fq->state == qman_fq_state_oos))) {
1459 rval = table_push_fq(p, fq);
1462 mcc = qm_mc_start(&p->p);
1463 mcc->alterfq.fqid = cpu_to_be32(fq->fqid);
1464 qm_mc_commit(&p->p, QM_MCC_VERB_ALTER_RETIRE);
1465 while (!(mcr = qm_mc_result(&p->p)))
1467 DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCR_VERB_ALTER_RETIRE);
1470 * "Elegant" would be to treat OK/PENDING the same way; set CHANGING,
1471 * and defer the flags until FQRNI or FQRN (respectively) show up. But
1472 * "Friendly" is to process OK immediately, and not set CHANGING. We do
1473 * friendly, otherwise the caller doesn't necessarily have a fully
1474 * "retired" FQ on return even if the retirement was immediate. However
1475 * this does mean some code duplication between here and
1476 * fq_state_change().
1478 if (likely(res == QM_MCR_RESULT_OK)) {
1480 /* Process 'fq' right away, we'll ignore FQRNI */
1481 if (mcr->alterfq.fqs & QM_MCR_FQS_NOTEMPTY)
1482 fq_set(fq, QMAN_FQ_STATE_NE);
1483 if (mcr->alterfq.fqs & QM_MCR_FQS_ORLPRESENT)
1484 fq_set(fq, QMAN_FQ_STATE_ORL);
1486 table_del_fq(p, fq);
1489 fq->state = qman_fq_state_retired;
1492 * Another issue with supporting "immediate" retirement
1493 * is that we're forced to drop FQRNIs, because by the
1494 * time they're seen it may already be "too late" (the
1495 * fq may have been OOS'd and free()'d already). But if
1496 * the upper layer wants a callback whether it's
1497 * immediate or not, we have to fake an "MR" entry to
1498 * look like an FQRNI...
1500 struct qm_mr_entry msg;
1502 msg.verb = QM_MR_VERB_FQRNI;
1503 msg.fq.fqs = mcr->alterfq.fqs;
1504 msg.fq.fqid = fq->fqid;
1505 #ifdef CONFIG_FSL_QMAN_FQ_LOOKUP
1506 msg.fq.contextB = fq->key;
1508 msg.fq.contextB = (u32)(uintptr_t)fq;
1510 fq->cb.fqs(p, fq, &msg);
1512 } else if (res == QM_MCR_RESULT_PENDING) {
1514 fq_set(fq, QMAN_FQ_STATE_CHANGING);
1517 table_del_fq(p, fq);
1524 int qman_oos_fq(struct qman_fq *fq)
1526 struct qm_mc_command *mcc;
1527 struct qm_mc_result *mcr;
1528 struct qman_portal *p;
1533 if (fq->state != qman_fq_state_retired)
1535 #ifdef RTE_LIBRTE_DPAA_HWDEBUG
1536 if (unlikely(fq_isset(fq, QMAN_FQ_FLAG_NO_MODIFY)))
1539 p = get_affine_portal();
1541 if (unlikely((fq_isset(fq, QMAN_FQ_STATE_BLOCKOOS)) ||
1542 (fq->state != qman_fq_state_retired))) {
1546 mcc = qm_mc_start(&p->p);
1547 mcc->alterfq.fqid = cpu_to_be32(fq->fqid);
1548 qm_mc_commit(&p->p, QM_MCC_VERB_ALTER_OOS);
1549 while (!(mcr = qm_mc_result(&p->p)))
1551 DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCR_VERB_ALTER_OOS);
1553 if (res != QM_MCR_RESULT_OK) {
1557 fq->state = qman_fq_state_oos;
1563 int qman_fq_flow_control(struct qman_fq *fq, int xon)
1565 struct qm_mc_command *mcc;
1566 struct qm_mc_result *mcr;
1567 struct qman_portal *p;
1573 if ((fq->state == qman_fq_state_oos) ||
1574 (fq->state == qman_fq_state_retired) ||
1575 (fq->state == qman_fq_state_parked))
1578 #ifdef RTE_LIBRTE_DPAA_HWDEBUG
1579 if (unlikely(fq_isset(fq, QMAN_FQ_FLAG_NO_MODIFY)))
1582 /* Issue a ALTER_FQXON or ALTER_FQXOFF management command */
1583 p = get_affine_portal();
1585 if (unlikely((fq_isset(fq, QMAN_FQ_STATE_CHANGING)) ||
1586 (fq->state == qman_fq_state_parked) ||
1587 (fq->state == qman_fq_state_oos) ||
1588 (fq->state == qman_fq_state_retired))) {
1592 mcc = qm_mc_start(&p->p);
1593 mcc->alterfq.fqid = fq->fqid;
1594 mcc->alterfq.count = 0;
1595 myverb = xon ? QM_MCC_VERB_ALTER_FQXON : QM_MCC_VERB_ALTER_FQXOFF;
1597 qm_mc_commit(&p->p, myverb);
1598 while (!(mcr = qm_mc_result(&p->p)))
1600 DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == myverb);
1603 if (res != QM_MCR_RESULT_OK) {
1612 int qman_query_fq(struct qman_fq *fq, struct qm_fqd *fqd)
1614 struct qm_mc_command *mcc;
1615 struct qm_mc_result *mcr;
1616 struct qman_portal *p = get_affine_portal();
1620 mcc = qm_mc_start(&p->p);
1621 mcc->queryfq.fqid = cpu_to_be32(fq->fqid);
1622 qm_mc_commit(&p->p, QM_MCC_VERB_QUERYFQ);
1623 while (!(mcr = qm_mc_result(&p->p)))
1625 DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCR_VERB_QUERYFQ);
1627 if (res == QM_MCR_RESULT_OK)
1628 *fqd = mcr->queryfq.fqd;
1630 if (res != QM_MCR_RESULT_OK)
1635 int qman_query_fq_has_pkts(struct qman_fq *fq)
1637 struct qm_mc_command *mcc;
1638 struct qm_mc_result *mcr;
1639 struct qman_portal *p = get_affine_portal();
1644 mcc = qm_mc_start(&p->p);
1645 mcc->queryfq.fqid = cpu_to_be32(fq->fqid);
1646 qm_mc_commit(&p->p, QM_MCC_VERB_QUERYFQ_NP);
1647 while (!(mcr = qm_mc_result(&p->p)))
1650 if (res == QM_MCR_RESULT_OK)
1651 ret = !!mcr->queryfq_np.frm_cnt;
1655 int qman_query_fq_np(struct qman_fq *fq, struct qm_mcr_queryfq_np *np)
1657 struct qm_mc_command *mcc;
1658 struct qm_mc_result *mcr;
1659 struct qman_portal *p = get_affine_portal();
1663 mcc = qm_mc_start(&p->p);
1664 mcc->queryfq.fqid = cpu_to_be32(fq->fqid);
1665 qm_mc_commit(&p->p, QM_MCC_VERB_QUERYFQ_NP);
1666 while (!(mcr = qm_mc_result(&p->p)))
1668 DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCR_VERB_QUERYFQ_NP);
1670 if (res == QM_MCR_RESULT_OK) {
1671 *np = mcr->queryfq_np;
1672 np->fqd_link = be24_to_cpu(np->fqd_link);
1673 np->odp_seq = be16_to_cpu(np->odp_seq);
1674 np->orp_nesn = be16_to_cpu(np->orp_nesn);
1675 np->orp_ea_hseq = be16_to_cpu(np->orp_ea_hseq);
1676 np->orp_ea_tseq = be16_to_cpu(np->orp_ea_tseq);
1677 np->orp_ea_hptr = be24_to_cpu(np->orp_ea_hptr);
1678 np->orp_ea_tptr = be24_to_cpu(np->orp_ea_tptr);
1679 np->pfdr_hptr = be24_to_cpu(np->pfdr_hptr);
1680 np->pfdr_tptr = be24_to_cpu(np->pfdr_tptr);
1681 np->ics_surp = be16_to_cpu(np->ics_surp);
1682 np->byte_cnt = be32_to_cpu(np->byte_cnt);
1683 np->frm_cnt = be24_to_cpu(np->frm_cnt);
1684 np->ra1_sfdr = be16_to_cpu(np->ra1_sfdr);
1685 np->ra2_sfdr = be16_to_cpu(np->ra2_sfdr);
1686 np->od1_sfdr = be16_to_cpu(np->od1_sfdr);
1687 np->od2_sfdr = be16_to_cpu(np->od2_sfdr);
1688 np->od3_sfdr = be16_to_cpu(np->od3_sfdr);
1690 if (res == QM_MCR_RESULT_ERR_FQID)
1692 else if (res != QM_MCR_RESULT_OK)
1697 int qman_query_wq(u8 query_dedicated, struct qm_mcr_querywq *wq)
1699 struct qm_mc_command *mcc;
1700 struct qm_mc_result *mcr;
1701 struct qman_portal *p = get_affine_portal();
1705 myverb = (query_dedicated) ? QM_MCR_VERB_QUERYWQ_DEDICATED :
1706 QM_MCR_VERB_QUERYWQ;
1707 mcc = qm_mc_start(&p->p);
1708 mcc->querywq.channel.id = cpu_to_be16(wq->channel.id);
1709 qm_mc_commit(&p->p, myverb);
1710 while (!(mcr = qm_mc_result(&p->p)))
1712 DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == myverb);
1714 if (res == QM_MCR_RESULT_OK) {
1717 wq->channel.id = be16_to_cpu(mcr->querywq.channel.id);
1718 array_len = ARRAY_SIZE(mcr->querywq.wq_len);
1719 for (i = 0; i < array_len; i++)
1720 wq->wq_len[i] = be32_to_cpu(mcr->querywq.wq_len[i]);
1722 if (res != QM_MCR_RESULT_OK) {
1723 pr_err("QUERYWQ failed: %s\n", mcr_result_str(res));
1729 int qman_testwrite_cgr(struct qman_cgr *cgr, u64 i_bcnt,
1730 struct qm_mcr_cgrtestwrite *result)
1732 struct qm_mc_command *mcc;
1733 struct qm_mc_result *mcr;
1734 struct qman_portal *p = get_affine_portal();
1738 mcc = qm_mc_start(&p->p);
1739 mcc->cgrtestwrite.cgid = cgr->cgrid;
1740 mcc->cgrtestwrite.i_bcnt_hi = (u8)(i_bcnt >> 32);
1741 mcc->cgrtestwrite.i_bcnt_lo = (u32)i_bcnt;
1742 qm_mc_commit(&p->p, QM_MCC_VERB_CGRTESTWRITE);
1743 while (!(mcr = qm_mc_result(&p->p)))
1745 DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCC_VERB_CGRTESTWRITE);
1747 if (res == QM_MCR_RESULT_OK)
1748 *result = mcr->cgrtestwrite;
1749 if (res != QM_MCR_RESULT_OK) {
1750 pr_err("CGR TEST WRITE failed: %s\n", mcr_result_str(res));
1756 int qman_query_cgr(struct qman_cgr *cgr, struct qm_mcr_querycgr *cgrd)
1758 struct qm_mc_command *mcc;
1759 struct qm_mc_result *mcr;
1760 struct qman_portal *p = get_affine_portal();
1764 mcc = qm_mc_start(&p->p);
1765 mcc->querycgr.cgid = cgr->cgrid;
1766 qm_mc_commit(&p->p, QM_MCC_VERB_QUERYCGR);
1767 while (!(mcr = qm_mc_result(&p->p)))
1769 DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCC_VERB_QUERYCGR);
1771 if (res == QM_MCR_RESULT_OK)
1772 *cgrd = mcr->querycgr;
1773 if (res != QM_MCR_RESULT_OK) {
1774 pr_err("QUERY_CGR failed: %s\n", mcr_result_str(res));
1777 cgrd->cgr.wr_parm_g.word =
1778 be32_to_cpu(cgrd->cgr.wr_parm_g.word);
1779 cgrd->cgr.wr_parm_y.word =
1780 be32_to_cpu(cgrd->cgr.wr_parm_y.word);
1781 cgrd->cgr.wr_parm_r.word =
1782 be32_to_cpu(cgrd->cgr.wr_parm_r.word);
1783 cgrd->cgr.cscn_targ = be32_to_cpu(cgrd->cgr.cscn_targ);
1784 cgrd->cgr.__cs_thres = be16_to_cpu(cgrd->cgr.__cs_thres);
1785 for (i = 0; i < ARRAY_SIZE(cgrd->cscn_targ_swp); i++)
1786 cgrd->cscn_targ_swp[i] =
1787 be32_to_cpu(cgrd->cscn_targ_swp[i]);
1791 int qman_query_congestion(struct qm_mcr_querycongestion *congestion)
1793 struct qm_mc_result *mcr;
1794 struct qman_portal *p = get_affine_portal();
1799 qm_mc_commit(&p->p, QM_MCC_VERB_QUERYCONGESTION);
1800 while (!(mcr = qm_mc_result(&p->p)))
1802 DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) ==
1803 QM_MCC_VERB_QUERYCONGESTION);
1805 if (res == QM_MCR_RESULT_OK)
1806 *congestion = mcr->querycongestion;
1807 if (res != QM_MCR_RESULT_OK) {
1808 pr_err("QUERY_CONGESTION failed: %s\n", mcr_result_str(res));
1811 for (i = 0; i < ARRAY_SIZE(congestion->state.state); i++)
1812 congestion->state.state[i] =
1813 be32_to_cpu(congestion->state.state[i]);
1817 int qman_set_vdq(struct qman_fq *fq, u16 num)
1819 struct qman_portal *p = get_affine_portal();
1823 vdqcr = QM_VDQCR_EXACT;
1824 vdqcr |= QM_VDQCR_NUMFRAMES_SET(num);
1826 if ((fq->state != qman_fq_state_parked) &&
1827 (fq->state != qman_fq_state_retired)) {
1831 if (fq_isset(fq, QMAN_FQ_STATE_VDQCR)) {
1835 vdqcr = (vdqcr & ~QM_VDQCR_FQID_MASK) | fq->fqid;
1837 if (!p->vdqcr_owned) {
1839 if (fq_isset(fq, QMAN_FQ_STATE_VDQCR))
1841 fq_set(fq, QMAN_FQ_STATE_VDQCR);
1843 p->vdqcr_owned = fq;
1848 qm_dqrr_vdqcr_set(&p->p, vdqcr);
1854 int qman_volatile_dequeue(struct qman_fq *fq, u32 flags __maybe_unused,
1857 struct qman_portal *p;
1860 if ((fq->state != qman_fq_state_parked) &&
1861 (fq->state != qman_fq_state_retired))
1863 if (vdqcr & QM_VDQCR_FQID_MASK)
1865 if (fq_isset(fq, QMAN_FQ_STATE_VDQCR))
1867 vdqcr = (vdqcr & ~QM_VDQCR_FQID_MASK) | fq->fqid;
1869 p = get_affine_portal();
1871 if (!p->vdqcr_owned) {
1873 if (fq_isset(fq, QMAN_FQ_STATE_VDQCR))
1875 fq_set(fq, QMAN_FQ_STATE_VDQCR);
1877 p->vdqcr_owned = fq;
1885 qm_dqrr_vdqcr_set(&p->p, vdqcr);
1889 static noinline void update_eqcr_ci(struct qman_portal *p, u8 avail)
1892 qm_eqcr_cce_prefetch(&p->p);
1894 qm_eqcr_cce_update(&p->p);
1897 int qman_eqcr_is_empty(void)
1899 struct qman_portal *p = get_affine_portal();
1902 update_eqcr_ci(p, 0);
1903 avail = qm_eqcr_get_fill(&p->p);
1904 return (avail == 0);
1907 void qman_set_dc_ern(qman_cb_dc_ern handler, int affine)
1910 struct qman_portal *p = get_affine_portal();
1912 p->cb_dc_ern = handler;
1914 cb_dc_ern = handler;
1917 static inline struct qm_eqcr_entry *try_p_eq_start(struct qman_portal *p,
1919 const struct qm_fd *fd,
1922 struct qm_eqcr_entry *eq;
1925 if (p->use_eqcr_ci_stashing) {
1927 * The stashing case is easy: only update if we need to, in
1928 * order to try to liberate ring entries.
1930 eq = qm_eqcr_start_stash(&p->p);
1933 * The non-stashing case is harder, need to prefetch ahead of
1936 avail = qm_eqcr_get_avail(&p->p);
1938 update_eqcr_ci(p, avail);
1939 eq = qm_eqcr_start_no_stash(&p->p);
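/* For DCA (discrete consumption acknowledgement) enqueues, the DQRR index to
 * acknowledge is evidently carried in bits 8 and up of 'flags', masked by
 * QM_EQCR_DCA_IDXMASK below.
 */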
1945 if (flags & QMAN_ENQUEUE_FLAG_DCA)
1946 eq->dca = QM_EQCR_DCA_ENABLE |
1947 ((flags & QMAN_ENQUEUE_FLAG_DCA_PARK) ?
1948 QM_EQCR_DCA_PARK : 0) |
1949 ((flags >> 8) & QM_EQCR_DCA_IDXMASK);
1950 eq->fqid = cpu_to_be32(fq->fqid);
1951 #ifdef CONFIG_FSL_QMAN_FQ_LOOKUP
1952 eq->tag = cpu_to_be32(fq->key);
1954 eq->tag = cpu_to_be32((u32)(uintptr_t)fq);
1957 cpu_to_hw_fd(&eq->fd);
1961 int qman_enqueue(struct qman_fq *fq, const struct qm_fd *fd, u32 flags)
1963 struct qman_portal *p = get_affine_portal();
1964 struct qm_eqcr_entry *eq;
1966 eq = try_p_eq_start(p, fq, fd, flags);
1969 /* Note: QM_EQCR_VERB_INTERRUPT == QMAN_ENQUEUE_FLAG_WAIT_SYNC */
1970 qm_eqcr_pvb_commit(&p->p, QM_EQCR_VERB_CMD_ENQUEUE |
1971 (flags & (QM_EQCR_VERB_COLOUR_MASK | QM_EQCR_VERB_INTERRUPT)));
1972 /* Factor the below out, it's used from qman_enqueue_orp() too */
1976 int qman_enqueue_multi(struct qman_fq *fq,
1977 const struct qm_fd *fd,
1980 struct qman_portal *p = get_affine_portal();
1981 struct qm_portal *portal = &p->p;
1983 register struct qm_eqcr *eqcr = &portal->eqcr;
1984 struct qm_eqcr_entry *eq = eqcr->cursor, *prev_eq;
1986 u8 i, diff, old_ci, sent = 0;
1988 /* Update the available entries if no entry is free */
1989 if (!eqcr->available) {
1991 eqcr->ci = qm_cl_in(EQCR_CI) & (QM_EQCR_SIZE - 1);
1992 diff = qm_cyc_diff(QM_EQCR_SIZE, old_ci, eqcr->ci);
1993 eqcr->available += diff;
1998 /* try to send as many frames as possible */
1999 while (eqcr->available && frames_to_send--) {
2000 eq->fqid = cpu_to_be32(fq->fqid);
2001 #ifdef CONFIG_FSL_QMAN_FQ_LOOKUP
2002 eq->tag = cpu_to_be32(fq->key);
2004 eq->tag = cpu_to_be32((u32)(uintptr_t)fq);
2006 eq->fd.opaque_addr = fd->opaque_addr;
2007 eq->fd.addr = cpu_to_be40(fd->addr);
2008 eq->fd.status = cpu_to_be32(fd->status);
2009 eq->fd.opaque = cpu_to_be32(fd->opaque);
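/* Advance to the next 64-byte EQCR entry; clearing the bit at (QM_EQCR_SIZE
 * << 6) relies on the ring base being aligned to the ring's size, so the
 * cursor wraps back to the ring base instead of running off the end.
 */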
2011 eq = (void *)((unsigned long)(eq + 1) &
2012 (~(unsigned long)(QM_EQCR_SIZE << 6)));
2019 /* In order for flushes to complete faster, all lines are recorded in
2023 for (i = 0; i < sent; i++) {
2024 eq->__dont_write_directly__verb =
2025 QM_EQCR_VERB_CMD_ENQUEUE | eqcr->vbit;
2027 eq = (void *)((unsigned long)(eq + 1) &
2028 (~(unsigned long)(QM_EQCR_SIZE << 6)));
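/* prev_eq tracks the cursor prior to the wrapping advance above; if stepping it
 * by one entry does not land on the wrapped cursor, the ring boundary was just
 * crossed, so the valid bit is toggled for the entries that follow.
 */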
2029 if (unlikely((prev_eq + 1) != eq))
2030 eqcr->vbit ^= QM_EQCR_VERB_VBIT;
2033 /* We need to flush all the lines but without load/store operations
2037 for (i = 0; i < sent; i++) {
2039 eq = (void *)((unsigned long)(eq + 1) &
2040 (~(unsigned long)(QM_EQCR_SIZE << 6)));
2042 /* Update cursor for the next call */
2047 int qman_enqueue_orp(struct qman_fq *fq, const struct qm_fd *fd, u32 flags,
2048 struct qman_fq *orp, u16 orp_seqnum)
2050 struct qman_portal *p = get_affine_portal();
2051 struct qm_eqcr_entry *eq;
2053 eq = try_p_eq_start(p, fq, fd, flags);
2056 /* Process ORP-specifics here */
2057 if (flags & QMAN_ENQUEUE_FLAG_NLIS)
2058 orp_seqnum |= QM_EQCR_SEQNUM_NLIS;
2060 orp_seqnum &= ~QM_EQCR_SEQNUM_NLIS;
2061 if (flags & QMAN_ENQUEUE_FLAG_NESN)
2062 orp_seqnum |= QM_EQCR_SEQNUM_NESN;
2064 /* No need to check for QMAN_ENQUEUE_FLAG_HOLE */
2065 orp_seqnum &= ~QM_EQCR_SEQNUM_NESN;
2067 eq->seqnum = cpu_to_be16(orp_seqnum);
2068 eq->orp = cpu_to_be32(orp->fqid);
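/* As the verb composition below shows, a HOLE or NESN-advance request is issued
 * as an ORP command without the ENQUEUE bit, i.e. no frame appears to be
 * enqueued in that case.
 */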
2069 /* Note: QM_EQCR_VERB_INTERRUPT == QMAN_ENQUEUE_FLAG_WAIT_SYNC */
2070 qm_eqcr_pvb_commit(&p->p, QM_EQCR_VERB_ORP |
2071 ((flags & (QMAN_ENQUEUE_FLAG_HOLE | QMAN_ENQUEUE_FLAG_NESN)) ?
2072 0 : QM_EQCR_VERB_CMD_ENQUEUE) |
2073 (flags & (QM_EQCR_VERB_COLOUR_MASK | QM_EQCR_VERB_INTERRUPT)));
2078 int qman_modify_cgr(struct qman_cgr *cgr, u32 flags,
2079 struct qm_mcc_initcgr *opts)
2081 struct qm_mc_command *mcc;
2082 struct qm_mc_result *mcr;
2083 struct qman_portal *p = get_affine_portal();
2086 u8 verb = QM_MCC_VERB_MODIFYCGR;
2088 mcc = qm_mc_start(&p->p);
2090 mcc->initcgr = *opts;
2091 mcc->initcgr.we_mask = cpu_to_be16(mcc->initcgr.we_mask);
2092 mcc->initcgr.cgr.wr_parm_g.word =
2093 cpu_to_be32(mcc->initcgr.cgr.wr_parm_g.word);
2094 mcc->initcgr.cgr.wr_parm_y.word =
2095 cpu_to_be32(mcc->initcgr.cgr.wr_parm_y.word);
2096 mcc->initcgr.cgr.wr_parm_r.word =
2097 cpu_to_be32(mcc->initcgr.cgr.wr_parm_r.word);
2098 mcc->initcgr.cgr.cscn_targ = cpu_to_be32(mcc->initcgr.cgr.cscn_targ);
2099 mcc->initcgr.cgr.__cs_thres = cpu_to_be16(mcc->initcgr.cgr.__cs_thres);
2101 mcc->initcgr.cgid = cgr->cgrid;
2102 if (flags & QMAN_CGR_FLAG_USE_INIT)
2103 verb = QM_MCC_VERB_INITCGR;
2104 qm_mc_commit(&p->p, verb);
2105 while (!(mcr = qm_mc_result(&p->p)))
2108 DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == verb);
2110 return (res == QM_MCR_RESULT_OK) ? 0 : -EIO;
2113 #define TARG_MASK(n) (0x80000000 >> (n->config->channel - \
2114 QM_CHANNEL_SWPORTAL0))
2115 #define TARG_DCP_MASK(n) (0x80000000 >> (10 + n))
2116 #define PORTAL_IDX(n) (n->config->channel - QM_CHANNEL_SWPORTAL0)
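/* The CSCN_TARG field is a bitmask in which bit 31 corresponds to software
 * portal 0 (hence the 0x80000000 >> portal-index above), and DCP portals appear
 * to start 10 bit positions further down. PORTAL_IDX() is the plain portal
 * index used with the QMan >= rev3.0 cscn_targ_upd_ctrl encoding.
 */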
int qman_create_cgr(struct qman_cgr *cgr, u32 flags,
		    struct qm_mcc_initcgr *opts)
{
	struct qm_mcr_querycgr cgr_state;
	struct qm_mcc_initcgr local_opts;
	int ret;
	struct qman_portal *p;

	/* We have to check that the provided CGRID is within the limits of the
	 * data-structures, for obvious reasons. However we'll let h/w take
	 * care of determining whether it's within the limits of what exists on
	 * the SoC.
	 */
	if (cgr->cgrid >= __CGR_NUM)
		return -EINVAL;

	p = get_affine_portal();

	memset(&local_opts, 0, sizeof(struct qm_mcc_initcgr));
	cgr->chan = p->config->channel;
	spin_lock(&p->cgr_lock);

	/* if no opts specified, just add it to the list */
	if (!opts)
		goto add_list;

	ret = qman_query_cgr(cgr, &cgr_state);
	if (ret)
		goto release_lock;
	if (opts)
		local_opts = *opts;
	if ((qman_ip_rev & 0xFF00) >= QMAN_REV30)
		local_opts.cgr.cscn_targ_upd_ctrl =
			QM_CGR_TARG_UDP_CTRL_WRITE_BIT | PORTAL_IDX(p);
	else
		/* Overwrite TARG */
		local_opts.cgr.cscn_targ = cgr_state.cgr.cscn_targ |
							TARG_MASK(p);
	local_opts.we_mask |= QM_CGR_WE_CSCN_TARG;

	/* send init if flags indicate so */
	if (opts && (flags & QMAN_CGR_FLAG_USE_INIT))
		ret = qman_modify_cgr(cgr, QMAN_CGR_FLAG_USE_INIT, &local_opts);
	else
		ret = qman_modify_cgr(cgr, 0, &local_opts);
	if (ret)
		goto release_lock;
add_list:
	list_add(&cgr->node, &p->cgr_cbs);

	/* Determine if newly added object requires its callback to be called */
	ret = qman_query_cgr(cgr, &cgr_state);
	if (ret) {
		/* we can't undo what has already been done, so proceed and
		 * return success, but flag the failure loudly in the log.
		 */
		pr_crit("CGR HW state partially modified\n");
		ret = 0;
		goto release_lock;
	}
	if (cgr->cb && cgr_state.cgr.cscn_en && qman_cgrs_get(&p->cgrs[1],
							      cgr->cgrid))
		cgr->cb(p, cgr, 1);
release_lock:
	spin_unlock(&p->cgr_lock);
	return ret;
}
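
/*
 * Illustrative usage sketch (not part of the driver): registering a CGR with
 * a congestion-state callback on the current portal. The callback, CGRID and
 * option values are assumptions for the example; QM_CGR_WE_CSCN_EN/cscn_en
 * enable congestion state change notifications so the callback can fire.
 *
 *	static void my_cscn_cb(struct qman_portal *qm, struct qman_cgr *c,
 *			       int congested)
 *	{
 *		pr_info("CGR %d congestion %s\n", c->cgrid,
 *			congested ? "entered" : "exited");
 *	}
 *
 *	static int setup_cgr(struct qman_cgr *cgr)
 *	{
 *		struct qm_mcc_initcgr opts;
 *
 *		memset(&opts, 0, sizeof(opts));
 *		opts.we_mask = QM_CGR_WE_CSCN_EN;
 *		opts.cgr.cscn_en = QM_CGR_EN;
 *		cgr->cgrid = 1;		// example CGRID, must be < __CGR_NUM
 *		cgr->cb = my_cscn_cb;
 *		return qman_create_cgr(cgr, QMAN_CGR_FLAG_USE_INIT, &opts);
 *	}
 */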
int qman_create_cgr_to_dcp(struct qman_cgr *cgr, u32 flags, u16 dcp_portal,
			   struct qm_mcc_initcgr *opts)
{
	struct qm_mcc_initcgr local_opts;
	struct qm_mcr_querycgr cgr_state;
	int ret;

	if ((qman_ip_rev & 0xFF00) < QMAN_REV30) {
		pr_warn("QMan version doesn't support CSCN => DCP portal\n");
		return -EINVAL;
	}
	/* We have to check that the provided CGRID is within the limits of the
	 * data-structures, for obvious reasons. However we'll let h/w take
	 * care of determining whether it's within the limits of what exists on
	 * the SoC.
	 */
	if (cgr->cgrid >= __CGR_NUM)
		return -EINVAL;

	ret = qman_query_cgr(cgr, &cgr_state);
	if (ret)
		return ret;

	memset(&local_opts, 0, sizeof(struct qm_mcc_initcgr));
	if (opts)
		local_opts = *opts;

	if ((qman_ip_rev & 0xFF00) >= QMAN_REV30)
		local_opts.cgr.cscn_targ_upd_ctrl =
				QM_CGR_TARG_UDP_CTRL_WRITE_BIT |
				QM_CGR_TARG_UDP_CTRL_DCP | dcp_portal;
	else
		local_opts.cgr.cscn_targ = cgr_state.cgr.cscn_targ |
					TARG_DCP_MASK(dcp_portal);
	local_opts.we_mask |= QM_CGR_WE_CSCN_TARG;

	/* send init if flags indicate so */
	if (opts && (flags & QMAN_CGR_FLAG_USE_INIT))
		ret = qman_modify_cgr(cgr, QMAN_CGR_FLAG_USE_INIT,
				      &local_opts);
	else
		ret = qman_modify_cgr(cgr, 0, &local_opts);

	return ret;
}
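
/*
 * Illustrative sketch (not part of the driver): directing a CGR's congestion
 * state change notifications at a DCP portal (e.g. FMan) rather than a
 * software portal. The qm_dc_portal_fman0 enumerator is assumed to be the
 * one exported by fsl_qman.h.
 *
 *	ret = qman_create_cgr_to_dcp(&cgr, QMAN_CGR_FLAG_USE_INIT,
 *				     qm_dc_portal_fman0, &opts);
 */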
int qman_delete_cgr(struct qman_cgr *cgr)
{
	struct qm_mcr_querycgr cgr_state;
	struct qm_mcc_initcgr local_opts;
	int ret = 0;
	struct qman_cgr *i;
	struct qman_portal *p = get_affine_portal();

	if (cgr->chan != p->config->channel) {
		pr_crit("Attempting to delete cgr from a different portal than"
			" it was created on: create 0x%x, delete 0x%x\n",
			cgr->chan, p->config->channel);
		ret = -EINVAL;
		goto put_portal;
	}
	memset(&local_opts, 0, sizeof(struct qm_mcc_initcgr));
	spin_lock(&p->cgr_lock);
	list_del(&cgr->node);
	/*
	 * If there are no other CGR objects for this CGRID in the list,
	 * update CSCN_TARG accordingly.
	 */
	list_for_each_entry(i, &p->cgr_cbs, node)
		if ((i->cgrid == cgr->cgrid) && i->cb)
			goto release_lock;
	ret = qman_query_cgr(cgr, &cgr_state);
	if (ret) {
		/* add back to the list */
		list_add(&cgr->node, &p->cgr_cbs);
		goto release_lock;
	}
	/* Overwrite TARG */
	local_opts.we_mask = QM_CGR_WE_CSCN_TARG;
	if ((qman_ip_rev & 0xFF00) >= QMAN_REV30)
		local_opts.cgr.cscn_targ_upd_ctrl = PORTAL_IDX(p);
	else
		local_opts.cgr.cscn_targ = cgr_state.cgr.cscn_targ &
							~(TARG_MASK(p));
	ret = qman_modify_cgr(cgr, 0, &local_opts);
	if (ret)
		/* add back to the list */
		list_add(&cgr->node, &p->cgr_cbs);
release_lock:
	spin_unlock(&p->cgr_lock);
put_portal:
	return ret;
}
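
/*
 * Recovery helper: force an FQ into the out-of-service (OOS) state whatever
 * state it is currently in. The sequence below mirrors the FQ state machine:
 * query the FQ state, retire it if it is still scheduled/parked/active
 * (draining the DQRR and MR, and issuing volatile dequeues if needed, until
 * the FQ and its ORL are empty), then issue the ALTER_OOS command.
 */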
int qman_shutdown_fq(u32 fqid)
{
	struct qman_portal *p;
	struct qm_portal *low_p;
	struct qm_mc_command *mcc;
	struct qm_mc_result *mcr;
	u8 state;
	int orl_empty, fq_empty, drain = 0;
	u32 result;
	u32 channel, wq;
	u16 dest_wq;

	p = get_affine_portal();
	low_p = &p->p;

	/* Determine the state of the FQID */
	mcc = qm_mc_start(low_p);
	mcc->queryfq_np.fqid = cpu_to_be32(fqid);
	qm_mc_commit(low_p, QM_MCC_VERB_QUERYFQ_NP);
	while (!(mcr = qm_mc_result(low_p)))
		cpu_relax();
	DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCR_VERB_QUERYFQ_NP);
	state = mcr->queryfq_np.state & QM_MCR_NP_STATE_MASK;
	if (state == QM_MCR_NP_STATE_OOS)
		return 0; /* Already OOS, no need to do any more checks */

	/* Query which channel the FQ is using */
	mcc = qm_mc_start(low_p);
	mcc->queryfq.fqid = cpu_to_be32(fqid);
	qm_mc_commit(low_p, QM_MCC_VERB_QUERYFQ);
	while (!(mcr = qm_mc_result(low_p)))
		cpu_relax();
	DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCR_VERB_QUERYFQ);

	/* Need to store these since the MCR gets reused */
	dest_wq = be16_to_cpu(mcr->queryfq.fqd.dest_wq);
	channel = dest_wq & 0x7;
	wq = dest_wq >> 3;

	switch (state) {
	case QM_MCR_NP_STATE_TEN_SCHED:
	case QM_MCR_NP_STATE_TRU_SCHED:
	case QM_MCR_NP_STATE_ACTIVE:
	case QM_MCR_NP_STATE_PARKED:
		orl_empty = 0;
		mcc = qm_mc_start(low_p);
		mcc->alterfq.fqid = cpu_to_be32(fqid);
		qm_mc_commit(low_p, QM_MCC_VERB_ALTER_RETIRE);
		while (!(mcr = qm_mc_result(low_p)))
			cpu_relax();
		DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) ==
			    QM_MCR_VERB_ALTER_RETIRE);
		result = mcr->result; /* Make a copy as we reuse MCR below */

		if (result == QM_MCR_RESULT_PENDING) {
			/* Need to wait for the FQRN in the message ring, which
			 * will only occur once the FQ has been drained. In
			 * order for the FQ to drain the portal needs to be set
			 * to dequeue from the channel the FQ is scheduled on.
			 */
			const struct qm_mr_entry *msg;
			const struct qm_dqrr_entry *dqrr = NULL;
			int found_fqrn = 0;
			__maybe_unused u16 dequeue_wq = 0;
			/* Flag that we need to drain the FQ */
			drain = 1;

			if (channel >= qm_channel_pool1 &&
			    channel < (u16)(qm_channel_pool1 + 15)) {
				/* Pool channel, enable the bit in the portal */
				dequeue_wq = (channel -
					      qm_channel_pool1 + 1) << 4 | wq;
			} else if (channel < qm_channel_pool1) {
				/* Dedicated channel */
				dequeue_wq = wq;
			} else {
				pr_info("Cannot recover FQ 0x%x,"
					" it is scheduled on channel 0x%x",
					fqid, channel);
				return -EBUSY;
			}
			/* Set the sdqcr to drain this channel */
			if (channel < qm_channel_pool1)
				qm_dqrr_sdqcr_set(low_p,
						  QM_SDQCR_TYPE_ACTIVE |
						  QM_SDQCR_CHANNELS_DEDICATED);
			else
				qm_dqrr_sdqcr_set(low_p,
						  QM_SDQCR_TYPE_ACTIVE |
						  QM_SDQCR_CHANNELS_POOL_CONV
						  (channel));
			while (!found_fqrn) {
				/* Keep draining DQRR while checking the MR */
				qm_dqrr_pvb_update(low_p);
				dqrr = qm_dqrr_current(low_p);
				while (dqrr) {
					qm_dqrr_cdc_consume_1ptr(
						low_p, dqrr, 0);
					qm_dqrr_pvb_update(low_p);
					qm_dqrr_next(low_p);
					dqrr = qm_dqrr_current(low_p);
				}
				/* Process message ring too */
				qm_mr_pvb_update(low_p);
				msg = qm_mr_current(low_p);
				while (msg) {
					if ((msg->verb &
					     QM_MR_VERB_TYPE_MASK)
					    == QM_MR_VERB_FQRN)
						found_fqrn = 1;
					qm_mr_next(low_p);
					qm_mr_cci_consume_to_current(low_p);
					qm_mr_pvb_update(low_p);
					msg = qm_mr_current(low_p);
				}
				cpu_relax();
			}
		}
		if (result != QM_MCR_RESULT_OK &&
		    result != QM_MCR_RESULT_PENDING) {
			/* error */
			pr_err("qman_retire_fq failed on FQ 0x%x,"
			       " result=0x%x\n", fqid, result);
			return -1;
		}
		if (!(mcr->alterfq.fqs & QM_MCR_FQS_ORLPRESENT)) {
			/* ORL had no entries, no need to wait until the
			 * ERNs come in.
			 */
			orl_empty = 1;
		}
		/* Retirement succeeded, check to see if FQ needs
		 * to be drained.
		 */
		if (drain || mcr->alterfq.fqs & QM_MCR_FQS_NOTEMPTY) {
			/* FQ is Not Empty, drain using volatile DQ commands */
			fq_empty = 0;
			do {
				const struct qm_dqrr_entry *dqrr = NULL;
				u32 vdqcr = fqid | QM_VDQCR_NUMFRAMES_SET(3);

				qm_dqrr_vdqcr_set(low_p, vdqcr);

				/* Wait for a dequeue to occur */
				while (dqrr == NULL) {
					qm_dqrr_pvb_update(low_p);
					dqrr = qm_dqrr_current(low_p);
					if (!dqrr)
						cpu_relax();
				}
				/* Process the dequeues, making sure to
				 * empty the ring completely.
				 */
				while (dqrr) {
					if (dqrr->fqid == fqid &&
					    dqrr->stat & QM_DQRR_STAT_FQ_EMPTY)
						fq_empty = 1;
					qm_dqrr_cdc_consume_1ptr(low_p,
								 dqrr, 0);
					qm_dqrr_pvb_update(low_p);
					qm_dqrr_next(low_p);
					dqrr = qm_dqrr_current(low_p);
				}
			} while (fq_empty == 0);
		}
		qm_dqrr_sdqcr_set(low_p, 0);

		/* Wait for the ORL to have been completely drained */
		while (orl_empty == 0) {
			const struct qm_mr_entry *msg;

			qm_mr_pvb_update(low_p);
			msg = qm_mr_current(low_p);
			while (msg) {
				if ((msg->verb & QM_MR_VERB_TYPE_MASK) ==
				    QM_MR_VERB_FQRL)
					orl_empty = 1;
				qm_mr_next(low_p);
				qm_mr_cci_consume_to_current(low_p);
				qm_mr_pvb_update(low_p);
				msg = qm_mr_current(low_p);
			}
			cpu_relax();
		}
		mcc = qm_mc_start(low_p);
		mcc->alterfq.fqid = cpu_to_be32(fqid);
		qm_mc_commit(low_p, QM_MCC_VERB_ALTER_OOS);
		while (!(mcr = qm_mc_result(low_p)))
			cpu_relax();
		DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) ==
			    QM_MCR_VERB_ALTER_OOS);
		if (mcr->result != QM_MCR_RESULT_OK) {
			pr_err(
			"OOS after drain failed on FQID 0x%x, result 0x%x\n",
			       fqid, mcr->result);
			return -1;
		}
		return 0;

	case QM_MCR_NP_STATE_RETIRED:
		/* Send OOS Command */
		mcc = qm_mc_start(low_p);
		mcc->alterfq.fqid = cpu_to_be32(fqid);
		qm_mc_commit(low_p, QM_MCC_VERB_ALTER_OOS);
		while (!(mcr = qm_mc_result(low_p)))
			cpu_relax();
		DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) ==
			    QM_MCR_VERB_ALTER_OOS);
		if (mcr->result) {
			pr_err("OOS Failed on FQID 0x%x\n", fqid);