1 /* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0)
3 * Copyright 2008-2016 Freescale Semiconductor Inc.
4 * Copyright 2017,2019 NXP
9 #include <rte_branch_prediction.h>
10 #include <rte_dpaa_bus.h>
11 #include <rte_eventdev.h>
12 #include <rte_byteorder.h>
14 #include <dpaa_bits.h>
16 /* Compilation constants */
17 #define DQRR_MAXFILL 15
18 #define EQCR_ITHRESH 4 /* if EQCR congests, interrupt threshold */
19 #define IRQNAME "QMan portal %d"
20 #define MAX_IRQNAME 16 /* big enough for "QMan portal %d" */
21 /* maximum number of DQRR entries to process in qman_poll() */
22 #define FSL_QMAN_POLL_LIMIT 8
24 /* Lock/unlock frame queues, subject to the "LOCKED" flag. This is about
25 * inter-processor locking only. Note, FQLOCK() is always called either under a
26 * local_irq_save() or from interrupt context - hence there's no need for irq
27 * protection (and indeed, attempting to nest irq-protection doesn't work, as
28 * the "irq en/disable" machinery isn't recursive...).
32 struct qman_fq *__fq478 = (fq); \
33 if (fq_isset(__fq478, QMAN_FQ_FLAG_LOCKED)) \
34 spin_lock(&__fq478->fqlock); \
36 #define FQUNLOCK(fq) \
38 struct qman_fq *__fq478 = (fq); \
39 if (fq_isset(__fq478, QMAN_FQ_FLAG_LOCKED)) \
40 spin_unlock(&__fq478->fqlock); \
43 static inline void fq_set(struct qman_fq *fq, u32 mask)
45 dpaa_set_bits(mask, &fq->flags);
48 static inline void fq_clear(struct qman_fq *fq, u32 mask)
50 dpaa_clear_bits(mask, &fq->flags);
53 static inline int fq_isset(struct qman_fq *fq, u32 mask)
55 return fq->flags & mask;
58 static inline int fq_isclear(struct qman_fq *fq, u32 mask)
60 return !(fq->flags & mask);
65 /* PORTAL_BITS_*** - dynamic, strictly internal */
67 /* interrupt sources processed by portal_isr(), configurable */
68 unsigned long irq_sources;
69 u32 use_eqcr_ci_stashing;
70 u32 slowpoll; /* only used when interrupts are off */
71 /* only 1 volatile dequeue at a time */
72 struct qman_fq *vdqcr_owned;
75 /* A portal-specific handler for DCP ERNs. If this is NULL, the global
76 * handler is called instead.
78 qman_cb_dc_ern cb_dc_ern;
79 /* When the cpu-affine portal is activated, this is non-NULL */
80 const struct qm_portal_config *config;
81 struct dpa_rbtree retire_table;
82 char irqname[MAX_IRQNAME];
83 /* 2-element array. cgrs[0] is mask, cgrs[1] is snapshot. */
84 struct qman_cgrs *cgrs;
85 /* linked-list of CSCN handlers. */
86 struct list_head cgr_cbs;
89 /* track if memory was allocated by the driver */
90 #if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
91 /* Keep a shadow copy of the DQRR on LE systems as the SW needs to
92 * do byte swaps of DQRR read-only memory. The first entry must be aligned
93 * to 2 ** 10 so that DQRR index calculations can be based on the shadow
94 * copy address (6 bits for the address shift + 4 bits for the DQRR size).
96 struct qm_dqrr_entry shadow_dqrr[QM_DQRR_SIZE]
97 __attribute__((aligned(1024)));
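/* Illustrative note (not from the original source): with 64-byte DQRR
 * entries (the 6-bit address shift) and a QM_DQRR_SIZE-entry ring (the 4
 * index bits), the 1024-byte alignment above lets a shadow index be derived
 * directly from a hardware entry pointer, e.g.
 *
 *	idx = ((uintptr_t)dq >> 6) & (QM_DQRR_SIZE - 1);
 *
 * which is the calculation the DQRR_PTR2IDX()-based shadow lookups later in
 * this file rely on (the exact macro definition lives in the portal headers
 * and is assumed here).
 */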
101 /* Global handler for DCP ERNs. Used when the portal receiving the message does
102 * not have a portal-specific handler.
104 static qman_cb_dc_ern cb_dc_ern;
106 static cpumask_t affine_mask;
107 static DEFINE_SPINLOCK(affine_mask_lock);
108 static u16 affine_channels[NR_CPUS];
109 static RTE_DEFINE_PER_LCORE(struct qman_portal, qman_affine_portal);
111 static inline struct qman_portal *get_affine_portal(void)
113 return &RTE_PER_LCORE(qman_affine_portal);
116 /* This gives a FQID->FQ lookup to cover the fact that we can't directly demux
117 * retirement notifications (the fact they are sometimes h/w-consumed means that
118 * contextB isn't always a s/w demux - and as we can't know which case it is
119 * when looking at the notification, we have to use the slow lookup for all of
120 * them). NB, it's possible to have multiple FQ objects refer to the same FQID
121 * (though at most one of them should be the consumer), so this table isn't for
122 * all FQs - FQs are added when retirement commands are issued, and removed when
123 * they complete, which also massively reduces the size of this table.
125 IMPLEMENT_DPAA_RBTREE(fqtree, struct qman_fq, node, fqid);
127 * This is what everything can wait on, even if it migrates to a different cpu
128 * to the one whose affine portal it is waiting on.
130 static DECLARE_WAIT_QUEUE_HEAD(affine_queue);
132 static inline int table_push_fq(struct qman_portal *p, struct qman_fq *fq)
134 int ret = fqtree_push(&p->retire_table, fq);
137 pr_err("ERROR: double FQ-retirement %d\n", fq->fqid);
141 static inline void table_del_fq(struct qman_portal *p, struct qman_fq *fq)
143 fqtree_del(&p->retire_table, fq);
146 static inline struct qman_fq *table_find_fq(struct qman_portal *p, u32 fqid)
148 return fqtree_find(&p->retire_table, fqid);
151 #ifdef CONFIG_FSL_QMAN_FQ_LOOKUP
152 static void **qman_fq_lookup_table;
153 static size_t qman_fq_lookup_table_size;
155 int qman_setup_fq_lookup_table(size_t num_entries)
158 /* Allocate 1 more entry since the first entry is not used */
159 qman_fq_lookup_table = vmalloc((num_entries * sizeof(void *)));
160 if (!qman_fq_lookup_table) {
161 pr_err("QMan: Could not allocate fq lookup table\n");
164 memset(qman_fq_lookup_table, 0, num_entries * sizeof(void *));
165 qman_fq_lookup_table_size = num_entries;
166 pr_debug("QMan: Allocated lookup table at %p, entry count %lu\n",
167 qman_fq_lookup_table,
168 (unsigned long)qman_fq_lookup_table_size);
172 void qman_set_fq_lookup_table(void **fq_table)
174 qman_fq_lookup_table = fq_table;
177 /* global structure that maintains fq object mapping */
178 static DEFINE_SPINLOCK(fq_hash_table_lock);
180 static int find_empty_fq_table_entry(u32 *entry, struct qman_fq *fq)
184 spin_lock(&fq_hash_table_lock);
185 /* Can't use index zero because it has a special meaning
186 * in the context_b field.
188 for (i = 1; i < qman_fq_lookup_table_size; i++) {
189 if (qman_fq_lookup_table[i] == NULL) {
191 qman_fq_lookup_table[i] = fq;
192 spin_unlock(&fq_hash_table_lock);
196 spin_unlock(&fq_hash_table_lock);
200 static void clear_fq_table_entry(u32 entry)
202 spin_lock(&fq_hash_table_lock);
203 DPAA_BUG_ON(entry >= qman_fq_lookup_table_size);
204 qman_fq_lookup_table[entry] = NULL;
205 spin_unlock(&fq_hash_table_lock);
208 static inline struct qman_fq *get_fq_table_entry(u32 entry)
210 DPAA_BUG_ON(entry >= qman_fq_lookup_table_size);
211 return qman_fq_lookup_table[entry];
215 static inline void cpu_to_hw_fqd(struct qm_fqd *fqd)
217 /* Byteswap the FQD to HW format */
218 fqd->fq_ctrl = cpu_to_be16(fqd->fq_ctrl);
219 fqd->dest_wq = cpu_to_be16(fqd->dest_wq);
220 fqd->ics_cred = cpu_to_be16(fqd->ics_cred);
221 fqd->context_b = cpu_to_be32(fqd->context_b);
222 fqd->context_a.opaque = cpu_to_be64(fqd->context_a.opaque);
223 fqd->opaque_td = cpu_to_be16(fqd->opaque_td);
226 static inline void hw_fqd_to_cpu(struct qm_fqd *fqd)
228 /* Byteswap the FQD to CPU format */
229 fqd->fq_ctrl = be16_to_cpu(fqd->fq_ctrl);
230 fqd->dest_wq = be16_to_cpu(fqd->dest_wq);
231 fqd->ics_cred = be16_to_cpu(fqd->ics_cred);
232 fqd->context_b = be32_to_cpu(fqd->context_b);
233 fqd->context_a.opaque = be64_to_cpu(fqd->context_a.opaque);
236 static inline void cpu_to_hw_fd(struct qm_fd *fd)
238 fd->addr = cpu_to_be40(fd->addr);
239 fd->status = cpu_to_be32(fd->status);
240 fd->opaque = cpu_to_be32(fd->opaque);
243 static inline void hw_fd_to_cpu(struct qm_fd *fd)
245 fd->addr = be40_to_cpu(fd->addr);
246 fd->status = be32_to_cpu(fd->status);
247 fd->opaque = be32_to_cpu(fd->opaque);
250 /* In the case that slow- and fast-path handling are both done by qman_poll()
251 * (ie. because there is no interrupt handling), we ought to balance how often
252 * we do the fast-path poll versus the slow-path poll. We'll use two decrementer
253 * sources, so we call the fast poll 'n' times before calling the slow poll
254 * once. The idle decrementer constant is used when the last slow-poll detected
255 * no work to do, and the busy decrementer constant when the last slow-poll had
258 #define SLOW_POLL_IDLE 1000
259 #define SLOW_POLL_BUSY 10
260 static u32 __poll_portal_slow(struct qman_portal *p, u32 is);
261 static inline unsigned int __poll_portal_fast(struct qman_portal *p,
262 unsigned int poll_limit);
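/* Illustrative usage sketch (not from the original source): with interrupts
 * masked out of p->irq_sources, the decrementer scheme above means a
 * run-to-completion caller only has to call qman_poll() from its main loop;
 * the slow path then runs once every SLOW_POLL_IDLE (or SLOW_POLL_BUSY)
 * iterations. do_other_work() is a hypothetical placeholder.
 *
 *	while (running) {
 *		qman_poll();
 *		do_other_work();
 *	}
 *
 * Callers that want an explicit DQRR budget can instead call
 * qman_poll_dqrr(FSL_QMAN_POLL_LIMIT) directly.
 */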
264 /* Portal interrupt handler */
265 static irqreturn_t portal_isr(__always_unused int irq, void *ptr)
267 struct qman_portal *p = ptr;
269 * The CSCI/CCSCI source is cleared inside __poll_portal_slow(), because
270 * it could race against a Query Congestion State command also given
271 * as part of the handling of this interrupt source. We mustn't
272 * clear it a second time in this top-level function.
274 u32 clear = QM_DQAVAIL_MASK | (p->irq_sources &
275 ~(QM_PIRQ_CSCI | QM_PIRQ_CCSCI));
276 u32 is = qm_isr_status_read(&p->p) & p->irq_sources;
277 /* DQRR-handling if it's interrupt-driven */
278 if (is & QM_PIRQ_DQRI)
279 __poll_portal_fast(p, FSL_QMAN_POLL_LIMIT);
280 /* Handling of anything else that's interrupt-driven */
281 clear |= __poll_portal_slow(p, is);
282 qm_isr_status_clear(&p->p, clear);
286 /* This inner version is used privately by qman_create_affine_portal(), as well
287 * as by the exported qman_stop_dequeues().
289 static inline void qman_stop_dequeues_ex(struct qman_portal *p)
291 if (!(p->dqrr_disable_ref++))
292 qm_dqrr_set_maxfill(&p->p, 0);
295 static int drain_mr_fqrni(struct qm_portal *p)
297 const struct qm_mr_entry *msg;
299 msg = qm_mr_current(p);
302 * if MR was full and h/w had other FQRNI entries to produce, we
303 * need to allow it time to produce those entries once the
304 * existing entries are consumed. A worst-case situation
305 * (fully-loaded system) means h/w sequencers may have to do 3-4
306 * other things before servicing the portal's MR pump, each of
307 * which (if slow) may take ~50 qman cycles (which is ~200
308 * processor cycles). So rounding up and then multiplying this
309 * worst-case estimate by a factor of 10, just to be
310 * ultra-paranoid, goes as high as 10,000 cycles. NB, we consume
311 * one entry at a time, so h/w has an opportunity to produce new
312 * entries well before the ring has been fully consumed, so
313 * we're being *really* paranoid here.
315 u64 now, then = mfatb();
319 } while ((then + 10000) > now);
320 msg = qm_mr_current(p);
324 if ((msg->ern.verb & QM_MR_VERB_TYPE_MASK) != QM_MR_VERB_FQRNI) {
325 /* We aren't draining anything but FQRNIs */
326 pr_err("Found verb 0x%x in MR\n", msg->ern.verb);
330 qm_mr_cci_consume(p, 1);
334 static inline int qm_eqcr_init(struct qm_portal *portal,
335 enum qm_eqcr_pmode pmode,
336 unsigned int eq_stash_thresh,
339 /* This use of 'register', as well as all other occurrences, is because
340 * it has been observed to generate much faster code with gcc than is
341 * otherwise the case.
343 register struct qm_eqcr *eqcr = &portal->eqcr;
347 eqcr->ring = portal->addr.ce + QM_CL_EQCR;
348 eqcr->ci = qm_in(EQCR_CI_CINH) & (QM_EQCR_SIZE - 1);
349 qm_cl_invalidate(EQCR_CI);
350 pi = qm_in(EQCR_PI_CINH) & (QM_EQCR_SIZE - 1);
351 eqcr->cursor = eqcr->ring + pi;
352 eqcr->vbit = (qm_in(EQCR_PI_CINH) & QM_EQCR_SIZE) ?
353 QM_EQCR_VERB_VBIT : 0;
354 eqcr->available = QM_EQCR_SIZE - 1 -
355 qm_cyc_diff(QM_EQCR_SIZE, eqcr->ci, pi);
356 eqcr->ithresh = qm_in(EQCR_ITR);
357 #ifdef RTE_LIBRTE_DPAA_HWDEBUG
361 cfg = (qm_in(CFG) & 0x00ffffff) |
362 (eq_stash_thresh << 28) | /* QCSP_CFG: EST */
363 (eq_stash_prio << 26) | /* QCSP_CFG: EP */
364 ((pmode & 0x3) << 24); /* QCSP_CFG::EPM */
369 static inline void qm_eqcr_finish(struct qm_portal *portal)
371 register struct qm_eqcr *eqcr = &portal->eqcr;
376 * Disable EQCI stashing because the QMan only
377 * presents the value it previously stashed to
378 * maintain coherency. Setting the stash threshold
379 * to 1 then 0 ensures that QMan has resynchronized
380 * its internal copy so that the portal is clean
381 * when it is reinitialized in the future
383 cfg = (qm_in(CFG) & 0x0fffffff) |
384 (1 << 28); /* QCSP_CFG: EST */
386 cfg &= 0x0fffffff; /* stash threshold = 0 */
389 pi = qm_in(EQCR_PI_CINH) & (QM_EQCR_SIZE - 1);
390 ci = qm_in(EQCR_CI_CINH) & (QM_EQCR_SIZE - 1);
392 /* Refresh EQCR CI cache value */
393 qm_cl_invalidate(EQCR_CI);
394 eqcr->ci = qm_cl_in(EQCR_CI) & (QM_EQCR_SIZE - 1);
396 #ifdef RTE_LIBRTE_DPAA_HWDEBUG
397 DPAA_ASSERT(!eqcr->busy);
399 if (pi != EQCR_PTR2IDX(eqcr->cursor))
400 pr_crit("losing uncommitted EQCR entries\n");
402 pr_crit("missing existing EQCR completions\n");
403 if (eqcr->ci != EQCR_PTR2IDX(eqcr->cursor))
404 pr_crit("EQCR destroyed unquiesced\n");
407 static inline int qm_dqrr_init(struct qm_portal *portal,
408 __maybe_unused const struct qm_portal_config *config,
409 enum qm_dqrr_dmode dmode,
410 __maybe_unused enum qm_dqrr_pmode pmode,
411 enum qm_dqrr_cmode cmode, u8 max_fill)
413 register struct qm_dqrr *dqrr = &portal->dqrr;
416 /* Make sure the DQRR will be idle when we enable */
417 qm_out(DQRR_SDQCR, 0);
418 qm_out(DQRR_VDQCR, 0);
419 qm_out(DQRR_PDQCR, 0);
420 dqrr->ring = portal->addr.ce + QM_CL_DQRR;
421 dqrr->pi = qm_in(DQRR_PI_CINH) & (QM_DQRR_SIZE - 1);
422 dqrr->ci = qm_in(DQRR_CI_CINH) & (QM_DQRR_SIZE - 1);
423 dqrr->cursor = dqrr->ring + dqrr->ci;
424 dqrr->fill = qm_cyc_diff(QM_DQRR_SIZE, dqrr->ci, dqrr->pi);
425 dqrr->vbit = (qm_in(DQRR_PI_CINH) & QM_DQRR_SIZE) ?
426 QM_DQRR_VERB_VBIT : 0;
427 dqrr->ithresh = qm_in(DQRR_ITR);
428 #ifdef RTE_LIBRTE_DPAA_HWDEBUG
433 /* Invalidate every ring entry before beginning */
434 for (cfg = 0; cfg < QM_DQRR_SIZE; cfg++)
435 dccivac(qm_cl(dqrr->ring, cfg));
436 cfg = (qm_in(CFG) & 0xff000f00) |
437 ((max_fill & (QM_DQRR_SIZE - 1)) << 20) | /* DQRR_MF */
438 ((dmode & 1) << 18) | /* DP */
439 ((cmode & 3) << 16) | /* DCM */
441 (0 ? 0x40 : 0) | /* Ignore RP */
442 (0 ? 0x10 : 0); /* Ignore SP */
444 qm_dqrr_set_maxfill(portal, max_fill);
448 static inline void qm_dqrr_finish(struct qm_portal *portal)
450 __maybe_unused register struct qm_dqrr *dqrr = &portal->dqrr;
451 #ifdef RTE_LIBRTE_DPAA_HWDEBUG
452 if ((dqrr->cmode != qm_dqrr_cdc) &&
453 (dqrr->ci != DQRR_PTR2IDX(dqrr->cursor)))
454 pr_crit("Ignoring completed DQRR entries\n");
458 static inline int qm_mr_init(struct qm_portal *portal,
459 __maybe_unused enum qm_mr_pmode pmode,
460 enum qm_mr_cmode cmode)
462 register struct qm_mr *mr = &portal->mr;
465 mr->ring = portal->addr.ce + QM_CL_MR;
466 mr->pi = qm_in(MR_PI_CINH) & (QM_MR_SIZE - 1);
467 mr->ci = qm_in(MR_CI_CINH) & (QM_MR_SIZE - 1);
468 mr->cursor = mr->ring + mr->ci;
469 mr->fill = qm_cyc_diff(QM_MR_SIZE, mr->ci, mr->pi);
470 mr->vbit = (qm_in(MR_PI_CINH) & QM_MR_SIZE) ? QM_MR_VERB_VBIT : 0;
471 mr->ithresh = qm_in(MR_ITR);
472 #ifdef RTE_LIBRTE_DPAA_HWDEBUG
476 cfg = (qm_in(CFG) & 0xfffff0ff) |
477 ((cmode & 1) << 8); /* QCSP_CFG:MM */
482 static inline void qm_mr_pvb_update(struct qm_portal *portal)
484 register struct qm_mr *mr = &portal->mr;
485 const struct qm_mr_entry *res = qm_cl(mr->ring, mr->pi);
487 #ifdef RTE_LIBRTE_DPAA_HWDEBUG
488 DPAA_ASSERT(mr->pmode == qm_mr_pvb);
490 /* when accessing 'verb', use __raw_readb() to ensure that compiler
491 * inlining doesn't try to optimise out "excess reads".
493 if ((__raw_readb(&res->ern.verb) & QM_MR_VERB_VBIT) == mr->vbit) {
494 mr->pi = (mr->pi + 1) & (QM_MR_SIZE - 1);
496 mr->vbit ^= QM_MR_VERB_VBIT;
504 qman_init_portal(struct qman_portal *portal,
505 const struct qm_portal_config *c,
506 const struct qman_cgrs *cgrs)
518 if (dpaa_svr_family == SVR_LS1043A_FAMILY)
519 portal->use_eqcr_ci_stashing = 3;
521 portal->use_eqcr_ci_stashing =
522 ((qman_ip_rev >= QMAN_REV30) ? 1 : 0);
525 * prep the low-level portal struct with the mapped addresses from the
526 * config, everything that follows depends on it and "config" is more
529 p->addr.ce = c->addr_virt[DPAA_PORTAL_CE];
530 p->addr.ci = c->addr_virt[DPAA_PORTAL_CI];
532 * If CI-stashing is used, the current defaults use a threshold of 3,
533 * and stash with higher-than-DQRR priority.
535 if (qm_eqcr_init(p, qm_eqcr_pvb,
536 portal->use_eqcr_ci_stashing, 1)) {
537 pr_err("Qman EQCR initialisation failed\n");
540 if (qm_dqrr_init(p, c, qm_dqrr_dpush, qm_dqrr_pvb,
541 qm_dqrr_cdc, DQRR_MAXFILL)) {
542 pr_err("Qman DQRR initialisation failed\n");
545 if (qm_mr_init(p, qm_mr_pvb, qm_mr_cci)) {
546 pr_err("Qman MR initialisation failed\n");
550 pr_err("Qman MC initialisation failed\n");
554 /* static interrupt-gating controls */
555 qm_dqrr_set_ithresh(p, 0);
556 qm_mr_set_ithresh(p, 0);
557 qm_isr_set_iperiod(p, 0);
558 portal->cgrs = kmalloc(2 * sizeof(*cgrs), GFP_KERNEL);
561 /* initial snapshot is no-depletion */
562 qman_cgrs_init(&portal->cgrs[1]);
564 portal->cgrs[0] = *cgrs;
566 /* if the given mask is NULL, assume all CGRs can be seen */
567 qman_cgrs_fill(&portal->cgrs[0]);
568 INIT_LIST_HEAD(&portal->cgr_cbs);
569 spin_lock_init(&portal->cgr_lock);
571 portal->slowpoll = 0;
572 portal->sdqcr = QM_SDQCR_SOURCE_CHANNELS | QM_SDQCR_COUNT_UPTO3 |
573 QM_SDQCR_DEDICATED_PRECEDENCE | QM_SDQCR_TYPE_PRIO_QOS |
574 QM_SDQCR_TOKEN_SET(0xab) | QM_SDQCR_CHANNELS_DEDICATED;
575 portal->dqrr_disable_ref = 0;
576 portal->cb_dc_ern = NULL;
577 sprintf(buf, "qportal-%d", c->channel);
578 dpa_rbtree_init(&portal->retire_table);
580 qm_isr_disable_write(p, isdr);
581 portal->irq_sources = 0;
582 qm_isr_enable_write(p, portal->irq_sources);
583 qm_isr_status_clear(p, 0xffffffff);
584 snprintf(portal->irqname, MAX_IRQNAME, IRQNAME, c->cpu);
585 if (request_irq(c->irq, portal_isr, 0, portal->irqname,
587 pr_err("request_irq() failed\n");
591 /* Need EQCR to be empty before continuing */
592 isdr &= ~QM_PIRQ_EQCI;
593 qm_isr_disable_write(p, isdr);
594 ret = qm_eqcr_get_fill(p);
596 pr_err("Qman EQCR unclean\n");
597 goto fail_eqcr_empty;
599 isdr &= ~(QM_PIRQ_DQRI | QM_PIRQ_MRI);
600 qm_isr_disable_write(p, isdr);
601 if (qm_dqrr_current(p)) {
602 pr_err("Qman DQRR unclean\n");
603 qm_dqrr_cdc_consume_n(p, 0xffff);
605 if (qm_mr_current(p) && drain_mr_fqrni(p)) {
606 /* special handling, drain just in case it's a few FQRNIs */
607 if (drain_mr_fqrni(p))
608 goto fail_dqrr_mr_empty;
612 qm_isr_disable_write(p, 0);
614 /* Write a sane SDQCR */
615 qm_dqrr_sdqcr_set(p, portal->sdqcr);
619 free_irq(c->irq, portal);
622 spin_lock_destroy(&portal->cgr_lock);
635 #define MAX_GLOBAL_PORTALS 8
636 static struct qman_portal global_portals[MAX_GLOBAL_PORTALS];
637 static rte_atomic16_t global_portals_used[MAX_GLOBAL_PORTALS];
640 qman_alloc_global_portal(struct qm_portal_config *q_pcfg)
644 for (i = 0; i < MAX_GLOBAL_PORTALS; i++) {
645 if (rte_atomic16_test_and_set(&global_portals_used[i])) {
646 global_portals[i].config = q_pcfg;
647 return &global_portals[i];
650 pr_err("No portal available (%x)\n", MAX_GLOBAL_PORTALS);
656 qman_free_global_portal(struct qman_portal *portal)
660 for (i = 0; i < MAX_GLOBAL_PORTALS; i++) {
661 if (&global_portals[i] == portal) {
662 rte_atomic16_clear(&global_portals_used[i]);
670 qman_portal_uninhibit_isr(struct qman_portal *portal)
672 qm_isr_uninhibit(&portal->p);
675 struct qman_portal *qman_create_affine_portal(const struct qm_portal_config *c,
676 const struct qman_cgrs *cgrs)
678 struct qman_portal *res;
679 struct qman_portal *portal = get_affine_portal();
681 /* A criterion for calling this function (from qman_driver.c) is that
682 * we're already affine to the cpu and won't schedule onto another cpu.
684 res = qman_init_portal(portal, c, cgrs);
686 spin_lock(&affine_mask_lock);
687 CPU_SET(c->cpu, &affine_mask);
688 affine_channels[c->cpu] =
690 spin_unlock(&affine_mask_lock);
696 void qman_destroy_portal(struct qman_portal *qm)
698 const struct qm_portal_config *pcfg;
700 /* Stop dequeues on the portal */
701 qm_dqrr_sdqcr_set(&qm->p, 0);
704 * NB we do this to "quiesce" EQCR. If we add enqueue-completions or
705 * something related to QM_PIRQ_EQCI, this may need fixing.
706 * Also, due to the prefetching model used for CI updates in the enqueue
707 * path, this update will only invalidate the CI cacheline *after*
708 * working on it, so we need to call this twice to ensure a full update
709 * irrespective of where the enqueue processing was at when the teardown
712 qm_eqcr_cce_update(&qm->p);
713 qm_eqcr_cce_update(&qm->p);
716 free_irq(pcfg->irq, qm);
719 qm_mc_finish(&qm->p);
720 qm_mr_finish(&qm->p);
721 qm_dqrr_finish(&qm->p);
722 qm_eqcr_finish(&qm->p);
726 spin_lock_destroy(&qm->cgr_lock);
729 const struct qm_portal_config *
730 qman_destroy_affine_portal(struct qman_portal *qp)
732 /* We don't want to redirect if we're a slave, use "raw" */
733 struct qman_portal *qm;
734 const struct qm_portal_config *pcfg;
738 qm = get_affine_portal();
744 qman_destroy_portal(qm);
746 spin_lock(&affine_mask_lock);
747 CPU_CLR(cpu, &affine_mask);
748 spin_unlock(&affine_mask_lock);
750 qman_free_global_portal(qm);
755 int qman_get_portal_index(void)
757 struct qman_portal *p = get_affine_portal();
758 return p->config->index;
761 /* Inline helper to reduce nesting in __poll_portal_slow() */
762 static inline void fq_state_change(struct qman_portal *p, struct qman_fq *fq,
763 const struct qm_mr_entry *msg, u8 verb)
767 case QM_MR_VERB_FQRL:
768 DPAA_ASSERT(fq_isset(fq, QMAN_FQ_STATE_ORL));
769 fq_clear(fq, QMAN_FQ_STATE_ORL);
772 case QM_MR_VERB_FQRN:
773 DPAA_ASSERT((fq->state == qman_fq_state_parked) ||
774 (fq->state == qman_fq_state_sched));
775 DPAA_ASSERT(fq_isset(fq, QMAN_FQ_STATE_CHANGING));
776 fq_clear(fq, QMAN_FQ_STATE_CHANGING);
777 if (msg->fq.fqs & QM_MR_FQS_NOTEMPTY)
778 fq_set(fq, QMAN_FQ_STATE_NE);
779 if (msg->fq.fqs & QM_MR_FQS_ORLPRESENT)
780 fq_set(fq, QMAN_FQ_STATE_ORL);
783 fq->state = qman_fq_state_retired;
785 case QM_MR_VERB_FQPN:
786 DPAA_ASSERT(fq->state == qman_fq_state_sched);
787 DPAA_ASSERT(fq_isclear(fq, QMAN_FQ_STATE_CHANGING));
788 fq->state = qman_fq_state_parked;
793 static u32 __poll_portal_slow(struct qman_portal *p, u32 is)
795 const struct qm_mr_entry *msg;
796 struct qm_mr_entry swapped_msg;
798 if (is & QM_PIRQ_CSCI) {
799 struct qman_cgrs rr, c;
800 struct qm_mc_result *mcr;
801 struct qman_cgr *cgr;
803 spin_lock(&p->cgr_lock);
805 * The CSCI bit must be cleared _before_ issuing the
806 * Query Congestion State command, to ensure that a long
807 * CGR State Change callback cannot miss an intervening
810 qm_isr_status_clear(&p->p, QM_PIRQ_CSCI);
812 qm_mc_commit(&p->p, QM_MCC_VERB_QUERYCONGESTION);
813 while (!(mcr = qm_mc_result(&p->p)))
815 /* mask out the ones I'm not interested in */
816 qman_cgrs_and(&rr, (const struct qman_cgrs *)
817 &mcr->querycongestion.state, &p->cgrs[0]);
818 /* check previous snapshot for delta, enter/exit congestion */
819 qman_cgrs_xor(&c, &rr, &p->cgrs[1]);
820 /* update snapshot */
821 qman_cgrs_cp(&p->cgrs[1], &rr);
822 /* Invoke callback */
823 list_for_each_entry(cgr, &p->cgr_cbs, node)
824 if (cgr->cb && qman_cgrs_get(&c, cgr->cgrid))
825 cgr->cb(p, cgr, qman_cgrs_get(&rr, cgr->cgrid));
826 spin_unlock(&p->cgr_lock);
829 if (is & QM_PIRQ_EQRI) {
830 qm_eqcr_cce_update(&p->p);
831 qm_eqcr_set_ithresh(&p->p, 0);
832 wake_up(&affine_queue);
835 if (is & QM_PIRQ_MRI) {
839 qm_mr_pvb_update(&p->p);
840 msg = qm_mr_current(&p->p);
844 hw_fd_to_cpu(&swapped_msg.ern.fd);
845 verb = msg->ern.verb & QM_MR_VERB_TYPE_MASK;
846 /* The message is a software ERN iff the 0x20 bit is set */
849 case QM_MR_VERB_FQRNI:
850 /* nada, we drop FQRNIs on the floor */
852 case QM_MR_VERB_FQRN:
853 case QM_MR_VERB_FQRL:
854 /* Lookup in the retirement table */
855 fq = table_find_fq(p,
856 be32_to_cpu(msg->fq.fqid));
858 fq_state_change(p, fq, &swapped_msg, verb);
860 fq->cb.fqs(p, fq, &swapped_msg);
862 case QM_MR_VERB_FQPN:
864 #ifdef CONFIG_FSL_QMAN_FQ_LOOKUP
865 fq = get_fq_table_entry(msg->fq.contextB);
867 fq = (void *)(uintptr_t)msg->fq.contextB;
869 fq_state_change(p, fq, msg, verb);
871 fq->cb.fqs(p, fq, &swapped_msg);
873 case QM_MR_VERB_DC_ERN:
876 p->cb_dc_ern(p, msg);
880 static int warn_once;
883 pr_crit("Leaking DCP ERNs!\n");
889 pr_crit("Invalid MR verb 0x%02x\n", verb);
892 /* It's a software ERN */
893 #ifdef CONFIG_FSL_QMAN_FQ_LOOKUP
894 fq = get_fq_table_entry(be32_to_cpu(msg->ern.tag));
896 fq = (void *)(uintptr_t)be32_to_cpu(msg->ern.tag);
898 fq->cb.ern(p, fq, &swapped_msg);
904 qm_mr_cci_consume(&p->p, num);
907 * QM_PIRQ_CSCI/CCSCI has already been cleared, as part of its specific
908 * processing. If that interrupt source has meanwhile been re-asserted,
909 * we mustn't clear it here (or in the top-level interrupt handler).
911 return is & (QM_PIRQ_EQCI | QM_PIRQ_EQRI | QM_PIRQ_MRI);
915 * remove some slowish-path stuff from the "fast path" and make sure it isn't
918 static noinline void clear_vdqcr(struct qman_portal *p, struct qman_fq *fq)
920 p->vdqcr_owned = NULL;
922 fq_clear(fq, QMAN_FQ_STATE_VDQCR);
924 wake_up(&affine_queue);
928 * The only states that would conflict with other things if they ran at the
929 * same time on the same cpu are:
931 * (i) setting/clearing vdqcr_owned, and
932 * (ii) clearing the NE (Not Empty) flag.
934 * Both are safe, because:
936 * (i) this clearing can only occur after qman_set_vdq() has set the
937 * vdqcr_owned field (which it does before setting VDQCR), and
938 * qman_volatile_dequeue() blocks interrupts and preemption while this is
939 * done so that we can't interfere.
940 * (ii) the NE flag is only cleared after qman_retire_fq() has set it, and as
941 * with (i) that API prevents us from interfering until it's safe.
943 * The good thing is that qman_set_vdq() and qman_retire_fq() run far
944 * less frequently (ie. per-FQ) than __poll_portal_fast() does, so the net
945 * advantage comes from this function not having to "lock" anything at all.
947 * Note also that the callbacks are invoked at points which are safe against the
948 * above potential conflicts, but that this function itself is not re-entrant
949 * (this is because the function tracks one end of each FIFO in the portal and
950 * we do *not* want to lock that). So the consequence is that it is safe for
951 * user callbacks to call into any QMan API.
953 static inline unsigned int __poll_portal_fast(struct qman_portal *p,
954 unsigned int poll_limit)
956 const struct qm_dqrr_entry *dq;
958 enum qman_cb_dqrr_result res;
959 unsigned int limit = 0;
960 #if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
961 struct qm_dqrr_entry *shadow;
964 qm_dqrr_pvb_update(&p->p);
965 dq = qm_dqrr_current(&p->p);
968 #if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
969 /* If running on an LE system the fields of the
970 * dequeue entry must be swapped. Because the
971 * QMan HW will ignore writes, the DQRR entry is
972 * copied and the index stored within the copy
974 shadow = &p->shadow_dqrr[DQRR_PTR2IDX(dq)];
977 shadow->fqid = be32_to_cpu(shadow->fqid);
978 shadow->seqnum = be16_to_cpu(shadow->seqnum);
979 hw_fd_to_cpu(&shadow->fd);
982 if (dq->stat & QM_DQRR_STAT_UNSCHEDULED) {
984 * VDQCR: don't trust context_b as the FQ may have
985 * been configured for h/w consumption and we're
986 * draining it post-retirement.
990 * We only set QMAN_FQ_STATE_NE when retiring, so we
991 * only need to check for clearing it when doing
992 * volatile dequeues. It's one less thing to check
993 * in the critical path (SDQCR).
995 if (dq->stat & QM_DQRR_STAT_FQ_EMPTY)
996 fq_clear(fq, QMAN_FQ_STATE_NE);
998 * This is duplicated from the SDQCR code, but we
999 * have stuff to do before *and* after this callback,
1000 * and we don't want multiple if()s in the critical
1003 res = fq->cb.dqrr(p, fq, dq);
1004 if (res == qman_cb_dqrr_stop)
1006 /* Check for VDQCR completion */
1007 if (dq->stat & QM_DQRR_STAT_DQCR_EXPIRED)
1010 /* SDQCR: context_b points to the FQ */
1011 #ifdef CONFIG_FSL_QMAN_FQ_LOOKUP
1012 fq = get_fq_table_entry(dq->contextB);
1014 fq = (void *)(uintptr_t)dq->contextB;
1016 /* Now let the callback do its stuff */
1017 res = fq->cb.dqrr(p, fq, dq);
1019 * The callback can request that we exit without
1020 * consuming this entry or advancing;
1022 if (res == qman_cb_dqrr_stop)
1025 /* Interpret 'dq' from a driver perspective. */
1027 * Parking isn't possible unless HELDACTIVE was set. NB,
1028 * FORCEELIGIBLE implies HELDACTIVE, so we only need to
1029 * check for HELDACTIVE to cover both.
1031 DPAA_ASSERT((dq->stat & QM_DQRR_STAT_FQ_HELDACTIVE) ||
1032 (res != qman_cb_dqrr_park));
1033 /* just means "skip it, I'll consume it myself later on" */
1034 if (res != qman_cb_dqrr_defer)
1035 qm_dqrr_cdc_consume_1ptr(&p->p, dq,
1036 res == qman_cb_dqrr_park);
1038 qm_dqrr_next(&p->p);
1040 * Entry processed and consumed, increment our counter. The
1041 * callback can request that we exit after consuming the
1042 * entry, and we also exit if we reach our processing limit,
1043 * so loop back only if neither of these conditions is met.
1045 } while (++limit < poll_limit && res != qman_cb_dqrr_consume_stop);
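/* Illustrative sketch (not from the original source): a minimal DQRR callback
 * as consumed by __poll_portal_fast() above. The signature follows the
 * fq->cb.dqrr(p, fq, dq) call sites in this file; process_frame() is a
 * hypothetical consumer and qman_cb_dqrr_consume is assumed to be the normal
 * "entry handled, let the driver CDC-consume it" return value.
 *
 *	static enum qman_cb_dqrr_result
 *	example_rx_cb(struct qman_portal *qm, struct qman_fq *fq,
 *		      const struct qm_dqrr_entry *dq)
 *	{
 *		process_frame(&dq->fd);
 *		return qman_cb_dqrr_consume;
 *	}
 *
 *	...
 *	fq->cb.dqrr = example_rx_cb;
 *	qman_poll_dqrr(FSL_QMAN_POLL_LIMIT);
 */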
1050 int qman_irqsource_add(u32 bits)
1052 struct qman_portal *p = get_affine_portal();
1054 bits = bits & QM_PIRQ_VISIBLE;
1056 /* Clear any previously remaining interrupt conditions in
1057 * QCSP_ISR. This prevents raising a false interrupt when
1058 * interrupt conditions are enabled in QCSP_IER.
1060 qm_isr_status_clear(&p->p, bits);
1061 dpaa_set_bits(bits, &p->irq_sources);
1062 qm_isr_enable_write(&p->p, p->irq_sources);
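/* Illustrative sketch (not from the original source): a caller that prefers
 * interrupt-driven DQRR processing on its lcore uses qman_irqsource_add() as
 * below, after which portal_isr() drives __poll_portal_fast() rather than
 * the caller polling.
 *
 *	qman_irqsource_add(QM_PIRQ_DQRI);
 */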
1067 int qman_fq_portal_irqsource_add(struct qman_portal *p, u32 bits)
1069 bits = bits & QM_PIRQ_VISIBLE;
1071 /* Clear any previously remaining interrupt conditions in
1072 * QCSP_ISR. This prevents raising a false interrupt when
1073 * interrupt conditions are enabled in QCSP_IER.
1075 qm_isr_status_clear(&p->p, bits);
1076 dpaa_set_bits(bits, &p->irq_sources);
1077 qm_isr_enable_write(&p->p, p->irq_sources);
1082 int qman_irqsource_remove(u32 bits)
1084 struct qman_portal *p = get_affine_portal();
1087 /* Our interrupt handler only processes+clears status register bits that
1088 * are in p->irq_sources. As we're trimming that mask, if one of them
1089 * were to assert in the status register just before we remove it from
1090 * the enable register, there would be an interrupt-storm when we
1091 * release the IRQ lock. So we wait for the enable register update to
1092 * take effect in h/w (by reading it back) and then clear all other bits
1093 * in the status register. Ie. we clear them from ISR once it's certain
1094 * IER won't allow them to reassert.
1097 bits &= QM_PIRQ_VISIBLE;
1098 dpaa_clear_bits(bits, &p->irq_sources);
1099 qm_isr_enable_write(&p->p, p->irq_sources);
1100 ier = qm_isr_enable_read(&p->p);
1101 /* Using "~ier" (rather than "bits" or "~p->irq_sources") creates a
1102 * data-dependency, ie. to protect against re-ordering.
1104 qm_isr_status_clear(&p->p, ~ier);
1108 int qman_fq_portal_irqsource_remove(struct qman_portal *p, u32 bits)
1112 /* Our interrupt handler only processes+clears status register bits that
1113 * are in p->irq_sources. As we're trimming that mask, if one of them
1114 * were to assert in the status register just before we remove it from
1115 * the enable register, there would be an interrupt-storm when we
1116 * release the IRQ lock. So we wait for the enable register update to
1117 * take effect in h/w (by reading it back) and then clear all other bits
1118 * in the status register. Ie. we clear them from ISR once it's certain
1119 * IER won't allow them to reassert.
1122 bits &= QM_PIRQ_VISIBLE;
1123 dpaa_clear_bits(bits, &p->irq_sources);
1124 qm_isr_enable_write(&p->p, p->irq_sources);
1125 ier = qm_isr_enable_read(&p->p);
1126 /* Using "~ier" (rather than "bits" or "~p->irq_sources") creates a
1127 * data-dependency, ie. to protect against re-ordering.
1129 qm_isr_status_clear(&p->p, ~ier);
1133 u16 qman_affine_channel(int cpu)
1136 struct qman_portal *portal = get_affine_portal();
1138 cpu = portal->config->cpu;
1140 DPAA_BUG_ON(!CPU_ISSET(cpu, &affine_mask));
1141 return affine_channels[cpu];
1144 unsigned int qman_portal_poll_rx(unsigned int poll_limit,
1146 struct qman_portal *p)
1148 struct qm_portal *portal = &p->p;
1149 register struct qm_dqrr *dqrr = &portal->dqrr;
1150 struct qm_dqrr_entry *dq[QM_DQRR_SIZE], *shadow[QM_DQRR_SIZE];
1152 unsigned int limit = 0, rx_number = 0;
1153 uint32_t consume = 0;
1156 qm_dqrr_pvb_update(&p->p);
1160 dq[rx_number] = dqrr->cursor;
1161 dqrr->cursor = DQRR_CARRYCLEAR(dqrr->cursor + 1);
1162 /* Prefetch the next DQRR entry */
1163 rte_prefetch0(dqrr->cursor);
1165 #if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
1166 /* If running on an LE system the fields of the
1167 * dequeue entry must be swapped. Because the
1168 * QMan HW will ignore writes, the DQRR entry is
1169 * copied and the index stored within the copy
1172 &p->shadow_dqrr[DQRR_PTR2IDX(dq[rx_number])];
1173 shadow[rx_number]->fd.opaque_addr =
1174 dq[rx_number]->fd.opaque_addr;
1175 shadow[rx_number]->fd.addr =
1176 be40_to_cpu(dq[rx_number]->fd.addr);
1177 shadow[rx_number]->fd.opaque =
1178 be32_to_cpu(dq[rx_number]->fd.opaque);
1180 shadow[rx_number] = dq[rx_number];
1183 /* SDQCR: context_b points to the FQ */
1184 #ifdef CONFIG_FSL_QMAN_FQ_LOOKUP
1185 fq = qman_fq_lookup_table[dq[rx_number]->contextB];
1187 fq = (void *)dq[rx_number]->contextB;
1189 if (fq->cb.dqrr_prepare)
1190 fq->cb.dqrr_prepare(shadow[rx_number],
1193 consume |= (1 << (31 - DQRR_PTR2IDX(shadow[rx_number])));
1196 } while (++limit < poll_limit);
1199 fq->cb.dqrr_dpdk_pull_cb(&fq, shadow, bufs, rx_number);
1201 /* Consume all the DQRR entries together */
1202 qm_out(DQRR_DCAP, (1 << 8) | consume);
1207 void qman_clear_irq(void)
1209 struct qman_portal *p = get_affine_portal();
1210 u32 clear = QM_DQAVAIL_MASK | (p->irq_sources &
1211 ~(QM_PIRQ_CSCI | QM_PIRQ_CCSCI));
1212 qm_isr_status_clear(&p->p, clear);
1215 u32 qman_portal_dequeue(struct rte_event ev[], unsigned int poll_limit,
1218 const struct qm_dqrr_entry *dq;
1220 enum qman_cb_dqrr_result res;
1221 unsigned int limit = 0;
1222 struct qman_portal *p = get_affine_portal();
1223 #if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
1224 struct qm_dqrr_entry *shadow;
1226 unsigned int rx_number = 0;
1229 qm_dqrr_pvb_update(&p->p);
1230 dq = qm_dqrr_current(&p->p);
1233 #if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
1235 * If running on an LE system the fields of the
1236 * dequeue entry must be swapped. Because the
1237 * QMan HW will ignore writes, the DQRR entry is
1238 * copied and the index stored within the copy
1240 shadow = &p->shadow_dqrr[DQRR_PTR2IDX(dq)];
1243 shadow->fqid = be32_to_cpu(shadow->fqid);
1244 shadow->seqnum = be16_to_cpu(shadow->seqnum);
1245 hw_fd_to_cpu(&shadow->fd);
1248 /* SDQCR: context_b points to the FQ */
1249 #ifdef CONFIG_FSL_QMAN_FQ_LOOKUP
1250 fq = get_fq_table_entry(dq->contextB);
1252 fq = (void *)(uintptr_t)dq->contextB;
1254 /* Now let the callback do its stuff */
1255 res = fq->cb.dqrr_dpdk_cb(&ev[rx_number], p, fq,
1256 dq, &bufs[rx_number]);
1258 /* Interpret 'dq' from a driver perspective. */
1260 * Parking isn't possible unless HELDACTIVE was set. NB,
1261 * FORCEELIGIBLE implies HELDACTIVE, so we only need to
1262 * check for HELDACTIVE to cover both.
1264 DPAA_ASSERT((dq->stat & QM_DQRR_STAT_FQ_HELDACTIVE) ||
1265 (res != qman_cb_dqrr_park));
1266 if (res != qman_cb_dqrr_defer)
1267 qm_dqrr_cdc_consume_1ptr(&p->p, dq,
1268 res == qman_cb_dqrr_park);
1270 qm_dqrr_next(&p->p);
1272 * Entry processed and consumed, increment our counter. The
1273 * callback can request that we exit after consuming the
1274 * entry, and we also exit if we reach our processing limit,
1275 * so loop back only if neither of these conditions is met.
1277 } while (++limit < poll_limit);
1282 struct qm_dqrr_entry *qman_dequeue(struct qman_fq *fq)
1284 struct qman_portal *p = get_affine_portal();
1285 const struct qm_dqrr_entry *dq;
1286 #if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
1287 struct qm_dqrr_entry *shadow;
1290 qm_dqrr_pvb_update(&p->p);
1291 dq = qm_dqrr_current(&p->p);
1295 if (!(dq->stat & QM_DQRR_STAT_FD_VALID)) {
1296 /* Invalid DQRR - put the portal and consume the DQRR.
1297 * Return NULL to user as no packet is seen.
1299 qman_dqrr_consume(fq, (struct qm_dqrr_entry *)dq);
1303 #if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
1304 shadow = &p->shadow_dqrr[DQRR_PTR2IDX(dq)];
1307 shadow->fqid = be32_to_cpu(shadow->fqid);
1308 shadow->seqnum = be16_to_cpu(shadow->seqnum);
1309 hw_fd_to_cpu(&shadow->fd);
1312 if (dq->stat & QM_DQRR_STAT_FQ_EMPTY)
1313 fq_clear(fq, QMAN_FQ_STATE_NE);
1315 return (struct qm_dqrr_entry *)dq;
1318 void qman_dqrr_consume(struct qman_fq *fq,
1319 struct qm_dqrr_entry *dq)
1321 struct qman_portal *p = get_affine_portal();
1323 if (dq->stat & QM_DQRR_STAT_DQCR_EXPIRED)
1326 qm_dqrr_cdc_consume_1ptr(&p->p, dq, 0);
1327 qm_dqrr_next(&p->p);
1330 int qman_poll_dqrr(unsigned int limit)
1332 struct qman_portal *p = get_affine_portal();
1335 ret = __poll_portal_fast(p, limit);
1339 void qman_poll(void)
1341 struct qman_portal *p = get_affine_portal();
1343 if ((~p->irq_sources) & QM_PIRQ_SLOW) {
1344 if (!(p->slowpoll--)) {
1345 u32 is = qm_isr_status_read(&p->p) & ~p->irq_sources;
1346 u32 active = __poll_portal_slow(p, is);
1349 qm_isr_status_clear(&p->p, active);
1350 p->slowpoll = SLOW_POLL_BUSY;
1352 p->slowpoll = SLOW_POLL_IDLE;
1355 if ((~p->irq_sources) & QM_PIRQ_DQRI)
1356 __poll_portal_fast(p, FSL_QMAN_POLL_LIMIT);
1359 void qman_stop_dequeues(void)
1361 struct qman_portal *p = get_affine_portal();
1363 qman_stop_dequeues_ex(p);
1366 void qman_start_dequeues(void)
1368 struct qman_portal *p = get_affine_portal();
1370 DPAA_ASSERT(p->dqrr_disable_ref > 0);
1371 if (!(--p->dqrr_disable_ref))
1372 qm_dqrr_set_maxfill(&p->p, DQRR_MAXFILL);
1375 void qman_static_dequeue_add(u32 pools, struct qman_portal *qp)
1377 struct qman_portal *p = qp ? qp : get_affine_portal();
1379 pools &= p->config->pools;
1381 qm_dqrr_sdqcr_set(&p->p, p->sdqcr);
1384 void qman_static_dequeue_del(u32 pools, struct qman_portal *qp)
1386 struct qman_portal *p = qp ? qp : get_affine_portal();
1388 pools &= p->config->pools;
1390 qm_dqrr_sdqcr_set(&p->p, p->sdqcr);
1393 u32 qman_static_dequeue_get(struct qman_portal *qp)
1395 struct qman_portal *p = qp ? qp : get_affine_portal();
1399 void qman_dca(const struct qm_dqrr_entry *dq, int park_request)
1401 struct qman_portal *p = get_affine_portal();
1403 qm_dqrr_cdc_consume_1ptr(&p->p, dq, park_request);
1406 void qman_dca_index(u8 index, int park_request)
1408 struct qman_portal *p = get_affine_portal();
1410 qm_dqrr_cdc_consume_1(&p->p, index, park_request);
1413 /* Frame queue API */
1414 static const char *mcr_result_str(u8 result)
1417 case QM_MCR_RESULT_NULL:
1418 return "QM_MCR_RESULT_NULL";
1419 case QM_MCR_RESULT_OK:
1420 return "QM_MCR_RESULT_OK";
1421 case QM_MCR_RESULT_ERR_FQID:
1422 return "QM_MCR_RESULT_ERR_FQID";
1423 case QM_MCR_RESULT_ERR_FQSTATE:
1424 return "QM_MCR_RESULT_ERR_FQSTATE";
1425 case QM_MCR_RESULT_ERR_NOTEMPTY:
1426 return "QM_MCR_RESULT_ERR_NOTEMPTY";
1427 case QM_MCR_RESULT_PENDING:
1428 return "QM_MCR_RESULT_PENDING";
1429 case QM_MCR_RESULT_ERR_BADCOMMAND:
1430 return "QM_MCR_RESULT_ERR_BADCOMMAND";
1432 return "<unknown MCR result>";
1435 int qman_create_fq(u32 fqid, u32 flags, struct qman_fq *fq)
1438 struct qm_mcr_queryfq_np np;
1439 struct qm_mc_command *mcc;
1440 struct qm_mc_result *mcr;
1441 struct qman_portal *p;
1443 if (flags & QMAN_FQ_FLAG_DYNAMIC_FQID) {
1444 int ret = qman_alloc_fqid(&fqid);
1449 spin_lock_init(&fq->fqlock);
1451 fq->fqid_le = cpu_to_be32(fqid);
1453 fq->state = qman_fq_state_oos;
1454 fq->cgr_groupid = 0;
1455 #ifdef CONFIG_FSL_QMAN_FQ_LOOKUP
1456 if (unlikely(find_empty_fq_table_entry(&fq->key, fq))) {
1457 pr_info("Find empty table entry failed\n");
1460 fq->qman_fq_lookup_table = qman_fq_lookup_table;
1462 if (!(flags & QMAN_FQ_FLAG_AS_IS) || (flags & QMAN_FQ_FLAG_NO_MODIFY))
1464 /* Everything else is AS_IS support */
1465 p = get_affine_portal();
1466 mcc = qm_mc_start(&p->p);
1467 mcc->queryfq.fqid = cpu_to_be32(fqid);
1468 qm_mc_commit(&p->p, QM_MCC_VERB_QUERYFQ);
1469 while (!(mcr = qm_mc_result(&p->p)))
1471 DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCC_VERB_QUERYFQ);
1472 if (mcr->result != QM_MCR_RESULT_OK) {
1473 pr_err("QUERYFQ failed: %s\n", mcr_result_str(mcr->result));
1476 fqd = mcr->queryfq.fqd;
1477 hw_fqd_to_cpu(&fqd);
1478 mcc = qm_mc_start(&p->p);
1479 mcc->queryfq_np.fqid = cpu_to_be32(fqid);
1480 qm_mc_commit(&p->p, QM_MCC_VERB_QUERYFQ_NP);
1481 while (!(mcr = qm_mc_result(&p->p)))
1483 DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCC_VERB_QUERYFQ_NP);
1484 if (mcr->result != QM_MCR_RESULT_OK) {
1485 pr_err("QUERYFQ_NP failed: %s\n", mcr_result_str(mcr->result));
1488 np = mcr->queryfq_np;
1489 /* Phew, have queryfq and queryfq_np results, stitch together
1490 * the FQ object from those.
1492 fq->cgr_groupid = fqd.cgid;
1493 switch (np.state & QM_MCR_NP_STATE_MASK) {
1494 case QM_MCR_NP_STATE_OOS:
1496 case QM_MCR_NP_STATE_RETIRED:
1497 fq->state = qman_fq_state_retired;
1499 fq_set(fq, QMAN_FQ_STATE_NE);
1501 case QM_MCR_NP_STATE_TEN_SCHED:
1502 case QM_MCR_NP_STATE_TRU_SCHED:
1503 case QM_MCR_NP_STATE_ACTIVE:
1504 fq->state = qman_fq_state_sched;
1505 if (np.state & QM_MCR_NP_STATE_R)
1506 fq_set(fq, QMAN_FQ_STATE_CHANGING);
1508 case QM_MCR_NP_STATE_PARKED:
1509 fq->state = qman_fq_state_parked;
1512 DPAA_ASSERT(NULL == "invalid FQ state");
1514 if (fqd.fq_ctrl & QM_FQCTRL_CGE)
1515 fq->state |= QMAN_FQ_STATE_CGR_EN;
1518 if (flags & QMAN_FQ_FLAG_DYNAMIC_FQID)
1519 qman_release_fqid(fqid);
1523 void qman_destroy_fq(struct qman_fq *fq, u32 flags __maybe_unused)
1526 * We don't need to lock the FQ as it is a pre-condition that the FQ be
1527 * quiesced. Instead, run some checks.
1529 switch (fq->state) {
1530 case qman_fq_state_parked:
1531 DPAA_ASSERT(flags & QMAN_FQ_DESTROY_PARKED);
1533 case qman_fq_state_oos:
1534 if (fq_isset(fq, QMAN_FQ_FLAG_DYNAMIC_FQID))
1535 qman_release_fqid(fq->fqid);
1536 #ifdef CONFIG_FSL_QMAN_FQ_LOOKUP
1537 clear_fq_table_entry(fq->key);
1543 DPAA_ASSERT(NULL == "qman_free_fq() on unquiesced FQ!");
1546 u32 qman_fq_fqid(struct qman_fq *fq)
1551 void qman_fq_state(struct qman_fq *fq, enum qman_fq_state *state, u32 *flags)
1559 int qman_init_fq(struct qman_fq *fq, u32 flags, struct qm_mcc_initfq *opts)
1561 struct qm_mc_command *mcc;
1562 struct qm_mc_result *mcr;
1563 struct qman_portal *p;
1565 u8 res, myverb = (flags & QMAN_INITFQ_FLAG_SCHED) ?
1566 QM_MCC_VERB_INITFQ_SCHED : QM_MCC_VERB_INITFQ_PARKED;
1568 if ((fq->state != qman_fq_state_oos) &&
1569 (fq->state != qman_fq_state_parked))
1571 #ifdef RTE_LIBRTE_DPAA_HWDEBUG
1572 if (unlikely(fq_isset(fq, QMAN_FQ_FLAG_NO_MODIFY)))
1575 if (opts && (opts->we_mask & QM_INITFQ_WE_OAC)) {
1576 /* And can't be set at the same time as TDTHRESH */
1577 if (opts->we_mask & QM_INITFQ_WE_TDTHRESH)
1580 /* Issue an INITFQ_[PARKED|SCHED] management command */
1581 p = get_affine_portal();
1583 if (unlikely((fq_isset(fq, QMAN_FQ_STATE_CHANGING)) ||
1584 ((fq->state != qman_fq_state_oos) &&
1585 (fq->state != qman_fq_state_parked)))) {
1589 mcc = qm_mc_start(&p->p);
1591 mcc->initfq = *opts;
1592 mcc->initfq.fqid = cpu_to_be32(fq->fqid);
1593 mcc->initfq.count = 0;
1595 * If the FQ does *not* have the TO_DCPORTAL flag, context_b is set as a
1596 * demux pointer. Otherwise, the caller-provided value is allowed to
1597 * stand; don't overwrite it.
1599 if (fq_isclear(fq, QMAN_FQ_FLAG_TO_DCPORTAL)) {
1602 mcc->initfq.we_mask |= QM_INITFQ_WE_CONTEXTB;
1603 #ifdef CONFIG_FSL_QMAN_FQ_LOOKUP
1604 mcc->initfq.fqd.context_b = cpu_to_be32(fq->key);
1606 mcc->initfq.fqd.context_b = (u32)(uintptr_t)fq;
1609 * and the physical address - NB, if the user wasn't trying to
1610 * set CONTEXTA, clear the stashing settings.
1612 if (!(mcc->initfq.we_mask & QM_INITFQ_WE_CONTEXTA)) {
1613 mcc->initfq.we_mask |= QM_INITFQ_WE_CONTEXTA;
1614 memset(&mcc->initfq.fqd.context_a, 0,
1615 sizeof(mcc->initfq.fqd.context_a));
1617 phys_fq = rte_mem_virt2iova(fq);
1618 qm_fqd_stashing_set64(&mcc->initfq.fqd, phys_fq);
1621 if (flags & QMAN_INITFQ_FLAG_LOCAL) {
1622 mcc->initfq.fqd.dest.channel = p->config->channel;
1623 if (!(mcc->initfq.we_mask & QM_INITFQ_WE_DESTWQ)) {
1624 mcc->initfq.we_mask |= QM_INITFQ_WE_DESTWQ;
1625 mcc->initfq.fqd.dest.wq = 4;
1628 mcc->initfq.we_mask = cpu_to_be16(mcc->initfq.we_mask);
1629 cpu_to_hw_fqd(&mcc->initfq.fqd);
1630 qm_mc_commit(&p->p, myverb);
1631 while (!(mcr = qm_mc_result(&p->p)))
1633 DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == myverb);
1635 if (res != QM_MCR_RESULT_OK) {
1640 if (opts->we_mask & QM_INITFQ_WE_FQCTRL) {
1641 if (opts->fqd.fq_ctrl & QM_FQCTRL_CGE)
1642 fq_set(fq, QMAN_FQ_STATE_CGR_EN);
1644 fq_clear(fq, QMAN_FQ_STATE_CGR_EN);
1646 if (opts->we_mask & QM_INITFQ_WE_CGID)
1647 fq->cgr_groupid = opts->fqd.cgid;
1649 fq->state = (flags & QMAN_INITFQ_FLAG_SCHED) ?
1650 qman_fq_state_sched : qman_fq_state_parked;
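/* Illustrative usage sketch (not from the original source): creating and
 * scheduling a FQ on the caller's own portal channel. Only flags and fields
 * that appear in this file are used; example_rx_cb is the hypothetical
 * callback sketched earlier and error handling is reduced to early returns.
 *
 *	struct qman_fq fq = { .cb.dqrr = example_rx_cb };
 *	struct qm_mcc_initfq opts;
 *
 *	memset(&opts, 0, sizeof(opts));
 *	if (qman_create_fq(0, QMAN_FQ_FLAG_DYNAMIC_FQID, &fq))
 *		return;
 *	if (qman_init_fq(&fq, QMAN_INITFQ_FLAG_SCHED | QMAN_INITFQ_FLAG_LOCAL,
 *			 &opts))
 *		return;
 */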
1655 int qman_schedule_fq(struct qman_fq *fq)
1657 struct qm_mc_command *mcc;
1658 struct qm_mc_result *mcr;
1659 struct qman_portal *p;
1664 if (fq->state != qman_fq_state_parked)
1666 #ifdef RTE_LIBRTE_DPAA_HWDEBUG
1667 if (unlikely(fq_isset(fq, QMAN_FQ_FLAG_NO_MODIFY)))
1670 /* Issue an ALTERFQ_SCHED management command */
1671 p = get_affine_portal();
1674 if (unlikely((fq_isset(fq, QMAN_FQ_STATE_CHANGING)) ||
1675 (fq->state != qman_fq_state_parked))) {
1679 mcc = qm_mc_start(&p->p);
1680 mcc->alterfq.fqid = cpu_to_be32(fq->fqid);
1681 qm_mc_commit(&p->p, QM_MCC_VERB_ALTER_SCHED);
1682 while (!(mcr = qm_mc_result(&p->p)))
1684 DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCR_VERB_ALTER_SCHED);
1686 if (res != QM_MCR_RESULT_OK) {
1690 fq->state = qman_fq_state_sched;
1697 int qman_retire_fq(struct qman_fq *fq, u32 *flags)
1699 struct qm_mc_command *mcc;
1700 struct qm_mc_result *mcr;
1701 struct qman_portal *p;
1706 if ((fq->state != qman_fq_state_parked) &&
1707 (fq->state != qman_fq_state_sched))
1709 #ifdef RTE_LIBRTE_DPAA_HWDEBUG
1710 if (unlikely(fq_isset(fq, QMAN_FQ_FLAG_NO_MODIFY)))
1713 p = get_affine_portal();
1716 if (unlikely((fq_isset(fq, QMAN_FQ_STATE_CHANGING)) ||
1717 (fq->state == qman_fq_state_retired) ||
1718 (fq->state == qman_fq_state_oos))) {
1722 rval = table_push_fq(p, fq);
1725 mcc = qm_mc_start(&p->p);
1726 mcc->alterfq.fqid = cpu_to_be32(fq->fqid);
1727 qm_mc_commit(&p->p, QM_MCC_VERB_ALTER_RETIRE);
1728 while (!(mcr = qm_mc_result(&p->p)))
1730 DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCR_VERB_ALTER_RETIRE);
1733 * "Elegant" would be to treat OK/PENDING the same way; set CHANGING,
1734 * and defer the flags until FQRNI or FQRN (respectively) show up. But
1735 * "Friendly" is to process OK immediately, and not set CHANGING. We do
1736 * friendly; otherwise the caller doesn't necessarily have a fully
1737 * "retired" FQ on return even if the retirement was immediate. However
1738 * this does mean some code duplication between here and
1739 * fq_state_change().
1741 if (likely(res == QM_MCR_RESULT_OK)) {
1743 /* Process 'fq' right away, we'll ignore FQRNI */
1744 if (mcr->alterfq.fqs & QM_MCR_FQS_NOTEMPTY)
1745 fq_set(fq, QMAN_FQ_STATE_NE);
1746 if (mcr->alterfq.fqs & QM_MCR_FQS_ORLPRESENT)
1747 fq_set(fq, QMAN_FQ_STATE_ORL);
1749 table_del_fq(p, fq);
1752 fq->state = qman_fq_state_retired;
1755 * Another issue with supporting "immediate" retirement
1756 * is that we're forced to drop FQRNIs, because by the
1757 * time they're seen it may already be "too late" (the
1758 * fq may have been OOS'd and free()'d already). But if
1759 * the upper layer wants a callback whether it's
1760 * immediate or not, we have to fake a "MR" entry to
1761 * look like an FQRNI...
1763 struct qm_mr_entry msg;
1765 msg.ern.verb = QM_MR_VERB_FQRNI;
1766 msg.fq.fqs = mcr->alterfq.fqs;
1767 msg.fq.fqid = fq->fqid;
1768 #ifdef CONFIG_FSL_QMAN_FQ_LOOKUP
1769 msg.fq.contextB = fq->key;
1771 msg.fq.contextB = (u32)(uintptr_t)fq;
1773 fq->cb.fqs(p, fq, &msg);
1775 } else if (res == QM_MCR_RESULT_PENDING) {
1777 fq_set(fq, QMAN_FQ_STATE_CHANGING);
1780 table_del_fq(p, fq);
1787 int qman_oos_fq(struct qman_fq *fq)
1789 struct qm_mc_command *mcc;
1790 struct qm_mc_result *mcr;
1791 struct qman_portal *p;
1796 if (fq->state != qman_fq_state_retired)
1798 #ifdef RTE_LIBRTE_DPAA_HWDEBUG
1799 if (unlikely(fq_isset(fq, QMAN_FQ_FLAG_NO_MODIFY)))
1802 p = get_affine_portal();
1804 if (unlikely((fq_isset(fq, QMAN_FQ_STATE_BLOCKOOS)) ||
1805 (fq->state != qman_fq_state_retired))) {
1809 mcc = qm_mc_start(&p->p);
1810 mcc->alterfq.fqid = cpu_to_be32(fq->fqid);
1811 qm_mc_commit(&p->p, QM_MCC_VERB_ALTER_OOS);
1812 while (!(mcr = qm_mc_result(&p->p)))
1814 DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCR_VERB_ALTER_OOS);
1816 if (res != QM_MCR_RESULT_OK) {
1820 fq->state = qman_fq_state_oos;
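/* Illustrative teardown sketch (not from the original source): the usual end
 * of life for a FQ is retire -> OOS -> destroy. Per the "friendly" handling
 * in qman_retire_fq() above, a QM_MCR_RESULT_OK retirement completes
 * immediately, while a PENDING one leaves QMAN_FQ_STATE_CHANGING set until
 * the FQRN message arrives. Return-value conventions (negative on error) are
 * assumed from the surrounding code.
 *
 *	u32 rflags = 0;
 *
 *	if (qman_retire_fq(&fq, &rflags) < 0)
 *		return;
 *	// drain remaining frames here if QMAN_FQ_STATE_NE was reported
 *	if (qman_oos_fq(&fq) == 0)
 *		qman_destroy_fq(&fq, 0);
 */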
1826 int qman_fq_flow_control(struct qman_fq *fq, int xon)
1828 struct qm_mc_command *mcc;
1829 struct qm_mc_result *mcr;
1830 struct qman_portal *p;
1836 if ((fq->state == qman_fq_state_oos) ||
1837 (fq->state == qman_fq_state_retired) ||
1838 (fq->state == qman_fq_state_parked))
1841 #ifdef RTE_LIBRTE_DPAA_HWDEBUG
1842 if (unlikely(fq_isset(fq, QMAN_FQ_FLAG_NO_MODIFY)))
1845 /* Issue an ALTER_FQXON or ALTER_FQXOFF management command */
1846 p = get_affine_portal();
1848 if (unlikely((fq_isset(fq, QMAN_FQ_STATE_CHANGING)) ||
1849 (fq->state == qman_fq_state_parked) ||
1850 (fq->state == qman_fq_state_oos) ||
1851 (fq->state == qman_fq_state_retired))) {
1855 mcc = qm_mc_start(&p->p);
1856 mcc->alterfq.fqid = fq->fqid;
1857 mcc->alterfq.count = 0;
1858 myverb = xon ? QM_MCC_VERB_ALTER_FQXON : QM_MCC_VERB_ALTER_FQXOFF;
1860 qm_mc_commit(&p->p, myverb);
1861 while (!(mcr = qm_mc_result(&p->p)))
1863 DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == myverb);
1866 if (res != QM_MCR_RESULT_OK) {
1875 int qman_query_fq(struct qman_fq *fq, struct qm_fqd *fqd)
1877 struct qm_mc_command *mcc;
1878 struct qm_mc_result *mcr;
1879 struct qman_portal *p = get_affine_portal();
1883 mcc = qm_mc_start(&p->p);
1884 mcc->queryfq.fqid = cpu_to_be32(fq->fqid);
1885 qm_mc_commit(&p->p, QM_MCC_VERB_QUERYFQ);
1886 while (!(mcr = qm_mc_result(&p->p)))
1888 DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCR_VERB_QUERYFQ);
1890 if (res == QM_MCR_RESULT_OK)
1891 *fqd = mcr->queryfq.fqd;
1893 if (res != QM_MCR_RESULT_OK)
1898 int qman_query_fq_has_pkts(struct qman_fq *fq)
1900 struct qm_mc_command *mcc;
1901 struct qm_mc_result *mcr;
1902 struct qman_portal *p = get_affine_portal();
1907 mcc = qm_mc_start(&p->p);
1908 mcc->queryfq.fqid = cpu_to_be32(fq->fqid);
1909 qm_mc_commit(&p->p, QM_MCC_VERB_QUERYFQ_NP);
1910 while (!(mcr = qm_mc_result(&p->p)))
1913 if (res == QM_MCR_RESULT_OK)
1914 ret = !!mcr->queryfq_np.frm_cnt;
1918 int qman_query_fq_np(struct qman_fq *fq, struct qm_mcr_queryfq_np *np)
1920 struct qm_mc_command *mcc;
1921 struct qm_mc_result *mcr;
1922 struct qman_portal *p = get_affine_portal();
1926 mcc = qm_mc_start(&p->p);
1927 mcc->queryfq.fqid = cpu_to_be32(fq->fqid);
1928 qm_mc_commit(&p->p, QM_MCC_VERB_QUERYFQ_NP);
1929 while (!(mcr = qm_mc_result(&p->p)))
1931 DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCR_VERB_QUERYFQ_NP);
1933 if (res == QM_MCR_RESULT_OK) {
1934 *np = mcr->queryfq_np;
1935 np->fqd_link = be24_to_cpu(np->fqd_link);
1936 np->odp_seq = be16_to_cpu(np->odp_seq);
1937 np->orp_nesn = be16_to_cpu(np->orp_nesn);
1938 np->orp_ea_hseq = be16_to_cpu(np->orp_ea_hseq);
1939 np->orp_ea_tseq = be16_to_cpu(np->orp_ea_tseq);
1940 np->orp_ea_hptr = be24_to_cpu(np->orp_ea_hptr);
1941 np->orp_ea_tptr = be24_to_cpu(np->orp_ea_tptr);
1942 np->pfdr_hptr = be24_to_cpu(np->pfdr_hptr);
1943 np->pfdr_tptr = be24_to_cpu(np->pfdr_tptr);
1944 np->ics_surp = be16_to_cpu(np->ics_surp);
1945 np->byte_cnt = be32_to_cpu(np->byte_cnt);
1946 np->frm_cnt = be24_to_cpu(np->frm_cnt);
1947 np->ra1_sfdr = be16_to_cpu(np->ra1_sfdr);
1948 np->ra2_sfdr = be16_to_cpu(np->ra2_sfdr);
1949 np->od1_sfdr = be16_to_cpu(np->od1_sfdr);
1950 np->od2_sfdr = be16_to_cpu(np->od2_sfdr);
1951 np->od3_sfdr = be16_to_cpu(np->od3_sfdr);
1953 if (res == QM_MCR_RESULT_ERR_FQID)
1955 else if (res != QM_MCR_RESULT_OK)
1960 int qman_query_fq_frm_cnt(struct qman_fq *fq, u32 *frm_cnt)
1962 struct qm_mc_command *mcc;
1963 struct qm_mc_result *mcr;
1964 struct qman_portal *p = get_affine_portal();
1966 mcc = qm_mc_start(&p->p);
1967 mcc->queryfq.fqid = cpu_to_be32(fq->fqid);
1968 qm_mc_commit(&p->p, QM_MCC_VERB_QUERYFQ_NP);
1969 while (!(mcr = qm_mc_result(&p->p)))
1971 DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCR_VERB_QUERYFQ_NP);
1973 if (mcr->result == QM_MCR_RESULT_OK)
1974 *frm_cnt = be24_to_cpu(mcr->queryfq_np.frm_cnt);
1975 else if (mcr->result == QM_MCR_RESULT_ERR_FQID)
1977 else if (mcr->result != QM_MCR_RESULT_OK)
1982 int qman_query_wq(u8 query_dedicated, struct qm_mcr_querywq *wq)
1984 struct qm_mc_command *mcc;
1985 struct qm_mc_result *mcr;
1986 struct qman_portal *p = get_affine_portal();
1990 myverb = (query_dedicated) ? QM_MCR_VERB_QUERYWQ_DEDICATED :
1991 QM_MCR_VERB_QUERYWQ;
1992 mcc = qm_mc_start(&p->p);
1993 mcc->querywq.channel.id = cpu_to_be16(wq->channel.id);
1994 qm_mc_commit(&p->p, myverb);
1995 while (!(mcr = qm_mc_result(&p->p)))
1997 DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == myverb);
1999 if (res == QM_MCR_RESULT_OK) {
2002 wq->channel.id = be16_to_cpu(mcr->querywq.channel.id);
2003 array_len = ARRAY_SIZE(mcr->querywq.wq_len);
2004 for (i = 0; i < array_len; i++)
2005 wq->wq_len[i] = be32_to_cpu(mcr->querywq.wq_len[i]);
2007 if (res != QM_MCR_RESULT_OK) {
2008 pr_err("QUERYWQ failed: %s\n", mcr_result_str(res));
2014 int qman_testwrite_cgr(struct qman_cgr *cgr, u64 i_bcnt,
2015 struct qm_mcr_cgrtestwrite *result)
2017 struct qm_mc_command *mcc;
2018 struct qm_mc_result *mcr;
2019 struct qman_portal *p = get_affine_portal();
2023 mcc = qm_mc_start(&p->p);
2024 mcc->cgrtestwrite.cgid = cgr->cgrid;
2025 mcc->cgrtestwrite.i_bcnt_hi = (u8)(i_bcnt >> 32);
2026 mcc->cgrtestwrite.i_bcnt_lo = (u32)i_bcnt;
2027 qm_mc_commit(&p->p, QM_MCC_VERB_CGRTESTWRITE);
2028 while (!(mcr = qm_mc_result(&p->p)))
2030 DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCC_VERB_CGRTESTWRITE);
2032 if (res == QM_MCR_RESULT_OK)
2033 *result = mcr->cgrtestwrite;
2034 if (res != QM_MCR_RESULT_OK) {
2035 pr_err("CGR TEST WRITE failed: %s\n", mcr_result_str(res));
2041 int qman_query_cgr(struct qman_cgr *cgr, struct qm_mcr_querycgr *cgrd)
2043 struct qm_mc_command *mcc;
2044 struct qm_mc_result *mcr;
2045 struct qman_portal *p = get_affine_portal();
2049 mcc = qm_mc_start(&p->p);
2050 mcc->querycgr.cgid = cgr->cgrid;
2051 qm_mc_commit(&p->p, QM_MCC_VERB_QUERYCGR);
2052 while (!(mcr = qm_mc_result(&p->p)))
2054 DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCC_VERB_QUERYCGR);
2056 if (res == QM_MCR_RESULT_OK)
2057 *cgrd = mcr->querycgr;
2058 if (res != QM_MCR_RESULT_OK) {
2059 pr_err("QUERY_CGR failed: %s\n", mcr_result_str(res));
2062 cgrd->cgr.wr_parm_g.word =
2063 be32_to_cpu(cgrd->cgr.wr_parm_g.word);
2064 cgrd->cgr.wr_parm_y.word =
2065 be32_to_cpu(cgrd->cgr.wr_parm_y.word);
2066 cgrd->cgr.wr_parm_r.word =
2067 be32_to_cpu(cgrd->cgr.wr_parm_r.word);
2068 cgrd->cgr.cscn_targ = be32_to_cpu(cgrd->cgr.cscn_targ);
2069 cgrd->cgr.__cs_thres = be16_to_cpu(cgrd->cgr.__cs_thres);
2070 for (i = 0; i < ARRAY_SIZE(cgrd->cscn_targ_swp); i++)
2071 cgrd->cscn_targ_swp[i] =
2072 be32_to_cpu(cgrd->cscn_targ_swp[i]);
2076 int qman_query_congestion(struct qm_mcr_querycongestion *congestion)
2078 struct qm_mc_result *mcr;
2079 struct qman_portal *p = get_affine_portal();
2084 qm_mc_commit(&p->p, QM_MCC_VERB_QUERYCONGESTION);
2085 while (!(mcr = qm_mc_result(&p->p)))
2087 DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) ==
2088 QM_MCC_VERB_QUERYCONGESTION);
2090 if (res == QM_MCR_RESULT_OK)
2091 *congestion = mcr->querycongestion;
2092 if (res != QM_MCR_RESULT_OK) {
2093 pr_err("QUERY_CONGESTION failed: %s\n", mcr_result_str(res));
2096 for (i = 0; i < ARRAY_SIZE(congestion->state.state); i++)
2097 congestion->state.state[i] =
2098 be32_to_cpu(congestion->state.state[i]);
2102 int qman_set_vdq(struct qman_fq *fq, u16 num, uint32_t vdqcr_flags)
2104 struct qman_portal *p = get_affine_portal();
2108 vdqcr = vdqcr_flags;
2109 vdqcr |= QM_VDQCR_NUMFRAMES_SET(num);
2111 if ((fq->state != qman_fq_state_parked) &&
2112 (fq->state != qman_fq_state_retired)) {
2116 if (fq_isset(fq, QMAN_FQ_STATE_VDQCR)) {
2120 vdqcr = (vdqcr & ~QM_VDQCR_FQID_MASK) | fq->fqid;
2122 if (!p->vdqcr_owned) {
2124 if (fq_isset(fq, QMAN_FQ_STATE_VDQCR))
2126 fq_set(fq, QMAN_FQ_STATE_VDQCR);
2128 p->vdqcr_owned = fq;
2133 qm_dqrr_vdqcr_set(&p->p, vdqcr);
2139 int qman_volatile_dequeue(struct qman_fq *fq, u32 flags __maybe_unused,
2142 struct qman_portal *p;
2145 if ((fq->state != qman_fq_state_parked) &&
2146 (fq->state != qman_fq_state_retired))
2148 if (vdqcr & QM_VDQCR_FQID_MASK)
2150 if (fq_isset(fq, QMAN_FQ_STATE_VDQCR))
2152 vdqcr = (vdqcr & ~QM_VDQCR_FQID_MASK) | fq->fqid;
2154 p = get_affine_portal();
2156 if (!p->vdqcr_owned) {
2158 if (fq_isset(fq, QMAN_FQ_STATE_VDQCR))
2160 fq_set(fq, QMAN_FQ_STATE_VDQCR);
2162 p->vdqcr_owned = fq;
2170 qm_dqrr_vdqcr_set(&p->p, vdqcr);
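/* Illustrative sketch (not from the original source): volatile dequeue of a
 * parked or retired FQ, e.g. to drain it before OOS. The command completes
 * asynchronously: frames arrive through the normal DQRR callback and
 * QMAN_FQ_STATE_VDQCR is cleared by clear_vdqcr() once
 * QM_DQRR_STAT_DQCR_EXPIRED is seen. A zero return is assumed to mean the
 * VDQCR was accepted.
 *
 *	if (qman_volatile_dequeue(&fq, 0, QM_VDQCR_NUMFRAMES_SET(8)) == 0) {
 *		while (fq_isset(&fq, QMAN_FQ_STATE_VDQCR))
 *			qman_poll_dqrr(FSL_QMAN_POLL_LIMIT);
 *	}
 */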
2174 static noinline void update_eqcr_ci(struct qman_portal *p, u8 avail)
2177 qm_eqcr_cce_prefetch(&p->p);
2179 qm_eqcr_cce_update(&p->p);
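/* Two ways to refresh EQCR_CI: the prefetch above just pulls in the cacheline
 * holding the consumer index, while the update re-reads it so entries the
 * hardware has already consumed can be reused.
 */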
2182 int qman_eqcr_is_empty(void)
2184 struct qman_portal *p = get_affine_portal();
2187 update_eqcr_ci(p, 0);
2188 avail = qm_eqcr_get_fill(&p->p);
2189 return (avail == 0);
2192 void qman_set_dc_ern(qman_cb_dc_ern handler, int affine)
2195 struct qman_portal *p = get_affine_portal();
2197 p->cb_dc_ern = handler;
2199 cb_dc_ern = handler;
2202 static inline struct qm_eqcr_entry *try_p_eq_start(struct qman_portal *p,
2204 const struct qm_fd *fd,
2207 struct qm_eqcr_entry *eq;
2210 if (p->use_eqcr_ci_stashing) {
2212 * The stashing case is easy: only update if we need to, in
2213 * order to try to liberate ring entries.
2215 eq = qm_eqcr_start_stash(&p->p);
2218 * The non-stashing case is harder: we need to prefetch ahead of time.
2221 avail = qm_eqcr_get_avail(&p->p);
2223 update_eqcr_ci(p, avail);
2224 eq = qm_eqcr_start_no_stash(&p->p);
2230 if (flags & QMAN_ENQUEUE_FLAG_DCA)
2231 eq->dca = QM_EQCR_DCA_ENABLE |
2232 ((flags & QMAN_ENQUEUE_FLAG_DCA_PARK) ?
2233 QM_EQCR_DCA_PARK : 0) |
2234 ((flags >> 8) & QM_EQCR_DCA_IDXMASK);
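/* The DCA word above asks the hardware to also consume (acknowledge) the DQRR
 * entry whose ring index is taken from flags >> 8; the PARK variant leaves
 * that entry's FQ parked after consumption.
 */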
2235 eq->fqid = cpu_to_be32(fq->fqid);
2236 #ifdef CONFIG_FSL_QMAN_FQ_LOOKUP
2237 eq->tag = cpu_to_be32(fq->key);
2239 eq->tag = cpu_to_be32((u32)(uintptr_t)fq);
2242 cpu_to_hw_fd(&eq->fd);
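/* The frame descriptor copied into the ring entry is converted to hardware
 * (big-endian) layout in place; only the verb byte remains to be written by
 * the caller's commit.
 */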
2246 int qman_enqueue(struct qman_fq *fq, const struct qm_fd *fd, u32 flags)
2248 struct qman_portal *p = get_affine_portal();
2249 struct qm_eqcr_entry *eq;
2251 eq = try_p_eq_start(p, fq, fd, flags);
2254 /* Note: QM_EQCR_VERB_INTERRUPT == QMAN_ENQUEUE_FLAG_WAIT_SYNC */
2255 qm_eqcr_pvb_commit(&p->p, QM_EQCR_VERB_CMD_ENQUEUE |
2256 (flags & (QM_EQCR_VERB_COLOUR_MASK | QM_EQCR_VERB_INTERRUPT)));
2257 /* Factor the below out, it's used from qman_enqueue_orp() too */
2261 int qman_enqueue_multi(struct qman_fq *fq,
2262 const struct qm_fd *fd, u32 *flags,
2265 struct qman_portal *p = get_affine_portal();
2266 struct qm_portal *portal = &p->p;
2268 register struct qm_eqcr *eqcr = &portal->eqcr;
2269 struct qm_eqcr_entry *eq = eqcr->cursor, *prev_eq;
2271 u8 i = 0, diff, old_ci, sent = 0;
2273 /* Update the available entries if no entry is free */
2274 if (!eqcr->available) {
2276 eqcr->ci = qm_cl_in(EQCR_CI) & (QM_EQCR_SIZE - 1);
2277 diff = qm_cyc_diff(QM_EQCR_SIZE, old_ci, eqcr->ci);
2278 eqcr->available += diff;
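/* EQCR_CI is a ring position, so the number of entries freed since the last
 * snapshot is the cyclic distance from the cached CI to the fresh one.
 */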
2283 /* try to send as many frames as possible */
2284 while (eqcr->available && frames_to_send--) {
2285 eq->fqid = fq->fqid_le;
2286 eq->fd.opaque_addr = fd->opaque_addr;
2287 eq->fd.addr = cpu_to_be40(fd->addr);
2288 eq->fd.status = cpu_to_be32(fd->status);
2289 eq->fd.opaque = cpu_to_be32(fd->opaque);
2290 if (flags && (flags[i] & QMAN_ENQUEUE_FLAG_DCA)) {
2291 eq->dca = QM_EQCR_DCA_ENABLE |
2292 ((flags[i] >> 8) & QM_EQCR_DCA_IDXMASK);
2295 eq = (void *)((unsigned long)(eq + 1) &
2296 (~(unsigned long)(QM_EQCR_SIZE << 6)));
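/* Each EQCR entry is one 64-byte cacheline, so QM_EQCR_SIZE << 6 is the byte
 * size of the ring; clearing that address bit wraps the cursor back to the
 * start of the naturally aligned ring.
 */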
2303 /* In order for flushes to complete faster, all lines are recorded in
2307 for (i = 0; i < sent; i++) {
2308 eq->__dont_write_directly__verb =
2309 QM_EQCR_VERB_CMD_ENQUEUE | eqcr->vbit;
2311 eq = (void *)((unsigned long)(eq + 1) &
2312 (~(unsigned long)(QM_EQCR_SIZE << 6)));
2313 if (unlikely((prev_eq + 1) != eq))
2314 eqcr->vbit ^= QM_EQCR_VERB_VBIT;
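/* prev_eq + 1 != eq only when the cursor has just wrapped; the valid bit is
 * toggled once per lap so the hardware can tell fresh entries from stale ones
 * left over from the previous pass.
 */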
2317 /* We need to flush all the lines but without load/store operations
2321 for (i = 0; i < sent; i++) {
2323 eq = (void *)((unsigned long)(eq + 1) &
2324 (~(unsigned long)(QM_EQCR_SIZE << 6)));
2326 /* Update cursor for the next call */
2332 qman_enqueue_multi_fq(struct qman_fq *fq[], const struct qm_fd *fd,
2333 u32 *flags, int frames_to_send)
2335 struct qman_portal *p = get_affine_portal();
2336 struct qm_portal *portal = &p->p;
2338 register struct qm_eqcr *eqcr = &portal->eqcr;
2339 struct qm_eqcr_entry *eq = eqcr->cursor, *prev_eq;
2341 u8 i = 0, diff, old_ci, sent = 0;
2343 /* Update the available entries if no entry is free */
2344 if (!eqcr->available) {
2346 eqcr->ci = qm_cl_in(EQCR_CI) & (QM_EQCR_SIZE - 1);
2347 diff = qm_cyc_diff(QM_EQCR_SIZE, old_ci, eqcr->ci);
2348 eqcr->available += diff;
2353 /* try to send as many frames as possible */
2354 while (eqcr->available && frames_to_send--) {
2355 eq->fqid = fq[sent]->fqid_le;
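/* Unlike qman_enqueue_multi(), each frame here targets its own FQ, taken from
 * the fq[] array in submission order.
 */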
2356 eq->fd.opaque_addr = fd->opaque_addr;
2357 eq->fd.addr = cpu_to_be40(fd->addr);
2358 eq->fd.status = cpu_to_be32(fd->status);
2359 eq->fd.opaque = cpu_to_be32(fd->opaque);
2360 if (flags && (flags[i] & QMAN_ENQUEUE_FLAG_DCA)) {
2361 eq->dca = QM_EQCR_DCA_ENABLE |
2362 ((flags[i] >> 8) & QM_EQCR_DCA_IDXMASK);
2366 eq = (void *)((unsigned long)(eq + 1) &
2367 (~(unsigned long)(QM_EQCR_SIZE << 6)));
2374 /* In order for flushes to complete faster, all lines are recorded in
2378 for (i = 0; i < sent; i++) {
2379 eq->__dont_write_directly__verb =
2380 QM_EQCR_VERB_CMD_ENQUEUE | eqcr->vbit;
2382 eq = (void *)((unsigned long)(eq + 1) &
2383 (~(unsigned long)(QM_EQCR_SIZE << 6)));
2384 if (unlikely((prev_eq + 1) != eq))
2385 eqcr->vbit ^= QM_EQCR_VERB_VBIT;
2388 /* We need to flush all the lines but without load/store operations
2392 for (i = 0; i < sent; i++) {
2394 eq = (void *)((unsigned long)(eq + 1) &
2395 (~(unsigned long)(QM_EQCR_SIZE << 6)));
2397 /* Update cursor for the next call */
2402 int qman_enqueue_orp(struct qman_fq *fq, const struct qm_fd *fd, u32 flags,
2403 struct qman_fq *orp, u16 orp_seqnum)
2405 struct qman_portal *p = get_affine_portal();
2406 struct qm_eqcr_entry *eq;
2408 eq = try_p_eq_start(p, fq, fd, flags);
2411 /* Process ORP-specifics here */
2412 if (flags & QMAN_ENQUEUE_FLAG_NLIS)
2413 orp_seqnum |= QM_EQCR_SEQNUM_NLIS;
2415 orp_seqnum &= ~QM_EQCR_SEQNUM_NLIS;
2416 if (flags & QMAN_ENQUEUE_FLAG_NESN)
2417 orp_seqnum |= QM_EQCR_SEQNUM_NESN;
2419 /* No need to check for QMAN_ENQUEUE_FLAG_HOLE */
2420 orp_seqnum &= ~QM_EQCR_SEQNUM_NESN;
2422 eq->seqnum = cpu_to_be16(orp_seqnum);
2423 eq->orp = cpu_to_be32(orp->fqid);
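/* The ORP-enabled command carries the restoration sequence number and the
 * FQID of the FQ holding the ORP; with the HOLE/NESN flags the command only
 * advances the ORP window and does not enqueue the frame (see the verb
 * selection below).
 */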
2424 /* Note: QM_EQCR_VERB_INTERRUPT == QMAN_ENQUEUE_FLAG_WAIT_SYNC */
2425 qm_eqcr_pvb_commit(&p->p, QM_EQCR_VERB_ORP |
2426 ((flags & (QMAN_ENQUEUE_FLAG_HOLE | QMAN_ENQUEUE_FLAG_NESN)) ?
2427 0 : QM_EQCR_VERB_CMD_ENQUEUE) |
2428 (flags & (QM_EQCR_VERB_COLOUR_MASK | QM_EQCR_VERB_INTERRUPT)));
2433 int qman_modify_cgr(struct qman_cgr *cgr, u32 flags,
2434 struct qm_mcc_initcgr *opts)
2436 struct qm_mc_command *mcc;
2437 struct qm_mc_result *mcr;
2438 struct qman_portal *p = get_affine_portal();
2441 u8 verb = QM_MCC_VERB_MODIFYCGR;
2443 mcc = qm_mc_start(&p->p);
2445 mcc->initcgr = *opts;
2446 mcc->initcgr.we_mask = cpu_to_be16(mcc->initcgr.we_mask);
2447 mcc->initcgr.cgr.wr_parm_g.word =
2448 cpu_to_be32(mcc->initcgr.cgr.wr_parm_g.word);
2449 mcc->initcgr.cgr.wr_parm_y.word =
2450 cpu_to_be32(mcc->initcgr.cgr.wr_parm_y.word);
2451 mcc->initcgr.cgr.wr_parm_r.word =
2452 cpu_to_be32(mcc->initcgr.cgr.wr_parm_r.word);
2453 mcc->initcgr.cgr.cscn_targ = cpu_to_be32(mcc->initcgr.cgr.cscn_targ);
2454 mcc->initcgr.cgr.__cs_thres = cpu_to_be16(mcc->initcgr.cgr.__cs_thres);
2456 mcc->initcgr.cgid = cgr->cgrid;
2457 if (flags & QMAN_CGR_FLAG_USE_INIT)
2458 verb = QM_MCC_VERB_INITCGR;
2459 qm_mc_commit(&p->p, verb);
2460 while (!(mcr = qm_mc_result(&p->p)))
2463 DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == verb);
2465 return (res == QM_MCR_RESULT_OK) ? 0 : -EIO;
2468 #define TARG_MASK(n) (0x80000000 >> (n->config->channel - \
2469 QM_CHANNEL_SWPORTAL0))
2470 #define TARG_DCP_MASK(n) (0x80000000 >> (10 + n))
2471 #define PORTAL_IDX(n) (n->config->channel - QM_CHANNEL_SWPORTAL0)
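/* In the CSCN_TARG bitmask, software portal 0 occupies the MSB and DCP
 * portals start 10 bits further down; PORTAL_IDX() converts a portal's
 * channel to the software-portal number used by the rev >= 3.0
 * target-update-control encoding.
 */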
2473 int qman_create_cgr(struct qman_cgr *cgr, u32 flags,
2474 struct qm_mcc_initcgr *opts)
2476 struct qm_mcr_querycgr cgr_state;
2477 struct qm_mcc_initcgr local_opts;
2479 struct qman_portal *p;
2481 /* We have to check that the provided CGRID is within the limits of the
2482 * data-structures, for obvious reasons. However we'll let h/w take
2483 * care of determining whether it's within the limits of what exists on
2486 if (cgr->cgrid >= __CGR_NUM)
2489 p = get_affine_portal();
2491 memset(&local_opts, 0, sizeof(struct qm_mcc_initcgr));
2492 cgr->chan = p->config->channel;
2493 spin_lock(&p->cgr_lock);
2495 /* if no opts specified, just add it to the list */
2499 ret = qman_query_cgr(cgr, &cgr_state);
2504 if ((qman_ip_rev & 0xFF00) >= QMAN_REV30)
2505 local_opts.cgr.cscn_targ_upd_ctrl =
2506 QM_CGR_TARG_UDP_CTRL_WRITE_BIT | PORTAL_IDX(p);
2508 /* Overwrite TARG */
2509 local_opts.cgr.cscn_targ = cgr_state.cgr.cscn_targ |
2511 local_opts.we_mask |= QM_CGR_WE_CSCN_TARG;
2513 /* send init if flags indicate so */
2514 if (opts && (flags & QMAN_CGR_FLAG_USE_INIT))
2515 ret = qman_modify_cgr(cgr, QMAN_CGR_FLAG_USE_INIT, &local_opts);
2517 ret = qman_modify_cgr(cgr, 0, &local_opts);
2521 list_add(&cgr->node, &p->cgr_cbs);
2523 /* Determine if the newly added object requires its callback to be called */
2524 ret = qman_query_cgr(cgr, &cgr_state);
2526 /* we can't go back, so proceed and return success, but scream
2527 * and wail to the log file.
2529 pr_crit("CGR HW state partially modified\n");
2533 if (cgr->cb && cgr_state.cgr.cscn_en && qman_cgrs_get(&p->cgrs[1],
2537 spin_unlock(&p->cgr_lock);
2541 int qman_create_cgr_to_dcp(struct qman_cgr *cgr, u32 flags, u16 dcp_portal,
2542 struct qm_mcc_initcgr *opts)
2544 struct qm_mcc_initcgr local_opts;
2545 struct qm_mcr_querycgr cgr_state;
2548 if ((qman_ip_rev & 0xFF00) < QMAN_REV30) {
2549 pr_warn("QMan version doesn't support CSCN => DCP portal\n");
2552 /* We have to check that the provided CGRID is within the limits of the
2553 * data-structures, for obvious reasons. However we'll let h/w take
2554 * care of determining whether it's within the limits of what exists on
2557 if (cgr->cgrid >= __CGR_NUM)
2560 ret = qman_query_cgr(cgr, &cgr_state);
2564 memset(&local_opts, 0, sizeof(struct qm_mcc_initcgr));
2568 if ((qman_ip_rev & 0xFF00) >= QMAN_REV30)
2569 local_opts.cgr.cscn_targ_upd_ctrl =
2570 QM_CGR_TARG_UDP_CTRL_WRITE_BIT |
2571 QM_CGR_TARG_UDP_CTRL_DCP | dcp_portal;
2573 local_opts.cgr.cscn_targ = cgr_state.cgr.cscn_targ |
2574 TARG_DCP_MASK(dcp_portal);
2575 local_opts.we_mask |= QM_CGR_WE_CSCN_TARG;
2577 /* send init if flags indicate so */
2578 if (opts && (flags & QMAN_CGR_FLAG_USE_INIT))
2579 ret = qman_modify_cgr(cgr, QMAN_CGR_FLAG_USE_INIT,
2582 ret = qman_modify_cgr(cgr, 0, &local_opts);
2587 int qman_delete_cgr(struct qman_cgr *cgr)
2589 struct qm_mcr_querycgr cgr_state;
2590 struct qm_mcc_initcgr local_opts;
2593 struct qman_portal *p = get_affine_portal();
2595 if (cgr->chan != p->config->channel) {
2596 pr_crit("Attempting to delete cgr from different portal than"
2597 " it was create: create 0x%x, delete 0x%x\n",
2598 cgr->chan, p->config->channel);
2602 memset(&local_opts, 0, sizeof(struct qm_mcc_initcgr));
2603 spin_lock(&p->cgr_lock);
2604 list_del(&cgr->node);
2606 * If there are no other CGR objects for this CGRID in the list,
2607 * update CSCN_TARG accordingly
2609 list_for_each_entry(i, &p->cgr_cbs, node)
2610 if ((i->cgrid == cgr->cgrid) && i->cb)
2612 ret = qman_query_cgr(cgr, &cgr_state);
2614 /* add back to the list */
2615 list_add(&cgr->node, &p->cgr_cbs);
2618 /* Overwrite TARG */
2619 local_opts.we_mask = QM_CGR_WE_CSCN_TARG;
2620 if ((qman_ip_rev & 0xFF00) >= QMAN_REV30)
2621 local_opts.cgr.cscn_targ_upd_ctrl = PORTAL_IDX(p);
2623 local_opts.cgr.cscn_targ = cgr_state.cgr.cscn_targ &
2625 ret = qman_modify_cgr(cgr, 0, &local_opts);
2627 /* add back to the list */
2628 list_add(&cgr->node, &p->cgr_cbs);
2630 spin_unlock(&p->cgr_lock);
2635 int qman_shutdown_fq(u32 fqid)
2637 struct qman_portal *p;
2638 struct qm_portal *low_p;
2639 struct qm_mc_command *mcc;
2640 struct qm_mc_result *mcr;
2642 int orl_empty, fq_empty, drain = 0;
2647 p = get_affine_portal();
2650 /* Determine the state of the FQID */
2651 mcc = qm_mc_start(low_p);
2652 mcc->queryfq_np.fqid = cpu_to_be32(fqid);
2653 qm_mc_commit(low_p, QM_MCC_VERB_QUERYFQ_NP);
2654 while (!(mcr = qm_mc_result(low_p)))
2656 DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCR_VERB_QUERYFQ_NP);
2657 state = mcr->queryfq_np.state & QM_MCR_NP_STATE_MASK;
2658 if (state == QM_MCR_NP_STATE_OOS)
2659 return 0; /* Already OOS, no need to do any more checks */
2661 /* Query which channel the FQ is using */
2662 mcc = qm_mc_start(low_p);
2663 mcc->queryfq.fqid = cpu_to_be32(fqid);
2664 qm_mc_commit(low_p, QM_MCC_VERB_QUERYFQ);
2665 while (!(mcr = qm_mc_result(low_p)))
2667 DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCR_VERB_QUERYFQ);
2669 /* Need to store these since the MCR gets reused */
2670 dest_wq = be16_to_cpu(mcr->queryfq.fqd.dest_wq);
2671 channel = dest_wq >> 3; /* channel occupies the upper bits of dest_wq */
2672 wq = dest_wq & 0x7; /* work queue is the low 3 bits */
2675 case QM_MCR_NP_STATE_TEN_SCHED:
2676 case QM_MCR_NP_STATE_TRU_SCHED:
2677 case QM_MCR_NP_STATE_ACTIVE:
2678 case QM_MCR_NP_STATE_PARKED:
2680 mcc = qm_mc_start(low_p);
2681 mcc->alterfq.fqid = cpu_to_be32(fqid);
2682 qm_mc_commit(low_p, QM_MCC_VERB_ALTER_RETIRE);
2683 while (!(mcr = qm_mc_result(low_p)))
2685 DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) ==
2686 QM_MCR_VERB_ALTER_RETIRE);
2687 result = mcr->result; /* Make a copy as we reuse MCR below */
2689 if (result == QM_MCR_RESULT_PENDING) {
2690 /* Need to wait for the FQRN in the message ring, which
2691 * will only occur once the FQ has been drained. In
2692 * order for the FQ to drain, the portal needs to be set
2693 * to dequeue from the channel the FQ is scheduled on
2695 const struct qm_mr_entry *msg;
2696 const struct qm_dqrr_entry *dqrr = NULL;
2698 __maybe_unused u16 dequeue_wq = 0;
2700 /* Flag that we need to drain FQ */
2703 if (channel >= qm_channel_pool1 &&
2704 channel < (u16)(qm_channel_pool1 + 15)) {
2705 /* Pool channel, enable the bit in the portal */
2706 dequeue_wq = (channel -
2707 qm_channel_pool1 + 1) << 4 | wq;
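/* dequeue_wq packs the 1-based pool-channel number and the work queue; it is
 * kept only for reference (note the __maybe_unused above).
 */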
2708 } else if (channel < qm_channel_pool1) {
2709 /* Dedicated channel */
2712 pr_info("Cannot recover FQ 0x%x,"
2713 " it is scheduled on channel 0x%x",
2717 /* Set the sdqcr to drain this channel */
2718 if (channel < qm_channel_pool1)
2719 qm_dqrr_sdqcr_set(low_p,
2720 QM_SDQCR_TYPE_ACTIVE |
2721 QM_SDQCR_CHANNELS_DEDICATED);
2723 qm_dqrr_sdqcr_set(low_p,
2724 QM_SDQCR_TYPE_ACTIVE |
2725 QM_SDQCR_CHANNELS_POOL_CONV
2727 while (!found_fqrn) {
2728 /* Keep draining DQRR while checking the MR */
2729 qm_dqrr_pvb_update(low_p);
2730 dqrr = qm_dqrr_current(low_p);
2732 qm_dqrr_cdc_consume_1ptr(
2734 qm_dqrr_pvb_update(low_p);
2735 qm_dqrr_next(low_p);
2736 dqrr = qm_dqrr_current(low_p);
2738 /* Process message ring too */
2739 qm_mr_pvb_update(low_p);
2740 msg = qm_mr_current(low_p);
2742 if ((msg->ern.verb &
2743 QM_MR_VERB_TYPE_MASK)
2747 qm_mr_cci_consume_to_current(low_p);
2748 qm_mr_pvb_update(low_p);
2749 msg = qm_mr_current(low_p);
2754 if (result != QM_MCR_RESULT_OK &&
2755 result != QM_MCR_RESULT_PENDING) {
2757 pr_err("qman_retire_fq failed on FQ 0x%x,"
2758 " result=0x%x\n", fqid, result);
2761 if (!(mcr->alterfq.fqs & QM_MCR_FQS_ORLPRESENT)) {
2762 /* ORL had no entries, no need to wait until the
2767 /* Retirement succeeded, check to see if FQ needs
2770 if (drain || mcr->alterfq.fqs & QM_MCR_FQS_NOTEMPTY) {
2771 /* FQ is Not Empty, drain using volatile DQ commands */
2774 const struct qm_dqrr_entry *dqrr = NULL;
2775 u32 vdqcr = fqid | QM_VDQCR_NUMFRAMES_SET(3);
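/* Drain the FQ with volatile dequeue commands, up to three frames per
 * command, until the DQRR reports the FQ empty.
 */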
2777 qm_dqrr_vdqcr_set(low_p, vdqcr);
2779 /* Wait for a dequeue to occur */
2780 while (dqrr == NULL) {
2781 qm_dqrr_pvb_update(low_p);
2782 dqrr = qm_dqrr_current(low_p);
2786 /* Process the dequeues, making sure to
2787 * empty the ring completely.
2790 if (dqrr->fqid == fqid &&
2791 dqrr->stat & QM_DQRR_STAT_FQ_EMPTY)
2793 qm_dqrr_cdc_consume_1ptr(low_p,
2795 qm_dqrr_pvb_update(low_p);
2796 qm_dqrr_next(low_p);
2797 dqrr = qm_dqrr_current(low_p);
2799 } while (fq_empty == 0);
2801 qm_dqrr_sdqcr_set(low_p, 0);
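/* Clear any static dequeue command that may have been set up above to drain
 * the FQ's channel.
 */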
2803 /* Wait for the ORL to have been completely drained */
2804 while (orl_empty == 0) {
2805 const struct qm_mr_entry *msg;
2807 qm_mr_pvb_update(low_p);
2808 msg = qm_mr_current(low_p);
2810 if ((msg->ern.verb & QM_MR_VERB_TYPE_MASK) ==
2814 qm_mr_cci_consume_to_current(low_p);
2815 qm_mr_pvb_update(low_p);
2816 msg = qm_mr_current(low_p);
2820 mcc = qm_mc_start(low_p);
2821 mcc->alterfq.fqid = cpu_to_be32(fqid);
2822 qm_mc_commit(low_p, QM_MCC_VERB_ALTER_OOS);
2823 while (!(mcr = qm_mc_result(low_p)))
2825 DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) ==
2826 QM_MCR_VERB_ALTER_OOS);
2827 if (mcr->result != QM_MCR_RESULT_OK) {
2829 "OOS after drain Failed on FQID 0x%x, result 0x%x\n",
2835 case QM_MCR_NP_STATE_RETIRED:
2836 /* Send OOS Command */
2837 mcc = qm_mc_start(low_p);
2838 mcc->alterfq.fqid = cpu_to_be32(fqid);
2839 qm_mc_commit(low_p, QM_MCC_VERB_ALTER_OOS);
2840 while (!(mcr = qm_mc_result(low_p)))
2842 DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) ==
2843 QM_MCR_VERB_ALTER_OOS);
2845 pr_err("OOS Failed on FQID 0x%x\n", fqid);