1 /* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0)
3 * Copyright 2008-2016 Freescale Semiconductor Inc.
9 #include <rte_branch_prediction.h>
10 #include <rte_dpaa_bus.h>
12 /* Compilation constants */
13 #define DQRR_MAXFILL 15
14 #define EQCR_ITHRESH 4 /* if EQCR congests, interrupt threshold */
15 #define IRQNAME "QMan portal %d"
16 #define MAX_IRQNAME 16 /* big enough for "QMan portal %d" */
17 /* maximum number of DQRR entries to process in qman_poll() */
18 #define FSL_QMAN_POLL_LIMIT 8
20 /* Lock/unlock frame queues, subject to the "LOCKED" flag. This is about
21 * inter-processor locking only. Note, FQLOCK() is always called either under a
22 * local_irq_save() or from interrupt context - hence there's no need for irq
23 * protection (and indeed, attempting to nest irq-protection doesn't work, as
24 * the "irq en/disable" machinery isn't recursive...).
28 struct qman_fq *__fq478 = (fq); \
29 if (fq_isset(__fq478, QMAN_FQ_FLAG_LOCKED)) \
30 spin_lock(&__fq478->fqlock); \
32 #define FQUNLOCK(fq) \
34 struct qman_fq *__fq478 = (fq); \
35 if (fq_isset(__fq478, QMAN_FQ_FLAG_LOCKED)) \
36 spin_unlock(&__fq478->fqlock); \
39 static inline void fq_set(struct qman_fq *fq, u32 mask)
41 dpaa_set_bits(mask, &fq->flags);
44 static inline void fq_clear(struct qman_fq *fq, u32 mask)
46 dpaa_clear_bits(mask, &fq->flags);
49 static inline int fq_isset(struct qman_fq *fq, u32 mask)
51 return fq->flags & mask;
54 static inline int fq_isclear(struct qman_fq *fq, u32 mask)
56 return !(fq->flags & mask);
61 /* PORTAL_BITS_*** - dynamic, strictly internal */
63 /* interrupt sources processed by portal_isr(), configurable */
64 unsigned long irq_sources;
65 u32 use_eqcr_ci_stashing;
66 u32 slowpoll; /* only used when interrupts are off */
67 /* only 1 volatile dequeue at a time */
68 struct qman_fq *vdqcr_owned;
71 /* A portal-specific handler for DCP ERNs. If this is NULL, the global
72 * handler is called instead.
74 qman_cb_dc_ern cb_dc_ern;
75 /* When the cpu-affine portal is activated, this is non-NULL */
76 const struct qm_portal_config *config;
77 struct dpa_rbtree retire_table;
78 char irqname[MAX_IRQNAME];
79 /* 2-element array. cgrs[0] is mask, cgrs[1] is snapshot. */
80 struct qman_cgrs *cgrs;
81 /* linked-list of CSCN handlers. */
82 struct list_head cgr_cbs;
85 /* track if memory was allocated by the driver */
86 #if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
87 /* Keep a shadow copy of the DQRR on LE systems as the SW needs to
88 * do byte swaps of the DQRR read-only memory. The first entry must be
89 * aligned to 2 ** 10 so that DQRR index calculations can be based on the
90 * shadow copy address (6 bits for the address shift + 4 bits for the DQRR size).
92 struct qm_dqrr_entry shadow_dqrr[QM_DQRR_SIZE]
93 __attribute__((aligned(1024)));
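/* Worked example of the alignment requirement above (illustrative only):
 * each DQRR entry is 64 bytes, so entry i of the 16-entry ring sits at
 * base + (i << 6). With base aligned to 2 ** 10 the low ten bits of base
 * are zero, so the ring index can be recovered from an entry address
 * alone, e.g. assuming an index helper of the form:
 *
 *   static inline u8 shadow_idx(const struct qm_dqrr_entry *e)
 *   {
 *           return ((uintptr_t)e >> 6) & (QM_DQRR_SIZE - 1);
 *   }
 *
 * The same calculation is applied to shadow-entry pointers when they are
 * handed back for consumption, which is why shadow_dqrr[] must share the
 * hardware ring's 1 KiB alignment.
 */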
97 /* Global handler for DCP ERNs. Used when the portal receiving the message does
98 * not have a portal-specific handler.
100 static qman_cb_dc_ern cb_dc_ern;
102 static cpumask_t affine_mask;
103 static DEFINE_SPINLOCK(affine_mask_lock);
104 static u16 affine_channels[NR_CPUS];
105 static RTE_DEFINE_PER_LCORE(struct qman_portal, qman_affine_portal);
107 static inline struct qman_portal *get_affine_portal(void)
109 return &RTE_PER_LCORE(qman_affine_portal);
112 /* This gives a FQID->FQ lookup to cover the fact that we can't directly demux
113 * retirement notifications (the fact they are sometimes h/w-consumed means that
114 * contextB isn't always a s/w demux - and as we can't know which case it is
115 * when looking at the notification, we have to use the slow lookup for all of
116 * them). NB, it's possible to have multiple FQ objects refer to the same FQID
117 * (though at most one of them should be the consumer), so this table isn't for
118 * all FQs - FQs are added when retirement commands are issued, and removed when
119 * they complete, which also massively reduces the size of this table.
121 IMPLEMENT_DPAA_RBTREE(fqtree, struct qman_fq, node, fqid);
123 * This is what everything can wait on, even if it migrates to a different cpu
124 * to the one whose affine portal it is waiting on.
126 static DECLARE_WAIT_QUEUE_HEAD(affine_queue);
128 static inline int table_push_fq(struct qman_portal *p, struct qman_fq *fq)
130 int ret = fqtree_push(&p->retire_table, fq);
133 pr_err("ERROR: double FQ-retirement %d\n", fq->fqid);
137 static inline void table_del_fq(struct qman_portal *p, struct qman_fq *fq)
139 fqtree_del(&p->retire_table, fq);
142 static inline struct qman_fq *table_find_fq(struct qman_portal *p, u32 fqid)
144 return fqtree_find(&p->retire_table, fqid);
147 #ifdef CONFIG_FSL_QMAN_FQ_LOOKUP
148 static void **qman_fq_lookup_table;
149 static size_t qman_fq_lookup_table_size;
151 int qman_setup_fq_lookup_table(size_t num_entries)
154 /* Allocate 1 more entry since the first entry is not used */
155 qman_fq_lookup_table = vmalloc((num_entries * sizeof(void *)));
156 if (!qman_fq_lookup_table) {
157 pr_err("QMan: Could not allocate fq lookup table\n");
160 memset(qman_fq_lookup_table, 0, num_entries * sizeof(void *));
161 qman_fq_lookup_table_size = num_entries;
162 pr_debug("QMan: Allocated lookup table at %p, entry count %lu\n",
163 qman_fq_lookup_table,
164 (unsigned long)qman_fq_lookup_table_size);
168 /* global structure that maintains fq object mapping */
169 static DEFINE_SPINLOCK(fq_hash_table_lock);
171 static int find_empty_fq_table_entry(u32 *entry, struct qman_fq *fq)
175 spin_lock(&fq_hash_table_lock);
176 /* Can't use index zero because this has special meaning
177 * in the context_b field.
179 for (i = 1; i < qman_fq_lookup_table_size; i++) {
180 if (qman_fq_lookup_table[i] == NULL) {
182 qman_fq_lookup_table[i] = fq;
183 spin_unlock(&fq_hash_table_lock);
187 spin_unlock(&fq_hash_table_lock);
191 static void clear_fq_table_entry(u32 entry)
193 spin_lock(&fq_hash_table_lock);
194 DPAA_BUG_ON(entry >= qman_fq_lookup_table_size);
195 qman_fq_lookup_table[entry] = NULL;
196 spin_unlock(&fq_hash_table_lock);
199 static inline struct qman_fq *get_fq_table_entry(u32 entry)
201 DPAA_BUG_ON(entry >= qman_fq_lookup_table_size);
202 return qman_fq_lookup_table[entry];
206 static inline void cpu_to_hw_fqd(struct qm_fqd *fqd)
208 /* Byteswap the FQD to HW format */
209 fqd->fq_ctrl = cpu_to_be16(fqd->fq_ctrl);
210 fqd->dest_wq = cpu_to_be16(fqd->dest_wq);
211 fqd->ics_cred = cpu_to_be16(fqd->ics_cred);
212 fqd->context_b = cpu_to_be32(fqd->context_b);
213 fqd->context_a.opaque = cpu_to_be64(fqd->context_a.opaque);
214 fqd->opaque_td = cpu_to_be16(fqd->opaque_td);
217 static inline void hw_fqd_to_cpu(struct qm_fqd *fqd)
219 /* Byteswap the FQD to CPU format */
220 fqd->fq_ctrl = be16_to_cpu(fqd->fq_ctrl);
221 fqd->dest_wq = be16_to_cpu(fqd->dest_wq);
222 fqd->ics_cred = be16_to_cpu(fqd->ics_cred);
223 fqd->context_b = be32_to_cpu(fqd->context_b);
224 fqd->context_a.opaque = be64_to_cpu(fqd->context_a.opaque);
227 static inline void cpu_to_hw_fd(struct qm_fd *fd)
229 fd->addr = cpu_to_be40(fd->addr);
230 fd->status = cpu_to_be32(fd->status);
231 fd->opaque = cpu_to_be32(fd->opaque);
234 static inline void hw_fd_to_cpu(struct qm_fd *fd)
236 fd->addr = be40_to_cpu(fd->addr);
237 fd->status = be32_to_cpu(fd->status);
238 fd->opaque = be32_to_cpu(fd->opaque);
241 /* In the case that slow- and fast-path handling are both done by qman_poll()
242 * (ie. because there is no interrupt handling), we ought to balance how often
243 * we do the fast-path poll versus the slow-path poll. We'll use two decrementer
244 * sources, so we call the fast poll 'n' times before calling the slow poll
245 * once. The idle decrementer constant is used when the last slow-poll detected
246 * no work to do, and the busy decrementer constant when the last slow-poll had work to do.
249 #define SLOW_POLL_IDLE 1000
250 #define SLOW_POLL_BUSY 10
251 static u32 __poll_portal_slow(struct qman_portal *p, u32 is);
252 static inline unsigned int __poll_portal_fast(struct qman_portal *p,
253 unsigned int poll_limit);
255 /* Portal interrupt handler */
256 static irqreturn_t portal_isr(__always_unused int irq, void *ptr)
258 struct qman_portal *p = ptr;
260 * The CSCI/CCSCI source is cleared inside __poll_portal_slow(), because
261 * it could race against a Query Congestion State command also given
262 * as part of the handling of this interrupt source. We mustn't
263 * clear it a second time in this top-level function.
265 u32 clear = QM_DQAVAIL_MASK | (p->irq_sources &
266 ~(QM_PIRQ_CSCI | QM_PIRQ_CCSCI));
267 u32 is = qm_isr_status_read(&p->p) & p->irq_sources;
268 /* DQRR-handling if it's interrupt-driven */
269 if (is & QM_PIRQ_DQRI)
270 __poll_portal_fast(p, FSL_QMAN_POLL_LIMIT);
271 /* Handling of anything else that's interrupt-driven */
272 clear |= __poll_portal_slow(p, is);
273 qm_isr_status_clear(&p->p, clear);
277 /* This inner version is used privately by qman_create_affine_portal(), as well
278 * as by the exported qman_stop_dequeues().
280 static inline void qman_stop_dequeues_ex(struct qman_portal *p)
282 if (!(p->dqrr_disable_ref++))
283 qm_dqrr_set_maxfill(&p->p, 0);
286 static int drain_mr_fqrni(struct qm_portal *p)
288 const struct qm_mr_entry *msg;
290 msg = qm_mr_current(p);
293 * if MR was full and h/w had other FQRNI entries to produce, we
294 * need to allow it time to produce those entries once the
295 * existing entries are consumed. A worst-case situation
296 * (fully-loaded system) means h/w sequencers may have to do 3-4
297 * other things before servicing the portal's MR pump, each of
298 * which (if slow) may take ~50 qman cycles (which is ~200
299 * processor cycles). So rounding up and then multiplying this
300 * worst-case estimate by a factor of 10, just to be
301 * ultra-paranoid, goes as high as 10,000 cycles. NB, we consume
302 * one entry at a time, so h/w has an opportunity to produce new
303 * entries well before the ring has been fully consumed, so
304 * we're being *really* paranoid here.
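 * As a rough worked estimate of that bound: 3-4 competing operations at
 * ~200 processor cycles each is roughly 600-800 cycles; rounding up to
 * 1,000 and applying the x10 paranoia factor gives the 10,000-cycle
 * timeout used in the loop below.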
306 u64 now, then = mfatb();
310 } while ((then + 10000) > now);
311 msg = qm_mr_current(p);
315 if ((msg->verb & QM_MR_VERB_TYPE_MASK) != QM_MR_VERB_FQRNI) {
316 /* We aren't draining anything but FQRNIs */
317 pr_err("Found verb 0x%x in MR\n", msg->verb);
321 qm_mr_cci_consume(p, 1);
325 static inline int qm_eqcr_init(struct qm_portal *portal,
326 enum qm_eqcr_pmode pmode,
327 unsigned int eq_stash_thresh,
330 /* This use of 'register', as well as all other occurrences, is because
331 * it has been observed to generate much faster code with gcc than is
332 * otherwise the case.
334 register struct qm_eqcr *eqcr = &portal->eqcr;
338 eqcr->ring = portal->addr.ce + QM_CL_EQCR;
339 eqcr->ci = qm_in(EQCR_CI_CINH) & (QM_EQCR_SIZE - 1);
340 qm_cl_invalidate(EQCR_CI);
341 pi = qm_in(EQCR_PI_CINH) & (QM_EQCR_SIZE - 1);
342 eqcr->cursor = eqcr->ring + pi;
343 eqcr->vbit = (qm_in(EQCR_PI_CINH) & QM_EQCR_SIZE) ?
344 QM_EQCR_VERB_VBIT : 0;
345 eqcr->available = QM_EQCR_SIZE - 1 -
346 qm_cyc_diff(QM_EQCR_SIZE, eqcr->ci, pi);
347 eqcr->ithresh = qm_in(EQCR_ITR);
348 #ifdef RTE_LIBRTE_DPAA_HWDEBUG
352 cfg = (qm_in(CFG) & 0x00ffffff) |
353 (eq_stash_thresh << 28) | /* QCSP_CFG: EST */
354 (eq_stash_prio << 26) | /* QCSP_CFG: EP */
355 ((pmode & 0x3) << 24); /* QCSP_CFG::EPM */
360 static inline void qm_eqcr_finish(struct qm_portal *portal)
362 register struct qm_eqcr *eqcr = &portal->eqcr;
367 * Disable EQCI stashing because the QMan only
368 * presents the value it previously stashed to
369 * maintain coherency. Setting the stash threshold
370 * to 1 then 0 ensures that QMan has resynchronized
371 * its internal copy so that the portal is clean
372 * when it is reinitialized in the future
374 cfg = (qm_in(CFG) & 0x0fffffff) |
375 (1 << 28); /* QCSP_CFG: EST */
377 cfg &= 0x0fffffff; /* stash threshold = 0 */
380 pi = qm_in(EQCR_PI_CINH) & (QM_EQCR_SIZE - 1);
381 ci = qm_in(EQCR_CI_CINH) & (QM_EQCR_SIZE - 1);
383 /* Refresh EQCR CI cache value */
384 qm_cl_invalidate(EQCR_CI);
385 eqcr->ci = qm_cl_in(EQCR_CI) & (QM_EQCR_SIZE - 1);
387 #ifdef RTE_LIBRTE_DPAA_HWDEBUG
388 DPAA_ASSERT(!eqcr->busy);
390 if (pi != EQCR_PTR2IDX(eqcr->cursor))
391 pr_crit("losing uncommitted EQCR entries\n");
393 pr_crit("missing existing EQCR completions\n");
394 if (eqcr->ci != EQCR_PTR2IDX(eqcr->cursor))
395 pr_crit("EQCR destroyed unquiesced\n");
398 static inline int qm_dqrr_init(struct qm_portal *portal,
399 __maybe_unused const struct qm_portal_config *config,
400 enum qm_dqrr_dmode dmode,
401 __maybe_unused enum qm_dqrr_pmode pmode,
402 enum qm_dqrr_cmode cmode, u8 max_fill)
404 register struct qm_dqrr *dqrr = &portal->dqrr;
407 /* Make sure the DQRR will be idle when we enable */
408 qm_out(DQRR_SDQCR, 0);
409 qm_out(DQRR_VDQCR, 0);
410 qm_out(DQRR_PDQCR, 0);
411 dqrr->ring = portal->addr.ce + QM_CL_DQRR;
412 dqrr->pi = qm_in(DQRR_PI_CINH) & (QM_DQRR_SIZE - 1);
413 dqrr->ci = qm_in(DQRR_CI_CINH) & (QM_DQRR_SIZE - 1);
414 dqrr->cursor = dqrr->ring + dqrr->ci;
415 dqrr->fill = qm_cyc_diff(QM_DQRR_SIZE, dqrr->ci, dqrr->pi);
416 dqrr->vbit = (qm_in(DQRR_PI_CINH) & QM_DQRR_SIZE) ?
417 QM_DQRR_VERB_VBIT : 0;
418 dqrr->ithresh = qm_in(DQRR_ITR);
419 #ifdef RTE_LIBRTE_DPAA_HWDEBUG
424 /* Invalidate every ring entry before beginning */
425 for (cfg = 0; cfg < QM_DQRR_SIZE; cfg++)
426 dccivac(qm_cl(dqrr->ring, cfg));
427 cfg = (qm_in(CFG) & 0xff000f00) |
428 ((max_fill & (QM_DQRR_SIZE - 1)) << 20) | /* DQRR_MF */
429 ((dmode & 1) << 18) | /* DP */
430 ((cmode & 3) << 16) | /* DCM */
432 (0 ? 0x40 : 0) | /* Ignore RP */
433 (0 ? 0x10 : 0); /* Ignore SP */
435 qm_dqrr_set_maxfill(portal, max_fill);
439 static inline void qm_dqrr_finish(struct qm_portal *portal)
441 __maybe_unused register struct qm_dqrr *dqrr = &portal->dqrr;
442 #ifdef RTE_LIBRTE_DPAA_HWDEBUG
443 if ((dqrr->cmode != qm_dqrr_cdc) &&
444 (dqrr->ci != DQRR_PTR2IDX(dqrr->cursor)))
445 pr_crit("Ignoring completed DQRR entries\n");
449 static inline int qm_mr_init(struct qm_portal *portal,
450 __maybe_unused enum qm_mr_pmode pmode,
451 enum qm_mr_cmode cmode)
453 register struct qm_mr *mr = &portal->mr;
456 mr->ring = portal->addr.ce + QM_CL_MR;
457 mr->pi = qm_in(MR_PI_CINH) & (QM_MR_SIZE - 1);
458 mr->ci = qm_in(MR_CI_CINH) & (QM_MR_SIZE - 1);
459 mr->cursor = mr->ring + mr->ci;
460 mr->fill = qm_cyc_diff(QM_MR_SIZE, mr->ci, mr->pi);
461 mr->vbit = (qm_in(MR_PI_CINH) & QM_MR_SIZE) ? QM_MR_VERB_VBIT : 0;
462 mr->ithresh = qm_in(MR_ITR);
463 #ifdef RTE_LIBRTE_DPAA_HWDEBUG
467 cfg = (qm_in(CFG) & 0xfffff0ff) |
468 ((cmode & 1) << 8); /* QCSP_CFG:MM */
473 static inline void qm_mr_pvb_update(struct qm_portal *portal)
475 register struct qm_mr *mr = &portal->mr;
476 const struct qm_mr_entry *res = qm_cl(mr->ring, mr->pi);
478 #ifdef RTE_LIBRTE_DPAA_HWDEBUG
479 DPAA_ASSERT(mr->pmode == qm_mr_pvb);
481 /* when accessing 'verb', use __raw_readb() to ensure that compiler
482 * inlining doesn't try to optimise out "excess reads".
484 if ((__raw_readb(&res->verb) & QM_MR_VERB_VBIT) == mr->vbit) {
485 mr->pi = (mr->pi + 1) & (QM_MR_SIZE - 1);
487 mr->vbit ^= QM_MR_VERB_VBIT;
495 struct qman_portal *qman_create_portal(
496 struct qman_portal *portal,
497 const struct qm_portal_config *c,
498 const struct qman_cgrs *cgrs)
507 if (dpaa_svr_family == SVR_LS1043A_FAMILY)
508 portal->use_eqcr_ci_stashing = 3;
509 else
510 portal->use_eqcr_ci_stashing =
511 ((qman_ip_rev >= QMAN_REV30) ? 1 : 0);
514 * prep the low-level portal struct with the mapped addresses from the
515 * config, everything that follows depends on it and "config" is more
518 p->addr.ce = c->addr_virt[DPAA_PORTAL_CE];
519 p->addr.ci = c->addr_virt[DPAA_PORTAL_CI];
521 * If CI-stashing is used, the current defaults use a threshold of 3,
522 * and stash with higher-than-DQRR priority.
524 if (qm_eqcr_init(p, qm_eqcr_pvb,
525 portal->use_eqcr_ci_stashing, 1)) {
526 pr_err("Qman EQCR initialisation failed\n");
529 if (qm_dqrr_init(p, c, qm_dqrr_dpush, qm_dqrr_pvb,
530 qm_dqrr_cdc, DQRR_MAXFILL)) {
531 pr_err("Qman DQRR initialisation failed\n");
534 if (qm_mr_init(p, qm_mr_pvb, qm_mr_cci)) {
535 pr_err("Qman MR initialisation failed\n");
539 pr_err("Qman MC initialisation failed\n");
543 /* static interrupt-gating controls */
544 qm_dqrr_set_ithresh(p, 0);
545 qm_mr_set_ithresh(p, 0);
546 qm_isr_set_iperiod(p, 0);
547 portal->cgrs = kmalloc(2 * sizeof(*cgrs), GFP_KERNEL);
550 /* initial snapshot is no-depletion */
551 qman_cgrs_init(&portal->cgrs[1]);
553 portal->cgrs[0] = *cgrs;
555 /* if the given mask is NULL, assume all CGRs can be seen */
556 qman_cgrs_fill(&portal->cgrs[0]);
557 INIT_LIST_HEAD(&portal->cgr_cbs);
558 spin_lock_init(&portal->cgr_lock);
560 portal->slowpoll = 0;
561 portal->sdqcr = QM_SDQCR_SOURCE_CHANNELS | QM_SDQCR_COUNT_UPTO3 |
562 QM_SDQCR_DEDICATED_PRECEDENCE | QM_SDQCR_TYPE_PRIO_QOS |
563 QM_SDQCR_TOKEN_SET(0xab) | QM_SDQCR_CHANNELS_DEDICATED;
564 portal->dqrr_disable_ref = 0;
565 portal->cb_dc_ern = NULL;
566 sprintf(buf, "qportal-%d", c->channel);
567 dpa_rbtree_init(&portal->retire_table);
569 qm_isr_disable_write(p, isdr);
570 portal->irq_sources = 0;
571 qm_isr_enable_write(p, portal->irq_sources);
572 qm_isr_status_clear(p, 0xffffffff);
573 snprintf(portal->irqname, MAX_IRQNAME, IRQNAME, c->cpu);
574 if (request_irq(c->irq, portal_isr, 0, portal->irqname,
576 pr_err("request_irq() failed\n");
580 /* Need EQCR to be empty before continuing */
581 isdr &= ~QM_PIRQ_EQCI;
582 qm_isr_disable_write(p, isdr);
583 ret = qm_eqcr_get_fill(p);
585 pr_err("Qman EQCR unclean\n");
586 goto fail_eqcr_empty;
588 isdr &= ~(QM_PIRQ_DQRI | QM_PIRQ_MRI);
589 qm_isr_disable_write(p, isdr);
590 if (qm_dqrr_current(p)) {
591 pr_err("Qman DQRR unclean\n");
592 qm_dqrr_cdc_consume_n(p, 0xffff);
594 if (qm_mr_current(p) && drain_mr_fqrni(p)) {
595 /* special handling, drain just in case it's a few FQRNIs */
596 if (drain_mr_fqrni(p))
597 goto fail_dqrr_mr_empty;
601 qm_isr_disable_write(p, 0);
603 /* Write a sane SDQCR */
604 qm_dqrr_sdqcr_set(p, portal->sdqcr);
608 free_irq(c->irq, portal);
611 spin_lock_destroy(&portal->cgr_lock);
624 struct qman_portal *qman_create_affine_portal(const struct qm_portal_config *c,
625 const struct qman_cgrs *cgrs)
627 struct qman_portal *res;
628 struct qman_portal *portal = get_affine_portal();
629 /* A criterion for calling this function (from qman_driver.c) is that
630 * we're already affine to the cpu and won't schedule onto another cpu.
633 res = qman_create_portal(portal, c, cgrs);
635 spin_lock(&affine_mask_lock);
636 CPU_SET(c->cpu, &affine_mask);
637 affine_channels[c->cpu] =
639 spin_unlock(&affine_mask_lock);
645 void qman_destroy_portal(struct qman_portal *qm)
647 const struct qm_portal_config *pcfg;
649 /* Stop dequeues on the portal */
650 qm_dqrr_sdqcr_set(&qm->p, 0);
653 * NB we do this to "quiesce" EQCR. If we add enqueue-completions or
654 * something related to QM_PIRQ_EQCI, this may need fixing.
655 * Also, due to the prefetching model used for CI updates in the enqueue
656 * path, this update will only invalidate the CI cacheline *after*
657 * working on it, so we need to call this twice to ensure a full update
658 * irrespective of where the enqueue processing was at when the teardown began.
661 qm_eqcr_cce_update(&qm->p);
662 qm_eqcr_cce_update(&qm->p);
665 free_irq(pcfg->irq, qm);
668 qm_mc_finish(&qm->p);
669 qm_mr_finish(&qm->p);
670 qm_dqrr_finish(&qm->p);
671 qm_eqcr_finish(&qm->p);
675 spin_lock_destroy(&qm->cgr_lock);
678 const struct qm_portal_config *qman_destroy_affine_portal(void)
680 /* We don't want to redirect if we're a slave, use "raw" */
681 struct qman_portal *qm = get_affine_portal();
682 const struct qm_portal_config *pcfg;
688 qman_destroy_portal(qm);
690 spin_lock(&affine_mask_lock);
691 CPU_CLR(cpu, &affine_mask);
692 spin_unlock(&affine_mask_lock);
696 int qman_get_portal_index(void)
698 struct qman_portal *p = get_affine_portal();
699 return p->config->index;
702 /* Inline helper to reduce nesting in __poll_portal_slow() */
703 static inline void fq_state_change(struct qman_portal *p, struct qman_fq *fq,
704 const struct qm_mr_entry *msg, u8 verb)
708 case QM_MR_VERB_FQRL:
709 DPAA_ASSERT(fq_isset(fq, QMAN_FQ_STATE_ORL));
710 fq_clear(fq, QMAN_FQ_STATE_ORL);
713 case QM_MR_VERB_FQRN:
714 DPAA_ASSERT((fq->state == qman_fq_state_parked) ||
715 (fq->state == qman_fq_state_sched));
716 DPAA_ASSERT(fq_isset(fq, QMAN_FQ_STATE_CHANGING));
717 fq_clear(fq, QMAN_FQ_STATE_CHANGING);
718 if (msg->fq.fqs & QM_MR_FQS_NOTEMPTY)
719 fq_set(fq, QMAN_FQ_STATE_NE);
720 if (msg->fq.fqs & QM_MR_FQS_ORLPRESENT)
721 fq_set(fq, QMAN_FQ_STATE_ORL);
724 fq->state = qman_fq_state_retired;
726 case QM_MR_VERB_FQPN:
727 DPAA_ASSERT(fq->state == qman_fq_state_sched);
728 DPAA_ASSERT(fq_isclear(fq, QMAN_FQ_STATE_CHANGING));
729 fq->state = qman_fq_state_parked;
734 static u32 __poll_portal_slow(struct qman_portal *p, u32 is)
736 const struct qm_mr_entry *msg;
737 struct qm_mr_entry swapped_msg;
739 if (is & QM_PIRQ_CSCI) {
740 struct qman_cgrs rr, c;
741 struct qm_mc_result *mcr;
742 struct qman_cgr *cgr;
744 spin_lock(&p->cgr_lock);
746 * The CSCI bit must be cleared _before_ issuing the
747 * Query Congestion State command, to ensure that a long
748 * CGR State Change callback cannot miss an intervening
751 qm_isr_status_clear(&p->p, QM_PIRQ_CSCI);
753 qm_mc_commit(&p->p, QM_MCC_VERB_QUERYCONGESTION);
754 while (!(mcr = qm_mc_result(&p->p)))
756 /* mask out the ones I'm not interested in */
757 qman_cgrs_and(&rr, (const struct qman_cgrs *)
758 &mcr->querycongestion.state, &p->cgrs[0]);
759 /* check previous snapshot for delta, enter/exit congestion */
760 qman_cgrs_xor(&c, &rr, &p->cgrs[1]);
761 /* update snapshot */
762 qman_cgrs_cp(&p->cgrs[1], &rr);
763 /* Invoke callback */
764 list_for_each_entry(cgr, &p->cgr_cbs, node)
765 if (cgr->cb && qman_cgrs_get(&c, cgr->cgrid))
766 cgr->cb(p, cgr, qman_cgrs_get(&rr, cgr->cgrid));
767 spin_unlock(&p->cgr_lock);
770 if (is & QM_PIRQ_EQRI) {
771 qm_eqcr_cce_update(&p->p);
772 qm_eqcr_set_ithresh(&p->p, 0);
773 wake_up(&affine_queue);
776 if (is & QM_PIRQ_MRI) {
780 qm_mr_pvb_update(&p->p);
781 msg = qm_mr_current(&p->p);
785 hw_fd_to_cpu(&swapped_msg.ern.fd);
786 verb = msg->verb & QM_MR_VERB_TYPE_MASK;
787 /* The message is a software ERN iff the 0x20 bit is set */
790 case QM_MR_VERB_FQRNI:
791 /* nada, we drop FQRNIs on the floor */
793 case QM_MR_VERB_FQRN:
794 case QM_MR_VERB_FQRL:
795 /* Lookup in the retirement table */
796 fq = table_find_fq(p,
797 be32_to_cpu(msg->fq.fqid));
799 fq_state_change(p, fq, &swapped_msg, verb);
801 fq->cb.fqs(p, fq, &swapped_msg);
803 case QM_MR_VERB_FQPN:
805 #ifdef CONFIG_FSL_QMAN_FQ_LOOKUP
806 fq = get_fq_table_entry(
807 be32_to_cpu(msg->fq.contextB));
809 fq = (void *)(uintptr_t)
810 be32_to_cpu(msg->fq.contextB);
812 fq_state_change(p, fq, msg, verb);
814 fq->cb.fqs(p, fq, &swapped_msg);
816 case QM_MR_VERB_DC_ERN:
819 p->cb_dc_ern(p, msg);
823 static int warn_once;
826 pr_crit("Leaking DCP ERNs!\n");
832 pr_crit("Invalid MR verb 0x%02x\n", verb);
835 /* It's a software ERN */
836 #ifdef CONFIG_FSL_QMAN_FQ_LOOKUP
837 fq = get_fq_table_entry(be32_to_cpu(msg->ern.tag));
839 fq = (void *)(uintptr_t)be32_to_cpu(msg->ern.tag);
841 fq->cb.ern(p, fq, &swapped_msg);
847 qm_mr_cci_consume(&p->p, num);
850 * QM_PIRQ_CSCI/CCSCI has already been cleared, as part of its specific
851 * processing. If that interrupt source has meanwhile been re-asserted,
852 * we mustn't clear it here (or in the top-level interrupt handler).
854 return is & (QM_PIRQ_EQCI | QM_PIRQ_EQRI | QM_PIRQ_MRI);
858 * remove some slowish-path stuff from the "fast path" and make sure it isn't inlined.
861 static noinline void clear_vdqcr(struct qman_portal *p, struct qman_fq *fq)
863 p->vdqcr_owned = NULL;
865 fq_clear(fq, QMAN_FQ_STATE_VDQCR);
867 wake_up(&affine_queue);
871 * The only states that would conflict with other things if they ran at the
872 * same time on the same cpu are:
874 * (i) setting/clearing vdqcr_owned, and
875 * (ii) clearing the NE (Not Empty) flag.
877 * Both are safe, because:
879 * (i) this clearing can only occur after qman_set_vdq() has set the
880 * vdqcr_owned field (which it does before setting VDQCR), and
881 * qman_volatile_dequeue() blocks interrupts and preemption while this is
882 * done so that we can't interfere.
883 * (ii) the NE flag is only cleared after qman_retire_fq() has set it, and as
884 * with (i) that API prevents us from interfering until it's safe.
886 * The good thing is that qman_set_vdq() and qman_retire_fq() run far
887 * less frequently (ie. per-FQ) than __poll_portal_fast() does, so the net
888 * advantage comes from this function not having to "lock" anything at all.
890 * Note also that the callbacks are invoked at points which are safe against the
891 * above potential conflicts, but that this function itself is not re-entrant
892 * (this is because the function tracks one end of each FIFO in the portal and
893 * we do *not* want to lock that). So the consequence is that it is safe for
894 * user callbacks to call into any QMan API.
896 static inline unsigned int __poll_portal_fast(struct qman_portal *p,
897 unsigned int poll_limit)
899 const struct qm_dqrr_entry *dq;
901 enum qman_cb_dqrr_result res;
902 unsigned int limit = 0;
903 #if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
904 struct qm_dqrr_entry *shadow;
907 qm_dqrr_pvb_update(&p->p);
908 dq = qm_dqrr_current(&p->p);
911 #if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
912 /* If running on an LE system the fields of the
913 * dequeue entry must be swapped. Because the
914 * QMan HW will ignore writes, the DQRR entry is
915 * copied and the index stored within the copy
917 shadow = &p->shadow_dqrr[DQRR_PTR2IDX(dq)];
920 shadow->fqid = be32_to_cpu(shadow->fqid);
921 shadow->contextB = be32_to_cpu(shadow->contextB);
922 shadow->seqnum = be16_to_cpu(shadow->seqnum);
923 hw_fd_to_cpu(&shadow->fd);
926 if (dq->stat & QM_DQRR_STAT_UNSCHEDULED) {
928 * VDQCR: don't trust context_b as the FQ may have
929 * been configured for h/w consumption and we're
930 * draining it post-retirement.
934 * We only set QMAN_FQ_STATE_NE when retiring, so we
935 * only need to check for clearing it when doing
936 * volatile dequeues. It's one less thing to check
937 * in the critical path (SDQCR).
939 if (dq->stat & QM_DQRR_STAT_FQ_EMPTY)
940 fq_clear(fq, QMAN_FQ_STATE_NE);
942 * This is duplicated from the SDQCR code, but we
943 * have stuff to do before *and* after this callback,
944 * and we don't want multiple if()s in the critical
947 res = fq->cb.dqrr(p, fq, dq);
948 if (res == qman_cb_dqrr_stop)
950 /* Check for VDQCR completion */
951 if (dq->stat & QM_DQRR_STAT_DQCR_EXPIRED)
954 /* SDQCR: context_b points to the FQ */
955 #ifdef CONFIG_FSL_QMAN_FQ_LOOKUP
956 fq = get_fq_table_entry(dq->contextB);
958 fq = (void *)(uintptr_t)dq->contextB;
960 /* Now let the callback do its stuff */
961 res = fq->cb.dqrr(p, fq, dq);
963 * The callback can request that we exit without
964 * consuming this entry nor advancing;
966 if (res == qman_cb_dqrr_stop)
969 /* Interpret 'dq' from a driver perspective. */
971 * Parking isn't possible unless HELDACTIVE was set. NB,
972 * FORCEELIGIBLE implies HELDACTIVE, so we only need to
973 * check for HELDACTIVE to cover both.
975 DPAA_ASSERT((dq->stat & QM_DQRR_STAT_FQ_HELDACTIVE) ||
976 (res != qman_cb_dqrr_park));
977 /* just means "skip it, I'll consume it myself later on" */
978 if (res != qman_cb_dqrr_defer)
979 qm_dqrr_cdc_consume_1ptr(&p->p, dq,
980 res == qman_cb_dqrr_park);
984 * Entry processed and consumed, increment our counter. The
985 * callback can request that we exit after consuming the
986 * entry, and we also exit if we reach our processing limit,
987 * so loop back only if neither of these conditions is met.
989 } while (++limit < poll_limit && res != qman_cb_dqrr_consume_stop);
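/* Illustrative sketch (not part of the driver): a minimal fq->cb.dqrr
 * handler as invoked by the loop above. It assumes the qman_cb_dqrr
 * signature from fsl_qman.h; "example_ctx" is a hypothetical per-FQ
 * context that embeds the struct qman_fq, and real frame processing is
 * elided.
 *
 *   static enum qman_cb_dqrr_result
 *   example_dqrr_cb(struct qman_portal *qm __always_unused,
 *                   struct qman_fq *fq, const struct qm_dqrr_entry *dqrr)
 *   {
 *           struct example_ctx *ctx =
 *                   container_of(fq, struct example_ctx, fq);
 *
 *           ctx->rx_frames++;          // dqrr->fd would be processed here
 *           return qman_cb_dqrr_consume;
 *   }
 *
 * Returning qman_cb_dqrr_defer instead would leave the entry for the
 * caller to consume later (e.g. via qman_dca()), and the *_stop variants
 * terminate the poll loop as handled above.
 */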
994 u16 qman_affine_channel(int cpu)
997 struct qman_portal *portal = get_affine_portal();
999 cpu = portal->config->cpu;
1001 DPAA_BUG_ON(!CPU_ISSET(cpu, &affine_mask));
1002 return affine_channels[cpu];
1005 struct qm_dqrr_entry *qman_dequeue(struct qman_fq *fq)
1007 struct qman_portal *p = get_affine_portal();
1008 const struct qm_dqrr_entry *dq;
1009 #if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
1010 struct qm_dqrr_entry *shadow;
1013 qm_dqrr_pvb_update(&p->p);
1014 dq = qm_dqrr_current(&p->p);
1018 if (!(dq->stat & QM_DQRR_STAT_FD_VALID)) {
1019 /* Invalid DQRR - put the portal and consume the DQRR.
1020 * Return NULL to user as no packet is seen.
1022 qman_dqrr_consume(fq, (struct qm_dqrr_entry *)dq);
1026 #if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
1027 shadow = &p->shadow_dqrr[DQRR_PTR2IDX(dq)];
1030 shadow->fqid = be32_to_cpu(shadow->fqid);
1031 shadow->contextB = be32_to_cpu(shadow->contextB);
1032 shadow->seqnum = be16_to_cpu(shadow->seqnum);
1033 hw_fd_to_cpu(&shadow->fd);
1036 if (dq->stat & QM_DQRR_STAT_FQ_EMPTY)
1037 fq_clear(fq, QMAN_FQ_STATE_NE);
1039 return (struct qm_dqrr_entry *)dq;
1042 void qman_dqrr_consume(struct qman_fq *fq,
1043 struct qm_dqrr_entry *dq)
1045 struct qman_portal *p = get_affine_portal();
1047 if (dq->stat & QM_DQRR_STAT_DQCR_EXPIRED)
1050 qm_dqrr_cdc_consume_1ptr(&p->p, dq, 0);
1051 qm_dqrr_next(&p->p);
1054 int qman_poll_dqrr(unsigned int limit)
1056 struct qman_portal *p = get_affine_portal();
1059 ret = __poll_portal_fast(p, limit);
1063 void qman_poll(void)
1065 struct qman_portal *p = get_affine_portal();
1067 if ((~p->irq_sources) & QM_PIRQ_SLOW) {
1068 if (!(p->slowpoll--)) {
1069 u32 is = qm_isr_status_read(&p->p) & ~p->irq_sources;
1070 u32 active = __poll_portal_slow(p, is);
1073 qm_isr_status_clear(&p->p, active);
1074 p->slowpoll = SLOW_POLL_BUSY;
1076 p->slowpoll = SLOW_POLL_IDLE;
1079 if ((~p->irq_sources) & QM_PIRQ_DQRI)
1080 __poll_portal_fast(p, FSL_QMAN_POLL_LIMIT);
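/* Illustrative usage sketch (not part of the driver): a thread running
 * with portal interrupts masked is expected to drive both paths from its
 * main loop, either through qman_poll() (which balances the fast and slow
 * paths with the SLOW_POLL_* decrementers) or through qman_poll_dqrr()
 * with an explicit budget. "keep_running" is a placeholder flag.
 *
 *   while (keep_running) {
 *           qman_poll();                // occasional slow path + fast path
 *           // or: qman_poll_dqrr(16);     fast path only, bounded work
 *   }
 */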
1083 void qman_stop_dequeues(void)
1085 struct qman_portal *p = get_affine_portal();
1087 qman_stop_dequeues_ex(p);
1090 void qman_start_dequeues(void)
1092 struct qman_portal *p = get_affine_portal();
1094 DPAA_ASSERT(p->dqrr_disable_ref > 0);
1095 if (!(--p->dqrr_disable_ref))
1096 qm_dqrr_set_maxfill(&p->p, DQRR_MAXFILL);
1099 void qman_static_dequeue_add(u32 pools)
1101 struct qman_portal *p = get_affine_portal();
1103 pools &= p->config->pools;
1105 qm_dqrr_sdqcr_set(&p->p, p->sdqcr);
1108 void qman_static_dequeue_del(u32 pools)
1110 struct qman_portal *p = get_affine_portal();
1112 pools &= p->config->pools;
1114 qm_dqrr_sdqcr_set(&p->p, p->sdqcr);
1117 u32 qman_static_dequeue_get(void)
1119 struct qman_portal *p = get_affine_portal();
1123 void qman_dca(struct qm_dqrr_entry *dq, int park_request)
1125 struct qman_portal *p = get_affine_portal();
1127 qm_dqrr_cdc_consume_1ptr(&p->p, dq, park_request);
1130 /* Frame queue API */
1131 static const char *mcr_result_str(u8 result)
1134 case QM_MCR_RESULT_NULL:
1135 return "QM_MCR_RESULT_NULL";
1136 case QM_MCR_RESULT_OK:
1137 return "QM_MCR_RESULT_OK";
1138 case QM_MCR_RESULT_ERR_FQID:
1139 return "QM_MCR_RESULT_ERR_FQID";
1140 case QM_MCR_RESULT_ERR_FQSTATE:
1141 return "QM_MCR_RESULT_ERR_FQSTATE";
1142 case QM_MCR_RESULT_ERR_NOTEMPTY:
1143 return "QM_MCR_RESULT_ERR_NOTEMPTY";
1144 case QM_MCR_RESULT_PENDING:
1145 return "QM_MCR_RESULT_PENDING";
1146 case QM_MCR_RESULT_ERR_BADCOMMAND:
1147 return "QM_MCR_RESULT_ERR_BADCOMMAND";
1149 return "<unknown MCR result>";
1152 int qman_create_fq(u32 fqid, u32 flags, struct qman_fq *fq)
1155 struct qm_mcr_queryfq_np np;
1156 struct qm_mc_command *mcc;
1157 struct qm_mc_result *mcr;
1158 struct qman_portal *p;
1160 if (flags & QMAN_FQ_FLAG_DYNAMIC_FQID) {
1161 int ret = qman_alloc_fqid(&fqid);
1166 spin_lock_init(&fq->fqlock);
1169 fq->state = qman_fq_state_oos;
1170 fq->cgr_groupid = 0;
1171 #ifdef CONFIG_FSL_QMAN_FQ_LOOKUP
1172 if (unlikely(find_empty_fq_table_entry(&fq->key, fq))) {
1173 pr_info("Find empty table entry failed\n");
1177 if (!(flags & QMAN_FQ_FLAG_AS_IS) || (flags & QMAN_FQ_FLAG_NO_MODIFY))
1179 /* Everything else is AS_IS support */
1180 p = get_affine_portal();
1181 mcc = qm_mc_start(&p->p);
1182 mcc->queryfq.fqid = cpu_to_be32(fqid);
1183 qm_mc_commit(&p->p, QM_MCC_VERB_QUERYFQ);
1184 while (!(mcr = qm_mc_result(&p->p)))
1186 DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCC_VERB_QUERYFQ);
1187 if (mcr->result != QM_MCR_RESULT_OK) {
1188 pr_err("QUERYFQ failed: %s\n", mcr_result_str(mcr->result));
1191 fqd = mcr->queryfq.fqd;
1192 hw_fqd_to_cpu(&fqd);
1193 mcc = qm_mc_start(&p->p);
1194 mcc->queryfq_np.fqid = cpu_to_be32(fqid);
1195 qm_mc_commit(&p->p, QM_MCC_VERB_QUERYFQ_NP);
1196 while (!(mcr = qm_mc_result(&p->p)))
1198 DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCC_VERB_QUERYFQ_NP);
1199 if (mcr->result != QM_MCR_RESULT_OK) {
1200 pr_err("QUERYFQ_NP failed: %s\n", mcr_result_str(mcr->result));
1203 np = mcr->queryfq_np;
1204 /* Phew, have queryfq and queryfq_np results, stitch together
1205 * the FQ object from those.
1207 fq->cgr_groupid = fqd.cgid;
1208 switch (np.state & QM_MCR_NP_STATE_MASK) {
1209 case QM_MCR_NP_STATE_OOS:
1211 case QM_MCR_NP_STATE_RETIRED:
1212 fq->state = qman_fq_state_retired;
1214 fq_set(fq, QMAN_FQ_STATE_NE);
1216 case QM_MCR_NP_STATE_TEN_SCHED:
1217 case QM_MCR_NP_STATE_TRU_SCHED:
1218 case QM_MCR_NP_STATE_ACTIVE:
1219 fq->state = qman_fq_state_sched;
1220 if (np.state & QM_MCR_NP_STATE_R)
1221 fq_set(fq, QMAN_FQ_STATE_CHANGING);
1223 case QM_MCR_NP_STATE_PARKED:
1224 fq->state = qman_fq_state_parked;
1227 DPAA_ASSERT(NULL == "invalid FQ state");
1229 if (fqd.fq_ctrl & QM_FQCTRL_CGE)
1230 fq->state |= QMAN_FQ_STATE_CGR_EN;
1233 if (flags & QMAN_FQ_FLAG_DYNAMIC_FQID)
1234 qman_release_fqid(fqid);
1238 void qman_destroy_fq(struct qman_fq *fq, u32 flags __maybe_unused)
1241 * We don't need to lock the FQ as it is a pre-condition that the FQ be
1242 * quiesced. Instead, run some checks.
1244 switch (fq->state) {
1245 case qman_fq_state_parked:
1246 DPAA_ASSERT(flags & QMAN_FQ_DESTROY_PARKED);
1248 case qman_fq_state_oos:
1249 if (fq_isset(fq, QMAN_FQ_FLAG_DYNAMIC_FQID))
1250 qman_release_fqid(fq->fqid);
1251 #ifdef CONFIG_FSL_QMAN_FQ_LOOKUP
1252 clear_fq_table_entry(fq->key);
1258 DPAA_ASSERT(NULL == "qman_free_fq() on unquiesced FQ!");
1261 u32 qman_fq_fqid(struct qman_fq *fq)
1266 void qman_fq_state(struct qman_fq *fq, enum qman_fq_state *state, u32 *flags)
1274 int qman_init_fq(struct qman_fq *fq, u32 flags, struct qm_mcc_initfq *opts)
1276 struct qm_mc_command *mcc;
1277 struct qm_mc_result *mcr;
1278 struct qman_portal *p;
1280 u8 res, myverb = (flags & QMAN_INITFQ_FLAG_SCHED) ?
1281 QM_MCC_VERB_INITFQ_SCHED : QM_MCC_VERB_INITFQ_PARKED;
1283 if ((fq->state != qman_fq_state_oos) &&
1284 (fq->state != qman_fq_state_parked))
1286 #ifdef RTE_LIBRTE_DPAA_HWDEBUG
1287 if (unlikely(fq_isset(fq, QMAN_FQ_FLAG_NO_MODIFY)))
1290 if (opts && (opts->we_mask & QM_INITFQ_WE_OAC)) {
1291 /* And can't be set at the same time as TDTHRESH */
1292 if (opts->we_mask & QM_INITFQ_WE_TDTHRESH)
1295 /* Issue an INITFQ_[PARKED|SCHED] management command */
1296 p = get_affine_portal();
1298 if (unlikely((fq_isset(fq, QMAN_FQ_STATE_CHANGING)) ||
1299 ((fq->state != qman_fq_state_oos) &&
1300 (fq->state != qman_fq_state_parked)))) {
1304 mcc = qm_mc_start(&p->p);
1306 mcc->initfq = *opts;
1307 mcc->initfq.fqid = cpu_to_be32(fq->fqid);
1308 mcc->initfq.count = 0;
1310 * If the FQ does *not* have the TO_DCPORTAL flag, context_b is set as a
1311 * demux pointer. Otherwise, the caller-provided value is allowed to
1312 * stand, don't overwrite it.
1314 if (fq_isclear(fq, QMAN_FQ_FLAG_TO_DCPORTAL)) {
1317 mcc->initfq.we_mask |= QM_INITFQ_WE_CONTEXTB;
1318 #ifdef CONFIG_FSL_QMAN_FQ_LOOKUP
1319 mcc->initfq.fqd.context_b = fq->key;
1321 mcc->initfq.fqd.context_b = (u32)(uintptr_t)fq;
1324 * and the physical address - NB, if the user wasn't trying to
1325 * set CONTEXTA, clear the stashing settings.
1327 if (!(mcc->initfq.we_mask & QM_INITFQ_WE_CONTEXTA)) {
1328 mcc->initfq.we_mask |= QM_INITFQ_WE_CONTEXTA;
1329 memset(&mcc->initfq.fqd.context_a, 0,
1330 sizeof(mcc->initfq.fqd.context_a));
1332 phys_fq = rte_mem_virt2iova(fq);
1333 qm_fqd_stashing_set64(&mcc->initfq.fqd, phys_fq);
1336 if (flags & QMAN_INITFQ_FLAG_LOCAL) {
1337 mcc->initfq.fqd.dest.channel = p->config->channel;
1338 if (!(mcc->initfq.we_mask & QM_INITFQ_WE_DESTWQ)) {
1339 mcc->initfq.we_mask |= QM_INITFQ_WE_DESTWQ;
1340 mcc->initfq.fqd.dest.wq = 4;
1343 mcc->initfq.we_mask = cpu_to_be16(mcc->initfq.we_mask);
1344 cpu_to_hw_fqd(&mcc->initfq.fqd);
1345 qm_mc_commit(&p->p, myverb);
1346 while (!(mcr = qm_mc_result(&p->p)))
1348 DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == myverb);
1350 if (res != QM_MCR_RESULT_OK) {
1355 if (opts->we_mask & QM_INITFQ_WE_FQCTRL) {
1356 if (opts->fqd.fq_ctrl & QM_FQCTRL_CGE)
1357 fq_set(fq, QMAN_FQ_STATE_CGR_EN);
1359 fq_clear(fq, QMAN_FQ_STATE_CGR_EN);
1361 if (opts->we_mask & QM_INITFQ_WE_CGID)
1362 fq->cgr_groupid = opts->fqd.cgid;
1364 fq->state = (flags & QMAN_INITFQ_FLAG_SCHED) ?
1365 qman_fq_state_sched : qman_fq_state_parked;
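/* Illustrative lifecycle sketch (not part of the driver): the typical
 * sequence an application follows with the FQ API in this file. The
 * callback ("example_dqrr_cb" from the sketch above), FQID handling and
 * error paths are placeholders; opts/flags are kept minimal here.
 *
 *   struct qman_fq fq = { .cb.dqrr = example_dqrr_cb };
 *
 *   if (qman_create_fq(0, QMAN_FQ_FLAG_DYNAMIC_FQID, &fq))
 *           return;                    // FQID allocation/creation failed
 *   if (qman_init_fq(&fq, QMAN_INITFQ_FLAG_SCHED, NULL))
 *           return;                    // FQ is scheduled on success
 *
 *   ... enqueue/dequeue traffic ...
 *
 *   qman_retire_fq(&fq, NULL);         // may complete asynchronously (FQRN)
 *   qman_oos_fq(&fq);                  // only once fully retired
 *   qman_destroy_fq(&fq, 0);
 */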
1370 int qman_schedule_fq(struct qman_fq *fq)
1372 struct qm_mc_command *mcc;
1373 struct qm_mc_result *mcr;
1374 struct qman_portal *p;
1379 if (fq->state != qman_fq_state_parked)
1381 #ifdef RTE_LIBRTE_DPAA_HWDEBUG
1382 if (unlikely(fq_isset(fq, QMAN_FQ_FLAG_NO_MODIFY)))
1385 /* Issue an ALTERFQ_SCHED management command */
1386 p = get_affine_portal();
1389 if (unlikely((fq_isset(fq, QMAN_FQ_STATE_CHANGING)) ||
1390 (fq->state != qman_fq_state_parked))) {
1394 mcc = qm_mc_start(&p->p);
1395 mcc->alterfq.fqid = cpu_to_be32(fq->fqid);
1396 qm_mc_commit(&p->p, QM_MCC_VERB_ALTER_SCHED);
1397 while (!(mcr = qm_mc_result(&p->p)))
1399 DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCR_VERB_ALTER_SCHED);
1401 if (res != QM_MCR_RESULT_OK) {
1405 fq->state = qman_fq_state_sched;
1412 int qman_retire_fq(struct qman_fq *fq, u32 *flags)
1414 struct qm_mc_command *mcc;
1415 struct qm_mc_result *mcr;
1416 struct qman_portal *p;
1421 if ((fq->state != qman_fq_state_parked) &&
1422 (fq->state != qman_fq_state_sched))
1424 #ifdef RTE_LIBRTE_DPAA_HWDEBUG
1425 if (unlikely(fq_isset(fq, QMAN_FQ_FLAG_NO_MODIFY)))
1428 p = get_affine_portal();
1431 if (unlikely((fq_isset(fq, QMAN_FQ_STATE_CHANGING)) ||
1432 (fq->state == qman_fq_state_retired) ||
1433 (fq->state == qman_fq_state_oos))) {
1437 rval = table_push_fq(p, fq);
1440 mcc = qm_mc_start(&p->p);
1441 mcc->alterfq.fqid = cpu_to_be32(fq->fqid);
1442 qm_mc_commit(&p->p, QM_MCC_VERB_ALTER_RETIRE);
1443 while (!(mcr = qm_mc_result(&p->p)))
1445 DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCR_VERB_ALTER_RETIRE);
1448 * "Elegant" would be to treat OK/PENDING the same way; set CHANGING,
1449 * and defer the flags until FQRNI or FQRN (respectively) show up. But
1450 * "Friendly" is to process OK immediately, and not set CHANGING. We do
1451 * friendly, otherwise the caller doesn't necessarily have a fully
1452 * "retired" FQ on return even if the retirement was immediate. However
1453 * this does mean some code duplication between here and
1454 * fq_state_change().
1456 if (likely(res == QM_MCR_RESULT_OK)) {
1458 /* Process 'fq' right away, we'll ignore FQRNI */
1459 if (mcr->alterfq.fqs & QM_MCR_FQS_NOTEMPTY)
1460 fq_set(fq, QMAN_FQ_STATE_NE);
1461 if (mcr->alterfq.fqs & QM_MCR_FQS_ORLPRESENT)
1462 fq_set(fq, QMAN_FQ_STATE_ORL);
1464 table_del_fq(p, fq);
1467 fq->state = qman_fq_state_retired;
1470 * Another issue with supporting "immediate" retirement
1471 * is that we're forced to drop FQRNIs, because by the
1472 * time they're seen it may already be "too late" (the
1473 * fq may have been OOS'd and free()'d already). But if
1474 * the upper layer wants a callback whether it's
1475 * immediate or not, we have to fake a "MR" entry to
1476 * look like an FQRNI...
1478 struct qm_mr_entry msg;
1480 msg.verb = QM_MR_VERB_FQRNI;
1481 msg.fq.fqs = mcr->alterfq.fqs;
1482 msg.fq.fqid = fq->fqid;
1483 #ifdef CONFIG_FSL_QMAN_FQ_LOOKUP
1484 msg.fq.contextB = fq->key;
1486 msg.fq.contextB = (u32)(uintptr_t)fq;
1488 fq->cb.fqs(p, fq, &msg);
1490 } else if (res == QM_MCR_RESULT_PENDING) {
1492 fq_set(fq, QMAN_FQ_STATE_CHANGING);
1495 table_del_fq(p, fq);
1502 int qman_oos_fq(struct qman_fq *fq)
1504 struct qm_mc_command *mcc;
1505 struct qm_mc_result *mcr;
1506 struct qman_portal *p;
1511 if (fq->state != qman_fq_state_retired)
1513 #ifdef RTE_LIBRTE_DPAA_HWDEBUG
1514 if (unlikely(fq_isset(fq, QMAN_FQ_FLAG_NO_MODIFY)))
1517 p = get_affine_portal();
1519 if (unlikely((fq_isset(fq, QMAN_FQ_STATE_BLOCKOOS)) ||
1520 (fq->state != qman_fq_state_retired))) {
1524 mcc = qm_mc_start(&p->p);
1525 mcc->alterfq.fqid = cpu_to_be32(fq->fqid);
1526 qm_mc_commit(&p->p, QM_MCC_VERB_ALTER_OOS);
1527 while (!(mcr = qm_mc_result(&p->p)))
1529 DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCR_VERB_ALTER_OOS);
1531 if (res != QM_MCR_RESULT_OK) {
1535 fq->state = qman_fq_state_oos;
1541 int qman_fq_flow_control(struct qman_fq *fq, int xon)
1543 struct qm_mc_command *mcc;
1544 struct qm_mc_result *mcr;
1545 struct qman_portal *p;
1551 if ((fq->state == qman_fq_state_oos) ||
1552 (fq->state == qman_fq_state_retired) ||
1553 (fq->state == qman_fq_state_parked))
1556 #ifdef RTE_LIBRTE_DPAA_HWDEBUG
1557 if (unlikely(fq_isset(fq, QMAN_FQ_FLAG_NO_MODIFY)))
1560 /* Issue an ALTER_FQXON or ALTER_FQXOFF management command */
1561 p = get_affine_portal();
1563 if (unlikely((fq_isset(fq, QMAN_FQ_STATE_CHANGING)) ||
1564 (fq->state == qman_fq_state_parked) ||
1565 (fq->state == qman_fq_state_oos) ||
1566 (fq->state == qman_fq_state_retired))) {
1570 mcc = qm_mc_start(&p->p);
1571 mcc->alterfq.fqid = fq->fqid;
1572 mcc->alterfq.count = 0;
1573 myverb = xon ? QM_MCC_VERB_ALTER_FQXON : QM_MCC_VERB_ALTER_FQXOFF;
1575 qm_mc_commit(&p->p, myverb);
1576 while (!(mcr = qm_mc_result(&p->p)))
1578 DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == myverb);
1581 if (res != QM_MCR_RESULT_OK) {
1590 int qman_query_fq(struct qman_fq *fq, struct qm_fqd *fqd)
1592 struct qm_mc_command *mcc;
1593 struct qm_mc_result *mcr;
1594 struct qman_portal *p = get_affine_portal();
1598 mcc = qm_mc_start(&p->p);
1599 mcc->queryfq.fqid = cpu_to_be32(fq->fqid);
1600 qm_mc_commit(&p->p, QM_MCC_VERB_QUERYFQ);
1601 while (!(mcr = qm_mc_result(&p->p)))
1603 DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCR_VERB_QUERYFQ);
1605 if (res == QM_MCR_RESULT_OK)
1606 *fqd = mcr->queryfq.fqd;
1608 if (res != QM_MCR_RESULT_OK)
1613 int qman_query_fq_has_pkts(struct qman_fq *fq)
1615 struct qm_mc_command *mcc;
1616 struct qm_mc_result *mcr;
1617 struct qman_portal *p = get_affine_portal();
1622 mcc = qm_mc_start(&p->p);
1623 mcc->queryfq.fqid = cpu_to_be32(fq->fqid);
1624 qm_mc_commit(&p->p, QM_MCC_VERB_QUERYFQ_NP);
1625 while (!(mcr = qm_mc_result(&p->p)))
1628 if (res == QM_MCR_RESULT_OK)
1629 ret = !!mcr->queryfq_np.frm_cnt;
1633 int qman_query_fq_np(struct qman_fq *fq, struct qm_mcr_queryfq_np *np)
1635 struct qm_mc_command *mcc;
1636 struct qm_mc_result *mcr;
1637 struct qman_portal *p = get_affine_portal();
1641 mcc = qm_mc_start(&p->p);
1642 mcc->queryfq.fqid = cpu_to_be32(fq->fqid);
1643 qm_mc_commit(&p->p, QM_MCC_VERB_QUERYFQ_NP);
1644 while (!(mcr = qm_mc_result(&p->p)))
1646 DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCR_VERB_QUERYFQ_NP);
1648 if (res == QM_MCR_RESULT_OK) {
1649 *np = mcr->queryfq_np;
1650 np->fqd_link = be24_to_cpu(np->fqd_link);
1651 np->odp_seq = be16_to_cpu(np->odp_seq);
1652 np->orp_nesn = be16_to_cpu(np->orp_nesn);
1653 np->orp_ea_hseq = be16_to_cpu(np->orp_ea_hseq);
1654 np->orp_ea_tseq = be16_to_cpu(np->orp_ea_tseq);
1655 np->orp_ea_hptr = be24_to_cpu(np->orp_ea_hptr);
1656 np->orp_ea_tptr = be24_to_cpu(np->orp_ea_tptr);
1657 np->pfdr_hptr = be24_to_cpu(np->pfdr_hptr);
1658 np->pfdr_tptr = be24_to_cpu(np->pfdr_tptr);
1659 np->ics_surp = be16_to_cpu(np->ics_surp);
1660 np->byte_cnt = be32_to_cpu(np->byte_cnt);
1661 np->frm_cnt = be24_to_cpu(np->frm_cnt);
1662 np->ra1_sfdr = be16_to_cpu(np->ra1_sfdr);
1663 np->ra2_sfdr = be16_to_cpu(np->ra2_sfdr);
1664 np->od1_sfdr = be16_to_cpu(np->od1_sfdr);
1665 np->od2_sfdr = be16_to_cpu(np->od2_sfdr);
1666 np->od3_sfdr = be16_to_cpu(np->od3_sfdr);
1668 if (res == QM_MCR_RESULT_ERR_FQID)
1670 else if (res != QM_MCR_RESULT_OK)
1675 int qman_query_wq(u8 query_dedicated, struct qm_mcr_querywq *wq)
1677 struct qm_mc_command *mcc;
1678 struct qm_mc_result *mcr;
1679 struct qman_portal *p = get_affine_portal();
1683 myverb = (query_dedicated) ? QM_MCR_VERB_QUERYWQ_DEDICATED :
1684 QM_MCR_VERB_QUERYWQ;
1685 mcc = qm_mc_start(&p->p);
1686 mcc->querywq.channel.id = cpu_to_be16(wq->channel.id);
1687 qm_mc_commit(&p->p, myverb);
1688 while (!(mcr = qm_mc_result(&p->p)))
1690 DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == myverb);
1692 if (res == QM_MCR_RESULT_OK) {
1695 wq->channel.id = be16_to_cpu(mcr->querywq.channel.id);
1696 array_len = ARRAY_SIZE(mcr->querywq.wq_len);
1697 for (i = 0; i < array_len; i++)
1698 wq->wq_len[i] = be32_to_cpu(mcr->querywq.wq_len[i]);
1700 if (res != QM_MCR_RESULT_OK) {
1701 pr_err("QUERYWQ failed: %s\n", mcr_result_str(res));
1707 int qman_testwrite_cgr(struct qman_cgr *cgr, u64 i_bcnt,
1708 struct qm_mcr_cgrtestwrite *result)
1710 struct qm_mc_command *mcc;
1711 struct qm_mc_result *mcr;
1712 struct qman_portal *p = get_affine_portal();
1716 mcc = qm_mc_start(&p->p);
1717 mcc->cgrtestwrite.cgid = cgr->cgrid;
1718 mcc->cgrtestwrite.i_bcnt_hi = (u8)(i_bcnt >> 32);
1719 mcc->cgrtestwrite.i_bcnt_lo = (u32)i_bcnt;
1720 qm_mc_commit(&p->p, QM_MCC_VERB_CGRTESTWRITE);
1721 while (!(mcr = qm_mc_result(&p->p)))
1723 DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCC_VERB_CGRTESTWRITE);
1725 if (res == QM_MCR_RESULT_OK)
1726 *result = mcr->cgrtestwrite;
1727 if (res != QM_MCR_RESULT_OK) {
1728 pr_err("CGR TEST WRITE failed: %s\n", mcr_result_str(res));
1734 int qman_query_cgr(struct qman_cgr *cgr, struct qm_mcr_querycgr *cgrd)
1736 struct qm_mc_command *mcc;
1737 struct qm_mc_result *mcr;
1738 struct qman_portal *p = get_affine_portal();
1742 mcc = qm_mc_start(&p->p);
1743 mcc->querycgr.cgid = cgr->cgrid;
1744 qm_mc_commit(&p->p, QM_MCC_VERB_QUERYCGR);
1745 while (!(mcr = qm_mc_result(&p->p)))
1747 DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCC_VERB_QUERYCGR);
1749 if (res == QM_MCR_RESULT_OK)
1750 *cgrd = mcr->querycgr;
1751 if (res != QM_MCR_RESULT_OK) {
1752 pr_err("QUERY_CGR failed: %s\n", mcr_result_str(res));
1755 cgrd->cgr.wr_parm_g.word =
1756 be32_to_cpu(cgrd->cgr.wr_parm_g.word);
1757 cgrd->cgr.wr_parm_y.word =
1758 be32_to_cpu(cgrd->cgr.wr_parm_y.word);
1759 cgrd->cgr.wr_parm_r.word =
1760 be32_to_cpu(cgrd->cgr.wr_parm_r.word);
1761 cgrd->cgr.cscn_targ = be32_to_cpu(cgrd->cgr.cscn_targ);
1762 cgrd->cgr.__cs_thres = be16_to_cpu(cgrd->cgr.__cs_thres);
1763 for (i = 0; i < ARRAY_SIZE(cgrd->cscn_targ_swp); i++)
1764 cgrd->cscn_targ_swp[i] =
1765 be32_to_cpu(cgrd->cscn_targ_swp[i]);
1769 int qman_query_congestion(struct qm_mcr_querycongestion *congestion)
1771 struct qm_mc_result *mcr;
1772 struct qman_portal *p = get_affine_portal();
1777 qm_mc_commit(&p->p, QM_MCC_VERB_QUERYCONGESTION);
1778 while (!(mcr = qm_mc_result(&p->p)))
1780 DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) ==
1781 QM_MCC_VERB_QUERYCONGESTION);
1783 if (res == QM_MCR_RESULT_OK)
1784 *congestion = mcr->querycongestion;
1785 if (res != QM_MCR_RESULT_OK) {
1786 pr_err("QUERY_CONGESTION failed: %s\n", mcr_result_str(res));
1789 for (i = 0; i < ARRAY_SIZE(congestion->state.state); i++)
1790 congestion->state.state[i] =
1791 be32_to_cpu(congestion->state.state[i]);
1795 int qman_set_vdq(struct qman_fq *fq, u16 num)
1797 struct qman_portal *p = get_affine_portal();
1801 vdqcr = QM_VDQCR_EXACT;
1802 vdqcr |= QM_VDQCR_NUMFRAMES_SET(num);
1804 if ((fq->state != qman_fq_state_parked) &&
1805 (fq->state != qman_fq_state_retired)) {
1809 if (fq_isset(fq, QMAN_FQ_STATE_VDQCR)) {
1813 vdqcr = (vdqcr & ~QM_VDQCR_FQID_MASK) | fq->fqid;
1815 if (!p->vdqcr_owned) {
1817 if (fq_isset(fq, QMAN_FQ_STATE_VDQCR))
1819 fq_set(fq, QMAN_FQ_STATE_VDQCR);
1821 p->vdqcr_owned = fq;
1826 qm_dqrr_vdqcr_set(&p->p, vdqcr);
1832 int qman_volatile_dequeue(struct qman_fq *fq, u32 flags __maybe_unused,
1835 struct qman_portal *p;
1838 if ((fq->state != qman_fq_state_parked) &&
1839 (fq->state != qman_fq_state_retired))
1841 if (vdqcr & QM_VDQCR_FQID_MASK)
1843 if (fq_isset(fq, QMAN_FQ_STATE_VDQCR))
1845 vdqcr = (vdqcr & ~QM_VDQCR_FQID_MASK) | fq->fqid;
1847 p = get_affine_portal();
1849 if (!p->vdqcr_owned) {
1851 if (fq_isset(fq, QMAN_FQ_STATE_VDQCR))
1853 fq_set(fq, QMAN_FQ_STATE_VDQCR);
1855 p->vdqcr_owned = fq;
1863 qm_dqrr_vdqcr_set(&p->p, vdqcr);
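/* Illustrative usage sketch (not part of the driver): draining a parked or
 * retired FQ with a volatile dequeue and polling until the command expires.
 * "nb" is a placeholder frame budget; the dequeued entries are delivered to
 * fq->cb.dqrr with QM_DQRR_STAT_UNSCHEDULED set, as handled in
 * __poll_portal_fast().
 *
 *   u32 vdqcr = QM_VDQCR_EXACT | QM_VDQCR_NUMFRAMES_SET(nb);
 *
 *   if (!qman_volatile_dequeue(&fq, 0, vdqcr))
 *           do {
 *                   qman_poll_dqrr(nb);
 *           } while (fq_isset(&fq, QMAN_FQ_STATE_VDQCR));
 */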
1867 static noinline void update_eqcr_ci(struct qman_portal *p, u8 avail)
1870 qm_eqcr_cce_prefetch(&p->p);
1872 qm_eqcr_cce_update(&p->p);
1875 int qman_eqcr_is_empty(void)
1877 struct qman_portal *p = get_affine_portal();
1880 update_eqcr_ci(p, 0);
1881 avail = qm_eqcr_get_fill(&p->p);
1882 return (avail == 0);
1885 void qman_set_dc_ern(qman_cb_dc_ern handler, int affine)
1888 struct qman_portal *p = get_affine_portal();
1890 p->cb_dc_ern = handler;
1892 cb_dc_ern = handler;
1895 static inline struct qm_eqcr_entry *try_p_eq_start(struct qman_portal *p,
1897 const struct qm_fd *fd,
1900 struct qm_eqcr_entry *eq;
1903 if (p->use_eqcr_ci_stashing) {
1905 * The stashing case is easy, only update if we need to in
1906 * order to try and liberate ring entries.
1908 eq = qm_eqcr_start_stash(&p->p);
1911 * The non-stashing case is harder, need to prefetch ahead of
1914 avail = qm_eqcr_get_avail(&p->p);
1916 update_eqcr_ci(p, avail);
1917 eq = qm_eqcr_start_no_stash(&p->p);
1923 if (flags & QMAN_ENQUEUE_FLAG_DCA)
1924 eq->dca = QM_EQCR_DCA_ENABLE |
1925 ((flags & QMAN_ENQUEUE_FLAG_DCA_PARK) ?
1926 QM_EQCR_DCA_PARK : 0) |
1927 ((flags >> 8) & QM_EQCR_DCA_IDXMASK);
1928 eq->fqid = cpu_to_be32(fq->fqid);
1929 #ifdef CONFIG_FSL_QMAN_FQ_LOOKUP
1930 eq->tag = cpu_to_be32(fq->key);
1932 eq->tag = cpu_to_be32((u32)(uintptr_t)fq);
1935 cpu_to_hw_fd(&eq->fd);
1939 int qman_enqueue(struct qman_fq *fq, const struct qm_fd *fd, u32 flags)
1941 struct qman_portal *p = get_affine_portal();
1942 struct qm_eqcr_entry *eq;
1944 eq = try_p_eq_start(p, fq, fd, flags);
1947 /* Note: QM_EQCR_VERB_INTERRUPT == QMAN_ENQUEUE_FLAG_WAIT_SYNC */
1948 qm_eqcr_pvb_commit(&p->p, QM_EQCR_VERB_CMD_ENQUEUE |
1949 (flags & (QM_EQCR_VERB_COLOUR_MASK | QM_EQCR_VERB_INTERRUPT)));
1950 /* Factor the below out, it's used from qman_enqueue_orp() too */
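/* Illustrative usage sketch (not part of the driver): building a frame
 * descriptor and enqueueing it on an initialised, scheduled FQ. The buffer
 * IOVA, offset and length values are placeholders, and the format/offset/
 * length20 field names are taken from struct qm_fd in fsl_qman.h (treat
 * them as indicative).
 *
 *   struct qm_fd fd;
 *
 *   memset(&fd, 0, sizeof(fd));
 *   fd.addr = buf_iova;            // 40-bit IOVA of the data buffer
 *   fd.format = qm_fd_contig;      // single contiguous buffer
 *   fd.offset = headroom;
 *   fd.length20 = frame_len;
 *
 *   while (qman_enqueue(&fq, &fd, 0))
 *           ;                      // non-zero while the EQCR is full, retry
 */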
1954 int qman_enqueue_multi(struct qman_fq *fq,
1955 const struct qm_fd *fd,
1958 struct qman_portal *p = get_affine_portal();
1959 struct qm_portal *portal = &p->p;
1961 register struct qm_eqcr *eqcr = &portal->eqcr;
1962 struct qm_eqcr_entry *eq = eqcr->cursor, *prev_eq;
1964 u8 i, diff, old_ci, sent = 0;
1966 /* Update the available entries if no entry is free */
1967 if (!eqcr->available) {
1969 eqcr->ci = qm_cl_in(EQCR_CI) & (QM_EQCR_SIZE - 1);
1970 diff = qm_cyc_diff(QM_EQCR_SIZE, old_ci, eqcr->ci);
1971 eqcr->available += diff;
1976 /* try to send as many frames as possible */
1977 while (eqcr->available && frames_to_send--) {
1978 eq->fqid = cpu_to_be32(fq->fqid);
1979 #ifdef CONFIG_FSL_QMAN_FQ_LOOKUP
1980 eq->tag = cpu_to_be32(fq->key);
1982 eq->tag = cpu_to_be32((u32)(uintptr_t)fq);
1984 eq->fd.opaque_addr = fd->opaque_addr;
1985 eq->fd.addr = cpu_to_be40(fd->addr);
1986 eq->fd.status = cpu_to_be32(fd->status);
1987 eq->fd.opaque = cpu_to_be32(fd->opaque);
1989 eq = (void *)((unsigned long)(eq + 1) &
1990 (~(unsigned long)(QM_EQCR_SIZE << 6)));
1997 /* In order for flushes to complete faster, all lines are recorded in
2001 for (i = 0; i < sent; i++) {
2002 eq->__dont_write_directly__verb =
2003 QM_EQCR_VERB_CMD_ENQUEUE | eqcr->vbit;
2005 eq = (void *)((unsigned long)(eq + 1) &
2006 (~(unsigned long)(QM_EQCR_SIZE << 6)));
2007 if (unlikely((prev_eq + 1) != eq))
2008 eqcr->vbit ^= QM_EQCR_VERB_VBIT;
2011 /* We need to flush all the lines but without load/store operations
2015 for (i = 0; i < sent; i++) {
2017 eq = (void *)((unsigned long)(eq + 1) &
2018 (~(unsigned long)(QM_EQCR_SIZE << 6)));
2020 /* Update cursor for the next call */
2025 int qman_enqueue_orp(struct qman_fq *fq, const struct qm_fd *fd, u32 flags,
2026 struct qman_fq *orp, u16 orp_seqnum)
2028 struct qman_portal *p = get_affine_portal();
2029 struct qm_eqcr_entry *eq;
2031 eq = try_p_eq_start(p, fq, fd, flags);
2034 /* Process ORP-specifics here */
2035 if (flags & QMAN_ENQUEUE_FLAG_NLIS)
2036 orp_seqnum |= QM_EQCR_SEQNUM_NLIS;
2038 orp_seqnum &= ~QM_EQCR_SEQNUM_NLIS;
2039 if (flags & QMAN_ENQUEUE_FLAG_NESN)
2040 orp_seqnum |= QM_EQCR_SEQNUM_NESN;
2042 /* No need to check for QMAN_ENQUEUE_FLAG_HOLE */
2043 orp_seqnum &= ~QM_EQCR_SEQNUM_NESN;
2045 eq->seqnum = cpu_to_be16(orp_seqnum);
2046 eq->orp = cpu_to_be32(orp->fqid);
2047 /* Note: QM_EQCR_VERB_INTERRUPT == QMAN_ENQUEUE_FLAG_WAIT_SYNC */
2048 qm_eqcr_pvb_commit(&p->p, QM_EQCR_VERB_ORP |
2049 ((flags & (QMAN_ENQUEUE_FLAG_HOLE | QMAN_ENQUEUE_FLAG_NESN)) ?
2050 0 : QM_EQCR_VERB_CMD_ENQUEUE) |
2051 (flags & (QM_EQCR_VERB_COLOUR_MASK | QM_EQCR_VERB_INTERRUPT)));
2056 int qman_modify_cgr(struct qman_cgr *cgr, u32 flags,
2057 struct qm_mcc_initcgr *opts)
2059 struct qm_mc_command *mcc;
2060 struct qm_mc_result *mcr;
2061 struct qman_portal *p = get_affine_portal();
2064 u8 verb = QM_MCC_VERB_MODIFYCGR;
2066 mcc = qm_mc_start(&p->p);
2068 mcc->initcgr = *opts;
2069 mcc->initcgr.we_mask = cpu_to_be16(mcc->initcgr.we_mask);
2070 mcc->initcgr.cgr.wr_parm_g.word =
2071 cpu_to_be32(mcc->initcgr.cgr.wr_parm_g.word);
2072 mcc->initcgr.cgr.wr_parm_y.word =
2073 cpu_to_be32(mcc->initcgr.cgr.wr_parm_y.word);
2074 mcc->initcgr.cgr.wr_parm_r.word =
2075 cpu_to_be32(mcc->initcgr.cgr.wr_parm_r.word);
2076 mcc->initcgr.cgr.cscn_targ = cpu_to_be32(mcc->initcgr.cgr.cscn_targ);
2077 mcc->initcgr.cgr.__cs_thres = cpu_to_be16(mcc->initcgr.cgr.__cs_thres);
2079 mcc->initcgr.cgid = cgr->cgrid;
2080 if (flags & QMAN_CGR_FLAG_USE_INIT)
2081 verb = QM_MCC_VERB_INITCGR;
2082 qm_mc_commit(&p->p, verb);
2083 while (!(mcr = qm_mc_result(&p->p)))
2086 DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == verb);
2088 return (res == QM_MCR_RESULT_OK) ? 0 : -EIO;
2091 #define TARG_MASK(n) (0x80000000 >> (n->config->channel - \
2092 QM_CHANNEL_SWPORTAL0))
2093 #define TARG_DCP_MASK(n) (0x80000000 >> (10 + n))
2094 #define PORTAL_IDX(n) (n->config->channel - QM_CHANNEL_SWPORTAL0)
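/* For example, a portal whose channel is QM_CHANNEL_SWPORTAL0 + 3 has
 * PORTAL_IDX(n) == 3 and TARG_MASK(n) == 0x80000000 >> 3 == 0x10000000,
 * i.e. one CSCN_TARG bit per software portal starting from the MSB;
 * TARG_DCP_MASK() applies the same scheme starting 10 bit positions
 * further down for the DCP portals.
 */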
2096 int qman_create_cgr(struct qman_cgr *cgr, u32 flags,
2097 struct qm_mcc_initcgr *opts)
2099 struct qm_mcr_querycgr cgr_state;
2100 struct qm_mcc_initcgr local_opts;
2102 struct qman_portal *p;
2104 /* We have to check that the provided CGRID is within the limits of the
2105 * data-structures, for obvious reasons. However we'll let h/w take
2106 * care of determining whether it's within the limits of what exists on
2109 if (cgr->cgrid >= __CGR_NUM)
2112 p = get_affine_portal();
2114 memset(&local_opts, 0, sizeof(struct qm_mcc_initcgr));
2115 cgr->chan = p->config->channel;
2116 spin_lock(&p->cgr_lock);
2118 /* if no opts specified, just add it to the list */
2122 ret = qman_query_cgr(cgr, &cgr_state);
2127 if ((qman_ip_rev & 0xFF00) >= QMAN_REV30)
2128 local_opts.cgr.cscn_targ_upd_ctrl =
2129 QM_CGR_TARG_UDP_CTRL_WRITE_BIT | PORTAL_IDX(p);
2131 /* Overwrite TARG */
2132 local_opts.cgr.cscn_targ = cgr_state.cgr.cscn_targ |
2134 local_opts.we_mask |= QM_CGR_WE_CSCN_TARG;
2136 /* send init if flags indicate so */
2137 if (opts && (flags & QMAN_CGR_FLAG_USE_INIT))
2138 ret = qman_modify_cgr(cgr, QMAN_CGR_FLAG_USE_INIT, &local_opts);
2140 ret = qman_modify_cgr(cgr, 0, &local_opts);
2144 list_add(&cgr->node, &p->cgr_cbs);
2146 /* Determine if newly added object requires its callback to be called */
2147 ret = qman_query_cgr(cgr, &cgr_state);
2149 /* we can't go back, so proceed and return success, but screen
2150 * and wail to the log file.
2152 pr_crit("CGR HW state partially modified\n");
2156 if (cgr->cb && cgr_state.cgr.cscn_en && qman_cgrs_get(&p->cgrs[1],
2160 spin_unlock(&p->cgr_lock);
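/* Illustrative usage sketch (not part of the driver): registering a CGR
 * object and its congestion-state callback on the current portal. "id" and
 * "my_cscn_cb" are placeholders; the CGRID must be below __CGR_NUM.
 * Passing NULL opts, as here, simply registers the object and its callback
 * on this portal's list; passing a populated struct qm_mcc_initcgr
 * together with QMAN_CGR_FLAG_USE_INIT would also initialise the CGR and
 * point its CSCN target at this portal.
 *
 *   static void my_cscn_cb(struct qman_portal *qm, struct qman_cgr *c,
 *                          int congested)
 *   {
 *           // e.g. pause enqueues on the affected FQs while 'congested'
 *   }
 *
 *   struct qman_cgr cgr = { .cgrid = id, .cb = my_cscn_cb };
 *
 *   if (qman_create_cgr(&cgr, 0, NULL))
 *           pr_err("qman_create_cgr failed\n");
 */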
2164 int qman_create_cgr_to_dcp(struct qman_cgr *cgr, u32 flags, u16 dcp_portal,
2165 struct qm_mcc_initcgr *opts)
2167 struct qm_mcc_initcgr local_opts;
2168 struct qm_mcr_querycgr cgr_state;
2171 if ((qman_ip_rev & 0xFF00) < QMAN_REV30) {
2172 pr_warn("QMan version doesn't support CSCN => DCP portal\n");
2175 /* We have to check that the provided CGRID is within the limits of the
2176 * data-structures, for obvious reasons. However we'll let h/w take
2177 * care of determining whether it's within the limits of what exists on
2180 if (cgr->cgrid >= __CGR_NUM)
2183 ret = qman_query_cgr(cgr, &cgr_state);
2187 memset(&local_opts, 0, sizeof(struct qm_mcc_initcgr));
2191 if ((qman_ip_rev & 0xFF00) >= QMAN_REV30)
2192 local_opts.cgr.cscn_targ_upd_ctrl =
2193 QM_CGR_TARG_UDP_CTRL_WRITE_BIT |
2194 QM_CGR_TARG_UDP_CTRL_DCP | dcp_portal;
2196 local_opts.cgr.cscn_targ = cgr_state.cgr.cscn_targ |
2197 TARG_DCP_MASK(dcp_portal);
2198 local_opts.we_mask |= QM_CGR_WE_CSCN_TARG;
2200 /* send init if flags indicate so */
2201 if (opts && (flags & QMAN_CGR_FLAG_USE_INIT))
2202 ret = qman_modify_cgr(cgr, QMAN_CGR_FLAG_USE_INIT,
2205 ret = qman_modify_cgr(cgr, 0, &local_opts);
2210 int qman_delete_cgr(struct qman_cgr *cgr)
2212 struct qm_mcr_querycgr cgr_state;
2213 struct qm_mcc_initcgr local_opts;
2216 struct qman_portal *p = get_affine_portal();
2218 if (cgr->chan != p->config->channel) {
2219 pr_crit("Attempting to delete cgr from different portal than"
2220 " it was create: create 0x%x, delete 0x%x\n",
2221 cgr->chan, p->config->channel);
2225 memset(&local_opts, 0, sizeof(struct qm_mcc_initcgr));
2226 spin_lock(&p->cgr_lock);
2227 list_del(&cgr->node);
2229 * If there are no other CGR objects for this CGRID in the list,
2230 * update CSCN_TARG accordingly
2232 list_for_each_entry(i, &p->cgr_cbs, node)
2233 if ((i->cgrid == cgr->cgrid) && i->cb)
2235 ret = qman_query_cgr(cgr, &cgr_state);
2237 /* add back to the list */
2238 list_add(&cgr->node, &p->cgr_cbs);
2241 /* Overwrite TARG */
2242 local_opts.we_mask = QM_CGR_WE_CSCN_TARG;
2243 if ((qman_ip_rev & 0xFF00) >= QMAN_REV30)
2244 local_opts.cgr.cscn_targ_upd_ctrl = PORTAL_IDX(p);
2246 local_opts.cgr.cscn_targ = cgr_state.cgr.cscn_targ &
2248 ret = qman_modify_cgr(cgr, 0, &local_opts);
2250 /* add back to the list */
2251 list_add(&cgr->node, &p->cgr_cbs);
2253 spin_unlock(&p->cgr_lock);
2258 int qman_shutdown_fq(u32 fqid)
2260 struct qman_portal *p;
2261 struct qm_portal *low_p;
2262 struct qm_mc_command *mcc;
2263 struct qm_mc_result *mcr;
2265 int orl_empty, fq_empty, drain = 0;
2270 p = get_affine_portal();
2273 /* Determine the state of the FQID */
2274 mcc = qm_mc_start(low_p);
2275 mcc->queryfq_np.fqid = cpu_to_be32(fqid);
2276 qm_mc_commit(low_p, QM_MCC_VERB_QUERYFQ_NP);
2277 while (!(mcr = qm_mc_result(low_p)))
2279 DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCR_VERB_QUERYFQ_NP);
2280 state = mcr->queryfq_np.state & QM_MCR_NP_STATE_MASK;
2281 if (state == QM_MCR_NP_STATE_OOS)
2282 return 0; /* Already OOS, no need to do any more checks */
2284 /* Query which channel the FQ is using */
2285 mcc = qm_mc_start(low_p);
2286 mcc->queryfq.fqid = cpu_to_be32(fqid);
2287 qm_mc_commit(low_p, QM_MCC_VERB_QUERYFQ);
2288 while (!(mcr = qm_mc_result(low_p)))
2290 DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCR_VERB_QUERYFQ);
2292 /* Need to store these since the MCR gets reused */
2293 dest_wq = be16_to_cpu(mcr->queryfq.fqd.dest_wq);
2294 channel = dest_wq & 0x7;
2298 case QM_MCR_NP_STATE_TEN_SCHED:
2299 case QM_MCR_NP_STATE_TRU_SCHED:
2300 case QM_MCR_NP_STATE_ACTIVE:
2301 case QM_MCR_NP_STATE_PARKED:
2303 mcc = qm_mc_start(low_p);
2304 mcc->alterfq.fqid = cpu_to_be32(fqid);
2305 qm_mc_commit(low_p, QM_MCC_VERB_ALTER_RETIRE);
2306 while (!(mcr = qm_mc_result(low_p)))
2308 DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) ==
2309 QM_MCR_VERB_ALTER_RETIRE);
2310 result = mcr->result; /* Make a copy as we reuse MCR below */
2312 if (result == QM_MCR_RESULT_PENDING) {
2313 /* Need to wait for the FQRN in the message ring, which
2314 * will only occur once the FQ has been drained. In
2315 * order for the FQ to drain the portal needs to be set
2316 * to dequeue from the channel the FQ is scheduled on
2318 const struct qm_mr_entry *msg;
2319 const struct qm_dqrr_entry *dqrr = NULL;
2321 __maybe_unused u16 dequeue_wq = 0;
2323 /* Flag that we need to drain FQ */
2326 if (channel >= qm_channel_pool1 &&
2327 channel < (u16)(qm_channel_pool1 + 15)) {
2328 /* Pool channel, enable the bit in the portal */
2329 dequeue_wq = (channel -
2330 qm_channel_pool1 + 1) << 4 | wq;
2331 } else if (channel < qm_channel_pool1) {
2332 /* Dedicated channel */
2335 pr_info("Cannot recover FQ 0x%x,"
2336 " it is scheduled on channel 0x%x",
2340 /* Set the sdqcr to drain this channel */
2341 if (channel < qm_channel_pool1)
2342 qm_dqrr_sdqcr_set(low_p,
2343 QM_SDQCR_TYPE_ACTIVE |
2344 QM_SDQCR_CHANNELS_DEDICATED);
2346 qm_dqrr_sdqcr_set(low_p,
2347 QM_SDQCR_TYPE_ACTIVE |
2348 QM_SDQCR_CHANNELS_POOL_CONV
2350 while (!found_fqrn) {
2351 /* Keep draining DQRR while checking the MR */
2352 qm_dqrr_pvb_update(low_p);
2353 dqrr = qm_dqrr_current(low_p);
2355 qm_dqrr_cdc_consume_1ptr(
2357 qm_dqrr_pvb_update(low_p);
2358 qm_dqrr_next(low_p);
2359 dqrr = qm_dqrr_current(low_p);
2361 /* Process message ring too */
2362 qm_mr_pvb_update(low_p);
2363 msg = qm_mr_current(low_p);
2366 QM_MR_VERB_TYPE_MASK)
2370 qm_mr_cci_consume_to_current(low_p);
2371 qm_mr_pvb_update(low_p);
2372 msg = qm_mr_current(low_p);
2377 if (result != QM_MCR_RESULT_OK &&
2378 result != QM_MCR_RESULT_PENDING) {
2380 pr_err("qman_retire_fq failed on FQ 0x%x,"
2381 " result=0x%x\n", fqid, result);
2384 if (!(mcr->alterfq.fqs & QM_MCR_FQS_ORLPRESENT)) {
2385 /* ORL had no entries, no need to wait until the
2390 /* Retirement succeeded, check to see if FQ needs to be drained */
2393 if (drain || mcr->alterfq.fqs & QM_MCR_FQS_NOTEMPTY) {
2394 /* FQ is Not Empty, drain using volatile DQ commands */
2397 const struct qm_dqrr_entry *dqrr = NULL;
2398 u32 vdqcr = fqid | QM_VDQCR_NUMFRAMES_SET(3);
2400 qm_dqrr_vdqcr_set(low_p, vdqcr);
2402 /* Wait for a dequeue to occur */
2403 while (dqrr == NULL) {
2404 qm_dqrr_pvb_update(low_p);
2405 dqrr = qm_dqrr_current(low_p);
2409 /* Process the dequeues, making sure to
2410 * empty the ring completely.
2413 if (dqrr->fqid == fqid &&
2414 dqrr->stat & QM_DQRR_STAT_FQ_EMPTY)
2416 qm_dqrr_cdc_consume_1ptr(low_p,
2418 qm_dqrr_pvb_update(low_p);
2419 qm_dqrr_next(low_p);
2420 dqrr = qm_dqrr_current(low_p);
2422 } while (fq_empty == 0);
2424 qm_dqrr_sdqcr_set(low_p, 0);
2426 /* Wait for the ORL to have been completely drained */
2427 while (orl_empty == 0) {
2428 const struct qm_mr_entry *msg;
2430 qm_mr_pvb_update(low_p);
2431 msg = qm_mr_current(low_p);
2433 if ((msg->verb & QM_MR_VERB_TYPE_MASK) ==
2437 qm_mr_cci_consume_to_current(low_p);
2438 qm_mr_pvb_update(low_p);
2439 msg = qm_mr_current(low_p);
2443 mcc = qm_mc_start(low_p);
2444 mcc->alterfq.fqid = cpu_to_be32(fqid);
2445 qm_mc_commit(low_p, QM_MCC_VERB_ALTER_OOS);
2446 while (!(mcr = qm_mc_result(low_p)))
2448 DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) ==
2449 QM_MCR_VERB_ALTER_OOS);
2450 if (mcr->result != QM_MCR_RESULT_OK) {
2452 "OOS after drain Failed on FQID 0x%x, result 0x%x\n",
2458 case QM_MCR_NP_STATE_RETIRED:
2459 /* Send OOS Command */
2460 mcc = qm_mc_start(low_p);
2461 mcc->alterfq.fqid = cpu_to_be32(fqid);
2462 qm_mc_commit(low_p, QM_MCC_VERB_ALTER_OOS);
2463 while (!(mcr = qm_mc_result(low_p)))
2465 DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) ==
2466 QM_MCR_VERB_ALTER_OOS);
2468 pr_err("OOS Failed on FQID 0x%x\n", fqid);