/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0)
 * Copyright 2008-2016 Freescale Semiconductor Inc.
 */

#include "qman.h"

#include <rte_branch_prediction.h>
#include <rte_dpaa_bus.h>
#include <rte_eventdev.h>
#include <rte_byteorder.h>

/* Compilation constants */
#define DQRR_MAXFILL 15
#define EQCR_ITHRESH 4	/* if EQCR congests, interrupt threshold */
#define IRQNAME "QMan portal %d"
#define MAX_IRQNAME 16	/* big enough for "QMan portal %d" */
/* maximum number of DQRR entries to process in qman_poll() */
#define FSL_QMAN_POLL_LIMIT 8
/* Lock/unlock frame queues, subject to the "LOCKED" flag. This is about
 * inter-processor locking only. Note, FQLOCK() is always called either under a
 * local_irq_save() or from interrupt context - hence there's no need for irq
 * protection (and indeed, attempting to nest irq-protection doesn't work, as
 * the "irq en/disable" machinery isn't recursive...).
 */
#define FQLOCK(fq) \
	do { \
		struct qman_fq *__fq478 = (fq); \
		if (fq_isset(__fq478, QMAN_FQ_FLAG_LOCKED)) \
			spin_lock(&__fq478->fqlock); \
	} while (0)
#define FQUNLOCK(fq) \
	do { \
		struct qman_fq *__fq478 = (fq); \
		if (fq_isset(__fq478, QMAN_FQ_FLAG_LOCKED)) \
			spin_unlock(&__fq478->fqlock); \
	} while (0)
static inline void fq_set(struct qman_fq *fq, u32 mask)
{
	dpaa_set_bits(mask, &fq->flags);
}

static inline void fq_clear(struct qman_fq *fq, u32 mask)
{
	dpaa_clear_bits(mask, &fq->flags);
}

static inline int fq_isset(struct qman_fq *fq, u32 mask)
{
	return fq->flags & mask;
}

static inline int fq_isclear(struct qman_fq *fq, u32 mask)
{
	return !(fq->flags & mask);
}
struct qman_portal {
	struct qm_portal p;
	/* PORTAL_BITS_*** - dynamic, strictly internal */
	/* interrupt sources processed by portal_isr(), configurable */
	unsigned long irq_sources;
	u32 use_eqcr_ci_stashing;
	u32 slowpoll;	/* only used when interrupts are off */
	/* only 1 volatile dequeue at a time */
	struct qman_fq *vdqcr_owned;
	/* A portal-specific handler for DCP ERNs. If this is NULL, the global
	 * handler is called instead.
	 */
	qman_cb_dc_ern cb_dc_ern;
	/* When the cpu-affine portal is activated, this is non-NULL */
	const struct qm_portal_config *config;
	struct dpa_rbtree retire_table;
	char irqname[MAX_IRQNAME];
	/* 2-element array. cgrs[0] is mask, cgrs[1] is snapshot. */
	struct qman_cgrs *cgrs;
	/* linked-list of CSCN handlers. */
	struct list_head cgr_cbs;
	/* track if memory was allocated by the driver */
#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
	/* Keep a shadow copy of the DQRR on LE systems, as the SW needs to
	 * byte-swap the read-only DQRR memory. The first entry must be
	 * aligned to 2 ** 10 so that DQRR index calculations can be based on
	 * the shadow copy address (6 bits for the address shift + 4 bits for
	 * the DQRR size).
	 */
	struct qm_dqrr_entry shadow_dqrr[QM_DQRR_SIZE]
		__attribute__((aligned(1024)));
#endif
};
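/*
 * Illustrative sketch (not part of the driver source): because the shadow
 * ring is 1024-byte aligned, a DQRR entry pointer maps to a ring index with
 * the same shift/mask arithmetic used on the real ring, e.g.:
 *
 *	idx = ((uintptr_t)dq >> 6) & (QM_DQRR_SIZE - 1);
 *	shadow = &p->shadow_dqrr[idx];
 *
 * The 64-byte entries account for the 6-bit shift and the 16-entry DQRR for
 * the 4-bit index, which together give the 2 ** 10 alignment requirement.
 */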
/* Global handler for DCP ERNs. Used when the portal receiving the message does
 * not have a portal-specific handler.
 */
static qman_cb_dc_ern cb_dc_ern;

static cpumask_t affine_mask;
static DEFINE_SPINLOCK(affine_mask_lock);
static u16 affine_channels[NR_CPUS];
static RTE_DEFINE_PER_LCORE(struct qman_portal, qman_affine_portal);

static inline struct qman_portal *get_affine_portal(void)
{
	return &RTE_PER_LCORE(qman_affine_portal);
}
/* This gives a FQID->FQ lookup to cover the fact that we can't directly demux
 * retirement notifications (the fact they are sometimes h/w-consumed means that
 * contextB isn't always a s/w demux - and as we can't know which case it is
 * when looking at the notification, we have to use the slow lookup for all of
 * them). NB, it's possible to have multiple FQ objects refer to the same FQID
 * (though at most one of them should be the consumer), so this table isn't for
 * all FQs - FQs are added when retirement commands are issued, and removed when
 * they complete, which also massively reduces the size of this table.
 */
IMPLEMENT_DPAA_RBTREE(fqtree, struct qman_fq, node, fqid);
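/*
 * Illustrative flow (mirroring the code below): qman_retire_fq() calls
 * table_push_fq() before issuing ALTER_RETIRE; __poll_portal_slow() then
 * demuxes FQRN/FQRL messages via table_find_fq(p, fqid); and the entry is
 * removed again with table_del_fq() once retirement completes.
 */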
/*
 * This is what everything can wait on, even if it migrates to a different cpu
 * from the one whose affine portal it is waiting on.
 */
static DECLARE_WAIT_QUEUE_HEAD(affine_queue);
static inline int table_push_fq(struct qman_portal *p, struct qman_fq *fq)
{
	int ret = fqtree_push(&p->retire_table, fq);

	if (ret)
		pr_err("ERROR: double FQ-retirement %d\n", fq->fqid);
	return ret;
}

static inline void table_del_fq(struct qman_portal *p, struct qman_fq *fq)
{
	fqtree_del(&p->retire_table, fq);
}

static inline struct qman_fq *table_find_fq(struct qman_portal *p, u32 fqid)
{
	return fqtree_find(&p->retire_table, fqid);
}
#ifdef CONFIG_FSL_QMAN_FQ_LOOKUP
static void **qman_fq_lookup_table;
static size_t qman_fq_lookup_table_size;

int qman_setup_fq_lookup_table(size_t num_entries)
{
	num_entries++;
	/* Allocate 1 more entry since the first entry is not used */
	qman_fq_lookup_table = vmalloc((num_entries * sizeof(void *)));
	if (!qman_fq_lookup_table) {
		pr_err("QMan: Could not allocate fq lookup table\n");
		return -ENOMEM;
	}
	memset(qman_fq_lookup_table, 0, num_entries * sizeof(void *));
	qman_fq_lookup_table_size = num_entries;
	pr_debug("QMan: Allocated lookup table at %p, entry count %lu\n",
		 qman_fq_lookup_table,
		 (unsigned long)qman_fq_lookup_table_size);
	return 0;
}
/* global structure that maintains fq object mapping */
static DEFINE_SPINLOCK(fq_hash_table_lock);

static int find_empty_fq_table_entry(u32 *entry, struct qman_fq *fq)
{
	u32 i;

	spin_lock(&fq_hash_table_lock);
	/* Can't use index zero because this has special meaning
	 * in context_b field.
	 */
	for (i = 1; i < qman_fq_lookup_table_size; i++) {
		if (qman_fq_lookup_table[i] == NULL) {
			*entry = i;
			qman_fq_lookup_table[i] = fq;
			spin_unlock(&fq_hash_table_lock);
			return 0;
		}
	}
	spin_unlock(&fq_hash_table_lock);
	return -ENOMEM;
}

static void clear_fq_table_entry(u32 entry)
{
	spin_lock(&fq_hash_table_lock);
	DPAA_BUG_ON(entry >= qman_fq_lookup_table_size);
	qman_fq_lookup_table[entry] = NULL;
	spin_unlock(&fq_hash_table_lock);
}

static inline struct qman_fq *get_fq_table_entry(u32 entry)
{
	DPAA_BUG_ON(entry >= qman_fq_lookup_table_size);
	return qman_fq_lookup_table[entry];
}
#endif
static inline void cpu_to_hw_fqd(struct qm_fqd *fqd)
{
	/* Byteswap the FQD to HW format */
	fqd->fq_ctrl = cpu_to_be16(fqd->fq_ctrl);
	fqd->dest_wq = cpu_to_be16(fqd->dest_wq);
	fqd->ics_cred = cpu_to_be16(fqd->ics_cred);
	fqd->context_b = cpu_to_be32(fqd->context_b);
	fqd->context_a.opaque = cpu_to_be64(fqd->context_a.opaque);
	fqd->opaque_td = cpu_to_be16(fqd->opaque_td);
}

static inline void hw_fqd_to_cpu(struct qm_fqd *fqd)
{
	/* Byteswap the FQD to CPU format */
	fqd->fq_ctrl = be16_to_cpu(fqd->fq_ctrl);
	fqd->dest_wq = be16_to_cpu(fqd->dest_wq);
	fqd->ics_cred = be16_to_cpu(fqd->ics_cred);
	fqd->context_b = be32_to_cpu(fqd->context_b);
	fqd->context_a.opaque = be64_to_cpu(fqd->context_a.opaque);
}

static inline void cpu_to_hw_fd(struct qm_fd *fd)
{
	fd->addr = cpu_to_be40(fd->addr);
	fd->status = cpu_to_be32(fd->status);
	fd->opaque = cpu_to_be32(fd->opaque);
}

static inline void hw_fd_to_cpu(struct qm_fd *fd)
{
	fd->addr = be40_to_cpu(fd->addr);
	fd->status = be32_to_cpu(fd->status);
	fd->opaque = be32_to_cpu(fd->opaque);
}
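/*
 * Usage sketch (illustrative, not part of the driver): software-built FDs
 * cross the endianness boundary exactly once in each direction:
 *
 *	cpu_to_hw_fd(&fd);	// before the FD is written to an EQCR entry
 *	...
 *	hw_fd_to_cpu(&fd);	// after an FD is read back from DQRR/MR
 *
 * On big-endian CPUs the cpu_to_be*()/be*_to_cpu() calls compile to no-ops.
 */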
/* In the case that slow- and fast-path handling are both done by qman_poll()
 * (ie. because there is no interrupt handling), we ought to balance how often
 * we do the fast-path poll versus the slow-path poll. We'll use two decrementer
 * sources, so we call the fast poll 'n' times before calling the slow poll
 * once. The idle decrementer constant is used when the last slow-poll detected
 * no work to do, and the busy decrementer constant when the last slow-poll had
 * work to do.
 */
#define SLOW_POLL_IDLE 1000
#define SLOW_POLL_BUSY 10
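/*
 * Illustrative pattern (this is what qman_poll() below does): the slow poll
 * runs once every p->slowpoll fast polls, and the counter is reloaded
 * according to whether the last slow poll found work:
 *
 *	if (!(p->slowpoll--)) {
 *		... slow poll ...
 *		p->slowpoll = active ? SLOW_POLL_BUSY : SLOW_POLL_IDLE;
 *	}
 *	... fast poll ...
 */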
static u32 __poll_portal_slow(struct qman_portal *p, u32 is);
static inline unsigned int __poll_portal_fast(struct qman_portal *p,
					      unsigned int poll_limit);

/* Portal interrupt handler */
static irqreturn_t portal_isr(__always_unused int irq, void *ptr)
{
	struct qman_portal *p = ptr;
	/*
	 * The CSCI/CCSCI source is cleared inside __poll_portal_slow(), because
	 * it could race against a Query Congestion State command also given
	 * as part of the handling of this interrupt source. We mustn't
	 * clear it a second time in this top-level function.
	 */
	u32 clear = QM_DQAVAIL_MASK | (p->irq_sources &
		~(QM_PIRQ_CSCI | QM_PIRQ_CCSCI));
	u32 is = qm_isr_status_read(&p->p) & p->irq_sources;

	/* DQRR-handling if it's interrupt-driven */
	if (is & QM_PIRQ_DQRI)
		__poll_portal_fast(p, FSL_QMAN_POLL_LIMIT);
	/* Handling of anything else that's interrupt-driven */
	clear |= __poll_portal_slow(p, is);
	qm_isr_status_clear(&p->p, clear);
	return IRQ_HANDLED;
}
/* This inner version is used privately by qman_create_affine_portal(), as well
 * as by the exported qman_stop_dequeues().
 */
static inline void qman_stop_dequeues_ex(struct qman_portal *p)
{
	if (!(p->dqrr_disable_ref++))
		qm_dqrr_set_maxfill(&p->p, 0);
}
static int drain_mr_fqrni(struct qm_portal *p)
{
	const struct qm_mr_entry *msg;
loop:
	msg = qm_mr_current(p);
	if (!msg) {
		/*
		 * if MR was full and h/w had other FQRNI entries to produce, we
		 * need to allow it time to produce those entries once the
		 * existing entries are consumed. A worst-case situation
		 * (fully-loaded system) means h/w sequencers may have to do 3-4
		 * other things before servicing the portal's MR pump, each of
		 * which (if slow) may take ~50 qman cycles (which is ~200
		 * processor cycles). So rounding up and then multiplying this
		 * worst-case estimate by a factor of 10, just to be
		 * ultra-paranoid, goes as high as 10,000 cycles. NB, we consume
		 * one entry at a time, so h/w has an opportunity to produce new
		 * entries well before the ring has been fully consumed, so
		 * we're being *really* paranoid here.
		 */
		u64 now, then = mfatb();

		do {
			now = mfatb();
		} while ((then + 10000) > now);
		msg = qm_mr_current(p);
		if (!msg)
			return 0;
	}
	if ((msg->verb & QM_MR_VERB_TYPE_MASK) != QM_MR_VERB_FQRNI) {
		/* We aren't draining anything but FQRNIs */
		pr_err("Found verb 0x%x in MR\n", msg->verb);
		return -1;
	}
	qm_mr_next(p);
	qm_mr_cci_consume(p, 1);
	goto loop;
}
static inline int qm_eqcr_init(struct qm_portal *portal,
			       enum qm_eqcr_pmode pmode,
			       unsigned int eq_stash_thresh,
			       int eq_stash_prio)
{
	/* This use of 'register', as well as all other occurrences, is because
	 * it has been observed to generate much faster code with gcc than is
	 * otherwise the case.
	 */
	register struct qm_eqcr *eqcr = &portal->eqcr;
	u32 cfg;
	u8 pi;

	eqcr->ring = portal->addr.ce + QM_CL_EQCR;
	eqcr->ci = qm_in(EQCR_CI_CINH) & (QM_EQCR_SIZE - 1);
	qm_cl_invalidate(EQCR_CI);
	pi = qm_in(EQCR_PI_CINH) & (QM_EQCR_SIZE - 1);
	eqcr->cursor = eqcr->ring + pi;
	eqcr->vbit = (qm_in(EQCR_PI_CINH) & QM_EQCR_SIZE) ?
			QM_EQCR_VERB_VBIT : 0;
	eqcr->available = QM_EQCR_SIZE - 1 -
			qm_cyc_diff(QM_EQCR_SIZE, eqcr->ci, pi);
	eqcr->ithresh = qm_in(EQCR_ITR);
#ifdef RTE_LIBRTE_DPAA_HWDEBUG
	eqcr->busy = 0;
	eqcr->pmode = pmode;
#endif
	cfg = (qm_in(CFG) & 0x00ffffff) |
		(eq_stash_thresh << 28) | /* QCSP_CFG: EST */
		(eq_stash_prio << 26) | /* QCSP_CFG: EP */
		((pmode & 0x3) << 24); /* QCSP_CFG: EPM */
	qm_out(CFG, cfg);
	return 0;
}
static inline void qm_eqcr_finish(struct qm_portal *portal)
{
	register struct qm_eqcr *eqcr = &portal->eqcr;
	u8 pi, ci;
	u32 cfg;

	/*
	 * Disable EQCI stashing because the QMan only
	 * presents the value it previously stashed to
	 * maintain coherency. Setting the stash threshold
	 * to 1 then 0 ensures that QMan has resynchronized
	 * its internal copy so that the portal is clean
	 * when it is reinitialized in the future.
	 */
	cfg = (qm_in(CFG) & 0x0fffffff) |
		(1 << 28); /* QCSP_CFG: EST */
	qm_out(CFG, cfg);
	cfg &= 0x0fffffff; /* stash threshold = 0 */
	qm_out(CFG, cfg);

	pi = qm_in(EQCR_PI_CINH) & (QM_EQCR_SIZE - 1);
	ci = qm_in(EQCR_CI_CINH) & (QM_EQCR_SIZE - 1);

	/* Refresh EQCR CI cache value */
	qm_cl_invalidate(EQCR_CI);
	eqcr->ci = qm_cl_in(EQCR_CI) & (QM_EQCR_SIZE - 1);

#ifdef RTE_LIBRTE_DPAA_HWDEBUG
	DPAA_ASSERT(!eqcr->busy);
#endif
	if (pi != EQCR_PTR2IDX(eqcr->cursor))
		pr_crit("losing uncommitted EQCR entries\n");
	if (ci != eqcr->ci)
		pr_crit("missing existing EQCR completions\n");
	if (eqcr->ci != EQCR_PTR2IDX(eqcr->cursor))
		pr_crit("EQCR destroyed unquiesced\n");
}
static inline int qm_dqrr_init(struct qm_portal *portal,
			__maybe_unused const struct qm_portal_config *config,
			enum qm_dqrr_dmode dmode,
			__maybe_unused enum qm_dqrr_pmode pmode,
			enum qm_dqrr_cmode cmode, u8 max_fill)
{
	register struct qm_dqrr *dqrr = &portal->dqrr;
	u32 cfg;

	/* Make sure the DQRR will be idle when we enable */
	qm_out(DQRR_SDQCR, 0);
	qm_out(DQRR_VDQCR, 0);
	qm_out(DQRR_PDQCR, 0);
	dqrr->ring = portal->addr.ce + QM_CL_DQRR;
	dqrr->pi = qm_in(DQRR_PI_CINH) & (QM_DQRR_SIZE - 1);
	dqrr->ci = qm_in(DQRR_CI_CINH) & (QM_DQRR_SIZE - 1);
	dqrr->cursor = dqrr->ring + dqrr->ci;
	dqrr->fill = qm_cyc_diff(QM_DQRR_SIZE, dqrr->ci, dqrr->pi);
	dqrr->vbit = (qm_in(DQRR_PI_CINH) & QM_DQRR_SIZE) ?
			QM_DQRR_VERB_VBIT : 0;
	dqrr->ithresh = qm_in(DQRR_ITR);
#ifdef RTE_LIBRTE_DPAA_HWDEBUG
	dqrr->dmode = dmode;
	dqrr->pmode = pmode;
	dqrr->cmode = cmode;
#endif
	/* Invalidate every ring entry before beginning */
	for (cfg = 0; cfg < QM_DQRR_SIZE; cfg++)
		dccivac(qm_cl(dqrr->ring, cfg));
	cfg = (qm_in(CFG) & 0xff000f00) |
		((max_fill & (QM_DQRR_SIZE - 1)) << 20) | /* DQRR_MF */
		((dmode & 1) << 18) | /* DP */
		((cmode & 3) << 16) | /* DCM */
		(0 ? 0x40 : 0) | /* Ignore RP */
		(0 ? 0x10 : 0); /* Ignore SP */
	qm_out(CFG, cfg);
	qm_dqrr_set_maxfill(portal, max_fill);
	return 0;
}
static inline void qm_dqrr_finish(struct qm_portal *portal)
{
	__maybe_unused register struct qm_dqrr *dqrr = &portal->dqrr;
#ifdef RTE_LIBRTE_DPAA_HWDEBUG
	if ((dqrr->cmode != qm_dqrr_cdc) &&
	    (dqrr->ci != DQRR_PTR2IDX(dqrr->cursor)))
		pr_crit("Ignoring completed DQRR entries\n");
#endif
}
static inline int qm_mr_init(struct qm_portal *portal,
			     __maybe_unused enum qm_mr_pmode pmode,
			     enum qm_mr_cmode cmode)
{
	register struct qm_mr *mr = &portal->mr;
	u32 cfg;

	mr->ring = portal->addr.ce + QM_CL_MR;
	mr->pi = qm_in(MR_PI_CINH) & (QM_MR_SIZE - 1);
	mr->ci = qm_in(MR_CI_CINH) & (QM_MR_SIZE - 1);
	mr->cursor = mr->ring + mr->ci;
	mr->fill = qm_cyc_diff(QM_MR_SIZE, mr->ci, mr->pi);
	mr->vbit = (qm_in(MR_PI_CINH) & QM_MR_SIZE) ? QM_MR_VERB_VBIT : 0;
	mr->ithresh = qm_in(MR_ITR);
#ifdef RTE_LIBRTE_DPAA_HWDEBUG
	mr->pmode = pmode;
	mr->cmode = cmode;
#endif
	cfg = (qm_in(CFG) & 0xfffff0ff) |
		((cmode & 1) << 8); /* QCSP_CFG:MM */
	qm_out(CFG, cfg);
	return 0;
}
static inline void qm_mr_pvb_update(struct qm_portal *portal)
{
	register struct qm_mr *mr = &portal->mr;
	const struct qm_mr_entry *res = qm_cl(mr->ring, mr->pi);

#ifdef RTE_LIBRTE_DPAA_HWDEBUG
	DPAA_ASSERT(mr->pmode == qm_mr_pvb);
#endif
	/* when accessing 'verb', use __raw_readb() to ensure that compiler
	 * inlining doesn't try to optimise out "excess reads".
	 */
	if ((__raw_readb(&res->verb) & QM_MR_VERB_VBIT) == mr->vbit) {
		mr->pi = (mr->pi + 1) & (QM_MR_SIZE - 1);
		if (!mr->pi)
			mr->vbit ^= QM_MR_VERB_VBIT;
		mr->fill++;
	}
}
struct qman_portal *qman_create_portal(
			struct qman_portal *portal,
			const struct qm_portal_config *c,
			const struct qman_cgrs *cgrs)
{
	struct qm_portal *p;
	char buf[16];
	int ret;
	u32 isdr;

	p = &portal->p;

	if (dpaa_svr_family == SVR_LS1043A_FAMILY)
		portal->use_eqcr_ci_stashing = 3;
	else
		portal->use_eqcr_ci_stashing =
			((qman_ip_rev >= QMAN_REV30) ? 1 : 0);

	/*
	 * prep the low-level portal struct with the mapped addresses from the
	 * config, everything that follows depends on it and "config" is more
	 * for (de)reference.
	 */
	p->addr.ce = c->addr_virt[DPAA_PORTAL_CE];
	p->addr.ci = c->addr_virt[DPAA_PORTAL_CI];
	/*
	 * If CI-stashing is used, the current defaults use a threshold of 3,
	 * and stash with higher-than-DQRR priority.
	 */
	if (qm_eqcr_init(p, qm_eqcr_pvb,
			 portal->use_eqcr_ci_stashing, 1)) {
		pr_err("Qman EQCR initialisation failed\n");
		goto fail_eqcr;
	}
	if (qm_dqrr_init(p, c, qm_dqrr_dpush, qm_dqrr_pvb,
			 qm_dqrr_cdc, DQRR_MAXFILL)) {
		pr_err("Qman DQRR initialisation failed\n");
		goto fail_dqrr;
	}
	if (qm_mr_init(p, qm_mr_pvb, qm_mr_cci)) {
		pr_err("Qman MR initialisation failed\n");
		goto fail_mr;
	}
	if (qm_mc_init(p)) {
		pr_err("Qman MC initialisation failed\n");
		goto fail_mc;
	}

	/* static interrupt-gating controls */
	qm_dqrr_set_ithresh(p, 0);
	qm_mr_set_ithresh(p, 0);
	qm_isr_set_iperiod(p, 0);
	portal->cgrs = kmalloc(2 * sizeof(*cgrs), GFP_KERNEL);
	if (!portal->cgrs)
		goto fail_cgrs;
	/* initial snapshot is no-depletion */
	qman_cgrs_init(&portal->cgrs[1]);
	if (cgrs)
		portal->cgrs[0] = *cgrs;
	else
		/* if the given mask is NULL, assume all CGRs can be seen */
		qman_cgrs_fill(&portal->cgrs[0]);
	INIT_LIST_HEAD(&portal->cgr_cbs);
	spin_lock_init(&portal->cgr_lock);
	portal->slowpoll = 0;
	portal->sdqcr = QM_SDQCR_SOURCE_CHANNELS | QM_SDQCR_COUNT_UPTO3 |
			QM_SDQCR_DEDICATED_PRECEDENCE | QM_SDQCR_TYPE_PRIO_QOS |
			QM_SDQCR_TOKEN_SET(0xab) | QM_SDQCR_CHANNELS_DEDICATED;
	portal->dqrr_disable_ref = 0;
	portal->cb_dc_ern = NULL;
	sprintf(buf, "qportal-%d", c->channel);
	dpa_rbtree_init(&portal->retire_table);
	isdr = 0xffffffff;
	qm_isr_disable_write(p, isdr);
	portal->irq_sources = 0;
	qm_isr_enable_write(p, portal->irq_sources);
	qm_isr_status_clear(p, 0xffffffff);
	snprintf(portal->irqname, MAX_IRQNAME, IRQNAME, c->cpu);
	if (request_irq(c->irq, portal_isr, 0, portal->irqname,
			portal)) {
		pr_err("request_irq() failed\n");
		goto fail_irq;
	}

	/* Need EQCR to be empty before continuing */
	isdr &= ~QM_PIRQ_EQCI;
	qm_isr_disable_write(p, isdr);
	ret = qm_eqcr_get_fill(p);
	if (ret) {
		pr_err("Qman EQCR unclean\n");
		goto fail_eqcr_empty;
	}
	isdr &= ~(QM_PIRQ_DQRI | QM_PIRQ_MRI);
	qm_isr_disable_write(p, isdr);
	if (qm_dqrr_current(p)) {
		pr_err("Qman DQRR unclean\n");
		qm_dqrr_cdc_consume_n(p, 0xffff);
	}
	if (qm_mr_current(p)) {
		/* special handling, drain just in case it's a few FQRNIs */
		if (drain_mr_fqrni(p))
			goto fail_dqrr_mr_empty;
	}
	/* Success */
	portal->config = c;
	qm_isr_disable_write(p, 0);
	/* Write a sane SDQCR */
	qm_dqrr_sdqcr_set(p, portal->sdqcr);
	return portal;
fail_dqrr_mr_empty:
fail_eqcr_empty:
	free_irq(c->irq, portal);
fail_irq:
	kfree(portal->cgrs);
	spin_lock_destroy(&portal->cgr_lock);
fail_cgrs:
	qm_mc_finish(p);
fail_mc:
	qm_mr_finish(p);
fail_mr:
	qm_dqrr_finish(p);
fail_dqrr:
	qm_eqcr_finish(p);
fail_eqcr:
	return NULL;
}
#define MAX_GLOBAL_PORTALS 8
static struct qman_portal global_portals[MAX_GLOBAL_PORTALS];
static int global_portals_used[MAX_GLOBAL_PORTALS];

static struct qman_portal *
qman_alloc_global_portal(void)
{
	unsigned int i;

	for (i = 0; i < MAX_GLOBAL_PORTALS; i++) {
		if (global_portals_used[i] == 0) {
			global_portals_used[i] = 1;
			return &global_portals[i];
		}
	}
	pr_err("No portal available (%x)\n", MAX_GLOBAL_PORTALS);
	return NULL;
}

static int
qman_free_global_portal(struct qman_portal *portal)
{
	unsigned int i;

	for (i = 0; i < MAX_GLOBAL_PORTALS; i++) {
		if (&global_portals[i] == portal) {
			global_portals_used[i] = 0;
			return 0;
		}
	}
	return -1;
}
struct qman_portal *qman_create_affine_portal(const struct qm_portal_config *c,
					      const struct qman_cgrs *cgrs,
					      int alloc)
{
	struct qman_portal *res;
	struct qman_portal *portal;

	if (alloc)
		portal = qman_alloc_global_portal();
	else
		portal = get_affine_portal();

	/* A criterion for calling this function (from qman_driver.c) is that
	 * we're already affine to the cpu and won't schedule onto another cpu.
	 */
	res = qman_create_portal(portal, c, cgrs);
	if (res) {
		spin_lock(&affine_mask_lock);
		CPU_SET(c->cpu, &affine_mask);
		affine_channels[c->cpu] = c->channel;
		spin_unlock(&affine_mask_lock);
	}
	return res;
}
void qman_destroy_portal(struct qman_portal *qm)
{
	const struct qm_portal_config *pcfg;

	/* Stop dequeues on the portal */
	qm_dqrr_sdqcr_set(&qm->p, 0);

	/*
	 * NB we do this to "quiesce" EQCR. If we add enqueue-completions or
	 * something related to QM_PIRQ_EQCI, this may need fixing.
	 * Also, due to the prefetching model used for CI updates in the enqueue
	 * path, this update will only invalidate the CI cacheline *after*
	 * working on it, so we need to call this twice to ensure a full update
	 * irrespective of where the enqueue processing was at when the teardown
	 * began.
	 */
	qm_eqcr_cce_update(&qm->p);
	qm_eqcr_cce_update(&qm->p);

	pcfg = qm->config;
	free_irq(pcfg->irq, qm);

	kfree(qm->cgrs);
	qm_mc_finish(&qm->p);
	qm_mr_finish(&qm->p);
	qm_dqrr_finish(&qm->p);
	qm_eqcr_finish(&qm->p);

	qm->config = NULL;

	spin_lock_destroy(&qm->cgr_lock);
}
const struct qm_portal_config *
qman_destroy_affine_portal(struct qman_portal *qp)
{
	/* We don't want to redirect if we're a slave, use "raw" */
	struct qman_portal *qm;
	const struct qm_portal_config *pcfg;
	int cpu;

	if (!qp)
		qm = get_affine_portal();
	else
		qm = qp;
	pcfg = qm->config;
	cpu = pcfg->cpu;

	qman_destroy_portal(qm);

	spin_lock(&affine_mask_lock);
	CPU_CLR(cpu, &affine_mask);
	spin_unlock(&affine_mask_lock);

	if (qp)
		qman_free_global_portal(qm);

	return pcfg;
}

int qman_get_portal_index(void)
{
	struct qman_portal *p = get_affine_portal();
	return p->config->index;
}
/* Inline helper to reduce nesting in __poll_portal_slow() */
static inline void fq_state_change(struct qman_portal *p, struct qman_fq *fq,
				   const struct qm_mr_entry *msg, u8 verb)
{
	FQLOCK(fq);
	switch (verb) {
	case QM_MR_VERB_FQRL:
		DPAA_ASSERT(fq_isset(fq, QMAN_FQ_STATE_ORL));
		fq_clear(fq, QMAN_FQ_STATE_ORL);
		table_del_fq(p, fq);
		break;
	case QM_MR_VERB_FQRN:
		DPAA_ASSERT((fq->state == qman_fq_state_parked) ||
			    (fq->state == qman_fq_state_sched));
		DPAA_ASSERT(fq_isset(fq, QMAN_FQ_STATE_CHANGING));
		fq_clear(fq, QMAN_FQ_STATE_CHANGING);
		if (msg->fq.fqs & QM_MR_FQS_NOTEMPTY)
			fq_set(fq, QMAN_FQ_STATE_NE);
		if (msg->fq.fqs & QM_MR_FQS_ORLPRESENT)
			fq_set(fq, QMAN_FQ_STATE_ORL);
		else
			table_del_fq(p, fq);
		fq->state = qman_fq_state_retired;
		break;
	case QM_MR_VERB_FQPN:
		DPAA_ASSERT(fq->state == qman_fq_state_sched);
		DPAA_ASSERT(fq_isclear(fq, QMAN_FQ_STATE_CHANGING));
		fq->state = qman_fq_state_parked;
		break;
	}
	FQUNLOCK(fq);
}
static u32 __poll_portal_slow(struct qman_portal *p, u32 is)
{
	const struct qm_mr_entry *msg;
	struct qm_mr_entry swapped_msg;

	if (is & QM_PIRQ_CSCI) {
		struct qman_cgrs rr, c;
		struct qm_mc_result *mcr;
		struct qman_cgr *cgr;

		spin_lock(&p->cgr_lock);
		/*
		 * The CSCI bit must be cleared _before_ issuing the
		 * Query Congestion State command, to ensure that a long
		 * CGR State Change callback cannot miss an intervening
		 * state change.
		 */
		qm_isr_status_clear(&p->p, QM_PIRQ_CSCI);
		qm_mc_start(&p->p);
		qm_mc_commit(&p->p, QM_MCC_VERB_QUERYCONGESTION);
		while (!(mcr = qm_mc_result(&p->p)))
			cpu_relax();
		/* mask out the ones I'm not interested in */
		qman_cgrs_and(&rr, (const struct qman_cgrs *)
			      &mcr->querycongestion.state, &p->cgrs[0]);
		/* check previous snapshot for delta, enter/exit congestion */
		qman_cgrs_xor(&c, &rr, &p->cgrs[1]);
		/* update snapshot */
		qman_cgrs_cp(&p->cgrs[1], &rr);
		/* Invoke callback */
		list_for_each_entry(cgr, &p->cgr_cbs, node)
			if (cgr->cb && qman_cgrs_get(&c, cgr->cgrid))
				cgr->cb(p, cgr, qman_cgrs_get(&rr, cgr->cgrid));
		spin_unlock(&p->cgr_lock);
	}

	if (is & QM_PIRQ_EQRI) {
		qm_eqcr_cce_update(&p->p);
		qm_eqcr_set_ithresh(&p->p, 0);
		wake_up(&affine_queue);
	}

	if (is & QM_PIRQ_MRI) {
		struct qman_fq *fq;
		u8 verb, num = 0;
mr_loop:
		qm_mr_pvb_update(&p->p);
		msg = qm_mr_current(&p->p);
		if (!msg)
			goto mr_done;
		swapped_msg = *msg;
		hw_fd_to_cpu(&swapped_msg.ern.fd);
		verb = msg->verb & QM_MR_VERB_TYPE_MASK;
		/* The message is a software ERN iff the 0x20 bit is clear */
		if (verb & 0x20) {
			switch (verb) {
			case QM_MR_VERB_FQRNI:
				/* nada, we drop FQRNIs on the floor */
				break;
			case QM_MR_VERB_FQRN:
			case QM_MR_VERB_FQRL:
				/* Lookup in the retirement table */
				fq = table_find_fq(p,
						   be32_to_cpu(msg->fq.fqid));
				DPAA_BUG_ON(!fq);
				fq_state_change(p, fq, &swapped_msg, verb);
				if (fq->cb.fqs)
					fq->cb.fqs(p, fq, &swapped_msg);
				break;
			case QM_MR_VERB_FQPN:
#ifdef CONFIG_FSL_QMAN_FQ_LOOKUP
				fq = get_fq_table_entry(
					be32_to_cpu(msg->fq.contextB));
#else
				fq = (void *)(uintptr_t)
					be32_to_cpu(msg->fq.contextB);
#endif
				fq_state_change(p, fq, msg, verb);
				if (fq->cb.fqs)
					fq->cb.fqs(p, fq, &swapped_msg);
				break;
			case QM_MR_VERB_DC_ERN:
				if (p->cb_dc_ern)
					p->cb_dc_ern(p, msg);
				else if (cb_dc_ern)
					cb_dc_ern(p, msg);
				else {
					static int warn_once;

					if (!warn_once) {
						pr_crit("Leaking DCP ERNs!\n");
						warn_once = 1;
					}
				}
				break;
			default:
				pr_crit("Invalid MR verb 0x%02x\n", verb);
			}
		} else {
			/* It's a software ERN */
#ifdef CONFIG_FSL_QMAN_FQ_LOOKUP
			fq = get_fq_table_entry(be32_to_cpu(msg->ern.tag));
#else
			fq = (void *)(uintptr_t)be32_to_cpu(msg->ern.tag);
#endif
			fq->cb.ern(p, fq, &swapped_msg);
		}
		num++;
		qm_mr_next(&p->p);
		goto mr_loop;
mr_done:
		qm_mr_cci_consume(&p->p, num);
	}
	/*
	 * QM_PIRQ_CSCI/CCSCI has already been cleared, as part of its specific
	 * processing. If that interrupt source has meanwhile been re-asserted,
	 * we mustn't clear it here (or in the top-level interrupt handler).
	 */
	return is & (QM_PIRQ_EQCI | QM_PIRQ_EQRI | QM_PIRQ_MRI);
}
/*
 * remove some slowish-path stuff from the "fast path" and make sure it isn't
 * inlined.
 */
static noinline void clear_vdqcr(struct qman_portal *p, struct qman_fq *fq)
{
	p->vdqcr_owned = NULL;
	FQLOCK(fq);
	fq_clear(fq, QMAN_FQ_STATE_VDQCR);
	FQUNLOCK(fq);
	wake_up(&affine_queue);
}
/*
 * The only states that would conflict with other things if they ran at the
 * same time on the same cpu are:
 *
 *   (i) setting/clearing vdqcr_owned, and
 *  (ii) clearing the NE (Not Empty) flag.
 *
 * Both are safe, because:
 *
 *   (i) this clearing can only occur after qman_set_vdq() has set the
 *	 vdqcr_owned field (which it does before setting VDQCR), and
 *	 qman_volatile_dequeue() blocks interrupts and preemption while this is
 *	 done so that we can't interfere.
 *  (ii) the NE flag is only cleared after qman_retire_fq() has set it, and as
 *	 with (i) that API prevents us from interfering until it's safe.
 *
 * The good thing is that qman_set_vdq() and qman_retire_fq() run far
 * less frequently (ie. per-FQ) than __poll_portal_fast() does, so the net
 * advantage comes from this function not having to "lock" anything at all.
 *
 * Note also that the callbacks are invoked at points which are safe against the
 * above potential conflicts, but that this function itself is not re-entrant
 * (this is because the function tracks one end of each FIFO in the portal and
 * we do *not* want to lock that). So the consequence is that it is safe for
 * user callbacks to call into any QMan API.
 */
static inline unsigned int __poll_portal_fast(struct qman_portal *p,
					      unsigned int poll_limit)
{
	const struct qm_dqrr_entry *dq;
	struct qman_fq *fq;
	enum qman_cb_dqrr_result res;
	unsigned int limit = 0;
#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
	struct qm_dqrr_entry *shadow;
#endif
	do {
		qm_dqrr_pvb_update(&p->p);
		dq = qm_dqrr_current(&p->p);
		if (!dq)
			break;
#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
		/* If running on an LE system the fields of the
		 * dequeue entry must be swapped. Because the
		 * QMan HW will ignore writes, the DQRR entry is
		 * copied, and the index is stored within the copy.
		 */
		shadow = &p->shadow_dqrr[DQRR_PTR2IDX(dq)];
		*shadow = *dq;
		dq = shadow;
		shadow->fqid = be32_to_cpu(shadow->fqid);
		shadow->contextB = be32_to_cpu(shadow->contextB);
		shadow->seqnum = be16_to_cpu(shadow->seqnum);
		hw_fd_to_cpu(&shadow->fd);
#endif

		if (dq->stat & QM_DQRR_STAT_UNSCHEDULED) {
			/*
			 * VDQCR: don't trust context_b as the FQ may have
			 * been configured for h/w consumption and we're
			 * draining it post-retirement.
			 */
			fq = p->vdqcr_owned;
			/*
			 * We only set QMAN_FQ_STATE_NE when retiring, so we
			 * only need to check for clearing it when doing
			 * volatile dequeues. It's one less thing to check
			 * in the critical path (SDQCR).
			 */
			if (dq->stat & QM_DQRR_STAT_FQ_EMPTY)
				fq_clear(fq, QMAN_FQ_STATE_NE);
			/*
			 * This is duplicated from the SDQCR code, but we
			 * have stuff to do before *and* after this callback,
			 * and we don't want multiple if()s in the critical
			 * path (SDQCR).
			 */
			res = fq->cb.dqrr(p, fq, dq);
			if (res == qman_cb_dqrr_stop)
				break;
			/* Check for VDQCR completion */
			if (dq->stat & QM_DQRR_STAT_DQCR_EXPIRED)
				clear_vdqcr(p, fq);
		} else {
			/* SDQCR: context_b points to the FQ */
#ifdef CONFIG_FSL_QMAN_FQ_LOOKUP
			fq = get_fq_table_entry(dq->contextB);
#else
			fq = (void *)(uintptr_t)dq->contextB;
#endif
			/* Now let the callback do its stuff */
			res = fq->cb.dqrr(p, fq, dq);
			/*
			 * The callback can request that we exit without
			 * consuming this entry nor advancing;
			 */
			if (res == qman_cb_dqrr_stop)
				break;
		}
		/* Interpret 'dq' from a driver perspective. */
		/*
		 * Parking isn't possible unless HELDACTIVE was set. NB,
		 * FORCEELIGIBLE implies HELDACTIVE, so we only need to
		 * check for HELDACTIVE to cover both.
		 */
		DPAA_ASSERT((dq->stat & QM_DQRR_STAT_FQ_HELDACTIVE) ||
			    (res != qman_cb_dqrr_park));
		/* just means "skip it, I'll consume it myself later on" */
		if (res != qman_cb_dqrr_defer)
			qm_dqrr_cdc_consume_1ptr(&p->p, dq,
						 res == qman_cb_dqrr_park);
		/* Move forward */
		qm_dqrr_next(&p->p);
		/*
		 * Entry processed and consumed, increment our counter. The
		 * callback can request that we exit after consuming the
		 * entry, and we also exit if we reach our processing limit,
		 * so loop back only if neither of these conditions is met.
		 */
	} while (++limit < poll_limit && res != qman_cb_dqrr_consume_stop);

	return limit;
}
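/*
 * Illustrative caller (see qman_poll_dqrr() below, which wraps this): a
 * run-to-completion loop typically polls the affine portal in small batches,
 * e.g.:
 *
 *	while (running)
 *		qman_poll_dqrr(FSL_QMAN_POLL_LIMIT);
 */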
u16 qman_affine_channel(int cpu)
{
	if (cpu < 0) {
		struct qman_portal *portal = get_affine_portal();

		cpu = portal->config->cpu;
	}
	DPAA_BUG_ON(!CPU_ISSET(cpu, &affine_mask));
	return affine_channels[cpu];
}
unsigned int qman_portal_poll_rx(unsigned int poll_limit,
				 void **bufs,
				 struct qman_portal *p)
{
	const struct qm_dqrr_entry *dq;
	struct qman_fq *fq;
	enum qman_cb_dqrr_result res;
	unsigned int limit = 0;
#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
	struct qm_dqrr_entry *shadow;
#endif
	unsigned int rx_number = 0;

	do {
		qm_dqrr_pvb_update(&p->p);
		dq = qm_dqrr_current(&p->p);
		if (!dq)
			break;
#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
		/* If running on an LE system the fields of the
		 * dequeue entry must be swapped. Because the
		 * QMan HW will ignore writes, the DQRR entry is
		 * copied, and the index is stored within the copy.
		 */
		shadow = &p->shadow_dqrr[DQRR_PTR2IDX(dq)];
		*shadow = *dq;
		dq = shadow;
		shadow->fqid = be32_to_cpu(shadow->fqid);
		shadow->contextB = be32_to_cpu(shadow->contextB);
		shadow->seqnum = be16_to_cpu(shadow->seqnum);
		hw_fd_to_cpu(&shadow->fd);
#endif
		/* SDQCR: context_b points to the FQ */
#ifdef CONFIG_FSL_QMAN_FQ_LOOKUP
		fq = get_fq_table_entry(dq->contextB);
#else
		fq = (void *)(uintptr_t)dq->contextB;
#endif
		/* Now let the callback do its stuff */
		res = fq->cb.dqrr_dpdk_cb(NULL, p, fq, dq, &bufs[rx_number]);
		rx_number++;
		/* Interpret 'dq' from a driver perspective. */
		/*
		 * Parking isn't possible unless HELDACTIVE was set. NB,
		 * FORCEELIGIBLE implies HELDACTIVE, so we only need to
		 * check for HELDACTIVE to cover both.
		 */
		DPAA_ASSERT((dq->stat & QM_DQRR_STAT_FQ_HELDACTIVE) ||
			    (res != qman_cb_dqrr_park));
		qm_dqrr_cdc_consume_1ptr(&p->p, dq, res == qman_cb_dqrr_park);
		/* Move forward */
		qm_dqrr_next(&p->p);
		/*
		 * Entry processed and consumed, increment our counter. The
		 * callback can request that we exit after consuming the
		 * entry, and we also exit if we reach our processing limit,
		 * so loop back only if neither of these conditions is met.
		 */
	} while (likely(++limit < poll_limit));

	return rx_number;
}
u32 qman_portal_dequeue(struct rte_event ev[], unsigned int poll_limit,
			void **bufs)
{
	const struct qm_dqrr_entry *dq;
	struct qman_fq *fq;
	enum qman_cb_dqrr_result res;
	unsigned int limit = 0;
	struct qman_portal *p = get_affine_portal();
#if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
	struct qm_dqrr_entry *shadow;
#endif
	unsigned int rx_number = 0;

	do {
		qm_dqrr_pvb_update(&p->p);
		dq = qm_dqrr_current(&p->p);
		if (!dq)
			break;
#if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
		/*
		 * If running on an LE system the fields of the
		 * dequeue entry must be swapped. Because the
		 * QMan HW will ignore writes, the DQRR entry is
		 * copied, and the index is stored within the copy.
		 */
		shadow = &p->shadow_dqrr[DQRR_PTR2IDX(dq)];
		*shadow = *dq;
		dq = shadow;
		shadow->fqid = be32_to_cpu(shadow->fqid);
		shadow->contextB = be32_to_cpu(shadow->contextB);
		shadow->seqnum = be16_to_cpu(shadow->seqnum);
		hw_fd_to_cpu(&shadow->fd);
#endif
		/* SDQCR: context_b points to the FQ */
#ifdef CONFIG_FSL_QMAN_FQ_LOOKUP
		fq = get_fq_table_entry(dq->contextB);
#else
		fq = (void *)(uintptr_t)dq->contextB;
#endif
		/* Now let the callback do its stuff */
		res = fq->cb.dqrr_dpdk_cb(&ev[rx_number], p, fq,
					  dq, &bufs[rx_number]);
		rx_number++;
		/* Interpret 'dq' from a driver perspective. */
		/*
		 * Parking isn't possible unless HELDACTIVE was set. NB,
		 * FORCEELIGIBLE implies HELDACTIVE, so we only need to
		 * check for HELDACTIVE to cover both.
		 */
		DPAA_ASSERT((dq->stat & QM_DQRR_STAT_FQ_HELDACTIVE) ||
			    (res != qman_cb_dqrr_park));
		if (res != qman_cb_dqrr_defer)
			qm_dqrr_cdc_consume_1ptr(&p->p, dq,
						 res == qman_cb_dqrr_park);
		/* Move forward */
		qm_dqrr_next(&p->p);
		/*
		 * Entry processed and consumed, increment our counter. The
		 * callback can request that we exit after consuming the
		 * entry, and we also exit if we reach our processing limit,
		 * so loop back only if neither of these conditions is met.
		 */
	} while (++limit < poll_limit);

	return rx_number;
}
struct qm_dqrr_entry *qman_dequeue(struct qman_fq *fq)
{
	struct qman_portal *p = get_affine_portal();
	const struct qm_dqrr_entry *dq;
#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
	struct qm_dqrr_entry *shadow;
#endif

	qm_dqrr_pvb_update(&p->p);
	dq = qm_dqrr_current(&p->p);
	if (!dq)
		return NULL;

	if (!(dq->stat & QM_DQRR_STAT_FD_VALID)) {
		/* Invalid DQRR - put the portal and consume the DQRR.
		 * Return NULL to user as no packet is seen.
		 */
		qman_dqrr_consume(fq, (struct qm_dqrr_entry *)dq);
		return NULL;
	}

#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
	shadow = &p->shadow_dqrr[DQRR_PTR2IDX(dq)];
	*shadow = *dq;
	dq = shadow;
	shadow->fqid = be32_to_cpu(shadow->fqid);
	shadow->contextB = be32_to_cpu(shadow->contextB);
	shadow->seqnum = be16_to_cpu(shadow->seqnum);
	hw_fd_to_cpu(&shadow->fd);
#endif

	if (dq->stat & QM_DQRR_STAT_FQ_EMPTY)
		fq_clear(fq, QMAN_FQ_STATE_NE);

	return (struct qm_dqrr_entry *)dq;
}
void qman_dqrr_consume(struct qman_fq *fq,
		       struct qm_dqrr_entry *dq)
{
	struct qman_portal *p = get_affine_portal();

	if (dq->stat & QM_DQRR_STAT_DQCR_EXPIRED)
		clear_vdqcr(p, fq);

	qm_dqrr_cdc_consume_1ptr(&p->p, dq, 0);
	qm_dqrr_next(&p->p);
}

int qman_poll_dqrr(unsigned int limit)
{
	struct qman_portal *p = get_affine_portal();
	int ret;

	ret = __poll_portal_fast(p, limit);
	return ret;
}
void qman_poll(void)
{
	struct qman_portal *p = get_affine_portal();

	if ((~p->irq_sources) & QM_PIRQ_SLOW) {
		if (!(p->slowpoll--)) {
			u32 is = qm_isr_status_read(&p->p) & ~p->irq_sources;
			u32 active = __poll_portal_slow(p, is);

			if (active) {
				qm_isr_status_clear(&p->p, active);
				p->slowpoll = SLOW_POLL_BUSY;
			} else
				p->slowpoll = SLOW_POLL_IDLE;
		}
	}
	if ((~p->irq_sources) & QM_PIRQ_DQRI)
		__poll_portal_fast(p, FSL_QMAN_POLL_LIMIT);
}
void qman_stop_dequeues(void)
{
	struct qman_portal *p = get_affine_portal();

	qman_stop_dequeues_ex(p);
}

void qman_start_dequeues(void)
{
	struct qman_portal *p = get_affine_portal();

	DPAA_ASSERT(p->dqrr_disable_ref > 0);
	if (!(--p->dqrr_disable_ref))
		qm_dqrr_set_maxfill(&p->p, DQRR_MAXFILL);
}

void qman_static_dequeue_add(u32 pools, struct qman_portal *qp)
{
	struct qman_portal *p = qp ? qp : get_affine_portal();

	pools &= p->config->pools;
	p->sdqcr |= pools;
	qm_dqrr_sdqcr_set(&p->p, p->sdqcr);
}

void qman_static_dequeue_del(u32 pools, struct qman_portal *qp)
{
	struct qman_portal *p = qp ? qp : get_affine_portal();

	pools &= p->config->pools;
	p->sdqcr &= ~pools;
	qm_dqrr_sdqcr_set(&p->p, p->sdqcr);
}

u32 qman_static_dequeue_get(struct qman_portal *qp)
{
	struct qman_portal *p = qp ? qp : get_affine_portal();
	return p->sdqcr;
}

void qman_dca(const struct qm_dqrr_entry *dq, int park_request)
{
	struct qman_portal *p = get_affine_portal();

	qm_dqrr_cdc_consume_1ptr(&p->p, dq, park_request);
}

void qman_dca_index(u8 index, int park_request)
{
	struct qman_portal *p = get_affine_portal();

	qm_dqrr_cdc_consume_1(&p->p, index, park_request);
}
/* Frame queue API */
static const char *mcr_result_str(u8 result)
{
	switch (result) {
	case QM_MCR_RESULT_NULL:
		return "QM_MCR_RESULT_NULL";
	case QM_MCR_RESULT_OK:
		return "QM_MCR_RESULT_OK";
	case QM_MCR_RESULT_ERR_FQID:
		return "QM_MCR_RESULT_ERR_FQID";
	case QM_MCR_RESULT_ERR_FQSTATE:
		return "QM_MCR_RESULT_ERR_FQSTATE";
	case QM_MCR_RESULT_ERR_NOTEMPTY:
		return "QM_MCR_RESULT_ERR_NOTEMPTY";
	case QM_MCR_RESULT_PENDING:
		return "QM_MCR_RESULT_PENDING";
	case QM_MCR_RESULT_ERR_BADCOMMAND:
		return "QM_MCR_RESULT_ERR_BADCOMMAND";
	default:
		return "<unknown MCR result>";
	}
}
int qman_create_fq(u32 fqid, u32 flags, struct qman_fq *fq)
{
	struct qm_fqd fqd;
	struct qm_mcr_queryfq_np np;
	struct qm_mc_command *mcc;
	struct qm_mc_result *mcr;
	struct qman_portal *p;

	if (flags & QMAN_FQ_FLAG_DYNAMIC_FQID) {
		int ret = qman_alloc_fqid(&fqid);

		if (ret)
			return ret;
	}
	spin_lock_init(&fq->fqlock);
	fq->fqid = fqid;
	fq->fqid_le = cpu_to_be32(fqid);
	fq->flags = flags;
	fq->state = qman_fq_state_oos;
	fq->cgr_groupid = 0;
#ifdef CONFIG_FSL_QMAN_FQ_LOOKUP
	if (unlikely(find_empty_fq_table_entry(&fq->key, fq))) {
		pr_info("Find empty table entry failed\n");
		return -ENOMEM;
	}
#endif
	if (!(flags & QMAN_FQ_FLAG_AS_IS) || (flags & QMAN_FQ_FLAG_NO_MODIFY))
		return 0;
	/* Everything else is AS_IS support */
	p = get_affine_portal();
	mcc = qm_mc_start(&p->p);
	mcc->queryfq.fqid = cpu_to_be32(fqid);
	qm_mc_commit(&p->p, QM_MCC_VERB_QUERYFQ);
	while (!(mcr = qm_mc_result(&p->p)))
		cpu_relax();
	DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCC_VERB_QUERYFQ);
	if (mcr->result != QM_MCR_RESULT_OK) {
		pr_err("QUERYFQ failed: %s\n", mcr_result_str(mcr->result));
		goto err;
	}
	fqd = mcr->queryfq.fqd;
	hw_fqd_to_cpu(&fqd);
	mcc = qm_mc_start(&p->p);
	mcc->queryfq_np.fqid = cpu_to_be32(fqid);
	qm_mc_commit(&p->p, QM_MCC_VERB_QUERYFQ_NP);
	while (!(mcr = qm_mc_result(&p->p)))
		cpu_relax();
	DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCC_VERB_QUERYFQ_NP);
	if (mcr->result != QM_MCR_RESULT_OK) {
		pr_err("QUERYFQ_NP failed: %s\n", mcr_result_str(mcr->result));
		goto err;
	}
	np = mcr->queryfq_np;
	/* Phew, have queryfq and queryfq_np results, stitch together
	 * the FQ object from those.
	 */
	fq->cgr_groupid = fqd.cgid;
	switch (np.state & QM_MCR_NP_STATE_MASK) {
	case QM_MCR_NP_STATE_OOS:
		break;
	case QM_MCR_NP_STATE_RETIRED:
		fq->state = qman_fq_state_retired;
		if (np.frm_cnt)
			fq_set(fq, QMAN_FQ_STATE_NE);
		break;
	case QM_MCR_NP_STATE_TEN_SCHED:
	case QM_MCR_NP_STATE_TRU_SCHED:
	case QM_MCR_NP_STATE_ACTIVE:
		fq->state = qman_fq_state_sched;
		if (np.state & QM_MCR_NP_STATE_R)
			fq_set(fq, QMAN_FQ_STATE_CHANGING);
		break;
	case QM_MCR_NP_STATE_PARKED:
		fq->state = qman_fq_state_parked;
		break;
	default:
		DPAA_ASSERT(NULL == "invalid FQ state");
	}
	if (fqd.fq_ctrl & QM_FQCTRL_CGE)
		fq->state |= QMAN_FQ_STATE_CGR_EN;
	return 0;
err:
	if (flags & QMAN_FQ_FLAG_DYNAMIC_FQID)
		qman_release_fqid(fqid);
	return -EIO;
}
void qman_destroy_fq(struct qman_fq *fq, u32 flags __maybe_unused)
{
	/*
	 * We don't need to lock the FQ as it is a pre-condition that the FQ be
	 * quiesced. Instead, run some checks.
	 */
	switch (fq->state) {
	case qman_fq_state_parked:
		DPAA_ASSERT(flags & QMAN_FQ_DESTROY_PARKED);
		/* fallthrough */
	case qman_fq_state_oos:
		if (fq_isset(fq, QMAN_FQ_FLAG_DYNAMIC_FQID))
			qman_release_fqid(fq->fqid);
#ifdef CONFIG_FSL_QMAN_FQ_LOOKUP
		clear_fq_table_entry(fq->key);
#endif
		return;
	default:
		break;
	}
	DPAA_ASSERT(NULL == "qman_free_fq() on unquiesced FQ!");
}

u32 qman_fq_fqid(struct qman_fq *fq)
{
	return fq->fqid;
}

void qman_fq_state(struct qman_fq *fq, enum qman_fq_state *state, u32 *flags)
{
	if (state)
		*state = fq->state;
	if (flags)
		*flags = fq->flags;
}
int qman_init_fq(struct qman_fq *fq, u32 flags, struct qm_mcc_initfq *opts)
{
	struct qm_mc_command *mcc;
	struct qm_mc_result *mcr;
	struct qman_portal *p;

	u8 res, myverb = (flags & QMAN_INITFQ_FLAG_SCHED) ?
		QM_MCC_VERB_INITFQ_SCHED : QM_MCC_VERB_INITFQ_PARKED;

	if ((fq->state != qman_fq_state_oos) &&
	    (fq->state != qman_fq_state_parked))
		return -EINVAL;
#ifdef RTE_LIBRTE_DPAA_HWDEBUG
	if (unlikely(fq_isset(fq, QMAN_FQ_FLAG_NO_MODIFY)))
		return -EINVAL;
#endif
	if (opts && (opts->we_mask & QM_INITFQ_WE_OAC)) {
		/* And can't be set at the same time as TDTHRESH */
		if (opts->we_mask & QM_INITFQ_WE_TDTHRESH)
			return -EINVAL;
	}
	/* Issue an INITFQ_[PARKED|SCHED] management command */
	p = get_affine_portal();
	FQLOCK(fq);
	if (unlikely((fq_isset(fq, QMAN_FQ_STATE_CHANGING)) ||
		     ((fq->state != qman_fq_state_oos) &&
		      (fq->state != qman_fq_state_parked)))) {
		FQUNLOCK(fq);
		return -EBUSY;
	}
	mcc = qm_mc_start(&p->p);
	if (opts)
		mcc->initfq = *opts;
	mcc->initfq.fqid = cpu_to_be32(fq->fqid);
	mcc->initfq.count = 0;
	/*
	 * If the FQ does *not* have the TO_DCPORTAL flag, context_b is set as a
	 * demux pointer. Otherwise, the caller-provided value is allowed to
	 * stand, don't overwrite it.
	 */
	if (fq_isclear(fq, QMAN_FQ_FLAG_TO_DCPORTAL)) {
		dma_addr_t phys_fq;

		mcc->initfq.we_mask |= QM_INITFQ_WE_CONTEXTB;
#ifdef CONFIG_FSL_QMAN_FQ_LOOKUP
		mcc->initfq.fqd.context_b = fq->key;
#else
		mcc->initfq.fqd.context_b = (u32)(uintptr_t)fq;
#endif
		/*
		 * and the physical address - NB, if the user wasn't trying to
		 * set CONTEXTA, clear the stashing settings.
		 */
		if (!(mcc->initfq.we_mask & QM_INITFQ_WE_CONTEXTA)) {
			mcc->initfq.we_mask |= QM_INITFQ_WE_CONTEXTA;
			memset(&mcc->initfq.fqd.context_a, 0,
			       sizeof(mcc->initfq.fqd.context_a));
		} else {
			phys_fq = rte_mem_virt2iova(fq);
			qm_fqd_stashing_set64(&mcc->initfq.fqd, phys_fq);
		}
	}
	if (flags & QMAN_INITFQ_FLAG_LOCAL) {
		mcc->initfq.fqd.dest.channel = p->config->channel;
		if (!(mcc->initfq.we_mask & QM_INITFQ_WE_DESTWQ)) {
			mcc->initfq.we_mask |= QM_INITFQ_WE_DESTWQ;
			mcc->initfq.fqd.dest.wq = 4;
		}
	}
	mcc->initfq.we_mask = cpu_to_be16(mcc->initfq.we_mask);
	cpu_to_hw_fqd(&mcc->initfq.fqd);
	qm_mc_commit(&p->p, myverb);
	while (!(mcr = qm_mc_result(&p->p)))
		cpu_relax();
	DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == myverb);
	res = mcr->result;
	if (res != QM_MCR_RESULT_OK) {
		FQUNLOCK(fq);
		return -EIO;
	}
	if (opts) {
		if (opts->we_mask & QM_INITFQ_WE_FQCTRL) {
			if (opts->fqd.fq_ctrl & QM_FQCTRL_CGE)
				fq_set(fq, QMAN_FQ_STATE_CGR_EN);
			else
				fq_clear(fq, QMAN_FQ_STATE_CGR_EN);
		}
		if (opts->we_mask & QM_INITFQ_WE_CGID)
			fq->cgr_groupid = opts->fqd.cgid;
	}
	fq->state = (flags & QMAN_INITFQ_FLAG_SCHED) ?
		qman_fq_state_sched : qman_fq_state_parked;
	FQUNLOCK(fq);
	return 0;
}
int qman_schedule_fq(struct qman_fq *fq)
{
	struct qm_mc_command *mcc;
	struct qm_mc_result *mcr;
	struct qman_portal *p;
	int ret = 0;
	u8 res;

	if (fq->state != qman_fq_state_parked)
		return -EINVAL;
#ifdef RTE_LIBRTE_DPAA_HWDEBUG
	if (unlikely(fq_isset(fq, QMAN_FQ_FLAG_NO_MODIFY)))
		return -EINVAL;
#endif
	/* Issue an ALTERFQ_SCHED management command */
	p = get_affine_portal();

	FQLOCK(fq);
	if (unlikely((fq_isset(fq, QMAN_FQ_STATE_CHANGING)) ||
		     (fq->state != qman_fq_state_parked))) {
		ret = -EBUSY;
		goto out;
	}
	mcc = qm_mc_start(&p->p);
	mcc->alterfq.fqid = cpu_to_be32(fq->fqid);
	qm_mc_commit(&p->p, QM_MCC_VERB_ALTER_SCHED);
	while (!(mcr = qm_mc_result(&p->p)))
		cpu_relax();
	DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCR_VERB_ALTER_SCHED);
	res = mcr->result;
	if (res != QM_MCR_RESULT_OK) {
		ret = -EIO;
		goto out;
	}
	fq->state = qman_fq_state_sched;
out:
	FQUNLOCK(fq);
	return ret;
}
int qman_retire_fq(struct qman_fq *fq, u32 *flags)
{
	struct qm_mc_command *mcc;
	struct qm_mc_result *mcr;
	struct qman_portal *p;
	int rval;
	u8 res;

	if ((fq->state != qman_fq_state_parked) &&
	    (fq->state != qman_fq_state_sched))
		return -EINVAL;
#ifdef RTE_LIBRTE_DPAA_HWDEBUG
	if (unlikely(fq_isset(fq, QMAN_FQ_FLAG_NO_MODIFY)))
		return -EINVAL;
#endif
	p = get_affine_portal();

	FQLOCK(fq);
	if (unlikely((fq_isset(fq, QMAN_FQ_STATE_CHANGING)) ||
		     (fq->state == qman_fq_state_retired) ||
		     (fq->state == qman_fq_state_oos))) {
		rval = -EBUSY;
		goto out;
	}
	rval = table_push_fq(p, fq);
	if (rval)
		goto out;
	mcc = qm_mc_start(&p->p);
	mcc->alterfq.fqid = cpu_to_be32(fq->fqid);
	qm_mc_commit(&p->p, QM_MCC_VERB_ALTER_RETIRE);
	while (!(mcr = qm_mc_result(&p->p)))
		cpu_relax();
	DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCR_VERB_ALTER_RETIRE);
	res = mcr->result;
	/*
	 * "Elegant" would be to treat OK/PENDING the same way; set CHANGING,
	 * and defer the flags until FQRNI or FQRN (respectively) show up. But
	 * "Friendly" is to process OK immediately, and not set CHANGING. We do
	 * friendly, otherwise the caller doesn't necessarily have a fully
	 * "retired" FQ on return even if the retirement was immediate. However
	 * this does mean some code duplication between here and
	 * fq_state_change().
	 */
	if (likely(res == QM_MCR_RESULT_OK)) {
		rval = 0;
		/* Process 'fq' right away, we'll ignore FQRNI */
		if (mcr->alterfq.fqs & QM_MCR_FQS_NOTEMPTY)
			fq_set(fq, QMAN_FQ_STATE_NE);
		if (mcr->alterfq.fqs & QM_MCR_FQS_ORLPRESENT)
			fq_set(fq, QMAN_FQ_STATE_ORL);
		else
			table_del_fq(p, fq);
		if (flags)
			*flags = fq->flags;
		fq->state = qman_fq_state_retired;
		if (fq->cb.fqs) {
			/*
			 * Another issue with supporting "immediate" retirement
			 * is that we're forced to drop FQRNIs, because by the
			 * time they're seen it may already be "too late" (the
			 * fq may have been OOS'd and free()'d already). But if
			 * the upper layer wants a callback whether it's
			 * immediate or not, we have to fake a "MR" entry to
			 * look like an FQRNI...
			 */
			struct qm_mr_entry msg;

			msg.verb = QM_MR_VERB_FQRNI;
			msg.fq.fqs = mcr->alterfq.fqs;
			msg.fq.fqid = fq->fqid;
#ifdef CONFIG_FSL_QMAN_FQ_LOOKUP
			msg.fq.contextB = fq->key;
#else
			msg.fq.contextB = (u32)(uintptr_t)fq;
#endif
			fq->cb.fqs(p, fq, &msg);
		}
	} else if (res == QM_MCR_RESULT_PENDING) {
		rval = 1;
		fq_set(fq, QMAN_FQ_STATE_CHANGING);
	} else {
		rval = -EIO;
		table_del_fq(p, fq);
	}
out:
	FQUNLOCK(fq);
	return rval;
}
int qman_oos_fq(struct qman_fq *fq)
{
	struct qm_mc_command *mcc;
	struct qm_mc_result *mcr;
	struct qman_portal *p;
	int ret = 0;
	u8 res;

	if (fq->state != qman_fq_state_retired)
		return -EINVAL;
#ifdef RTE_LIBRTE_DPAA_HWDEBUG
	if (unlikely(fq_isset(fq, QMAN_FQ_FLAG_NO_MODIFY)))
		return -EINVAL;
#endif
	p = get_affine_portal();
	FQLOCK(fq);
	if (unlikely((fq_isset(fq, QMAN_FQ_STATE_BLOCKOOS)) ||
		     (fq->state != qman_fq_state_retired))) {
		ret = -EBUSY;
		goto out;
	}
	mcc = qm_mc_start(&p->p);
	mcc->alterfq.fqid = cpu_to_be32(fq->fqid);
	qm_mc_commit(&p->p, QM_MCC_VERB_ALTER_OOS);
	while (!(mcr = qm_mc_result(&p->p)))
		cpu_relax();
	DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCR_VERB_ALTER_OOS);
	res = mcr->result;
	if (res != QM_MCR_RESULT_OK) {
		ret = -EIO;
		goto out;
	}
	fq->state = qman_fq_state_oos;
out:
	FQUNLOCK(fq);
	return ret;
}
int qman_fq_flow_control(struct qman_fq *fq, int xon)
{
	struct qm_mc_command *mcc;
	struct qm_mc_result *mcr;
	struct qman_portal *p;
	int ret = 0;
	u8 res;
	u8 myverb;

	if ((fq->state == qman_fq_state_oos) ||
	    (fq->state == qman_fq_state_retired) ||
	    (fq->state == qman_fq_state_parked))
		return -EINVAL;

#ifdef RTE_LIBRTE_DPAA_HWDEBUG
	if (unlikely(fq_isset(fq, QMAN_FQ_FLAG_NO_MODIFY)))
		return -EINVAL;
#endif
	/* Issue an ALTER_FQXON or ALTER_FQXOFF management command */
	p = get_affine_portal();
	FQLOCK(fq);
	if (unlikely((fq_isset(fq, QMAN_FQ_STATE_CHANGING)) ||
		     (fq->state == qman_fq_state_parked) ||
		     (fq->state == qman_fq_state_oos) ||
		     (fq->state == qman_fq_state_retired))) {
		ret = -EBUSY;
		goto out;
	}

	mcc = qm_mc_start(&p->p);
	mcc->alterfq.fqid = fq->fqid;
	mcc->alterfq.count = 0;
	myverb = xon ? QM_MCC_VERB_ALTER_FQXON : QM_MCC_VERB_ALTER_FQXOFF;

	qm_mc_commit(&p->p, myverb);
	while (!(mcr = qm_mc_result(&p->p)))
		cpu_relax();
	DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == myverb);

	res = mcr->result;
	if (res != QM_MCR_RESULT_OK) {
		ret = -EIO;
		goto out;
	}
out:
	FQUNLOCK(fq);
	return ret;
}
int qman_query_fq(struct qman_fq *fq, struct qm_fqd *fqd)
{
	struct qm_mc_command *mcc;
	struct qm_mc_result *mcr;
	struct qman_portal *p = get_affine_portal();
	u8 res;

	mcc = qm_mc_start(&p->p);
	mcc->queryfq.fqid = cpu_to_be32(fq->fqid);
	qm_mc_commit(&p->p, QM_MCC_VERB_QUERYFQ);
	while (!(mcr = qm_mc_result(&p->p)))
		cpu_relax();
	DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCR_VERB_QUERYFQ);
	res = mcr->result;
	if (res == QM_MCR_RESULT_OK)
		*fqd = mcr->queryfq.fqd;
	hw_fqd_to_cpu(fqd);
	if (res != QM_MCR_RESULT_OK)
		return -EIO;
	return 0;
}
int qman_query_fq_has_pkts(struct qman_fq *fq)
{
	struct qm_mc_command *mcc;
	struct qm_mc_result *mcr;
	struct qman_portal *p = get_affine_portal();
	int ret = 0;
	u8 res;

	mcc = qm_mc_start(&p->p);
	mcc->queryfq.fqid = cpu_to_be32(fq->fqid);
	qm_mc_commit(&p->p, QM_MCC_VERB_QUERYFQ_NP);
	while (!(mcr = qm_mc_result(&p->p)))
		cpu_relax();
	res = mcr->result;
	if (res == QM_MCR_RESULT_OK)
		ret = !!mcr->queryfq_np.frm_cnt;
	return ret;
}
int qman_query_fq_np(struct qman_fq *fq, struct qm_mcr_queryfq_np *np)
{
	struct qm_mc_command *mcc;
	struct qm_mc_result *mcr;
	struct qman_portal *p = get_affine_portal();
	u8 res;

	mcc = qm_mc_start(&p->p);
	mcc->queryfq.fqid = cpu_to_be32(fq->fqid);
	qm_mc_commit(&p->p, QM_MCC_VERB_QUERYFQ_NP);
	while (!(mcr = qm_mc_result(&p->p)))
		cpu_relax();
	DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCR_VERB_QUERYFQ_NP);
	res = mcr->result;
	if (res == QM_MCR_RESULT_OK) {
		*np = mcr->queryfq_np;
		np->fqd_link = be24_to_cpu(np->fqd_link);
		np->odp_seq = be16_to_cpu(np->odp_seq);
		np->orp_nesn = be16_to_cpu(np->orp_nesn);
		np->orp_ea_hseq = be16_to_cpu(np->orp_ea_hseq);
		np->orp_ea_tseq = be16_to_cpu(np->orp_ea_tseq);
		np->orp_ea_hptr = be24_to_cpu(np->orp_ea_hptr);
		np->orp_ea_tptr = be24_to_cpu(np->orp_ea_tptr);
		np->pfdr_hptr = be24_to_cpu(np->pfdr_hptr);
		np->pfdr_tptr = be24_to_cpu(np->pfdr_tptr);
		np->ics_surp = be16_to_cpu(np->ics_surp);
		np->byte_cnt = be32_to_cpu(np->byte_cnt);
		np->frm_cnt = be24_to_cpu(np->frm_cnt);
		np->ra1_sfdr = be16_to_cpu(np->ra1_sfdr);
		np->ra2_sfdr = be16_to_cpu(np->ra2_sfdr);
		np->od1_sfdr = be16_to_cpu(np->od1_sfdr);
		np->od2_sfdr = be16_to_cpu(np->od2_sfdr);
		np->od3_sfdr = be16_to_cpu(np->od3_sfdr);
	}
	if (res == QM_MCR_RESULT_ERR_FQID)
		return -ERANGE;
	else if (res != QM_MCR_RESULT_OK)
		return -EIO;
	return 0;
}
int qman_query_fq_frm_cnt(struct qman_fq *fq, u32 *frm_cnt)
{
	struct qm_mc_command *mcc;
	struct qm_mc_result *mcr;
	struct qman_portal *p = get_affine_portal();

	mcc = qm_mc_start(&p->p);
	mcc->queryfq.fqid = cpu_to_be32(fq->fqid);
	qm_mc_commit(&p->p, QM_MCC_VERB_QUERYFQ_NP);
	while (!(mcr = qm_mc_result(&p->p)))
		cpu_relax();
	DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCR_VERB_QUERYFQ_NP);

	if (mcr->result == QM_MCR_RESULT_OK)
		*frm_cnt = be24_to_cpu(mcr->queryfq_np.frm_cnt);
	else if (mcr->result == QM_MCR_RESULT_ERR_FQID)
		return -ERANGE;
	else if (mcr->result != QM_MCR_RESULT_OK)
		return -EIO;
	return 0;
}
int qman_query_wq(u8 query_dedicated, struct qm_mcr_querywq *wq)
{
	struct qm_mc_command *mcc;
	struct qm_mc_result *mcr;
	struct qman_portal *p = get_affine_portal();
	u8 res, myverb;

	myverb = (query_dedicated) ? QM_MCR_VERB_QUERYWQ_DEDICATED :
				     QM_MCR_VERB_QUERYWQ;
	mcc = qm_mc_start(&p->p);
	mcc->querywq.channel.id = cpu_to_be16(wq->channel.id);
	qm_mc_commit(&p->p, myverb);
	while (!(mcr = qm_mc_result(&p->p)))
		cpu_relax();
	DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == myverb);
	res = mcr->result;
	if (res == QM_MCR_RESULT_OK) {
		int i, array_len;

		wq->channel.id = be16_to_cpu(mcr->querywq.channel.id);
		array_len = ARRAY_SIZE(mcr->querywq.wq_len);
		for (i = 0; i < array_len; i++)
			wq->wq_len[i] = be32_to_cpu(mcr->querywq.wq_len[i]);
	}
	if (res != QM_MCR_RESULT_OK) {
		pr_err("QUERYWQ failed: %s\n", mcr_result_str(res));
		return -EIO;
	}
	return 0;
}
int qman_testwrite_cgr(struct qman_cgr *cgr, u64 i_bcnt,
		       struct qm_mcr_cgrtestwrite *result)
{
	struct qm_mc_command *mcc;
	struct qm_mc_result *mcr;
	struct qman_portal *p = get_affine_portal();
	u8 res;

	mcc = qm_mc_start(&p->p);
	mcc->cgrtestwrite.cgid = cgr->cgrid;
	mcc->cgrtestwrite.i_bcnt_hi = (u8)(i_bcnt >> 32);
	mcc->cgrtestwrite.i_bcnt_lo = (u32)i_bcnt;
	qm_mc_commit(&p->p, QM_MCC_VERB_CGRTESTWRITE);
	while (!(mcr = qm_mc_result(&p->p)))
		cpu_relax();
	DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCC_VERB_CGRTESTWRITE);
	res = mcr->result;
	if (res == QM_MCR_RESULT_OK)
		*result = mcr->cgrtestwrite;
	if (res != QM_MCR_RESULT_OK) {
		pr_err("CGR TEST WRITE failed: %s\n", mcr_result_str(res));
		return -EIO;
	}
	return 0;
}
int qman_query_cgr(struct qman_cgr *cgr, struct qm_mcr_querycgr *cgrd)
{
	struct qm_mc_command *mcc;
	struct qm_mc_result *mcr;
	struct qman_portal *p = get_affine_portal();
	u8 res;
	unsigned int i;

	mcc = qm_mc_start(&p->p);
	mcc->querycgr.cgid = cgr->cgrid;
	qm_mc_commit(&p->p, QM_MCC_VERB_QUERYCGR);
	while (!(mcr = qm_mc_result(&p->p)))
		cpu_relax();
	DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCC_VERB_QUERYCGR);
	res = mcr->result;
	if (res == QM_MCR_RESULT_OK)
		*cgrd = mcr->querycgr;
	if (res != QM_MCR_RESULT_OK) {
		pr_err("QUERY_CGR failed: %s\n", mcr_result_str(res));
		return -EIO;
	}
	cgrd->cgr.wr_parm_g.word =
		be32_to_cpu(cgrd->cgr.wr_parm_g.word);
	cgrd->cgr.wr_parm_y.word =
		be32_to_cpu(cgrd->cgr.wr_parm_y.word);
	cgrd->cgr.wr_parm_r.word =
		be32_to_cpu(cgrd->cgr.wr_parm_r.word);
	cgrd->cgr.cscn_targ = be32_to_cpu(cgrd->cgr.cscn_targ);
	cgrd->cgr.__cs_thres = be16_to_cpu(cgrd->cgr.__cs_thres);
	for (i = 0; i < ARRAY_SIZE(cgrd->cscn_targ_swp); i++)
		cgrd->cscn_targ_swp[i] =
			be32_to_cpu(cgrd->cscn_targ_swp[i]);
	return 0;
}
int qman_query_congestion(struct qm_mcr_querycongestion *congestion)
{
	struct qm_mc_result *mcr;
	struct qman_portal *p = get_affine_portal();
	u8 res;
	unsigned int i;

	qm_mc_start(&p->p);
	qm_mc_commit(&p->p, QM_MCC_VERB_QUERYCONGESTION);
	while (!(mcr = qm_mc_result(&p->p)))
		cpu_relax();
	DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) ==
		    QM_MCC_VERB_QUERYCONGESTION);
	res = mcr->result;
	if (res == QM_MCR_RESULT_OK)
		*congestion = mcr->querycongestion;
	if (res != QM_MCR_RESULT_OK) {
		pr_err("QUERY_CONGESTION failed: %s\n", mcr_result_str(res));
		return -EIO;
	}
	for (i = 0; i < ARRAY_SIZE(congestion->state.state); i++)
		congestion->state.state[i] =
			be32_to_cpu(congestion->state.state[i]);
	return 0;
}
int qman_set_vdq(struct qman_fq *fq, u16 num)
{
	struct qman_portal *p = get_affine_portal();
	uint32_t vdqcr;
	int ret = -EBUSY;

	vdqcr = QM_VDQCR_EXACT;
	vdqcr |= QM_VDQCR_NUMFRAMES_SET(num);

	if ((fq->state != qman_fq_state_parked) &&
	    (fq->state != qman_fq_state_retired)) {
		ret = -EINVAL;
		goto out;
	}
	if (fq_isset(fq, QMAN_FQ_STATE_VDQCR)) {
		ret = -EBUSY;
		goto out;
	}
	vdqcr = (vdqcr & ~QM_VDQCR_FQID_MASK) | fq->fqid;

	if (!p->vdqcr_owned) {
		FQLOCK(fq);
		if (fq_isset(fq, QMAN_FQ_STATE_VDQCR))
			goto escape;
		fq_set(fq, QMAN_FQ_STATE_VDQCR);
		FQUNLOCK(fq);
		p->vdqcr_owned = fq;
		ret = 0;
	}
escape:
	if (!ret)
		qm_dqrr_vdqcr_set(&p->p, vdqcr);
out:
	return ret;
}
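/*
 * Illustrative usage (a sketch under assumed calling conventions, not from
 * the source): a volatile dequeue of up to eight frames from a parked or
 * retired FQ might look like:
 *
 *	if (!qman_set_vdq(fq, 8)) {
 *		while ((dq = qman_dequeue(fq))) {
 *			... process dq->fd ...
 *			qman_dqrr_consume(fq, dq);
 *		}
 *	}
 */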
int qman_volatile_dequeue(struct qman_fq *fq, u32 flags __maybe_unused,
			  u32 vdqcr)
{
	struct qman_portal *p;
	int ret = -EBUSY;

	if ((fq->state != qman_fq_state_parked) &&
	    (fq->state != qman_fq_state_retired))
		return -EINVAL;
	if (vdqcr & QM_VDQCR_FQID_MASK)
		return -EINVAL;
	if (fq_isset(fq, QMAN_FQ_STATE_VDQCR))
		return -EBUSY;
	vdqcr = (vdqcr & ~QM_VDQCR_FQID_MASK) | fq->fqid;

	p = get_affine_portal();

	if (!p->vdqcr_owned) {
		FQLOCK(fq);
		if (fq_isset(fq, QMAN_FQ_STATE_VDQCR))
			goto escape;
		fq_set(fq, QMAN_FQ_STATE_VDQCR);
		FQUNLOCK(fq);
		p->vdqcr_owned = fq;
		ret = 0;
	}
escape:
	if (ret)
		return ret;

	/* VDQCR is set */
	qm_dqrr_vdqcr_set(&p->p, vdqcr);
	return 0;
}
static noinline void update_eqcr_ci(struct qman_portal *p, u8 avail)
{
	if (avail)
		qm_eqcr_cce_prefetch(&p->p);
	else
		qm_eqcr_cce_update(&p->p);
}

int qman_eqcr_is_empty(void)
{
	struct qman_portal *p = get_affine_portal();
	u8 avail;

	update_eqcr_ci(p, 0);
	avail = qm_eqcr_get_fill(&p->p);
	return (avail == 0);
}

void qman_set_dc_ern(qman_cb_dc_ern handler, int affine)
{
	if (affine) {
		struct qman_portal *p = get_affine_portal();

		p->cb_dc_ern = handler;
	} else
		cb_dc_ern = handler;
}
static inline struct qm_eqcr_entry *try_p_eq_start(struct qman_portal *p,
						   struct qman_fq *fq,
						   const struct qm_fd *fd,
						   u32 flags)
{
	struct qm_eqcr_entry *eq;
	u8 avail;

	if (p->use_eqcr_ci_stashing) {
		/*
		 * The stashing case is easy, only update if we need to in
		 * order to try and liberate ring entries.
		 */
		eq = qm_eqcr_start_stash(&p->p);
	} else {
		/*
		 * The non-stashing case is harder, need to prefetch ahead of
		 * time.
		 */
		avail = qm_eqcr_get_avail(&p->p);
		if (avail < 2)
			update_eqcr_ci(p, avail);
		eq = qm_eqcr_start_no_stash(&p->p);
	}

	if (unlikely(!eq))
		return NULL;

	if (flags & QMAN_ENQUEUE_FLAG_DCA)
		eq->dca = QM_EQCR_DCA_ENABLE |
			((flags & QMAN_ENQUEUE_FLAG_DCA_PARK) ?
					QM_EQCR_DCA_PARK : 0) |
			((flags >> 8) & QM_EQCR_DCA_IDXMASK);
	eq->fqid = cpu_to_be32(fq->fqid);
#ifdef CONFIG_FSL_QMAN_FQ_LOOKUP
	eq->tag = cpu_to_be32(fq->key);
#else
	eq->tag = cpu_to_be32((u32)(uintptr_t)fq);
#endif
	eq->fd = *fd;
	cpu_to_hw_fd(&eq->fd);
	return eq;
}
int qman_enqueue(struct qman_fq *fq, const struct qm_fd *fd, u32 flags)
{
	struct qman_portal *p = get_affine_portal();
	struct qm_eqcr_entry *eq;

	eq = try_p_eq_start(p, fq, fd, flags);
	if (!eq)
		return -EBUSY;
	/* Note: QM_EQCR_VERB_INTERRUPT == QMAN_ENQUEUE_FLAG_WAIT_SYNC */
	qm_eqcr_pvb_commit(&p->p, QM_EQCR_VERB_CMD_ENQUEUE |
		(flags & (QM_EQCR_VERB_COLOUR_MASK | QM_EQCR_VERB_INTERRUPT)));
	/* Factor the below out, it's used from qman_enqueue_orp() too */
	return 0;
}
int qman_enqueue_multi(struct qman_fq *fq,
		       const struct qm_fd *fd, u32 *flags,
		       int frames_to_send)
{
	struct qman_portal *p = get_affine_portal();
	struct qm_portal *portal = &p->p;

	register struct qm_eqcr *eqcr = &portal->eqcr;
	struct qm_eqcr_entry *eq = eqcr->cursor, *prev_eq;

	u8 i = 0, diff, old_ci, sent = 0;

	/* Update the available entries if no entry is free */
	if (!eqcr->available) {
		old_ci = eqcr->ci;
		eqcr->ci = qm_cl_in(EQCR_CI) & (QM_EQCR_SIZE - 1);
		diff = qm_cyc_diff(QM_EQCR_SIZE, old_ci, eqcr->ci);
		eqcr->available += diff;
		if (!diff)
			return 0;
	}

	/* try to send as many frames as possible */
	while (eqcr->available && frames_to_send--) {
		eq->fqid = fq->fqid_le;
#ifdef CONFIG_FSL_QMAN_FQ_LOOKUP
		eq->tag = cpu_to_be32(fq->key);
#else
		eq->tag = cpu_to_be32((u32)(uintptr_t)fq);
#endif
		eq->fd.opaque_addr = fd->opaque_addr;
		eq->fd.addr = cpu_to_be40(fd->addr);
		eq->fd.status = cpu_to_be32(fd->status);
		eq->fd.opaque = cpu_to_be32(fd->opaque);
		if (flags && (flags[i] & QMAN_ENQUEUE_FLAG_DCA)) {
			eq->dca = QM_EQCR_DCA_ENABLE |
				((flags[i] >> 8) & QM_EQCR_DCA_IDXMASK);
		}
		i++;
		eq = (void *)((unsigned long)(eq + 1) &
			(~(unsigned long)(QM_EQCR_SIZE << 6)));
		eqcr->available--;
		sent++;
		fd++;
	}
	lwsync();

	/* In order for flushes to complete faster, all lines are recorded in
	 * 32 byte word sizes
	 */
	eq = eqcr->cursor;
	for (i = 0; i < sent; i++) {
		eq->__dont_write_directly__verb =
			QM_EQCR_VERB_CMD_ENQUEUE | eqcr->vbit;
		prev_eq = eq;
		eq = (void *)((unsigned long)(eq + 1) &
			(~(unsigned long)(QM_EQCR_SIZE << 6)));
		if (unlikely((prev_eq + 1) != eq))
			eqcr->vbit ^= QM_EQCR_VERB_VBIT;
	}

	/* We need to flush all the lines but without load/store operations
	 * between them
	 */
	eq = eqcr->cursor;
	for (i = 0; i < sent; i++) {
		dcbf(eq);
		eq = (void *)((unsigned long)(eq + 1) &
			(~(unsigned long)(QM_EQCR_SIZE << 6)));
	}
	/* Update cursor for the next call */
	eqcr->cursor = eq;
	return sent;
}
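
/*
 * Worked example of the cursor-wrap arithmetic used above, assuming
 * QM_EQCR_SIZE == 8 (illustrative, not a statement about any particular
 * SoC): each EQCR entry is 64B (1 << 6), so the ring spans 8 * 64 = 512B
 * and is naturally aligned. Incrementing past the last entry sets bit 9
 * (QM_EQCR_SIZE << 6 == 0x200); masking that bit off wraps the pointer
 * back to the ring base without a conditional branch.
 */
static __attribute__((unused)) struct qm_eqcr_entry *
example_eqcr_next(struct qm_eqcr_entry *eq)
{
	return (void *)((unsigned long)(eq + 1) &
			(~(unsigned long)(QM_EQCR_SIZE << 6)));
}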
int
qman_enqueue_multi_fq(struct qman_fq *fq[], const struct qm_fd *fd,
		      int frames_to_send)
{
	struct qman_portal *p = get_affine_portal();
	struct qm_portal *portal = &p->p;

	register struct qm_eqcr *eqcr = &portal->eqcr;
	struct qm_eqcr_entry *eq = eqcr->cursor, *prev_eq;

	u8 i, diff, old_ci, sent = 0;

	/* Update the available entries if no entry is free */
	if (!eqcr->available) {
		old_ci = eqcr->ci;
		eqcr->ci = qm_cl_in(EQCR_CI) & (QM_EQCR_SIZE - 1);
		diff = qm_cyc_diff(QM_EQCR_SIZE, old_ci, eqcr->ci);
		eqcr->available += diff;
		if (!diff)
			return 0;
	}

	/* try to send as many frames as possible */
	while (eqcr->available && frames_to_send--) {
		eq->fqid = fq[sent]->fqid_le;
		eq->fd.opaque_addr = fd->opaque_addr;
		eq->fd.addr = cpu_to_be40(fd->addr);
		eq->fd.status = cpu_to_be32(fd->status);
		eq->fd.opaque = cpu_to_be32(fd->opaque);
		eq = (void *)((unsigned long)(eq + 1) &
			(~(unsigned long)(QM_EQCR_SIZE << 6)));
		eqcr->available--;
		sent++;
		fd++;
	}
	lwsync();

	/* In order for flushes to complete faster, all lines are recorded in
	 * 32 byte word sizes
	 */
	eq = eqcr->cursor;
	for (i = 0; i < sent; i++) {
		eq->__dont_write_directly__verb =
			QM_EQCR_VERB_CMD_ENQUEUE | eqcr->vbit;
		prev_eq = eq;
		eq = (void *)((unsigned long)(eq + 1) &
			(~(unsigned long)(QM_EQCR_SIZE << 6)));
		if (unlikely((prev_eq + 1) != eq))
			eqcr->vbit ^= QM_EQCR_VERB_VBIT;
	}

	/* We need to flush all the lines but without load/store operations
	 * between them
	 */
	eq = eqcr->cursor;
	for (i = 0; i < sent; i++) {
		dcbf(eq);
		eq = (void *)((unsigned long)(eq + 1) &
			(~(unsigned long)(QM_EQCR_SIZE << 6)));
	}
	/* Update cursor for the next call */
	eqcr->cursor = eq;
	return sent;
}
int qman_enqueue_orp(struct qman_fq *fq, const struct qm_fd *fd, u32 flags,
		     struct qman_fq *orp, u16 orp_seqnum)
{
	struct qman_portal *p = get_affine_portal();
	struct qm_eqcr_entry *eq;

	eq = try_p_eq_start(p, fq, fd, flags);
	if (!eq)
		return -EBUSY;
	/* Process ORP-specifics here */
	if (flags & QMAN_ENQUEUE_FLAG_NLIS)
		orp_seqnum |= QM_EQCR_SEQNUM_NLIS;
	else {
		orp_seqnum &= ~QM_EQCR_SEQNUM_NLIS;
		if (flags & QMAN_ENQUEUE_FLAG_NESN)
			orp_seqnum |= QM_EQCR_SEQNUM_NESN;
		else
			/* No need to check for QMAN_ENQUEUE_FLAG_HOLE */
			orp_seqnum &= ~QM_EQCR_SEQNUM_NESN;
	}
	eq->seqnum = cpu_to_be16(orp_seqnum);
	eq->orp = cpu_to_be32(orp->fqid);
	/* Note: QM_EQCR_VERB_INTERRUPT == QMAN_ENQUEUE_FLAG_WAIT_SYNC */
	qm_eqcr_pvb_commit(&p->p, QM_EQCR_VERB_ORP |
		((flags & (QMAN_ENQUEUE_FLAG_HOLE | QMAN_ENQUEUE_FLAG_NESN)) ?
				0 : QM_EQCR_VERB_CMD_ENQUEUE) |
		(flags & (QM_EQCR_VERB_COLOUR_MASK | QM_EQCR_VERB_INTERRUPT)));
	return 0;
}
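
/*
 * Illustrative sketch, not part of the driver: enqueuing through an Order
 * Restoration Point. The caller supplies the ORP FQ and the sequence number
 * originally attached to the frame; a dropped frame's slot can be released
 * by enqueuing with QMAN_ENQUEUE_FLAG_HOLE for that sequence number. The
 * helper name is hypothetical.
 */
static __attribute__((unused)) int example_orp_enqueue(struct qman_fq *fq,
							struct qman_fq *orp,
							const struct qm_fd *fd,
							u16 seqnum)
{
	return qman_enqueue_orp(fq, fd, 0, orp, seqnum);
}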
int qman_modify_cgr(struct qman_cgr *cgr, u32 flags,
		    struct qm_mcc_initcgr *opts)
{
	struct qm_mc_command *mcc;
	struct qm_mc_result *mcr;
	struct qman_portal *p = get_affine_portal();

	u8 res;
	u8 verb = QM_MCC_VERB_MODIFYCGR;

	mcc = qm_mc_start(&p->p);
	if (opts)
		mcc->initcgr = *opts;
	mcc->initcgr.we_mask = cpu_to_be16(mcc->initcgr.we_mask);
	mcc->initcgr.cgr.wr_parm_g.word =
		cpu_to_be32(mcc->initcgr.cgr.wr_parm_g.word);
	mcc->initcgr.cgr.wr_parm_y.word =
		cpu_to_be32(mcc->initcgr.cgr.wr_parm_y.word);
	mcc->initcgr.cgr.wr_parm_r.word =
		cpu_to_be32(mcc->initcgr.cgr.wr_parm_r.word);
	mcc->initcgr.cgr.cscn_targ = cpu_to_be32(mcc->initcgr.cgr.cscn_targ);
	mcc->initcgr.cgr.__cs_thres = cpu_to_be16(mcc->initcgr.cgr.__cs_thres);

	mcc->initcgr.cgid = cgr->cgrid;
	if (flags & QMAN_CGR_FLAG_USE_INIT)
		verb = QM_MCC_VERB_INITCGR;
	qm_mc_commit(&p->p, verb);
	while (!(mcr = qm_mc_result(&p->p)))
		cpu_relax();

	DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == verb);
	res = mcr->result;
	return (res == QM_MCR_RESULT_OK) ? 0 : -EIO;
}
#define TARG_MASK(n) (0x80000000 >> (n->config->channel - \
				     QM_CHANNEL_SWPORTAL0))
#define TARG_DCP_MASK(n) (0x80000000 >> (10 + n))
#define PORTAL_IDX(n) (n->config->channel - QM_CHANNEL_SWPORTAL0)
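
/*
 * Worked example (illustrative): for a software portal whose channel is
 * QM_CHANNEL_SWPORTAL0 + 3, PORTAL_IDX() yields 3 and TARG_MASK() yields
 * 0x80000000 >> 3 == 0x10000000, i.e. the CSCN_TARG bit for portal 3.
 * TARG_DCP_MASK() selects the direct-connect portal bits that follow the
 * software-portal bits in the same register, hence the "10 +" offset.
 */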
int qman_create_cgr(struct qman_cgr *cgr, u32 flags,
		    struct qm_mcc_initcgr *opts)
{
	struct qm_mcr_querycgr cgr_state;
	struct qm_mcc_initcgr local_opts;
	int ret;
	struct qman_portal *p;

	/* We have to check that the provided CGRID is within the limits of the
	 * data-structures, for obvious reasons. However we'll let h/w take
	 * care of determining whether it's within the limits of what exists on
	 * the SoC.
	 */
	if (cgr->cgrid >= __CGR_NUM)
		return -EINVAL;

	p = get_affine_portal();

	memset(&local_opts, 0, sizeof(struct qm_mcc_initcgr));
	cgr->chan = p->config->channel;
	spin_lock(&p->cgr_lock);

	/* if no opts specified, just add it to the list */
	if (!opts)
		goto add_list;

	ret = qman_query_cgr(cgr, &cgr_state);
	if (ret)
		goto release_lock;
	if (opts)
		local_opts = *opts;
	if ((qman_ip_rev & 0xFF00) >= QMAN_REV30)
		local_opts.cgr.cscn_targ_upd_ctrl =
			QM_CGR_TARG_UDP_CTRL_WRITE_BIT | PORTAL_IDX(p);
	else
		/* Overwrite TARG */
		local_opts.cgr.cscn_targ = cgr_state.cgr.cscn_targ |
							TARG_MASK(p);
	local_opts.we_mask |= QM_CGR_WE_CSCN_TARG;

	/* send init if flags indicate so */
	if (opts && (flags & QMAN_CGR_FLAG_USE_INIT))
		ret = qman_modify_cgr(cgr, QMAN_CGR_FLAG_USE_INIT, &local_opts);
	else
		ret = qman_modify_cgr(cgr, 0, &local_opts);
	if (ret)
		goto release_lock;
add_list:
	list_add(&cgr->node, &p->cgr_cbs);

	/* Determine if newly added object requires its callback to be called */
	ret = qman_query_cgr(cgr, &cgr_state);
	if (ret) {
		/* we can't go back, so proceed and return success, but screen
		 * and wail to the log file.
		 */
		pr_crit("CGR HW state partially modified\n");
		ret = 0;
		goto release_lock;
	}
	if (cgr->cb && cgr_state.cgr.cscn_en && qman_cgrs_get(&p->cgrs[1],
							      cgr->cgrid))
		cgr->cb(p, cgr, 1);
release_lock:
	spin_unlock(&p->cgr_lock);
	return ret;
}
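
/*
 * Illustrative sketch, not part of the driver: minimal CGR setup with a
 * congestion-state callback. The callback body, the helper names and the
 * decision to enable only CSCN delivery are hypothetical; the flags and
 * fields used are the driver's own.
 */
static __attribute__((unused)) void example_cscn_cb(struct qman_portal *qm __maybe_unused,
						    struct qman_cgr *c,
						    int congested)
{
	pr_info("CGR %d is %s\n", c->cgrid,
		congested ? "congested" : "uncongested");
}

static __attribute__((unused)) int example_cgr_setup(struct qman_cgr *cgr)
{
	struct qm_mcc_initcgr opts;

	memset(&opts, 0, sizeof(opts));
	opts.we_mask = QM_CGR_WE_CSCN_EN;	/* enable CSCN delivery */
	opts.cgr.cscn_en = QM_CGR_EN;
	cgr->cb = example_cscn_cb;
	return qman_create_cgr(cgr, QMAN_CGR_FLAG_USE_INIT, &opts);
}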
int qman_create_cgr_to_dcp(struct qman_cgr *cgr, u32 flags, u16 dcp_portal,
			   struct qm_mcc_initcgr *opts)
{
	struct qm_mcc_initcgr local_opts;
	struct qm_mcr_querycgr cgr_state;
	int ret;

	if ((qman_ip_rev & 0xFF00) < QMAN_REV30) {
		pr_warn("QMan version doesn't support CSCN => DCP portal\n");
		return -EINVAL;
	}
	/* We have to check that the provided CGRID is within the limits of the
	 * data-structures, for obvious reasons. However we'll let h/w take
	 * care of determining whether it's within the limits of what exists on
	 * the SoC.
	 */
	if (cgr->cgrid >= __CGR_NUM)
		return -EINVAL;

	ret = qman_query_cgr(cgr, &cgr_state);
	if (ret)
		return ret;

	memset(&local_opts, 0, sizeof(struct qm_mcc_initcgr));
	if (opts)
		local_opts = *opts;

	if ((qman_ip_rev & 0xFF00) >= QMAN_REV30)
		local_opts.cgr.cscn_targ_upd_ctrl =
				QM_CGR_TARG_UDP_CTRL_WRITE_BIT |
				QM_CGR_TARG_UDP_CTRL_DCP | dcp_portal;
	else
		local_opts.cgr.cscn_targ = cgr_state.cgr.cscn_targ |
					TARG_DCP_MASK(dcp_portal);
	local_opts.we_mask |= QM_CGR_WE_CSCN_TARG;

	/* send init if flags indicate so */
	if (opts && (flags & QMAN_CGR_FLAG_USE_INIT))
		ret = qman_modify_cgr(cgr, QMAN_CGR_FLAG_USE_INIT,
				      &local_opts);
	else
		ret = qman_modify_cgr(cgr, 0, &local_opts);

	return ret;
}
int qman_delete_cgr(struct qman_cgr *cgr)
{
	struct qm_mcr_querycgr cgr_state;
	struct qm_mcc_initcgr local_opts;
	int ret = 0;
	struct qman_cgr *i;
	struct qman_portal *p = get_affine_portal();

	if (cgr->chan != p->config->channel) {
		pr_crit("Attempting to delete cgr from different portal than"
			" it was created on: create 0x%x, delete 0x%x\n",
			cgr->chan, p->config->channel);
		ret = -EINVAL;
		goto put_portal;
	}
	memset(&local_opts, 0, sizeof(struct qm_mcc_initcgr));
	spin_lock(&p->cgr_lock);
	list_del(&cgr->node);
	/*
	 * If there are no other CGR objects for this CGRID in the list,
	 * update CSCN_TARG accordingly
	 */
	list_for_each_entry(i, &p->cgr_cbs, node)
		if ((i->cgrid == cgr->cgrid) && i->cb)
			goto release_lock;
	ret = qman_query_cgr(cgr, &cgr_state);
	if (ret) {
		/* add back to the list */
		list_add(&cgr->node, &p->cgr_cbs);
		goto release_lock;
	}
	/* Overwrite TARG */
	local_opts.we_mask = QM_CGR_WE_CSCN_TARG;
	if ((qman_ip_rev & 0xFF00) >= QMAN_REV30)
		local_opts.cgr.cscn_targ_upd_ctrl = PORTAL_IDX(p);
	else
		local_opts.cgr.cscn_targ = cgr_state.cgr.cscn_targ &
							~(TARG_MASK(p));
	ret = qman_modify_cgr(cgr, 0, &local_opts);
	if (ret)
		/* add back to the list */
		list_add(&cgr->node, &p->cgr_cbs);
release_lock:
	spin_unlock(&p->cgr_lock);
put_portal:
	return ret;
}
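
/*
 * Illustrative sketch, not part of the driver: qman_delete_cgr() must run on
 * the same affine portal (i.e. the same lcore) that created the CGR, or it
 * fails with -EINVAL as checked above. The helper name is hypothetical.
 */
static __attribute__((unused)) int example_cgr_teardown(struct qman_cgr *cgr)
{
	int ret = qman_delete_cgr(cgr);

	if (ret)
		pr_err("CGR %d delete failed: %d\n", cgr->cgrid, ret);
	return ret;
}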
int qman_shutdown_fq(u32 fqid)
{
	struct qman_portal *p;
	struct qm_portal *low_p;
	struct qm_mc_command *mcc;
	struct qm_mc_result *mcr;
	u8 state;
	int orl_empty, fq_empty, drain = 0;
	u32 result;
	u32 channel, wq;
	u16 dest_wq;

	p = get_affine_portal();
	low_p = &p->p;

	/* Determine the state of the FQID */
	mcc = qm_mc_start(low_p);
	mcc->queryfq_np.fqid = cpu_to_be32(fqid);
	qm_mc_commit(low_p, QM_MCC_VERB_QUERYFQ_NP);
	while (!(mcr = qm_mc_result(low_p)))
		cpu_relax();
	DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCR_VERB_QUERYFQ_NP);
	state = mcr->queryfq_np.state & QM_MCR_NP_STATE_MASK;
	if (state == QM_MCR_NP_STATE_OOS)
		return 0; /* Already OOS, no need to do any more checks */

	/* Query which channel the FQ is using */
	mcc = qm_mc_start(low_p);
	mcc->queryfq.fqid = cpu_to_be32(fqid);
	qm_mc_commit(low_p, QM_MCC_VERB_QUERYFQ);
	while (!(mcr = qm_mc_result(low_p)))
		cpu_relax();
	DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCR_VERB_QUERYFQ);

	/* Need to store these since the MCR gets reused */
	dest_wq = be16_to_cpu(mcr->queryfq.fqd.dest_wq);
	channel = dest_wq & 0x7;
	wq = dest_wq >> 3;

	switch (state) {
	case QM_MCR_NP_STATE_TEN_SCHED:
	case QM_MCR_NP_STATE_TRU_SCHED:
	case QM_MCR_NP_STATE_ACTIVE:
	case QM_MCR_NP_STATE_PARKED:
		orl_empty = 0;
		mcc = qm_mc_start(low_p);
		mcc->alterfq.fqid = cpu_to_be32(fqid);
		qm_mc_commit(low_p, QM_MCC_VERB_ALTER_RETIRE);
		while (!(mcr = qm_mc_result(low_p)))
			cpu_relax();
		DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) ==
			   QM_MCR_VERB_ALTER_RETIRE);
		result = mcr->result; /* Make a copy as we reuse MCR below */

		if (result == QM_MCR_RESULT_PENDING) {
			/* Need to wait for the FQRN in the message ring, which
			 * will only occur once the FQ has been drained. In
			 * order for the FQ to drain the portal needs to be set
			 * to dequeue from the channel the FQ is scheduled on
			 */
			const struct qm_mr_entry *msg;
			const struct qm_dqrr_entry *dqrr = NULL;
			int found_fqrn = 0;
			__maybe_unused u16 dequeue_wq = 0;

			/* Flag that we need to drain FQ */
			drain = 1;

			if (channel >= qm_channel_pool1 &&
			    channel < (u16)(qm_channel_pool1 + 15)) {
				/* Pool channel, enable the bit in the portal */
				dequeue_wq = (channel -
					      qm_channel_pool1 + 1) << 4 | wq;
			} else if (channel < qm_channel_pool1) {
				/* Dedicated channel */
				dequeue_wq = wq;
			} else {
				pr_info("Cannot recover FQ 0x%x,"
					" it is scheduled on channel 0x%x",
					fqid, channel);
				return -EBUSY;
			}
			/* Set the sdqcr to drain this channel */
			if (channel < qm_channel_pool1)
				qm_dqrr_sdqcr_set(low_p,
						  QM_SDQCR_TYPE_ACTIVE |
						  QM_SDQCR_CHANNELS_DEDICATED);
			else
				qm_dqrr_sdqcr_set(low_p,
						  QM_SDQCR_TYPE_ACTIVE |
						  QM_SDQCR_CHANNELS_POOL_CONV
						  (channel));
			while (!found_fqrn) {
				/* Keep draining DQRR while checking the MR*/
				qm_dqrr_pvb_update(low_p);
				dqrr = qm_dqrr_current(low_p);
				while (dqrr) {
					qm_dqrr_cdc_consume_1ptr(
						low_p, dqrr, 0);
					qm_dqrr_pvb_update(low_p);
					qm_dqrr_next(low_p);
					dqrr = qm_dqrr_current(low_p);
				}
				/* Process message ring too */
				qm_mr_pvb_update(low_p);
				msg = qm_mr_current(low_p);
				while (msg) {
					if ((msg->verb &
					     QM_MR_VERB_TYPE_MASK)
					    == QM_MR_VERB_FQRN)
						found_fqrn = 1;
					qm_mr_cci_consume_to_current(low_p);
					qm_mr_pvb_update(low_p);
					msg = qm_mr_current(low_p);
				}
				cpu_relax();
			}
		}
		if (result != QM_MCR_RESULT_OK &&
		    result != QM_MCR_RESULT_PENDING) {
			/* error */
			pr_err("qman_retire_fq failed on FQ 0x%x,"
			       " result=0x%x\n", fqid, result);
			return -1;
		}
		if (!(mcr->alterfq.fqs & QM_MCR_FQS_ORLPRESENT)) {
			/* ORL had no entries, no need to wait until the
			 * ERNs come in.
			 */
			orl_empty = 1;
		}
		/* Retirement succeeded, check to see if FQ needs
		 * to be drained.
		 */
		if (drain || mcr->alterfq.fqs & QM_MCR_FQS_NOTEMPTY) {
			/* FQ is Not Empty, drain using volatile DQ commands */
			fq_empty = 0;
			do {
				const struct qm_dqrr_entry *dqrr = NULL;
				u32 vdqcr = fqid | QM_VDQCR_NUMFRAMES_SET(3);

				qm_dqrr_vdqcr_set(low_p, vdqcr);

				/* Wait for a dequeue to occur */
				while (dqrr == NULL) {
					qm_dqrr_pvb_update(low_p);
					dqrr = qm_dqrr_current(low_p);
					if (!dqrr)
						cpu_relax();
				}
				/* Process the dequeues, making sure to
				 * empty the ring completely.
				 */
				while (dqrr) {
					if (dqrr->fqid == fqid &&
					    dqrr->stat & QM_DQRR_STAT_FQ_EMPTY)
						fq_empty = 1;
					qm_dqrr_cdc_consume_1ptr(low_p,
								 dqrr, 0);
					qm_dqrr_pvb_update(low_p);
					qm_dqrr_next(low_p);
					dqrr = qm_dqrr_current(low_p);
				}
			} while (fq_empty == 0);
		}
		qm_dqrr_sdqcr_set(low_p, 0);

		/* Wait for the ORL to have been completely drained */
		while (orl_empty == 0) {
			const struct qm_mr_entry *msg;

			qm_mr_pvb_update(low_p);
			msg = qm_mr_current(low_p);
			while (msg) {
				if ((msg->verb & QM_MR_VERB_TYPE_MASK) ==
				    QM_MR_VERB_FQRL)
					orl_empty = 1;
				qm_mr_cci_consume_to_current(low_p);
				qm_mr_pvb_update(low_p);
				msg = qm_mr_current(low_p);
			}
			cpu_relax();
		}
		mcc = qm_mc_start(low_p);
		mcc->alterfq.fqid = cpu_to_be32(fqid);
		qm_mc_commit(low_p, QM_MCC_VERB_ALTER_OOS);
		while (!(mcr = qm_mc_result(low_p)))
			cpu_relax();
		DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) ==
			    QM_MCR_VERB_ALTER_OOS);
		if (mcr->result != QM_MCR_RESULT_OK) {
			pr_err(
			"OOS after drain Failed on FQID 0x%x, result 0x%x\n",
			       fqid, mcr->result);
			return -1;
		}
		return 0;

	case QM_MCR_NP_STATE_RETIRED:
		/* Send OOS Command */
		mcc = qm_mc_start(low_p);
		mcc->alterfq.fqid = cpu_to_be32(fqid);
		qm_mc_commit(low_p, QM_MCC_VERB_ALTER_OOS);
		while (!(mcr = qm_mc_result(low_p)))
			cpu_relax();
		DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) ==
			    QM_MCR_VERB_ALTER_OOS);
		if (mcr->result != QM_MCR_RESULT_OK) {
			pr_err("OOS Failed on FQID 0x%x\n", fqid);