/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0)
 *
 * Copyright 2008-2016 Freescale Semiconductor Inc.
 */

#include "qman.h"
#include <rte_branch_prediction.h>
/* Compilation constants */
#define DQRR_MAXFILL	15
#define EQCR_ITHRESH	4	/* if EQCR congests, interrupt threshold */
#define IRQNAME		"QMan portal %d"
#define MAX_IRQNAME	16	/* big enough for "QMan portal %d" */
/* maximum number of DQRR entries to process in qman_poll() */
#define FSL_QMAN_POLL_LIMIT 8
/* Lock/unlock frame queues, subject to the "LOCKED" flag. This is about
 * inter-processor locking only. Note, FQLOCK() is always called either under a
 * local_irq_save() or from interrupt context - hence there's no need for irq
 * protection (and indeed, attempting to nest irq-protection doesn't work, as
 * the "irq en/disable" machinery isn't recursive...).
 */
#define FQLOCK(fq) \
	do { \
		struct qman_fq *__fq478 = (fq); \
		if (fq_isset(__fq478, QMAN_FQ_FLAG_LOCKED)) \
			spin_lock(&__fq478->fqlock); \
	} while (0)
#define FQUNLOCK(fq) \
	do { \
		struct qman_fq *__fq478 = (fq); \
		if (fq_isset(__fq478, QMAN_FQ_FLAG_LOCKED)) \
			spin_unlock(&__fq478->fqlock); \
	} while (0)
static inline void fq_set(struct qman_fq *fq, u32 mask)
{
	dpaa_set_bits(mask, &fq->flags);
}

static inline void fq_clear(struct qman_fq *fq, u32 mask)
{
	dpaa_clear_bits(mask, &fq->flags);
}

static inline int fq_isset(struct qman_fq *fq, u32 mask)
{
	return fq->flags & mask;
}

static inline int fq_isclear(struct qman_fq *fq, u32 mask)
{
	return !(fq->flags & mask);
}
struct qman_portal {
	struct qm_portal p;
	/* PORTAL_BITS_*** - dynamic, strictly internal */
	unsigned long bits;
	/* interrupt sources processed by portal_isr(), configurable */
	unsigned long irq_sources;
	u32 use_eqcr_ci_stashing;
	u32 slowpoll;	/* only used when interrupts are off */
	/* only 1 volatile dequeue at a time */
	struct qman_fq *vdqcr_owned;
	u32 sdqcr;
	int dqrr_disable_ref;
	/* A portal-specific handler for DCP ERNs. If this is NULL, the global
	 * handler is called instead.
	 */
	qman_cb_dc_ern cb_dc_ern;
	/* When the cpu-affine portal is activated, this is non-NULL */
	const struct qm_portal_config *config;
	struct dpa_rbtree retire_table;
	char irqname[MAX_IRQNAME];
	/* 2-element array. cgrs[0] is mask, cgrs[1] is snapshot. */
	struct qman_cgrs *cgrs;
	/* linked-list of CSCN handlers. */
	struct list_head cgr_cbs;
	/* list lock */
	spinlock_t cgr_lock;
	/* track if memory was allocated by the driver */
#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
	/* Keep a shadow copy of the DQRR on LE systems as the SW needs to
	 * do byte swaps of DQRR read only memory. First entry must be aligned
	 * to 2 ** 10 to ensure DQRR index calculations based on the shadow copy
	 * address (6 bits for address shift + 4 bits for the DQRR size).
	 */
	struct qm_dqrr_entry shadow_dqrr[QM_DQRR_SIZE]
		__attribute__((aligned(1024)));
#endif
};
/* Global handler for DCP ERNs. Used when the portal receiving the message does
 * not have a portal-specific handler.
 */
static qman_cb_dc_ern cb_dc_ern;

static cpumask_t affine_mask;
static DEFINE_SPINLOCK(affine_mask_lock);
static u16 affine_channels[NR_CPUS];
static RTE_DEFINE_PER_LCORE(struct qman_portal, qman_affine_portal);
static inline struct qman_portal *get_affine_portal(void)
{
	return &RTE_PER_LCORE(qman_affine_portal);
}
/* This gives a FQID->FQ lookup to cover the fact that we can't directly demux
 * retirement notifications (the fact they are sometimes h/w-consumed means that
 * contextB isn't always a s/w demux - and as we can't know which case it is
 * when looking at the notification, we have to use the slow lookup for all of
 * them). NB, it's possible to have multiple FQ objects refer to the same FQID
 * (though at most one of them should be the consumer), so this table isn't for
 * all FQs - FQs are added when retirement commands are issued, and removed when
 * they complete, which also massively reduces the size of this table.
 */
IMPLEMENT_DPAA_RBTREE(fqtree, struct qman_fq, node, fqid);
/*
 * This is what everything can wait on, even if it migrates to a different cpu
 * from the one whose affine portal it is waiting on.
 */
static DECLARE_WAIT_QUEUE_HEAD(affine_queue);
static inline int table_push_fq(struct qman_portal *p, struct qman_fq *fq)
{
	int ret = fqtree_push(&p->retire_table, fq);

	if (ret)
		pr_err("ERROR: double FQ-retirement %d\n", fq->fqid);
	return ret;
}

static inline void table_del_fq(struct qman_portal *p, struct qman_fq *fq)
{
	fqtree_del(&p->retire_table, fq);
}

static inline struct qman_fq *table_find_fq(struct qman_portal *p, u32 fqid)
{
	return fqtree_find(&p->retire_table, fqid);
}
#ifdef CONFIG_FSL_QMAN_FQ_LOOKUP
static void **qman_fq_lookup_table;
static size_t qman_fq_lookup_table_size;

int qman_setup_fq_lookup_table(size_t num_entries)
{
	num_entries++;
	/* Allocate 1 more entry since the first entry is not used */
	qman_fq_lookup_table = vmalloc((num_entries * sizeof(void *)));
	if (!qman_fq_lookup_table) {
		pr_err("QMan: Could not allocate fq lookup table\n");
		return -ENOMEM;
	}
	memset(qman_fq_lookup_table, 0, num_entries * sizeof(void *));
	qman_fq_lookup_table_size = num_entries;
	pr_debug("QMan: Allocated lookup table at %p, entry count %lu\n",
		 qman_fq_lookup_table,
		 (unsigned long)qman_fq_lookup_table_size);
	return 0;
}
/* global structure that maintains fq object mapping */
static DEFINE_SPINLOCK(fq_hash_table_lock);

static int find_empty_fq_table_entry(u32 *entry, struct qman_fq *fq)
{
	u32 i;

	spin_lock(&fq_hash_table_lock);
	/* Can't use index zero because this has special meaning
	 * in context_b field.
	 */
	for (i = 1; i < qman_fq_lookup_table_size; i++) {
		if (qman_fq_lookup_table[i] == NULL) {
			*entry = i;
			qman_fq_lookup_table[i] = fq;
			spin_unlock(&fq_hash_table_lock);
			return 0;
		}
	}
	spin_unlock(&fq_hash_table_lock);
	return -ENOMEM;
}

static void clear_fq_table_entry(u32 entry)
{
	spin_lock(&fq_hash_table_lock);
	DPAA_BUG_ON(entry >= qman_fq_lookup_table_size);
	qman_fq_lookup_table[entry] = NULL;
	spin_unlock(&fq_hash_table_lock);
}

static inline struct qman_fq *get_fq_table_entry(u32 entry)
{
	DPAA_BUG_ON(entry >= qman_fq_lookup_table_size);
	return qman_fq_lookup_table[entry];
}
#endif
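
/*
 * Illustrative sketch (not part of the driver): how an FQ pointer is meant
 * to round-trip through this table via the 32-bit context_b/tag fields when
 * CONFIG_FSL_QMAN_FQ_LOOKUP is enabled. The helpers are the real ones above;
 * the flow itself is a worked example with error handling elided.
 *
 *	u32 key;
 *
 *	if (!find_empty_fq_table_entry(&key, fq))
 *		fq->key = key;				// programmed as context_b
 *	...
 *	fq = get_fq_table_entry(dqrr_entry->contextB);	// demux on dequeue
 *	...
 *	clear_fq_table_entry(fq->key);			// on qman_destroy_fq()
 */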
static inline void cpu_to_hw_fqd(struct qm_fqd *fqd)
{
	/* Byteswap the FQD to HW format */
	fqd->fq_ctrl = cpu_to_be16(fqd->fq_ctrl);
	fqd->dest_wq = cpu_to_be16(fqd->dest_wq);
	fqd->ics_cred = cpu_to_be16(fqd->ics_cred);
	fqd->context_b = cpu_to_be32(fqd->context_b);
	fqd->context_a.opaque = cpu_to_be64(fqd->context_a.opaque);
	fqd->opaque_td = cpu_to_be16(fqd->opaque_td);
}

static inline void hw_fqd_to_cpu(struct qm_fqd *fqd)
{
	/* Byteswap the FQD to CPU format */
	fqd->fq_ctrl = be16_to_cpu(fqd->fq_ctrl);
	fqd->dest_wq = be16_to_cpu(fqd->dest_wq);
	fqd->ics_cred = be16_to_cpu(fqd->ics_cred);
	fqd->context_b = be32_to_cpu(fqd->context_b);
	fqd->context_a.opaque = be64_to_cpu(fqd->context_a.opaque);
}

static inline void cpu_to_hw_fd(struct qm_fd *fd)
{
	fd->addr = cpu_to_be40(fd->addr);
	fd->status = cpu_to_be32(fd->status);
	fd->opaque = cpu_to_be32(fd->opaque);
}

static inline void hw_fd_to_cpu(struct qm_fd *fd)
{
	fd->addr = be40_to_cpu(fd->addr);
	fd->status = be32_to_cpu(fd->status);
	fd->opaque = be32_to_cpu(fd->opaque);
}
/* In the case that slow- and fast-path handling are both done by qman_poll()
 * (ie. because there is no interrupt handling), we ought to balance how often
 * we do the fast-path poll versus the slow-path poll. We'll use two decrementer
 * sources, so we call the fast poll 'n' times before calling the slow poll
 * once. The idle decrementer constant is used when the last slow-poll detected
 * no work to do, and the busy decrementer constant when the last slow-poll had
 * work to do.
 */
#define SLOW_POLL_IDLE 1000
#define SLOW_POLL_BUSY 10
static u32 __poll_portal_slow(struct qman_portal *p, u32 is);
static inline unsigned int __poll_portal_fast(struct qman_portal *p,
					      unsigned int poll_limit);
/* Portal interrupt handler */
static irqreturn_t portal_isr(__always_unused int irq, void *ptr)
{
	struct qman_portal *p = ptr;
	/*
	 * The CSCI/CCSCI source is cleared inside __poll_portal_slow(), because
	 * it could race against a Query Congestion State command also given
	 * as part of the handling of this interrupt source. We mustn't
	 * clear it a second time in this top-level function.
	 */
	u32 clear = QM_DQAVAIL_MASK | (p->irq_sources &
		~(QM_PIRQ_CSCI | QM_PIRQ_CCSCI));
	u32 is = qm_isr_status_read(&p->p) & p->irq_sources;

	/* DQRR-handling if it's interrupt-driven */
	if (is & QM_PIRQ_DQRI)
		__poll_portal_fast(p, FSL_QMAN_POLL_LIMIT);
	/* Handling of anything else that's interrupt-driven */
	clear |= __poll_portal_slow(p, is);
	qm_isr_status_clear(&p->p, clear);
	return IRQ_HANDLED;
}
/* This inner version is used privately by qman_create_affine_portal(), as well
 * as by the exported qman_stop_dequeues().
 */
static inline void qman_stop_dequeues_ex(struct qman_portal *p)
{
	if (!(p->dqrr_disable_ref++))
		qm_dqrr_set_maxfill(&p->p, 0);
}
static int drain_mr_fqrni(struct qm_portal *p)
{
	const struct qm_mr_entry *msg;
loop:
	msg = qm_mr_current(p);
	if (!msg) {
		/*
		 * if MR was full and h/w had other FQRNI entries to produce, we
		 * need to allow it time to produce those entries once the
		 * existing entries are consumed. A worst-case situation
		 * (fully-loaded system) means h/w sequencers may have to do 3-4
		 * other things before servicing the portal's MR pump, each of
		 * which (if slow) may take ~50 qman cycles (which is ~200
		 * processor cycles). So rounding up and then multiplying this
		 * worst-case estimate by a factor of 10, just to be
		 * ultra-paranoid, goes as high as 10,000 cycles. NB, we consume
		 * one entry at a time, so h/w has an opportunity to produce new
		 * entries well before the ring has been fully consumed, so
		 * we're being *really* paranoid here.
		 */
		u64 now, then = mfatb();

		do {
			now = mfatb();
		} while ((then + 10000) > now);
		msg = qm_mr_current(p);
		if (!msg)
			return 0;
	}
	if ((msg->verb & QM_MR_VERB_TYPE_MASK) != QM_MR_VERB_FQRNI) {
		/* We aren't draining anything but FQRNIs */
		pr_err("Found verb 0x%x in MR\n", msg->verb);
		return -1;
	}
	qm_mr_next(p);
	qm_mr_cci_consume(p, 1);
	goto loop;
}
static inline int qm_eqcr_init(struct qm_portal *portal,
			       enum qm_eqcr_pmode pmode,
			       unsigned int eq_stash_thresh,
			       int eq_stash_prio)
{
	/* This use of 'register', as well as all other occurrences, is because
	 * it has been observed to generate much faster code with gcc than is
	 * otherwise the case.
	 */
	register struct qm_eqcr *eqcr = &portal->eqcr;
	u32 cfg;
	u8 pi;

	eqcr->ring = portal->addr.ce + QM_CL_EQCR;
	eqcr->ci = qm_in(EQCR_CI_CINH) & (QM_EQCR_SIZE - 1);
	qm_cl_invalidate(EQCR_CI);
	pi = qm_in(EQCR_PI_CINH) & (QM_EQCR_SIZE - 1);
	eqcr->cursor = eqcr->ring + pi;
	eqcr->vbit = (qm_in(EQCR_PI_CINH) & QM_EQCR_SIZE) ?
			QM_EQCR_VERB_VBIT : 0;
	eqcr->available = QM_EQCR_SIZE - 1 -
			qm_cyc_diff(QM_EQCR_SIZE, eqcr->ci, pi);
	eqcr->ithresh = qm_in(EQCR_ITR);
#ifdef RTE_LIBRTE_DPAA_HWDEBUG
	eqcr->busy = 0;
	eqcr->pmode = pmode;
#endif
	cfg = (qm_in(CFG) & 0x00ffffff) |
		(eq_stash_thresh << 28) |	/* QCSP_CFG: EST */
		(eq_stash_prio << 26) |		/* QCSP_CFG: EP */
		((pmode & 0x3) << 24);		/* QCSP_CFG: EPM */
	qm_out(CFG, cfg);
	return 0;
}
static inline void qm_eqcr_finish(struct qm_portal *portal)
{
	register struct qm_eqcr *eqcr = &portal->eqcr;
	u8 pi, ci;
	u32 cfg;

	/*
	 * Disable EQCI stashing because the QMan only
	 * presents the value it previously stashed to
	 * maintain coherency. Setting the stash threshold
	 * to 1 then 0 ensures that QMan has resynchronized
	 * its internal copy so that the portal is clean
	 * when it is reinitialized in the future
	 */
	cfg = (qm_in(CFG) & 0x0fffffff) |
		(1 << 28);	/* QCSP_CFG: EST */
	qm_out(CFG, cfg);
	cfg &= 0x0fffffff;	/* stash threshold = 0 */
	qm_out(CFG, cfg);

	pi = qm_in(EQCR_PI_CINH) & (QM_EQCR_SIZE - 1);
	ci = qm_in(EQCR_CI_CINH) & (QM_EQCR_SIZE - 1);

	/* Refresh EQCR CI cache value */
	qm_cl_invalidate(EQCR_CI);
	eqcr->ci = qm_cl_in(EQCR_CI) & (QM_EQCR_SIZE - 1);

#ifdef RTE_LIBRTE_DPAA_HWDEBUG
	DPAA_ASSERT(!eqcr->busy);
#endif
	if (pi != EQCR_PTR2IDX(eqcr->cursor))
		pr_crit("losing uncommitted EQCR entries\n");
	if (ci != eqcr->ci)
		pr_crit("missing existing EQCR completions\n");
	if (eqcr->ci != EQCR_PTR2IDX(eqcr->cursor))
		pr_crit("EQCR destroyed unquiesced\n");
}
static inline int qm_dqrr_init(struct qm_portal *portal,
			__maybe_unused const struct qm_portal_config *config,
			enum qm_dqrr_dmode dmode,
			__maybe_unused enum qm_dqrr_pmode pmode,
			enum qm_dqrr_cmode cmode, u8 max_fill)
{
	register struct qm_dqrr *dqrr = &portal->dqrr;
	u32 cfg;

	/* Make sure the DQRR will be idle when we enable */
	qm_out(DQRR_SDQCR, 0);
	qm_out(DQRR_VDQCR, 0);
	qm_out(DQRR_PDQCR, 0);
	dqrr->ring = portal->addr.ce + QM_CL_DQRR;
	dqrr->pi = qm_in(DQRR_PI_CINH) & (QM_DQRR_SIZE - 1);
	dqrr->ci = qm_in(DQRR_CI_CINH) & (QM_DQRR_SIZE - 1);
	dqrr->cursor = dqrr->ring + dqrr->ci;
	dqrr->fill = qm_cyc_diff(QM_DQRR_SIZE, dqrr->ci, dqrr->pi);
	dqrr->vbit = (qm_in(DQRR_PI_CINH) & QM_DQRR_SIZE) ?
			QM_DQRR_VERB_VBIT : 0;
	dqrr->ithresh = qm_in(DQRR_ITR);
#ifdef RTE_LIBRTE_DPAA_HWDEBUG
	dqrr->dmode = dmode;
	dqrr->pmode = pmode;
	dqrr->cmode = cmode;
#endif
	/* Invalidate every ring entry before beginning */
	for (cfg = 0; cfg < QM_DQRR_SIZE; cfg++)
		dccivac(qm_cl(dqrr->ring, cfg));
	cfg = (qm_in(CFG) & 0xff000f00) |
		((max_fill & (QM_DQRR_SIZE - 1)) << 20) | /* DQRR_MF */
		((dmode & 1) << 18) |			/* DP */
		((cmode & 3) << 16) |			/* DCM */
		0xa0 |					/* RE+SE */
		(0 ? 0x40 : 0) |			/* Ignore RP */
		(0 ? 0x10 : 0);				/* Ignore SP */
	qm_out(CFG, cfg);
	qm_dqrr_set_maxfill(portal, max_fill);
	return 0;
}
static inline void qm_dqrr_finish(struct qm_portal *portal)
{
	__maybe_unused register struct qm_dqrr *dqrr = &portal->dqrr;
#ifdef RTE_LIBRTE_DPAA_HWDEBUG
	if ((dqrr->cmode != qm_dqrr_cdc) &&
	    (dqrr->ci != DQRR_PTR2IDX(dqrr->cursor)))
		pr_crit("Ignoring completed DQRR entries\n");
#endif
}
static inline int qm_mr_init(struct qm_portal *portal,
			     __maybe_unused enum qm_mr_pmode pmode,
			     enum qm_mr_cmode cmode)
{
	register struct qm_mr *mr = &portal->mr;
	u32 cfg;

	mr->ring = portal->addr.ce + QM_CL_MR;
	mr->pi = qm_in(MR_PI_CINH) & (QM_MR_SIZE - 1);
	mr->ci = qm_in(MR_CI_CINH) & (QM_MR_SIZE - 1);
	mr->cursor = mr->ring + mr->ci;
	mr->fill = qm_cyc_diff(QM_MR_SIZE, mr->ci, mr->pi);
	mr->vbit = (qm_in(MR_PI_CINH) & QM_MR_SIZE) ? QM_MR_VERB_VBIT : 0;
	mr->ithresh = qm_in(MR_ITR);
#ifdef RTE_LIBRTE_DPAA_HWDEBUG
	mr->pmode = pmode;
	mr->cmode = cmode;
#endif
	cfg = (qm_in(CFG) & 0xfffff0ff) |
		((cmode & 1) << 8);	/* QCSP_CFG: MM */
	qm_out(CFG, cfg);
	return 0;
}
static inline void qm_mr_pvb_update(struct qm_portal *portal)
{
	register struct qm_mr *mr = &portal->mr;
	const struct qm_mr_entry *res = qm_cl(mr->ring, mr->pi);

#ifdef RTE_LIBRTE_DPAA_HWDEBUG
	DPAA_ASSERT(mr->pmode == qm_mr_pvb);
#endif
	/* when accessing 'verb', use __raw_readb() to ensure that compiler
	 * inlining doesn't try to optimise out "excess reads".
	 */
	if ((__raw_readb(&res->verb) & QM_MR_VERB_VBIT) == mr->vbit) {
		mr->pi = (mr->pi + 1) & (QM_MR_SIZE - 1);
		if (!mr->pi)
			mr->vbit ^= QM_MR_VERB_VBIT;
		mr->fill++;
		res = MR_INC(res);
	}
	dcbit_ro(res);
}
struct qman_portal *qman_create_portal(
			struct qman_portal *portal,
			const struct qm_portal_config *c,
			const struct qman_cgrs *cgrs)
{
	struct qm_portal *p;
	char buf[16];
	int ret;
	u32 isdr;

	p = &portal->p;

	portal->use_eqcr_ci_stashing = ((qman_ip_rev >= QMAN_REV30) ? 1 : 0);
	/*
	 * prep the low-level portal struct with the mapped addresses from the
	 * config, everything that follows depends on it and "config" is more
	 * for (de)reference
	 */
	p->addr.ce = c->addr_virt[DPAA_PORTAL_CE];
	p->addr.ci = c->addr_virt[DPAA_PORTAL_CI];
	/*
	 * If CI-stashing is used, the current defaults use a threshold of 3,
	 * and stash with higher-than-DQRR priority.
	 */
	if (qm_eqcr_init(p, qm_eqcr_pvb,
			 portal->use_eqcr_ci_stashing ? 3 : 0, 1)) {
		pr_err("Qman EQCR initialisation failed\n");
		goto fail_eqcr;
	}
	if (qm_dqrr_init(p, c, qm_dqrr_dpush, qm_dqrr_pvb,
			 qm_dqrr_cdc, DQRR_MAXFILL)) {
		pr_err("Qman DQRR initialisation failed\n");
		goto fail_dqrr;
	}
	if (qm_mr_init(p, qm_mr_pvb, qm_mr_cci)) {
		pr_err("Qman MR initialisation failed\n");
		goto fail_mr;
	}
	if (qm_mc_init(p)) {
		pr_err("Qman MC initialisation failed\n");
		goto fail_mc;
	}

	/* static interrupt-gating controls */
	qm_dqrr_set_ithresh(p, 0);
	qm_mr_set_ithresh(p, 0);
	qm_isr_set_iperiod(p, 0);
	portal->cgrs = kmalloc(2 * sizeof(*cgrs), GFP_KERNEL);
	if (!portal->cgrs)
		goto fail_cgrs;
	/* initial snapshot is no-depletion */
	qman_cgrs_init(&portal->cgrs[1]);
	if (cgrs)
		portal->cgrs[0] = *cgrs;
	else
		/* if the given mask is NULL, assume all CGRs can be seen */
		qman_cgrs_fill(&portal->cgrs[0]);
	INIT_LIST_HEAD(&portal->cgr_cbs);
	spin_lock_init(&portal->cgr_lock);
	portal->bits = 0;
	portal->slowpoll = 0;
	portal->sdqcr = QM_SDQCR_SOURCE_CHANNELS | QM_SDQCR_COUNT_UPTO3 |
			QM_SDQCR_DEDICATED_PRECEDENCE | QM_SDQCR_TYPE_PRIO_QOS |
			QM_SDQCR_TOKEN_SET(0xab) | QM_SDQCR_CHANNELS_DEDICATED;
	portal->dqrr_disable_ref = 0;
	portal->cb_dc_ern = NULL;
	sprintf(buf, "qportal-%d", c->channel);
	dpa_rbtree_init(&portal->retire_table);
	isdr = 0xffffffff;
	qm_isr_disable_write(p, isdr);
	portal->irq_sources = 0;
	qm_isr_enable_write(p, portal->irq_sources);
	qm_isr_status_clear(p, 0xffffffff);
	snprintf(portal->irqname, MAX_IRQNAME, IRQNAME, c->cpu);
	if (request_irq(c->irq, portal_isr, 0, portal->irqname,
			portal)) {
		pr_err("request_irq() failed\n");
		goto fail_irq;
	}

	/* Need EQCR to be empty before continuing */
	isdr &= ~QM_PIRQ_EQCI;
	qm_isr_disable_write(p, isdr);
	ret = qm_eqcr_get_fill(p);
	if (ret) {
		pr_err("Qman EQCR unclean\n");
		goto fail_eqcr_empty;
	}
	isdr &= ~(QM_PIRQ_DQRI | QM_PIRQ_MRI);
	qm_isr_disable_write(p, isdr);
	if (qm_dqrr_current(p)) {
		pr_err("Qman DQRR unclean\n");
		qm_dqrr_cdc_consume_n(p, 0xffff);
	}
	if (qm_mr_current(p) && drain_mr_fqrni(p)) {
		/* special handling, drain just in case it's a few FQRNIs */
		if (drain_mr_fqrni(p))
			goto fail_dqrr_mr_empty;
	}
	/* Success */
	portal->config = c;
	qm_isr_disable_write(p, 0);
	qm_isr_uninhibit(p);
	/* Write a sane SDQCR */
	qm_dqrr_sdqcr_set(p, portal->sdqcr);
	return portal;
fail_dqrr_mr_empty:
fail_eqcr_empty:
	free_irq(c->irq, portal);
fail_irq:
	kfree(portal->cgrs);
	spin_lock_destroy(&portal->cgr_lock);
fail_cgrs:
	qm_mc_finish(p);
fail_mc:
	qm_mr_finish(p);
fail_mr:
	qm_dqrr_finish(p);
fail_dqrr:
	qm_eqcr_finish(p);
fail_eqcr:
	return NULL;
}
struct qman_portal *qman_create_affine_portal(const struct qm_portal_config *c,
					      const struct qman_cgrs *cgrs)
{
	struct qman_portal *res;
	struct qman_portal *portal = get_affine_portal();
	/* A criterion for calling this function (from qman_driver.c) is that
	 * we're already affine to the cpu and won't schedule onto another cpu.
	 */
	res = qman_create_portal(portal, c, cgrs);
	if (res) {
		spin_lock(&affine_mask_lock);
		CPU_SET(c->cpu, &affine_mask);
		affine_channels[c->cpu] =
			res->config->channel;
		spin_unlock(&affine_mask_lock);
	}
	return res;
}
void qman_destroy_portal(struct qman_portal *qm)
{
	const struct qm_portal_config *pcfg;

	/* Stop dequeues on the portal */
	qm_dqrr_sdqcr_set(&qm->p, 0);

	/*
	 * NB we do this to "quiesce" EQCR. If we add enqueue-completions or
	 * something related to QM_PIRQ_EQCI, this may need fixing.
	 * Also, due to the prefetching model used for CI updates in the enqueue
	 * path, this update will only invalidate the CI cacheline *after*
	 * working on it, so we need to call this twice to ensure a full update
	 * irrespective of where the enqueue processing was at when the teardown
	 * started.
	 */
	qm_eqcr_cce_update(&qm->p);
	qm_eqcr_cce_update(&qm->p);

	pcfg = qm->config;
	free_irq(pcfg->irq, qm);

	kfree(qm->cgrs);
	qm_mc_finish(&qm->p);
	qm_mr_finish(&qm->p);
	qm_dqrr_finish(&qm->p);
	qm_eqcr_finish(&qm->p);

	qm->config = NULL;

	spin_lock_destroy(&qm->cgr_lock);
}
const struct qm_portal_config *qman_destroy_affine_portal(void)
{
	/* We don't want to redirect if we're a slave, use "raw" */
	struct qman_portal *qm = get_affine_portal();
	const struct qm_portal_config *pcfg;
	int cpu;

	pcfg = qm->config;
	cpu = pcfg->cpu;

	qman_destroy_portal(qm);

	spin_lock(&affine_mask_lock);
	CPU_CLR(cpu, &affine_mask);
	spin_unlock(&affine_mask_lock);
	return pcfg;
}

int qman_get_portal_index(void)
{
	struct qman_portal *p = get_affine_portal();
	return p->config->index;
}
/* Inline helper to reduce nesting in __poll_portal_slow() */
static inline void fq_state_change(struct qman_portal *p, struct qman_fq *fq,
				   const struct qm_mr_entry *msg, u8 verb)
{
	FQLOCK(fq);
	switch (verb) {
	case QM_MR_VERB_FQRL:
		DPAA_ASSERT(fq_isset(fq, QMAN_FQ_STATE_ORL));
		fq_clear(fq, QMAN_FQ_STATE_ORL);
		table_del_fq(p, fq);
		break;
	case QM_MR_VERB_FQRN:
		DPAA_ASSERT((fq->state == qman_fq_state_parked) ||
			    (fq->state == qman_fq_state_sched));
		DPAA_ASSERT(fq_isset(fq, QMAN_FQ_STATE_CHANGING));
		fq_clear(fq, QMAN_FQ_STATE_CHANGING);
		if (msg->fq.fqs & QM_MR_FQS_NOTEMPTY)
			fq_set(fq, QMAN_FQ_STATE_NE);
		if (msg->fq.fqs & QM_MR_FQS_ORLPRESENT)
			fq_set(fq, QMAN_FQ_STATE_ORL);
		else
			table_del_fq(p, fq);
		fq->state = qman_fq_state_retired;
		break;
	case QM_MR_VERB_FQPN:
		DPAA_ASSERT(fq->state == qman_fq_state_sched);
		DPAA_ASSERT(fq_isclear(fq, QMAN_FQ_STATE_CHANGING));
		fq->state = qman_fq_state_parked;
	}
	FQUNLOCK(fq);
}
static u32 __poll_portal_slow(struct qman_portal *p, u32 is)
{
	const struct qm_mr_entry *msg;
	struct qm_mr_entry swapped_msg;

	if (is & QM_PIRQ_CSCI) {
		struct qman_cgrs rr, c;
		struct qm_mc_result *mcr;
		struct qman_cgr *cgr;

		spin_lock(&p->cgr_lock);
		/*
		 * The CSCI bit must be cleared _before_ issuing the
		 * Query Congestion State command, to ensure that a long
		 * CGR State Change callback cannot miss an intervening
		 * state change.
		 */
		qm_isr_status_clear(&p->p, QM_PIRQ_CSCI);
		qm_mc_start(&p->p);
		qm_mc_commit(&p->p, QM_MCC_VERB_QUERYCONGESTION);
		while (!(mcr = qm_mc_result(&p->p)))
			cpu_relax();
		/* mask out the ones I'm not interested in */
		qman_cgrs_and(&rr, (const struct qman_cgrs *)
			      &mcr->querycongestion.state, &p->cgrs[0]);
		/* check previous snapshot for delta, enter/exit congestion */
		qman_cgrs_xor(&c, &rr, &p->cgrs[1]);
		/* update snapshot */
		qman_cgrs_cp(&p->cgrs[1], &rr);
		/* Invoke callback */
		list_for_each_entry(cgr, &p->cgr_cbs, node)
			if (cgr->cb && qman_cgrs_get(&c, cgr->cgrid))
				cgr->cb(p, cgr, qman_cgrs_get(&rr, cgr->cgrid));
		spin_unlock(&p->cgr_lock);
	}

	if (is & QM_PIRQ_EQRI) {
		qm_eqcr_cce_update(&p->p);
		qm_eqcr_set_ithresh(&p->p, 0);
		wake_up(&affine_queue);
	}

	if (is & QM_PIRQ_MRI) {
		struct qman_fq *fq;
		u8 verb, num = 0;
mr_loop:
		qm_mr_pvb_update(&p->p);
		msg = qm_mr_current(&p->p);
		if (!msg)
			goto mr_done;
		swapped_msg = *msg;
		hw_fd_to_cpu(&swapped_msg.ern.fd);
		verb = msg->verb & QM_MR_VERB_TYPE_MASK;
		/* The message is a software ERN iff the 0x20 bit is clear */
		if (verb & 0x20) {
			switch (verb) {
			case QM_MR_VERB_FQRNI:
				/* nada, we drop FQRNIs on the floor */
				break;
			case QM_MR_VERB_FQRN:
			case QM_MR_VERB_FQRL:
				/* Lookup in the retirement table */
				fq = table_find_fq(p,
						   be32_to_cpu(msg->fq.fqid));
				DPAA_BUG_ON(!fq);
				fq_state_change(p, fq, &swapped_msg, verb);
				if (fq->cb.fqs)
					fq->cb.fqs(p, fq, &swapped_msg);
				break;
			case QM_MR_VERB_FQPN:
				/* Parked */
#ifdef CONFIG_FSL_QMAN_FQ_LOOKUP
				fq = get_fq_table_entry(
					be32_to_cpu(msg->fq.contextB));
#else
				fq = (void *)(uintptr_t)
					be32_to_cpu(msg->fq.contextB);
#endif
				fq_state_change(p, fq, msg, verb);
				if (fq->cb.fqs)
					fq->cb.fqs(p, fq, &swapped_msg);
				break;
			case QM_MR_VERB_DC_ERN:
				/* DCP ERN */
				if (p->cb_dc_ern)
					p->cb_dc_ern(p, msg);
				else if (cb_dc_ern)
					cb_dc_ern(p, msg);
				else {
					static int warn_once;

					if (!warn_once) {
						pr_crit("Leaking DCP ERNs!\n");
						warn_once = 1;
					}
				}
				break;
			default:
				pr_crit("Invalid MR verb 0x%02x\n", verb);
			}
		} else {
			/* It's a software ERN */
#ifdef CONFIG_FSL_QMAN_FQ_LOOKUP
			fq = get_fq_table_entry(be32_to_cpu(msg->ern.tag));
#else
			fq = (void *)(uintptr_t)be32_to_cpu(msg->ern.tag);
#endif
			fq->cb.ern(p, fq, &swapped_msg);
		}
		num++;
		qm_mr_next(&p->p);
		goto mr_loop;
mr_done:
		qm_mr_cci_consume(&p->p, num);
	}
	/*
	 * QM_PIRQ_CSCI/CCSCI has already been cleared, as part of its specific
	 * processing. If that interrupt source has meanwhile been re-asserted,
	 * we mustn't clear it here (or in the top-level interrupt handler).
	 */
	return is & (QM_PIRQ_EQCI | QM_PIRQ_EQRI | QM_PIRQ_MRI);
}
/*
 * remove some slowish-path stuff from the "fast path" and make sure it isn't
 * inlined.
 */
static noinline void clear_vdqcr(struct qman_portal *p, struct qman_fq *fq)
{
	p->vdqcr_owned = NULL;
	FQLOCK(fq);
	fq_clear(fq, QMAN_FQ_STATE_VDQCR);
	FQUNLOCK(fq);
	wake_up(&affine_queue);
}
/*
 * The only states that would conflict with other things if they ran at the
 * same time on the same cpu are:
 *
 *   (i) setting/clearing vdqcr_owned, and
 *  (ii) clearing the NE (Not Empty) flag.
 *
 * Both are safe. Because:
 *
 *   (i) this clearing can only occur after qman_set_vdq() has set the
 *	 vdqcr_owned field (which it does before setting VDQCR), and
 *	 qman_volatile_dequeue() blocks interrupts and preemption while this is
 *	 done so that we can't interfere.
 *  (ii) the NE flag is only cleared after qman_retire_fq() has set it, and as
 *	 with (i) that API prevents us from interfering until it's safe.
 *
 * The good thing is that qman_set_vdq() and qman_retire_fq() run far
 * less frequently (ie. per-FQ) than __poll_portal_fast() does, so the nett
 * advantage comes from this function not having to "lock" anything at all.
 *
 * Note also that the callbacks are invoked at points which are safe against the
 * above potential conflicts, but that this function itself is not re-entrant
 * (this is because the function tracks one end of each FIFO in the portal and
 * we do *not* want to lock that). So the consequence is that it is safe for
 * user callbacks to call into any QMan API.
 */
static inline unsigned int __poll_portal_fast(struct qman_portal *p,
					      unsigned int poll_limit)
{
	const struct qm_dqrr_entry *dq;
	struct qman_fq *fq;
	enum qman_cb_dqrr_result res;
	unsigned int limit = 0;
#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
	struct qm_dqrr_entry *shadow;
#endif
	do {
		qm_dqrr_pvb_update(&p->p);
		dq = qm_dqrr_current(&p->p);
		if (!dq)
			break;
#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
		/* If running on an LE system the fields of the
		 * dequeue entry must be swapped. Because the
		 * QMan HW will ignore writes the DQRR entry is
		 * copied and the index stored within the copy
		 */
		shadow = &p->shadow_dqrr[DQRR_PTR2IDX(dq)];
		*shadow = *dq;
		dq = shadow;
		shadow->fqid = be32_to_cpu(shadow->fqid);
		shadow->contextB = be32_to_cpu(shadow->contextB);
		shadow->seqnum = be16_to_cpu(shadow->seqnum);
		hw_fd_to_cpu(&shadow->fd);
#endif

		if (dq->stat & QM_DQRR_STAT_UNSCHEDULED) {
			/*
			 * VDQCR: don't trust context_b as the FQ may have
			 * been configured for h/w consumption and we're
			 * draining it post-retirement.
			 */
			fq = p->vdqcr_owned;
			/*
			 * We only set QMAN_FQ_STATE_NE when retiring, so we
			 * only need to check for clearing it when doing
			 * volatile dequeues. It's one less thing to check
			 * in the critical path (SDQCR).
			 */
			if (dq->stat & QM_DQRR_STAT_FQ_EMPTY)
				fq_clear(fq, QMAN_FQ_STATE_NE);
			/*
			 * This is duplicated from the SDQCR code, but we
			 * have stuff to do before *and* after this callback,
			 * and we don't want multiple if()s in the critical
			 * path (SDQCR).
			 */
			res = fq->cb.dqrr(p, fq, dq);
			if (res == qman_cb_dqrr_stop)
				break;
			/* Check for VDQCR completion */
			if (dq->stat & QM_DQRR_STAT_DQCR_EXPIRED)
				clear_vdqcr(p, fq);
		} else {
			/* SDQCR: context_b points to the FQ */
#ifdef CONFIG_FSL_QMAN_FQ_LOOKUP
			fq = get_fq_table_entry(dq->contextB);
#else
			fq = (void *)(uintptr_t)dq->contextB;
#endif
			/* Now let the callback do its stuff */
			res = fq->cb.dqrr(p, fq, dq);
			/*
			 * The callback can request that we exit without
			 * consuming this entry nor advancing;
			 */
			if (res == qman_cb_dqrr_stop)
				break;
		}
		/* Interpret 'dq' from a driver perspective. */
		/*
		 * Parking isn't possible unless HELDACTIVE was set. NB,
		 * FORCEELIGIBLE implies HELDACTIVE, so we only need to
		 * check for HELDACTIVE to cover both.
		 */
		DPAA_ASSERT((dq->stat & QM_DQRR_STAT_FQ_HELDACTIVE) ||
			    (res != qman_cb_dqrr_park));
		/* just means "skip it, I'll consume it myself later on" */
		if (res != qman_cb_dqrr_defer)
			qm_dqrr_cdc_consume_1ptr(&p->p, dq,
						 res == qman_cb_dqrr_park);
		/* Move forward */
		qm_dqrr_next(&p->p);
		/*
		 * Entry processed and consumed, increment our counter. The
		 * callback can request that we exit after consuming the
		 * entry, and we also exit if we reach our processing limit,
		 * so loop back only if neither of these conditions is met.
		 */
	} while (++limit < poll_limit && res != qman_cb_dqrr_consume_stop);

	return limit;
}
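
/*
 * Illustrative sketch (not part of the driver): a run-to-completion caller
 * typically pairs qman_poll_dqrr() with a DQRR callback registered on the FQ.
 * "my_dqrr_cb" is a hypothetical handler; the driver only requires that it
 * return one of the qman_cb_dqrr_* results interpreted above.
 *
 *	static enum qman_cb_dqrr_result
 *	my_dqrr_cb(struct qman_portal *qm, struct qman_fq *fq,
 *		   const struct qm_dqrr_entry *dq)
 *	{
 *		// consume dq->fd here
 *		return qman_cb_dqrr_consume;
 *	}
 *	...
 *	fq->cb.dqrr = my_dqrr_cb;
 *	for (;;)
 *		qman_poll_dqrr(FSL_QMAN_POLL_LIMIT);
 */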
u16 qman_affine_channel(int cpu)
{
	if (cpu < 0) {
		struct qman_portal *portal = get_affine_portal();

		cpu = portal->config->cpu;
	}
	DPAA_BUG_ON(!CPU_ISSET(cpu, &affine_mask));
	return affine_channels[cpu];
}
struct qm_dqrr_entry *qman_dequeue(struct qman_fq *fq)
{
	struct qman_portal *p = get_affine_portal();
	const struct qm_dqrr_entry *dq;
#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
	struct qm_dqrr_entry *shadow;
#endif

	qm_dqrr_pvb_update(&p->p);
	dq = qm_dqrr_current(&p->p);
	if (!dq)
		return NULL;

	if (!(dq->stat & QM_DQRR_STAT_FD_VALID)) {
		/* Invalid DQRR - put the portal and consume the DQRR.
		 * Return NULL to user as no packet is seen.
		 */
		qman_dqrr_consume(fq, (struct qm_dqrr_entry *)dq);
		return NULL;
	}

#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
	shadow = &p->shadow_dqrr[DQRR_PTR2IDX(dq)];
	*shadow = *dq;
	dq = shadow;
	shadow->fqid = be32_to_cpu(shadow->fqid);
	shadow->contextB = be32_to_cpu(shadow->contextB);
	shadow->seqnum = be16_to_cpu(shadow->seqnum);
	hw_fd_to_cpu(&shadow->fd);
#endif

	if (dq->stat & QM_DQRR_STAT_FQ_EMPTY)
		fq_clear(fq, QMAN_FQ_STATE_NE);

	return (struct qm_dqrr_entry *)dq;
}
void qman_dqrr_consume(struct qman_fq *fq,
		       struct qm_dqrr_entry *dq)
{
	struct qman_portal *p = get_affine_portal();

	if (dq->stat & QM_DQRR_STAT_DQCR_EXPIRED)
		clear_vdqcr(p, fq);

	qm_dqrr_cdc_consume_1ptr(&p->p, dq, 0);
	qm_dqrr_next(&p->p);
}
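
/*
 * Illustrative sketch (not part of the driver): qman_dequeue() hands back a
 * DQRR entry that the caller owns until it is returned via
 * qman_dqrr_consume(); skipping the consume step leaks a ring slot.
 *
 *	struct qm_dqrr_entry *dq = qman_dequeue(fq);
 *
 *	if (dq) {
 *		process_fd(&dq->fd);		// hypothetical application hook
 *		qman_dqrr_consume(fq, dq);
 *	}
 */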
int qman_poll_dqrr(unsigned int limit)
{
	struct qman_portal *p = get_affine_portal();
	int ret;

	ret = __poll_portal_fast(p, limit);
	return ret;
}
void qman_poll(void)
{
	struct qman_portal *p = get_affine_portal();

	if ((~p->irq_sources) & QM_PIRQ_SLOW) {
		if (!(p->slowpoll--)) {
			u32 is = qm_isr_status_read(&p->p) & ~p->irq_sources;
			u32 active = __poll_portal_slow(p, is);

			if (active) {
				qm_isr_status_clear(&p->p, active);
				p->slowpoll = SLOW_POLL_BUSY;
			} else
				p->slowpoll = SLOW_POLL_IDLE;
		}
	}
	if ((~p->irq_sources) & QM_PIRQ_DQRI)
		__poll_portal_fast(p, FSL_QMAN_POLL_LIMIT);
}
void qman_stop_dequeues(void)
{
	struct qman_portal *p = get_affine_portal();

	qman_stop_dequeues_ex(p);
}

void qman_start_dequeues(void)
{
	struct qman_portal *p = get_affine_portal();

	DPAA_ASSERT(p->dqrr_disable_ref > 0);
	if (!(--p->dqrr_disable_ref))
		qm_dqrr_set_maxfill(&p->p, DQRR_MAXFILL);
}
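
/*
 * Illustrative sketch (not part of the driver): the stop/start pair is
 * reference counted, so nested critical sections compose; dequeues resume
 * only when the final qman_start_dequeues() drops the count to zero.
 *
 *	qman_stop_dequeues();		// ref 0 -> 1, MAXFILL forced to 0
 *	qman_stop_dequeues();		// ref 1 -> 2, no h/w change
 *	reconfigure_something();	// hypothetical quiesced-portal work
 *	qman_start_dequeues();		// ref 2 -> 1, still stopped
 *	qman_start_dequeues();		// ref 1 -> 0, MAXFILL restored
 */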
void qman_static_dequeue_add(u32 pools)
{
	struct qman_portal *p = get_affine_portal();

	pools &= p->config->pools;
	p->sdqcr |= pools;
	qm_dqrr_sdqcr_set(&p->p, p->sdqcr);
}

void qman_static_dequeue_del(u32 pools)
{
	struct qman_portal *p = get_affine_portal();

	pools &= p->config->pools;
	p->sdqcr &= ~pools;
	qm_dqrr_sdqcr_set(&p->p, p->sdqcr);
}

u32 qman_static_dequeue_get(void)
{
	struct qman_portal *p = get_affine_portal();
	return p->sdqcr;
}

void qman_dca(struct qm_dqrr_entry *dq, int park_request)
{
	struct qman_portal *p = get_affine_portal();

	qm_dqrr_cdc_consume_1ptr(&p->p, dq, park_request);
}
/* Frame queue API */
static const char *mcr_result_str(u8 result)
{
	switch (result) {
	case QM_MCR_RESULT_NULL:
		return "QM_MCR_RESULT_NULL";
	case QM_MCR_RESULT_OK:
		return "QM_MCR_RESULT_OK";
	case QM_MCR_RESULT_ERR_FQID:
		return "QM_MCR_RESULT_ERR_FQID";
	case QM_MCR_RESULT_ERR_FQSTATE:
		return "QM_MCR_RESULT_ERR_FQSTATE";
	case QM_MCR_RESULT_ERR_NOTEMPTY:
		return "QM_MCR_RESULT_ERR_NOTEMPTY";
	case QM_MCR_RESULT_PENDING:
		return "QM_MCR_RESULT_PENDING";
	case QM_MCR_RESULT_ERR_BADCOMMAND:
		return "QM_MCR_RESULT_ERR_BADCOMMAND";
	}
	return "<unknown MCR result>";
}
int qman_create_fq(u32 fqid, u32 flags, struct qman_fq *fq)
{
	struct qm_fqd fqd;
	struct qm_mcr_queryfq_np np;
	struct qm_mc_command *mcc;
	struct qm_mc_result *mcr;
	struct qman_portal *p;

	if (flags & QMAN_FQ_FLAG_DYNAMIC_FQID) {
		int ret = qman_alloc_fqid(&fqid);

		if (ret)
			return ret;
	}
	spin_lock_init(&fq->fqlock);
	fq->fqid = fqid;
	fq->flags = flags;
	fq->state = qman_fq_state_oos;
	fq->cgr_groupid = 0;
#ifdef CONFIG_FSL_QMAN_FQ_LOOKUP
	if (unlikely(find_empty_fq_table_entry(&fq->key, fq))) {
		pr_info("Find empty table entry failed\n");
		return -ENOMEM;
	}
#endif
	if (!(flags & QMAN_FQ_FLAG_AS_IS) || (flags & QMAN_FQ_FLAG_NO_MODIFY))
		return 0;
	/* Everything else is AS_IS support */
	p = get_affine_portal();
	mcc = qm_mc_start(&p->p);
	mcc->queryfq.fqid = cpu_to_be32(fqid);
	qm_mc_commit(&p->p, QM_MCC_VERB_QUERYFQ);
	while (!(mcr = qm_mc_result(&p->p)))
		cpu_relax();
	DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCC_VERB_QUERYFQ);
	if (mcr->result != QM_MCR_RESULT_OK) {
		pr_err("QUERYFQ failed: %s\n", mcr_result_str(mcr->result));
		goto err;
	}
	fqd = mcr->queryfq.fqd;
	hw_fqd_to_cpu(&fqd);
	mcc = qm_mc_start(&p->p);
	mcc->queryfq_np.fqid = cpu_to_be32(fqid);
	qm_mc_commit(&p->p, QM_MCC_VERB_QUERYFQ_NP);
	while (!(mcr = qm_mc_result(&p->p)))
		cpu_relax();
	DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCC_VERB_QUERYFQ_NP);
	if (mcr->result != QM_MCR_RESULT_OK) {
		pr_err("QUERYFQ_NP failed: %s\n", mcr_result_str(mcr->result));
		goto err;
	}
	np = mcr->queryfq_np;
	/* Phew, have queryfq and queryfq_np results, stitch together
	 * the FQ object from those.
	 */
	fq->cgr_groupid = fqd.cgid;
	switch (np.state & QM_MCR_NP_STATE_MASK) {
	case QM_MCR_NP_STATE_OOS:
		break;
	case QM_MCR_NP_STATE_RETIRED:
		fq->state = qman_fq_state_retired;
		if (np.frm_cnt)
			fq_set(fq, QMAN_FQ_STATE_NE);
		break;
	case QM_MCR_NP_STATE_TEN_SCHED:
	case QM_MCR_NP_STATE_TRU_SCHED:
	case QM_MCR_NP_STATE_ACTIVE:
		fq->state = qman_fq_state_sched;
		if (np.state & QM_MCR_NP_STATE_R)
			fq_set(fq, QMAN_FQ_STATE_CHANGING);
		break;
	case QM_MCR_NP_STATE_PARKED:
		fq->state = qman_fq_state_parked;
		break;
	default:
		DPAA_ASSERT(NULL == "invalid FQ state");
	}
	if (fqd.fq_ctrl & QM_FQCTRL_CGE)
		fq->state |= QMAN_FQ_STATE_CGR_EN;
	return 0;
err:
	if (flags & QMAN_FQ_FLAG_DYNAMIC_FQID)
		qman_release_fqid(fqid);
	return -EIO;
}
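
/*
 * Illustrative sketch (not part of the driver): creating an FQ object with a
 * driver-allocated FQID. With QMAN_FQ_FLAG_DYNAMIC_FQID the fqid argument is
 * ignored and qman_alloc_fqid() picks one; "my_dqrr_cb"/"my_ern_cb" are
 * hypothetical callbacks and error handling is elided.
 *
 *	static struct qman_fq my_fq;	// hypothetical caller-owned object
 *
 *	my_fq.cb.dqrr = my_dqrr_cb;
 *	my_fq.cb.ern = my_ern_cb;
 *	if (qman_create_fq(0, QMAN_FQ_FLAG_DYNAMIC_FQID, &my_fq))
 *		return;
 */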
void qman_destroy_fq(struct qman_fq *fq, u32 flags __maybe_unused)
{
	/*
	 * We don't need to lock the FQ as it is a pre-condition that the FQ be
	 * quiesced. Instead, run some checks.
	 */
	switch (fq->state) {
	case qman_fq_state_parked:
		DPAA_ASSERT(flags & QMAN_FQ_DESTROY_PARKED);
		/* Fallthrough */
	case qman_fq_state_oos:
		if (fq_isset(fq, QMAN_FQ_FLAG_DYNAMIC_FQID))
			qman_release_fqid(fq->fqid);
#ifdef CONFIG_FSL_QMAN_FQ_LOOKUP
		clear_fq_table_entry(fq->key);
#endif
		return;
	default:
		break;
	}
	DPAA_ASSERT(NULL == "qman_free_fq() on unquiesced FQ!");
}

u32 qman_fq_fqid(struct qman_fq *fq)
{
	return fq->fqid;
}

void qman_fq_state(struct qman_fq *fq, enum qman_fq_state *state, u32 *flags)
{
	if (state)
		*state = fq->state;
	if (flags)
		*flags = fq->flags;
}
int qman_init_fq(struct qman_fq *fq, u32 flags, struct qm_mcc_initfq *opts)
{
	struct qm_mc_command *mcc;
	struct qm_mc_result *mcr;
	struct qman_portal *p;

	u8 res, myverb = (flags & QMAN_INITFQ_FLAG_SCHED) ?
		QM_MCC_VERB_INITFQ_SCHED : QM_MCC_VERB_INITFQ_PARKED;

	if ((fq->state != qman_fq_state_oos) &&
	    (fq->state != qman_fq_state_parked))
		return -EINVAL;
#ifdef RTE_LIBRTE_DPAA_HWDEBUG
	if (unlikely(fq_isset(fq, QMAN_FQ_FLAG_NO_MODIFY)))
		return -EINVAL;
#endif
	if (opts && (opts->we_mask & QM_INITFQ_WE_OAC)) {
		/* And can't be set at the same time as TDTHRESH */
		if (opts->we_mask & QM_INITFQ_WE_TDTHRESH)
			return -EINVAL;
	}
	/* Issue an INITFQ_[PARKED|SCHED] management command */
	p = get_affine_portal();
	FQLOCK(fq);
	if (unlikely((fq_isset(fq, QMAN_FQ_STATE_CHANGING)) ||
		     ((fq->state != qman_fq_state_oos) &&
		      (fq->state != qman_fq_state_parked)))) {
		FQUNLOCK(fq);
		return -EBUSY;
	}
	mcc = qm_mc_start(&p->p);
	if (opts)
		mcc->initfq = *opts;
	mcc->initfq.fqid = cpu_to_be32(fq->fqid);
	mcc->initfq.count = 0;
	/*
	 * If the FQ does *not* have the TO_DCPORTAL flag, context_b is set as a
	 * demux pointer. Otherwise, the caller-provided value is allowed to
	 * stand, don't overwrite it.
	 */
	if (fq_isclear(fq, QMAN_FQ_FLAG_TO_DCPORTAL)) {
		dma_addr_t phys_fq;

		mcc->initfq.we_mask |= QM_INITFQ_WE_CONTEXTB;
#ifdef CONFIG_FSL_QMAN_FQ_LOOKUP
		mcc->initfq.fqd.context_b = fq->key;
#else
		mcc->initfq.fqd.context_b = (u32)(uintptr_t)fq;
#endif
		/*
		 * and the physical address - NB, if the user wasn't trying to
		 * set CONTEXTA, clear the stashing settings.
		 */
		if (!(mcc->initfq.we_mask & QM_INITFQ_WE_CONTEXTA)) {
			mcc->initfq.we_mask |= QM_INITFQ_WE_CONTEXTA;
			memset(&mcc->initfq.fqd.context_a, 0,
			       sizeof(mcc->initfq.fqd.context_a));
		} else {
			phys_fq = rte_mem_virt2iova(fq);
			qm_fqd_stashing_set64(&mcc->initfq.fqd, phys_fq);
		}
	}
	if (flags & QMAN_INITFQ_FLAG_LOCAL) {
		mcc->initfq.fqd.dest.channel = p->config->channel;
		if (!(mcc->initfq.we_mask & QM_INITFQ_WE_DESTWQ)) {
			mcc->initfq.we_mask |= QM_INITFQ_WE_DESTWQ;
			mcc->initfq.fqd.dest.wq = 4;
		}
	}
	mcc->initfq.we_mask = cpu_to_be16(mcc->initfq.we_mask);
	cpu_to_hw_fqd(&mcc->initfq.fqd);
	qm_mc_commit(&p->p, myverb);
	while (!(mcr = qm_mc_result(&p->p)))
		cpu_relax();
	DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == myverb);
	res = mcr->result;
	if (res != QM_MCR_RESULT_OK) {
		FQUNLOCK(fq);
		return -EIO;
	}
	if (opts) {
		if (opts->we_mask & QM_INITFQ_WE_FQCTRL) {
			if (opts->fqd.fq_ctrl & QM_FQCTRL_CGE)
				fq_set(fq, QMAN_FQ_STATE_CGR_EN);
			else
				fq_clear(fq, QMAN_FQ_STATE_CGR_EN);
		}
		if (opts->we_mask & QM_INITFQ_WE_CGID)
			fq->cgr_groupid = opts->fqd.cgid;
	}
	fq->state = (flags & QMAN_INITFQ_FLAG_SCHED) ?
		qman_fq_state_sched : qman_fq_state_parked;
	FQUNLOCK(fq);
	return 0;
}
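
/*
 * Illustrative sketch (not part of the driver): the usual lifecycle after
 * qman_create_fq() is INITFQ followed (optionally) by scheduling, either in
 * one step with QMAN_INITFQ_FLAG_SCHED or separately via qman_schedule_fq().
 * Assumes "my_fq" from the sketch above; QM_FQCTRL_HOLDACTIVE is assumed
 * from the QMan FQD definitions and error handling is elided.
 *
 *	struct qm_mcc_initfq opts;
 *
 *	memset(&opts, 0, sizeof(opts));
 *	opts.we_mask = QM_INITFQ_WE_FQCTRL;
 *	opts.fqd.fq_ctrl = QM_FQCTRL_HOLDACTIVE;
 *	if (qman_init_fq(&my_fq, QMAN_INITFQ_FLAG_SCHED, &opts))
 *		return;			// FQ is now qman_fq_state_sched
 */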
int qman_schedule_fq(struct qman_fq *fq)
{
	struct qm_mc_command *mcc;
	struct qm_mc_result *mcr;
	struct qman_portal *p;

	int ret = 0;
	u8 res;

	if (fq->state != qman_fq_state_parked)
		return -EINVAL;
#ifdef RTE_LIBRTE_DPAA_HWDEBUG
	if (unlikely(fq_isset(fq, QMAN_FQ_FLAG_NO_MODIFY)))
		return -EINVAL;
#endif
	/* Issue a ALTERFQ_SCHED management command */
	p = get_affine_portal();

	FQLOCK(fq);
	if (unlikely((fq_isset(fq, QMAN_FQ_STATE_CHANGING)) ||
		     (fq->state != qman_fq_state_parked))) {
		ret = -EBUSY;
		goto out;
	}
	mcc = qm_mc_start(&p->p);
	mcc->alterfq.fqid = cpu_to_be32(fq->fqid);
	qm_mc_commit(&p->p, QM_MCC_VERB_ALTER_SCHED);
	while (!(mcr = qm_mc_result(&p->p)))
		cpu_relax();
	DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCR_VERB_ALTER_SCHED);
	res = mcr->result;
	if (res != QM_MCR_RESULT_OK) {
		ret = -EIO;
		goto out;
	}
	fq->state = qman_fq_state_sched;
out:
	FQUNLOCK(fq);
	return ret;
}
int qman_retire_fq(struct qman_fq *fq, u32 *flags)
{
	struct qm_mc_command *mcc;
	struct qm_mc_result *mcr;
	struct qman_portal *p;

	int rval;
	u8 res;

	if ((fq->state != qman_fq_state_parked) &&
	    (fq->state != qman_fq_state_sched))
		return -EINVAL;
#ifdef RTE_LIBRTE_DPAA_HWDEBUG
	if (unlikely(fq_isset(fq, QMAN_FQ_FLAG_NO_MODIFY)))
		return -EINVAL;
#endif
	p = get_affine_portal();

	FQLOCK(fq);
	if (unlikely((fq_isset(fq, QMAN_FQ_STATE_CHANGING)) ||
		     (fq->state == qman_fq_state_retired) ||
		     (fq->state == qman_fq_state_oos))) {
		rval = -EBUSY;
		goto out;
	}
	rval = table_push_fq(p, fq);
	if (rval)
		goto out;
	mcc = qm_mc_start(&p->p);
	mcc->alterfq.fqid = cpu_to_be32(fq->fqid);
	qm_mc_commit(&p->p, QM_MCC_VERB_ALTER_RETIRE);
	while (!(mcr = qm_mc_result(&p->p)))
		cpu_relax();
	DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCR_VERB_ALTER_RETIRE);
	res = mcr->result;
	/*
	 * "Elegant" would be to treat OK/PENDING the same way; set CHANGING,
	 * and defer the flags until FQRNI or FQRN (respectively) show up. But
	 * "Friendly" is to process OK immediately, and not set CHANGING. We do
	 * friendly, otherwise the caller doesn't necessarily have a fully
	 * "retired" FQ on return even if the retirement was immediate. However
	 * this does mean some code duplication between here and
	 * fq_state_change().
	 */
	if (likely(res == QM_MCR_RESULT_OK)) {
		rval = 0;
		/* Process 'fq' right away, we'll ignore FQRNI */
		if (mcr->alterfq.fqs & QM_MCR_FQS_NOTEMPTY)
			fq_set(fq, QMAN_FQ_STATE_NE);
		if (mcr->alterfq.fqs & QM_MCR_FQS_ORLPRESENT)
			fq_set(fq, QMAN_FQ_STATE_ORL);
		else
			table_del_fq(p, fq);
		if (flags)
			*flags = fq->flags;
		fq->state = qman_fq_state_retired;
		if (fq->cb.fqs) {
			/*
			 * Another issue with supporting "immediate" retirement
			 * is that we're forced to drop FQRNIs, because by the
			 * time they're seen it may already be "too late" (the
			 * fq may have been OOS'd and free()'d already). But if
			 * the upper layer wants a callback whether it's
			 * immediate or not, we have to fake a "MR" entry to
			 * look like an FQRNI...
			 */
			struct qm_mr_entry msg;

			msg.verb = QM_MR_VERB_FQRNI;
			msg.fq.fqs = mcr->alterfq.fqs;
			msg.fq.fqid = fq->fqid;
#ifdef CONFIG_FSL_QMAN_FQ_LOOKUP
			msg.fq.contextB = fq->key;
#else
			msg.fq.contextB = (u32)(uintptr_t)fq;
#endif
			fq->cb.fqs(p, fq, &msg);
		}
	} else if (res == QM_MCR_RESULT_PENDING) {
		rval = 1;
		fq_set(fq, QMAN_FQ_STATE_CHANGING);
	} else {
		rval = -EIO;
		table_del_fq(p, fq);
	}
out:
	FQUNLOCK(fq);
	return rval;
}
int qman_oos_fq(struct qman_fq *fq)
{
	struct qm_mc_command *mcc;
	struct qm_mc_result *mcr;
	struct qman_portal *p;

	int ret = 0;
	u8 res;

	if (fq->state != qman_fq_state_retired)
		return -EINVAL;
#ifdef RTE_LIBRTE_DPAA_HWDEBUG
	if (unlikely(fq_isset(fq, QMAN_FQ_FLAG_NO_MODIFY)))
		return -EINVAL;
#endif
	p = get_affine_portal();
	FQLOCK(fq);
	if (unlikely((fq_isset(fq, QMAN_FQ_STATE_BLOCKOOS)) ||
		     (fq->state != qman_fq_state_retired))) {
		ret = -EBUSY;
		goto out;
	}
	mcc = qm_mc_start(&p->p);
	mcc->alterfq.fqid = cpu_to_be32(fq->fqid);
	qm_mc_commit(&p->p, QM_MCC_VERB_ALTER_OOS);
	while (!(mcr = qm_mc_result(&p->p)))
		cpu_relax();
	DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCR_VERB_ALTER_OOS);
	res = mcr->result;
	if (res != QM_MCR_RESULT_OK) {
		ret = -EIO;
		goto out;
	}
	fq->state = qman_fq_state_oos;
out:
	FQUNLOCK(fq);
	return ret;
}
int qman_fq_flow_control(struct qman_fq *fq, int xon)
{
	struct qm_mc_command *mcc;
	struct qm_mc_result *mcr;
	struct qman_portal *p;

	int ret = 0;
	u8 res;
	u8 myverb;

	if ((fq->state == qman_fq_state_oos) ||
	    (fq->state == qman_fq_state_retired) ||
	    (fq->state == qman_fq_state_parked))
		return -EINVAL;

#ifdef RTE_LIBRTE_DPAA_HWDEBUG
	if (unlikely(fq_isset(fq, QMAN_FQ_FLAG_NO_MODIFY)))
		return -EINVAL;
#endif
	/* Issue a ALTER_FQXON or ALTER_FQXOFF management command */
	p = get_affine_portal();
	FQLOCK(fq);
	if (unlikely((fq_isset(fq, QMAN_FQ_STATE_CHANGING)) ||
		     (fq->state == qman_fq_state_parked) ||
		     (fq->state == qman_fq_state_oos) ||
		     (fq->state == qman_fq_state_retired))) {
		ret = -EBUSY;
		goto out;
	}

	mcc = qm_mc_start(&p->p);
	mcc->alterfq.fqid = fq->fqid;
	mcc->alterfq.count = 0;
	myverb = xon ? QM_MCC_VERB_ALTER_FQXON : QM_MCC_VERB_ALTER_FQXOFF;

	qm_mc_commit(&p->p, myverb);
	while (!(mcr = qm_mc_result(&p->p)))
		cpu_relax();
	DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == myverb);

	res = mcr->result;
	if (res != QM_MCR_RESULT_OK) {
		ret = -EIO;
		goto out;
	}
out:
	FQUNLOCK(fq);
	return ret;
}
int qman_query_fq(struct qman_fq *fq, struct qm_fqd *fqd)
{
	struct qm_mc_command *mcc;
	struct qm_mc_result *mcr;
	struct qman_portal *p = get_affine_portal();

	u8 res;

	mcc = qm_mc_start(&p->p);
	mcc->queryfq.fqid = cpu_to_be32(fq->fqid);
	qm_mc_commit(&p->p, QM_MCC_VERB_QUERYFQ);
	while (!(mcr = qm_mc_result(&p->p)))
		cpu_relax();
	DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCR_VERB_QUERYFQ);
	res = mcr->result;
	if (res == QM_MCR_RESULT_OK)
		*fqd = mcr->queryfq.fqd;
	hw_fqd_to_cpu(fqd);
	if (res != QM_MCR_RESULT_OK)
		return -EIO;
	return 0;
}
int qman_query_fq_has_pkts(struct qman_fq *fq)
{
	struct qm_mc_command *mcc;
	struct qm_mc_result *mcr;
	struct qman_portal *p = get_affine_portal();

	int ret = 0;
	u8 res;

	mcc = qm_mc_start(&p->p);
	mcc->queryfq.fqid = cpu_to_be32(fq->fqid);
	qm_mc_commit(&p->p, QM_MCC_VERB_QUERYFQ_NP);
	while (!(mcr = qm_mc_result(&p->p)))
		cpu_relax();
	res = mcr->result;
	if (res == QM_MCR_RESULT_OK)
		ret = !!mcr->queryfq_np.frm_cnt;
	return ret;
}
int qman_query_fq_np(struct qman_fq *fq, struct qm_mcr_queryfq_np *np)
{
	struct qm_mc_command *mcc;
	struct qm_mc_result *mcr;
	struct qman_portal *p = get_affine_portal();

	u8 res;

	mcc = qm_mc_start(&p->p);
	mcc->queryfq.fqid = cpu_to_be32(fq->fqid);
	qm_mc_commit(&p->p, QM_MCC_VERB_QUERYFQ_NP);
	while (!(mcr = qm_mc_result(&p->p)))
		cpu_relax();
	DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCR_VERB_QUERYFQ_NP);
	res = mcr->result;
	if (res == QM_MCR_RESULT_OK) {
		*np = mcr->queryfq_np;
		np->fqd_link = be24_to_cpu(np->fqd_link);
		np->odp_seq = be16_to_cpu(np->odp_seq);
		np->orp_nesn = be16_to_cpu(np->orp_nesn);
		np->orp_ea_hseq = be16_to_cpu(np->orp_ea_hseq);
		np->orp_ea_tseq = be16_to_cpu(np->orp_ea_tseq);
		np->orp_ea_hptr = be24_to_cpu(np->orp_ea_hptr);
		np->orp_ea_tptr = be24_to_cpu(np->orp_ea_tptr);
		np->pfdr_hptr = be24_to_cpu(np->pfdr_hptr);
		np->pfdr_tptr = be24_to_cpu(np->pfdr_tptr);
		np->ics_surp = be16_to_cpu(np->ics_surp);
		np->byte_cnt = be32_to_cpu(np->byte_cnt);
		np->frm_cnt = be24_to_cpu(np->frm_cnt);
		np->ra1_sfdr = be16_to_cpu(np->ra1_sfdr);
		np->ra2_sfdr = be16_to_cpu(np->ra2_sfdr);
		np->od1_sfdr = be16_to_cpu(np->od1_sfdr);
		np->od2_sfdr = be16_to_cpu(np->od2_sfdr);
		np->od3_sfdr = be16_to_cpu(np->od3_sfdr);
	}
	if (res == QM_MCR_RESULT_ERR_FQID)
		return -ERANGE;
	else if (res != QM_MCR_RESULT_OK)
		return -EIO;
	return 0;
}
int qman_query_wq(u8 query_dedicated, struct qm_mcr_querywq *wq)
{
	struct qm_mc_command *mcc;
	struct qm_mc_result *mcr;
	struct qman_portal *p = get_affine_portal();

	u8 res, myverb;

	myverb = (query_dedicated) ? QM_MCR_VERB_QUERYWQ_DEDICATED :
				     QM_MCR_VERB_QUERYWQ;
	mcc = qm_mc_start(&p->p);
	mcc->querywq.channel.id = cpu_to_be16(wq->channel.id);
	qm_mc_commit(&p->p, myverb);
	while (!(mcr = qm_mc_result(&p->p)))
		cpu_relax();
	DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == myverb);
	res = mcr->result;
	if (res == QM_MCR_RESULT_OK) {
		int i, array_len;

		wq->channel.id = be16_to_cpu(mcr->querywq.channel.id);
		array_len = ARRAY_SIZE(mcr->querywq.wq_len);
		for (i = 0; i < array_len; i++)
			wq->wq_len[i] = be32_to_cpu(mcr->querywq.wq_len[i]);
	}
	if (res != QM_MCR_RESULT_OK) {
		pr_err("QUERYWQ failed: %s\n", mcr_result_str(res));
		return -EIO;
	}
	return 0;
}
int qman_testwrite_cgr(struct qman_cgr *cgr, u64 i_bcnt,
		       struct qm_mcr_cgrtestwrite *result)
{
	struct qm_mc_command *mcc;
	struct qm_mc_result *mcr;
	struct qman_portal *p = get_affine_portal();

	u8 res;

	mcc = qm_mc_start(&p->p);
	mcc->cgrtestwrite.cgid = cgr->cgrid;
	mcc->cgrtestwrite.i_bcnt_hi = (u8)(i_bcnt >> 32);
	mcc->cgrtestwrite.i_bcnt_lo = (u32)i_bcnt;
	qm_mc_commit(&p->p, QM_MCC_VERB_CGRTESTWRITE);
	while (!(mcr = qm_mc_result(&p->p)))
		cpu_relax();
	DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCC_VERB_CGRTESTWRITE);
	res = mcr->result;
	if (res == QM_MCR_RESULT_OK)
		*result = mcr->cgrtestwrite;
	if (res != QM_MCR_RESULT_OK) {
		pr_err("CGR TEST WRITE failed: %s\n", mcr_result_str(res));
		return -EIO;
	}
	return 0;
}
int qman_query_cgr(struct qman_cgr *cgr, struct qm_mcr_querycgr *cgrd)
{
	struct qm_mc_command *mcc;
	struct qm_mc_result *mcr;
	struct qman_portal *p = get_affine_portal();

	u8 res;
	unsigned int i;

	mcc = qm_mc_start(&p->p);
	mcc->querycgr.cgid = cgr->cgrid;
	qm_mc_commit(&p->p, QM_MCC_VERB_QUERYCGR);
	while (!(mcr = qm_mc_result(&p->p)))
		cpu_relax();
	DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCC_VERB_QUERYCGR);
	res = mcr->result;
	if (res == QM_MCR_RESULT_OK)
		*cgrd = mcr->querycgr;
	if (res != QM_MCR_RESULT_OK) {
		pr_err("QUERY_CGR failed: %s\n", mcr_result_str(res));
		return -EIO;
	}
	cgrd->cgr.wr_parm_g.word =
		be32_to_cpu(cgrd->cgr.wr_parm_g.word);
	cgrd->cgr.wr_parm_y.word =
		be32_to_cpu(cgrd->cgr.wr_parm_y.word);
	cgrd->cgr.wr_parm_r.word =
		be32_to_cpu(cgrd->cgr.wr_parm_r.word);
	cgrd->cgr.cscn_targ = be32_to_cpu(cgrd->cgr.cscn_targ);
	cgrd->cgr.__cs_thres = be16_to_cpu(cgrd->cgr.__cs_thres);
	for (i = 0; i < ARRAY_SIZE(cgrd->cscn_targ_swp); i++)
		cgrd->cscn_targ_swp[i] =
			be32_to_cpu(cgrd->cscn_targ_swp[i]);
	return 0;
}
int qman_query_congestion(struct qm_mcr_querycongestion *congestion)
{
	struct qm_mc_result *mcr;
	struct qman_portal *p = get_affine_portal();

	u8 res;
	unsigned int i;

	qm_mc_start(&p->p);
	qm_mc_commit(&p->p, QM_MCC_VERB_QUERYCONGESTION);
	while (!(mcr = qm_mc_result(&p->p)))
		cpu_relax();
	DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) ==
		    QM_MCC_VERB_QUERYCONGESTION);
	res = mcr->result;
	if (res == QM_MCR_RESULT_OK)
		*congestion = mcr->querycongestion;
	if (res != QM_MCR_RESULT_OK) {
		pr_err("QUERY_CONGESTION failed: %s\n", mcr_result_str(res));
		return -EIO;
	}
	for (i = 0; i < ARRAY_SIZE(congestion->state.state); i++)
		congestion->state.state[i] =
			be32_to_cpu(congestion->state.state[i]);
	return 0;
}
int qman_set_vdq(struct qman_fq *fq, u16 num)
{
	struct qman_portal *p = get_affine_portal();
	uint32_t vdqcr;
	int ret = -EBUSY;

	vdqcr = QM_VDQCR_EXACT;
	vdqcr |= QM_VDQCR_NUMFRAMES_SET(num);

	if ((fq->state != qman_fq_state_parked) &&
	    (fq->state != qman_fq_state_retired)) {
		ret = -EINVAL;
		goto out;
	}
	if (fq_isset(fq, QMAN_FQ_STATE_VDQCR)) {
		ret = -EBUSY;
		goto out;
	}
	vdqcr = (vdqcr & ~QM_VDQCR_FQID_MASK) | fq->fqid;

	if (!p->vdqcr_owned) {
		FQLOCK(fq);
		if (fq_isset(fq, QMAN_FQ_STATE_VDQCR))
			goto escape;
		fq_set(fq, QMAN_FQ_STATE_VDQCR);
		FQUNLOCK(fq);
		p->vdqcr_owned = fq;
		ret = 0;
	}
escape:
	if (!ret)
		qm_dqrr_vdqcr_set(&p->p, vdqcr);
out:
	return ret;
}
int qman_volatile_dequeue(struct qman_fq *fq, u32 flags __maybe_unused,
			  u32 vdqcr)
{
	struct qman_portal *p;
	int ret = -EBUSY;

	if ((fq->state != qman_fq_state_parked) &&
	    (fq->state != qman_fq_state_retired))
		return -EINVAL;
	if (vdqcr & QM_VDQCR_FQID_MASK)
		return -EINVAL;
	if (fq_isset(fq, QMAN_FQ_STATE_VDQCR))
		return -EBUSY;
	vdqcr = (vdqcr & ~QM_VDQCR_FQID_MASK) | fq->fqid;

	p = get_affine_portal();

	if (!p->vdqcr_owned) {
		FQLOCK(fq);
		if (fq_isset(fq, QMAN_FQ_STATE_VDQCR))
			goto escape;
		fq_set(fq, QMAN_FQ_STATE_VDQCR);
		FQUNLOCK(fq);
		p->vdqcr_owned = fq;
		ret = 0;
	}
escape:
	if (ret)
		return ret;

	/* VDQCR is set */
	qm_dqrr_vdqcr_set(&p->p, vdqcr);
	return 0;
}
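
/*
 * Illustrative sketch (not part of the driver): draining a retired FQ with a
 * volatile dequeue command. The VDQCR owner is per-portal, so -EBUSY simply
 * means "retry later"; completion is signalled to the DQRR callback via
 * QM_DQRR_STAT_DQCR_EXPIRED, handled in __poll_portal_fast().
 * QM_VDQCR_NUMFRAMES_TILLEMPTY is assumed from the QMan VDQCR definitions.
 *
 *	while (qman_volatile_dequeue(fq, 0,
 *				     QM_VDQCR_NUMFRAMES_TILLEMPTY) == -EBUSY)
 *		qman_poll_dqrr(FSL_QMAN_POLL_LIMIT);
 */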
static noinline void update_eqcr_ci(struct qman_portal *p, u8 avail)
{
	if (avail)
		qm_eqcr_cce_prefetch(&p->p);
	else
		qm_eqcr_cce_update(&p->p);
}

int qman_eqcr_is_empty(void)
{
	struct qman_portal *p = get_affine_portal();
	u8 avail;

	update_eqcr_ci(p, 0);
	avail = qm_eqcr_get_fill(&p->p);
	return (avail == 0);
}
void qman_set_dc_ern(qman_cb_dc_ern handler, int affine)
{
	if (affine) {
		struct qman_portal *p = get_affine_portal();

		p->cb_dc_ern = handler;
	} else
		cb_dc_ern = handler;
}
static inline struct qm_eqcr_entry *try_p_eq_start(struct qman_portal *p,
						   struct qman_fq *fq,
						   const struct qm_fd *fd,
						   u32 flags)
{
	struct qm_eqcr_entry *eq;
	u8 avail;

	if (p->use_eqcr_ci_stashing) {
		/*
		 * The stashing case is easy, only update if we need to in
		 * order to try and liberate ring entries.
		 */
		eq = qm_eqcr_start_stash(&p->p);
	} else {
		/*
		 * The non-stashing case is harder, need to prefetch ahead of
		 * time.
		 */
		avail = qm_eqcr_get_avail(&p->p);
		if (avail < 2)
			update_eqcr_ci(p, avail);
		eq = qm_eqcr_start_no_stash(&p->p);
	}

	if (unlikely(!eq))
		return NULL;

	if (flags & QMAN_ENQUEUE_FLAG_DCA)
		eq->dca = QM_EQCR_DCA_ENABLE |
			((flags & QMAN_ENQUEUE_FLAG_DCA_PARK) ?
					QM_EQCR_DCA_PARK : 0) |
			((flags >> 8) & QM_EQCR_DCA_IDXMASK);
	eq->fqid = cpu_to_be32(fq->fqid);
#ifdef CONFIG_FSL_QMAN_FQ_LOOKUP
	eq->tag = cpu_to_be32(fq->key);
#else
	eq->tag = cpu_to_be32((u32)(uintptr_t)fq);
#endif
	eq->fd = *fd;
	cpu_to_hw_fd(&eq->fd);
	return eq;
}
int qman_enqueue(struct qman_fq *fq, const struct qm_fd *fd, u32 flags)
{
	struct qman_portal *p = get_affine_portal();
	struct qm_eqcr_entry *eq;

	eq = try_p_eq_start(p, fq, fd, flags);
	if (!eq)
		return -EBUSY;
	/* Note: QM_EQCR_VERB_INTERRUPT == QMAN_ENQUEUE_FLAG_WAIT_SYNC */
	qm_eqcr_pvb_commit(&p->p, QM_EQCR_VERB_CMD_ENQUEUE |
		(flags & (QM_EQCR_VERB_COLOUR_MASK | QM_EQCR_VERB_INTERRUPT)));
	/* Factor the below out, it's used from qman_enqueue_orp() too */
	return 0;
}
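
/*
 * Illustrative sketch (not part of the driver): qman_enqueue() fails with
 * -EBUSY when no EQCR slot is available, so callers normally spin (or back
 * off) until the ring drains; "fd" is a frame descriptor prepared by the
 * caller.
 *
 *	while (qman_enqueue(fq, &fd, 0) == -EBUSY)
 *		cpu_relax();
 */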
int qman_enqueue_multi(struct qman_fq *fq,
		       const struct qm_fd *fd,
		       int frames_to_send)
{
	struct qman_portal *p = get_affine_portal();
	struct qm_portal *portal = &p->p;

	register struct qm_eqcr *eqcr = &portal->eqcr;
	struct qm_eqcr_entry *eq = eqcr->cursor, *prev_eq;

	u8 i, diff, old_ci, sent = 0;

	/* Update the available entries if no entry is free */
	if (!eqcr->available) {
		old_ci = eqcr->ci;
		eqcr->ci = qm_cl_in(EQCR_CI) & (QM_EQCR_SIZE - 1);
		diff = qm_cyc_diff(QM_EQCR_SIZE, old_ci, eqcr->ci);
		eqcr->available += diff;
		if (!diff)
			return 0;
	}

	/* try to send as many frames as possible */
	while (eqcr->available && frames_to_send--) {
		eq->fqid = cpu_to_be32(fq->fqid);
#ifdef CONFIG_FSL_QMAN_FQ_LOOKUP
		eq->tag = cpu_to_be32(fq->key);
#else
		eq->tag = cpu_to_be32((u32)(uintptr_t)fq);
#endif
		eq->fd.opaque_addr = fd->opaque_addr;
		eq->fd.addr = cpu_to_be40(fd->addr);
		eq->fd.status = cpu_to_be32(fd->status);
		eq->fd.opaque = cpu_to_be32(fd->opaque);

		eq = (void *)((unsigned long)(eq + 1) &
			(~(unsigned long)(QM_EQCR_SIZE << 6)));
		eqcr->available--;
		sent++;
		fd++;
	}
	lwsync();

	/* In order for flushes to complete faster, all lines are recorded in
	 * 32 byte word sets
	 */
	eq = eqcr->cursor;
	for (i = 0; i < sent; i++) {
		eq->__dont_write_directly__verb =
			QM_EQCR_VERB_CMD_ENQUEUE | eqcr->vbit;
		prev_eq = eq;
		eq = (void *)((unsigned long)(eq + 1) &
			(~(unsigned long)(QM_EQCR_SIZE << 6)));
		if (unlikely((prev_eq + 1) != eq))
			eqcr->vbit ^= QM_EQCR_VERB_VBIT;
	}

	/* We need to flush all the lines but without load/store operations
	 * between them
	 */
	eq = eqcr->cursor;
	for (i = 0; i < sent; i++) {
		dcbf(eq);
		eq = (void *)((unsigned long)(eq + 1) &
			(~(unsigned long)(QM_EQCR_SIZE << 6)));
	}
	/* Update cursor for the next call */
	eqcr->cursor = eq;
	return sent;
}
int qman_enqueue_orp(struct qman_fq *fq, const struct qm_fd *fd, u32 flags,
		     struct qman_fq *orp, u16 orp_seqnum)
{
	struct qman_portal *p = get_affine_portal();
	struct qm_eqcr_entry *eq;

	eq = try_p_eq_start(p, fq, fd, flags);
	if (!eq)
		return -EBUSY;
	/* Process ORP-specifics here */
	if (flags & QMAN_ENQUEUE_FLAG_NLIS)
		orp_seqnum |= QM_EQCR_SEQNUM_NLIS;
	else {
		orp_seqnum &= ~QM_EQCR_SEQNUM_NLIS;
		if (flags & QMAN_ENQUEUE_FLAG_NESN)
			orp_seqnum |= QM_EQCR_SEQNUM_NESN;
		else
			/* No need to check for QMAN_ENQUEUE_FLAG_HOLE */
			orp_seqnum &= ~QM_EQCR_SEQNUM_NESN;
	}
	eq->seqnum = cpu_to_be16(orp_seqnum);
	eq->orp = cpu_to_be32(orp->fqid);
	/* Note: QM_EQCR_VERB_INTERRUPT == QMAN_ENQUEUE_FLAG_WAIT_SYNC */
	qm_eqcr_pvb_commit(&p->p, QM_EQCR_VERB_ORP |
		((flags & (QMAN_ENQUEUE_FLAG_HOLE | QMAN_ENQUEUE_FLAG_NESN)) ?
				0 : QM_EQCR_VERB_CMD_ENQUEUE) |
		(flags & (QM_EQCR_VERB_COLOUR_MASK | QM_EQCR_VERB_INTERRUPT)));
	return 0;
}
int qman_modify_cgr(struct qman_cgr *cgr, u32 flags,
		    struct qm_mcc_initcgr *opts)
{
	struct qm_mc_command *mcc;
	struct qm_mc_result *mcr;
	struct qman_portal *p = get_affine_portal();

	u8 res;
	u8 verb = QM_MCC_VERB_MODIFYCGR;

	mcc = qm_mc_start(&p->p);
	if (opts) {
		mcc->initcgr = *opts;
		mcc->initcgr.we_mask = cpu_to_be16(mcc->initcgr.we_mask);
		mcc->initcgr.cgr.wr_parm_g.word =
			cpu_to_be32(mcc->initcgr.cgr.wr_parm_g.word);
		mcc->initcgr.cgr.wr_parm_y.word =
			cpu_to_be32(mcc->initcgr.cgr.wr_parm_y.word);
		mcc->initcgr.cgr.wr_parm_r.word =
			cpu_to_be32(mcc->initcgr.cgr.wr_parm_r.word);
		mcc->initcgr.cgr.cscn_targ = cpu_to_be32(mcc->initcgr.cgr.cscn_targ);
		mcc->initcgr.cgr.__cs_thres = cpu_to_be16(mcc->initcgr.cgr.__cs_thres);
	}
	mcc->initcgr.cgid = cgr->cgrid;
	if (flags & QMAN_CGR_FLAG_USE_INIT)
		verb = QM_MCC_VERB_INITCGR;
	qm_mc_commit(&p->p, verb);
	while (!(mcr = qm_mc_result(&p->p)))
		cpu_relax();

	DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == verb);
	res = mcr->result;
	return (res == QM_MCR_RESULT_OK) ? 0 : -EIO;
}
#define TARG_MASK(n) (0x80000000 >> (n->config->channel - \
				     QM_CHANNEL_SWPORTAL0))
#define TARG_DCP_MASK(n) (0x80000000 >> (10 + n))
#define PORTAL_IDX(n) (n->config->channel - QM_CHANNEL_SWPORTAL0)
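
/*
 * Worked example (illustrative): for a portal whose channel is
 * QM_CHANNEL_SWPORTAL0 + 3, PORTAL_IDX() yields 3 and TARG_MASK() yields
 * 0x80000000 >> 3 == 0x10000000, i.e. bit 28 of CSCN_TARG selects s/w
 * portal 3. TARG_DCP_MASK() works the same way but offsets past the ten
 * s/w-portal bits, so DCP portal 0 maps to 0x80000000 >> 10 == 0x00200000.
 */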
int qman_create_cgr(struct qman_cgr *cgr, u32 flags,
		    struct qm_mcc_initcgr *opts)
{
	struct qm_mcr_querycgr cgr_state;
	struct qm_mcc_initcgr local_opts;
	int ret;
	struct qman_portal *p;

	/* We have to check that the provided CGRID is within the limits of the
	 * data-structures, for obvious reasons. However we'll let h/w take
	 * care of determining whether it's within the limits of what exists on
	 * the SoC.
	 */
	if (cgr->cgrid >= __CGR_NUM)
		return -EINVAL;

	p = get_affine_portal();

	memset(&local_opts, 0, sizeof(struct qm_mcc_initcgr));
	cgr->chan = p->config->channel;
	spin_lock(&p->cgr_lock);

	/* if no opts specified, just add it to the list */
	if (!opts)
		goto add_list;

	ret = qman_query_cgr(cgr, &cgr_state);
	if (ret)
		goto release_lock;
	if (opts)
		local_opts = *opts;
	if ((qman_ip_rev & 0xFF00) >= QMAN_REV30)
		local_opts.cgr.cscn_targ_upd_ctrl =
			QM_CGR_TARG_UDP_CTRL_WRITE_BIT | PORTAL_IDX(p);
	else
		/* Overwrite TARG */
		local_opts.cgr.cscn_targ = cgr_state.cgr.cscn_targ |
							TARG_MASK(p);
	local_opts.we_mask |= QM_CGR_WE_CSCN_TARG;

	/* send init if flags indicate so */
	if (opts && (flags & QMAN_CGR_FLAG_USE_INIT))
		ret = qman_modify_cgr(cgr, QMAN_CGR_FLAG_USE_INIT, &local_opts);
	else
		ret = qman_modify_cgr(cgr, 0, &local_opts);
	if (ret)
		goto release_lock;
add_list:
	list_add(&cgr->node, &p->cgr_cbs);

	/* Determine if newly added object requires its callback to be called */
	ret = qman_query_cgr(cgr, &cgr_state);
	if (ret) {
		/* we can't go back, so proceed and return success, but screen
		 * and wail to the log file.
		 */
		pr_crit("CGR HW state partially modified\n");
		ret = 0;
		goto release_lock;
	}
	if (cgr->cb && cgr_state.cgr.cscn_en && qman_cgrs_get(&p->cgrs[1],
							      cgr->cgrid))
		cgr->cb(p, cgr, 1);
release_lock:
	spin_unlock(&p->cgr_lock);
	return ret;
}
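
/*
 * Illustrative sketch (not part of the driver): registering a congestion
 * group with a callback on the current portal. "my_cscn_cb" is a
 * hypothetical handler matching the CGR callback signature, and the
 * QM_CGR_WE_CSCN_EN/QM_CGR_EN names are assumed from the QMan CGR
 * definitions; error handling is elided.
 *
 *	static struct qman_cgr my_cgr;	// hypothetical caller-owned object
 *	struct qm_mcc_initcgr opts;
 *
 *	memset(&opts, 0, sizeof(opts));
 *	opts.we_mask = QM_CGR_WE_CSCN_EN;
 *	opts.cgr.cscn_en = QM_CGR_EN;
 *	my_cgr.cgrid = 0;
 *	my_cgr.cb = my_cscn_cb;
 *	if (qman_create_cgr(&my_cgr, QMAN_CGR_FLAG_USE_INIT, &opts))
 *		return;
 */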
int qman_create_cgr_to_dcp(struct qman_cgr *cgr, u32 flags, u16 dcp_portal,
			   struct qm_mcc_initcgr *opts)
{
	struct qm_mcc_initcgr local_opts;
	struct qm_mcr_querycgr cgr_state;
	int ret;

	if ((qman_ip_rev & 0xFF00) < QMAN_REV30) {
		pr_warn("QMan version doesn't support CSCN => DCP portal\n");
		return -EINVAL;
	}
	/* We have to check that the provided CGRID is within the limits of the
	 * data-structures, for obvious reasons. However we'll let h/w take
	 * care of determining whether it's within the limits of what exists on
	 * the SoC.
	 */
	if (cgr->cgrid >= __CGR_NUM)
		return -EINVAL;

	ret = qman_query_cgr(cgr, &cgr_state);
	if (ret)
		return ret;

	memset(&local_opts, 0, sizeof(struct qm_mcc_initcgr));
	if (opts)
		local_opts = *opts;

	if ((qman_ip_rev & 0xFF00) >= QMAN_REV30)
		local_opts.cgr.cscn_targ_upd_ctrl =
				QM_CGR_TARG_UDP_CTRL_WRITE_BIT |
				QM_CGR_TARG_UDP_CTRL_DCP | dcp_portal;
	else
		local_opts.cgr.cscn_targ = cgr_state.cgr.cscn_targ |
					TARG_DCP_MASK(dcp_portal);
	local_opts.we_mask |= QM_CGR_WE_CSCN_TARG;

	/* send init if flags indicate so */
	if (opts && (flags & QMAN_CGR_FLAG_USE_INIT))
		ret = qman_modify_cgr(cgr, QMAN_CGR_FLAG_USE_INIT,
				      &local_opts);
	else
		ret = qman_modify_cgr(cgr, 0, &local_opts);

	return ret;
}
int qman_delete_cgr(struct qman_cgr *cgr)
{
	struct qm_mcr_querycgr cgr_state;
	struct qm_mcc_initcgr local_opts;
	int ret = 0;
	struct qman_cgr *i;
	struct qman_portal *p = get_affine_portal();

	if (cgr->chan != p->config->channel) {
		pr_crit("Attempting to delete cgr from different portal than"
			" it was created on: create 0x%x, delete 0x%x\n",
			cgr->chan, p->config->channel);
		ret = -EINVAL;
		goto put_portal;
	}
	memset(&local_opts, 0, sizeof(struct qm_mcc_initcgr));
	spin_lock(&p->cgr_lock);
	list_del(&cgr->node);
	/*
	 * If there are no other CGR objects for this CGRID in the list,
	 * update CSCN_TARG accordingly
	 */
	list_for_each_entry(i, &p->cgr_cbs, node)
		if ((i->cgrid == cgr->cgrid) && i->cb)
			goto release_lock;
	ret = qman_query_cgr(cgr, &cgr_state);
	if (ret) {
		/* add back to the list */
		list_add(&cgr->node, &p->cgr_cbs);
		goto release_lock;
	}
	/* Overwrite TARG */
	local_opts.we_mask = QM_CGR_WE_CSCN_TARG;
	if ((qman_ip_rev & 0xFF00) >= QMAN_REV30)
		local_opts.cgr.cscn_targ_upd_ctrl = PORTAL_IDX(p);
	else
		local_opts.cgr.cscn_targ = cgr_state.cgr.cscn_targ &
							~(TARG_MASK(p));
	ret = qman_modify_cgr(cgr, 0, &local_opts);
	if (ret)
		/* add back to the list */
		list_add(&cgr->node, &p->cgr_cbs);
release_lock:
	spin_unlock(&p->cgr_lock);
put_portal:
	return ret;
}
int qman_shutdown_fq(u32 fqid)
{
	struct qman_portal *p;
	struct qm_portal *low_p;
	struct qm_mc_command *mcc;
	struct qm_mc_result *mcr;
	u8 state;
	int orl_empty, fq_empty, drain = 0;
	u32 result;
	u32 channel, wq;
	u16 dest_wq;

	p = get_affine_portal();
	low_p = &p->p;

	/* Determine the state of the FQID */
	mcc = qm_mc_start(low_p);
	mcc->queryfq_np.fqid = cpu_to_be32(fqid);
	qm_mc_commit(low_p, QM_MCC_VERB_QUERYFQ_NP);
	while (!(mcr = qm_mc_result(low_p)))
		cpu_relax();
	DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCR_VERB_QUERYFQ_NP);
	state = mcr->queryfq_np.state & QM_MCR_NP_STATE_MASK;
	if (state == QM_MCR_NP_STATE_OOS)
		return 0; /* Already OOS, no need to do any more checks */

	/* Query which channel the FQ is using */
	mcc = qm_mc_start(low_p);
	mcc->queryfq.fqid = cpu_to_be32(fqid);
	qm_mc_commit(low_p, QM_MCC_VERB_QUERYFQ);
	while (!(mcr = qm_mc_result(low_p)))
		cpu_relax();
	DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCR_VERB_QUERYFQ);

	/* Need to store these since the MCR gets reused */
	dest_wq = be16_to_cpu(mcr->queryfq.fqd.dest_wq);
	channel = dest_wq >> 3;
	wq = dest_wq & 0x7;
	switch (state) {
	case QM_MCR_NP_STATE_TEN_SCHED:
	case QM_MCR_NP_STATE_TRU_SCHED:
	case QM_MCR_NP_STATE_ACTIVE:
	case QM_MCR_NP_STATE_PARKED:
		orl_empty = 0;
		mcc = qm_mc_start(low_p);
		mcc->alterfq.fqid = cpu_to_be32(fqid);
		qm_mc_commit(low_p, QM_MCC_VERB_ALTER_RETIRE);
		while (!(mcr = qm_mc_result(low_p)))
			cpu_relax();
		DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) ==
			    QM_MCR_VERB_ALTER_RETIRE);
		result = mcr->result; /* Make a copy as we reuse MCR below */

		if (result == QM_MCR_RESULT_PENDING) {
			/* Need to wait for the FQRN in the message ring, which
			 * will only occur once the FQ has been drained. In
			 * order for the FQ to drain the portal needs to be set
			 * to dequeue from the channel the FQ is scheduled on
			 */
			const struct qm_mr_entry *msg;
			const struct qm_dqrr_entry *dqrr = NULL;
			int found_fqrn = 0;
			__maybe_unused u16 dequeue_wq = 0;

			/* Flag that we need to drain FQ */
			drain = 1;

			if (channel >= qm_channel_pool1 &&
			    channel < (u16)(qm_channel_pool1 + 15)) {
				/* Pool channel, enable the bit in the portal */
				dequeue_wq = (channel -
					      qm_channel_pool1 + 1) << 4 | wq;
			} else if (channel < qm_channel_pool1) {
				/* Dedicated channel */
				dequeue_wq = wq;
			} else {
				pr_info("Cannot recover FQ 0x%x,"
					" it is scheduled on channel 0x%x",
					fqid, channel);
				return -EBUSY;
			}
			/* Set the sdqcr to drain this channel */
			if (channel < qm_channel_pool1)
				qm_dqrr_sdqcr_set(low_p,
						  QM_SDQCR_TYPE_ACTIVE |
						  QM_SDQCR_CHANNELS_DEDICATED);
			else
				qm_dqrr_sdqcr_set(low_p,
						  QM_SDQCR_TYPE_ACTIVE |
						  QM_SDQCR_CHANNELS_POOL_CONV
						  (channel));
			while (!found_fqrn) {
				/* Keep draining DQRR while checking the MR */
				qm_dqrr_pvb_update(low_p);
				dqrr = qm_dqrr_current(low_p);
				while (dqrr) {
					qm_dqrr_cdc_consume_1ptr(
						low_p, dqrr, 0);
					qm_dqrr_pvb_update(low_p);
					qm_dqrr_next(low_p);
					dqrr = qm_dqrr_current(low_p);
				}
				/* Process message ring too */
				qm_mr_pvb_update(low_p);
				msg = qm_mr_current(low_p);
				while (msg) {
					if ((msg->verb &
					     QM_MR_VERB_TYPE_MASK)
					    == QM_MR_VERB_FQRN)
						found_fqrn = 1;
					qm_mr_cci_consume_to_current(low_p);
					qm_mr_pvb_update(low_p);
					msg = qm_mr_current(low_p);
				}
				cpu_relax();
			}
		}
		if (result != QM_MCR_RESULT_OK &&
		    result != QM_MCR_RESULT_PENDING) {
			/* error */
			pr_err("qman_retire_fq failed on FQ 0x%x,"
			       " result=0x%x\n", fqid, result);
			return -1;
		}
		if (!(mcr->alterfq.fqs & QM_MCR_FQS_ORLPRESENT)) {
			/* ORL had no entries, no need to wait until the
			 * ERNs come in.
			 */
			orl_empty = 1;
		}
		/* Retirement succeeded, check to see if FQ needs
		 * to be drained.
		 */
		if (drain || mcr->alterfq.fqs & QM_MCR_FQS_NOTEMPTY) {
			/* FQ is Not Empty, drain using volatile DQ commands */
			fq_empty = 0;
			do {
				const struct qm_dqrr_entry *dqrr = NULL;
				u32 vdqcr = fqid | QM_VDQCR_NUMFRAMES_SET(3);

				qm_dqrr_vdqcr_set(low_p, vdqcr);

				/* Wait for a dequeue to occur */
				while (dqrr == NULL) {
					qm_dqrr_pvb_update(low_p);
					dqrr = qm_dqrr_current(low_p);
					if (!dqrr)
						cpu_relax();
				}
				/* Process the dequeues, making sure to
				 * empty the ring completely.
				 */
				while (dqrr) {
					if (dqrr->fqid == fqid &&
					    dqrr->stat & QM_DQRR_STAT_FQ_EMPTY)
						fq_empty = 1;
					qm_dqrr_cdc_consume_1ptr(low_p,
								 dqrr, 0);
					qm_dqrr_pvb_update(low_p);
					qm_dqrr_next(low_p);
					dqrr = qm_dqrr_current(low_p);
				}
			} while (fq_empty == 0);
		}
		qm_dqrr_sdqcr_set(low_p, 0);

		/* Wait for the ORL to have been completely drained */
		while (orl_empty == 0) {
			const struct qm_mr_entry *msg;

			qm_mr_pvb_update(low_p);
			msg = qm_mr_current(low_p);
			while (msg) {
				if ((msg->verb & QM_MR_VERB_TYPE_MASK) ==
				    QM_MR_VERB_FQRL)
					orl_empty = 1;
				qm_mr_cci_consume_to_current(low_p);
				qm_mr_pvb_update(low_p);
				msg = qm_mr_current(low_p);
			}
			cpu_relax();
		}
		mcc = qm_mc_start(low_p);
		mcc->alterfq.fqid = cpu_to_be32(fqid);
		qm_mc_commit(low_p, QM_MCC_VERB_ALTER_OOS);
		while (!(mcr = qm_mc_result(low_p)))
			cpu_relax();
		DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) ==
			    QM_MCR_VERB_ALTER_OOS);
		if (mcr->result != QM_MCR_RESULT_OK) {
			pr_err(
			"OOS after drain Failed on FQID 0x%x, result 0x%x\n",
			       fqid, mcr->result);
			return -EIO;
		}
		return 0;
	case QM_MCR_NP_STATE_RETIRED:
		/* Send OOS Command */
		mcc = qm_mc_start(low_p);
		mcc->alterfq.fqid = cpu_to_be32(fqid);
		qm_mc_commit(low_p, QM_MCC_VERB_ALTER_OOS);
		while (!(mcr = qm_mc_result(low_p)))
			cpu_relax();
		DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) ==
			    QM_MCR_VERB_ALTER_OOS);
		if (mcr->result) {
			pr_err("OOS Failed on FQID 0x%x\n", fqid);