 * This file is provided under a dual BSD/GPLv2 license. When using or
 * redistributing this file, you may do so under either license.
 * Copyright 2008-2012 Freescale Semiconductor, Inc.
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *   * Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *   * Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in the
 *     documentation and/or other materials provided with the distribution.
 *   * Neither the name of the above-listed copyright holders nor the
 *     names of any contributors may be used to endorse or promote products
 *     derived from this software without specific prior written permission.
 * ALTERNATIVELY, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") as published by the Free Software
 * Foundation, either version 2 of that License or (at your option) any
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
#include <dpaa_rbtree.h>
/* FQ lookups (turn this on for 64bit user-space) */
#if (__WORDSIZE == 64)
#define CONFIG_FSL_QMAN_FQ_LOOKUP
/* if FQ lookups are supported, this controls the number of initialised,
 * s/w-consumed FQs that can be supported at any one time.
#define CONFIG_FSL_QMAN_FQ_LOOKUP_MAX (32 * 1024)
/* Last updated for v00.800 of the BG */
/* Hardware constants */
#define QM_CHANNEL_SWPORTAL0 0
#define QMAN_CHANNEL_POOL1 0x21
#define QMAN_CHANNEL_CAAM 0x80
#define QMAN_CHANNEL_PME 0xa0
#define QMAN_CHANNEL_POOL1_REV3 0x401
#define QMAN_CHANNEL_CAAM_REV3 0x840
#define QMAN_CHANNEL_PME_REV3 0x860
extern u16 qm_channel_pool1;
extern u16 qm_channel_caam;
extern u16 qm_channel_pme;
	qm_dc_portal_fman0 = 0,
	qm_dc_portal_fman1 = 1,
	qm_dc_portal_caam = 2,
/* Portal processing (interrupt) sources */
#define QM_PIRQ_CCSCI 0x00200000 /* CEETM Congestion State Change */
#define QM_PIRQ_CSCI 0x00100000 /* Congestion State Change */
#define QM_PIRQ_EQCI 0x00080000 /* Enqueue Command Committed */
#define QM_PIRQ_EQRI 0x00040000 /* EQCR Ring (below threshold) */
#define QM_PIRQ_DQRI 0x00020000 /* DQRR Ring (non-empty) */
#define QM_PIRQ_MRI 0x00010000 /* MR Ring (non-empty) */
 * This mask contains all the interrupt sources that need handling except DQRI,
 * ie. that if present should trigger slow-path processing.
#define QM_PIRQ_SLOW (QM_PIRQ_CSCI | QM_PIRQ_EQCI | QM_PIRQ_EQRI | \
		      QM_PIRQ_MRI | QM_PIRQ_CCSCI)
/* For qman_static_dequeue_*** APIs */
#define QM_SDQCR_CHANNELS_POOL_MASK 0x00007fff
#define QM_SDQCR_CHANNELS_POOL(n) (0x00008000 >> (n))
/* for conversion from n of qm_channel */
static inline u32 QM_SDQCR_CHANNELS_POOL_CONV(u16 channel)
{
	return QM_SDQCR_CHANNELS_POOL(channel + 1 - qm_channel_pool1);
}
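/*
 * Example (an illustrative sketch, not from the original header): converting
 * a pool channel to its SDQCR bit and enabling static dequeue from it on the
 * affine portal; see qman_static_dequeue_add() further down.
 *
 *     u16 chan = qm_channel_pool1; // eg. the first pool channel
 *     qman_static_dequeue_add(QM_SDQCR_CHANNELS_POOL_CONV(chan));
 */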
/* For qman_volatile_dequeue(); Choose one PRECEDENCE. EXACT is optional. Use
 * NUMFRAMES(n) (6-bit) or NUMFRAMES_TILLEMPTY to fill in the frame-count. Use
 * FQID(n) to fill in the frame queue ID.
#define QM_VDQCR_PRECEDENCE_VDQCR 0x0
#define QM_VDQCR_PRECEDENCE_SDQCR 0x80000000
#define QM_VDQCR_EXACT 0x40000000
#define QM_VDQCR_NUMFRAMES_MASK 0x3f000000
#define QM_VDQCR_NUMFRAMES_SET(n) (((n) & 0x3f) << 24)
#define QM_VDQCR_NUMFRAMES_GET(n) (((n) >> 24) & 0x3f)
#define QM_VDQCR_NUMFRAMES_TILLEMPTY QM_VDQCR_NUMFRAMES_SET(0)
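/*
 * Example (illustrative only): composing a VDQCR value that selects VDQCR
 * precedence and requests exactly 8 frames; qman_volatile_dequeue() further
 * down consumes such a value.
 *
 *     u32 vdqcr = QM_VDQCR_PRECEDENCE_VDQCR |
 *                 QM_VDQCR_EXACT |
 *                 QM_VDQCR_NUMFRAMES_SET(8);
 */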
/* --- QMan data structures (and associated constants) --- */
/* Represents s/w corenet portal mapped data structures */
struct qm_eqcr_entry; /* EQCR (EnQueue Command Ring) entries */
struct qm_dqrr_entry; /* DQRR (DeQueue Response Ring) entries */
struct qm_mr_entry; /* MR (Message Ring) entries */
struct qm_mc_command; /* MC (Management Command) command */
struct qm_mc_result; /* MC result */
#define QM_FD_FORMAT_SG 0x4
#define QM_FD_FORMAT_LONG 0x2
#define QM_FD_FORMAT_COMPOUND 0x1
 * 'contig' implies a contiguous buffer, whereas 'sg' implies a
 * scatter-gather table. 'big' implies a 29-bit length with no offset
 * field, otherwise length is 20-bit and offset is 9-bit. 'compound'
 * implies a s/g-like table, where each entry itself represents a frame
 * (contiguous or scatter-gather) and the 29-bit "length" is
 * interpreted purely for congestion calculations, ie. a "congestion
	qm_fd_contig_big = QM_FD_FORMAT_LONG,
	qm_fd_sg = QM_FD_FORMAT_SG,
	qm_fd_sg_big = QM_FD_FORMAT_SG | QM_FD_FORMAT_LONG,
	qm_fd_compound = QM_FD_FORMAT_COMPOUND
/* Capitalised versions are un-typed but can be used in static expressions */
#define QM_FD_CONTIG 0
#define QM_FD_CONTIG_BIG QM_FD_FORMAT_LONG
#define QM_FD_SG QM_FD_FORMAT_SG
#define QM_FD_SG_BIG (QM_FD_FORMAT_SG | QM_FD_FORMAT_LONG)
#define QM_FD_COMPOUND QM_FD_FORMAT_COMPOUND
/* "Frame Descriptor (FD)" */
#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
	u8 dd:2; /* dynamic debug */
	u8 bpid:8; /* Buffer Pool ID */
	u8 addr_hi; /* high 8-bits of 40-bit address */
	u32 addr_lo; /* low 32-bits of 40-bit address */
	u8 dd:2; /* dynamic debug */
	u8 bpid:8; /* Buffer Pool ID */
	u8 addr_hi; /* high 8-bits of 40-bit address */
	u32 addr_lo; /* low 32-bits of 40-bit address */
/* More efficient address accessor */
/* The 'format' field indicates the interpretation of the remaining 29
 * bits of the 32-bit word. For packing reasons, it is duplicated in the
 * other union elements. Note, union'd structs are difficult to use with
 * static initialisation under gcc, in which case use the "opaque" form
 * with one of the macros.
/* For easier/faster copying of this part of the fd (eg. from a
 * DQRR entry to an EQCR entry) copy 'opaque'
/* If 'format' is _contig or _sg, 20b length and 9b offset */
#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
	enum qm_fd_format format:3;
	enum qm_fd_format format:3;
/* If 'format' is _contig_big or _sg_big, 29b length */
#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
	enum qm_fd_format _format1:3;
	enum qm_fd_format _format1:3;
/* If 'format' is _compound, 29b "congestion weight" */
#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
	enum qm_fd_format _format2:3;
	enum qm_fd_format _format2:3;
} __attribute__((aligned(8)));
#define QM_FD_DD_NULL 0x00
#define QM_FD_PID_MASK 0x3f
static inline u64 qm_fd_addr_get64(const struct qm_fd *fd)
{
	return fd->addr;
}
static inline dma_addr_t qm_fd_addr(const struct qm_fd *fd)
{
	return (dma_addr_t)fd->addr;
}
/* Macro, so we compile better if 'v' isn't always 64-bit */
#define qm_fd_addr_set64(fd, v) \
	struct qm_fd *__fd931 = (fd); \
/* Scatter/Gather table entry */
#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
	u8 addr_hi; /* high 8-bits of 40-bit address */
	u32 addr_lo; /* low 32-bits of 40-bit address */
	u32 addr_lo; /* low 32-bits of 40-bit address */
	u8 addr_hi; /* high 8-bits of 40-bit address */
#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
	u32 extension:1; /* Extension bit */
	u32 final:1; /* Final bit */
	u32 final:1; /* Final bit */
	u32 extension:1; /* Extension bit */
#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
static inline u64 qm_sg_entry_get64(const struct qm_sg_entry *sg)
{
	return sg->addr;
}
static inline dma_addr_t qm_sg_addr(const struct qm_sg_entry *sg)
{
	return (dma_addr_t)sg->addr;
}
/* Macro, so we compile better if 'v' isn't always 64-bit */
#define qm_sg_entry_set64(sg, v) \
	struct qm_sg_entry *__sg931 = (sg); \
/* See 1.5.8.1: "Enqueue Command" */
struct qm_eqcr_entry {
	u8 __dont_write_directly__verb;
	u32 orp; /* 24-bit */
	u32 fqid; /* 24-bit */
/* "Frame Dequeue Response" */
struct qm_dqrr_entry {
	u16 seqnum; /* 15-bit */
	u32 fqid; /* 24-bit */
#define QM_DQRR_VERB_VBIT 0x80
#define QM_DQRR_VERB_MASK 0x7f /* where the verb contains; */
#define QM_DQRR_VERB_FRAME_DEQUEUE 0x60 /* "this format" */
#define QM_DQRR_STAT_FQ_EMPTY 0x80 /* FQ empty */
#define QM_DQRR_STAT_FQ_HELDACTIVE 0x40 /* FQ held active */
#define QM_DQRR_STAT_FQ_FORCEELIGIBLE 0x20 /* FQ was force-eligible'd */
#define QM_DQRR_STAT_FD_VALID 0x10 /* has a non-NULL FD */
#define QM_DQRR_STAT_UNSCHEDULED 0x02 /* Unscheduled dequeue */
#define QM_DQRR_STAT_DQCR_EXPIRED 0x01 /* VDQCR or PDQCR expired */
/* "ERN Message Response" */
/* "FQ State Change Notification" */
	u8 rc; /* Rejection Code */
	u32 fqid; /* 24-bit */
#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
	u8 colour:2; /* See QM_MR_DCERN_COLOUR_* */
	enum qm_dc_portal portal:3;
	enum qm_dc_portal portal:3;
	u8 colour:2; /* See QM_MR_DCERN_COLOUR_* */
	u8 rc; /* Rejection Code */
	u32 fqid; /* 24-bit */
	u8 fqs; /* Frame Queue Status */
	u32 fqid; /* 24-bit */
	} __packed fq; /* FQRN/FQRNI/FQRL/FQPN */
#define QM_MR_VERB_VBIT 0x80
 * ERNs originating from direct-connect portals ("dcern") use 0x20 as a verb
 * which would be invalid as a s/w enqueue verb. A s/w ERN can be distinguished
 * from the other MR types by noting if the 0x20 bit is unset.
#define QM_MR_VERB_TYPE_MASK 0x27
#define QM_MR_VERB_DC_ERN 0x20
#define QM_MR_VERB_FQRN 0x21
#define QM_MR_VERB_FQRNI 0x22
#define QM_MR_VERB_FQRL 0x23
#define QM_MR_VERB_FQPN 0x24
#define QM_MR_RC_MASK 0xf0 /* contains one of; */
#define QM_MR_RC_CGR_TAILDROP 0x00
#define QM_MR_RC_WRED 0x10
#define QM_MR_RC_ERROR 0x20
#define QM_MR_RC_ORPWINDOW_EARLY 0x30
#define QM_MR_RC_ORPWINDOW_LATE 0x40
#define QM_MR_RC_FQ_TAILDROP 0x50
#define QM_MR_RC_ORPWINDOW_RETIRED 0x60
#define QM_MR_RC_ORP_ZERO 0x70
#define QM_MR_FQS_ORLPRESENT 0x02 /* ORL fragments to come */
#define QM_MR_FQS_NOTEMPTY 0x01 /* FQ has enqueued frames */
#define QM_MR_DCERN_COLOUR_GREEN 0x00
#define QM_MR_DCERN_COLOUR_YELLOW 0x01
#define QM_MR_DCERN_COLOUR_RED 0x02
#define QM_MR_DCERN_COLOUR_OVERRIDE 0x03
 * An identical structure of FQD fields is present in the "Init FQ" command and
 * the "Query FQ" result; it's suctioned out into the "struct qm_fqd" type.
 * Within that, the 'stashing' and 'taildrop' pieces are also factored out; the
 * latter has two inlines to assist with converting to/from the mant+exp
struct qm_fqd_stashing {
	/* See QM_STASHING_EXCL_<...> */
#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
	/* Numbers of cachelines */
struct qm_fqd_taildrop {
#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
	/* "Overhead Accounting Control", see QM_OAC_<...> */
#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
	u8 oac:2; /* "Overhead Accounting Control" */
	u8 oac:2; /* "Overhead Accounting Control" */
	/* Two's-complement value (-128 to +127) */
	signed char oal; /* "Overhead Accounting Length" */
#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
	u16 fq_ctrl; /* See QM_FQCTRL_<...> */
#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
	u16 channel:13; /* qm_channel */
	u16 channel:13; /* qm_channel */
#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
	 * For "Initialize Frame Queue" commands, the write-enable mask
	 * determines whether 'td' or 'oac_init' is observed. For query
	 * commands, this field is always 'td', and 'oac_query' (below) reflects
	 * the Overhead ACcounting values.
	struct qm_fqd_taildrop td;
	struct qm_fqd_oac oac_init;
	/* Treat it as 64-bit opaque */
#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
	/* Treat it as s/w portal stashing config */
	/* see "FQD Context_A field used for [...]" */
#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
	struct qm_fqd_stashing stashing;
	 * 48-bit address of FQ context to
	 * stash, must be cacheline-aligned
	struct qm_fqd_stashing stashing;
	struct qm_fqd_oac oac_query;
/* 64-bit converters for context_hi/lo */
static inline u64 qm_fqd_stashing_get64(const struct qm_fqd *fqd)
{
	return ((u64)fqd->context_a.context_hi << 32) |
		(u64)fqd->context_a.context_lo;
}
static inline dma_addr_t qm_fqd_stashing_addr(const struct qm_fqd *fqd)
{
	return (dma_addr_t)qm_fqd_stashing_get64(fqd);
}
static inline u64 qm_fqd_context_a_get64(const struct qm_fqd *fqd)
{
	return ((u64)fqd->context_a.hi << 32) |
		(u64)fqd->context_a.lo;
}
static inline void qm_fqd_stashing_set64(struct qm_fqd *fqd, u64 addr)
{
	fqd->context_a.context_hi = upper_32_bits(addr);
	fqd->context_a.context_lo = lower_32_bits(addr);
}
static inline void qm_fqd_context_a_set64(struct qm_fqd *fqd, u64 addr)
{
	fqd->context_a.hi = upper_32_bits(addr);
	fqd->context_a.lo = lower_32_bits(addr);
}
/* convert a threshold value into mant+exp representation */
static inline int qm_fqd_taildrop_set(struct qm_fqd_taildrop *td, u32 val,
	if (val > 0xe0000000)
	if (roundup && oddbit)
/* and the other direction */
static inline u32 qm_fqd_taildrop_get(const struct qm_fqd_taildrop *td)
{
	return (u32)td->mant << td->exp;
}
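/*
 * Example (an illustrative sketch, not from the original header):
 * round-tripping a 64KB tail-drop threshold through the mant+exp encoding.
 * Assumes the elided third parameter of qm_fqd_taildrop_set() is the
 * 'roundup' flag used in its body above and that zero indicates success;
 * the stored value may be coarser than the request.
 *
 *     struct qm_fqd_taildrop td;
 *     if (!qm_fqd_taildrop_set(&td, 64 * 1024, 1))
 *         printf("threshold = %u\n", qm_fqd_taildrop_get(&td));
 */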
/* See "Frame Queue Descriptor (FQD)" */
/* Frame Queue Descriptor (FQD) field 'fq_ctrl' uses these constants */
#define QM_FQCTRL_MASK 0x07ff /* 'fq_ctrl' flags; */
#define QM_FQCTRL_CGE 0x0400 /* Congestion Group Enable */
#define QM_FQCTRL_TDE 0x0200 /* Tail-Drop Enable */
#define QM_FQCTRL_ORP 0x0100 /* ORP Enable */
#define QM_FQCTRL_CTXASTASHING 0x0080 /* Context-A stashing */
#define QM_FQCTRL_CPCSTASH 0x0040 /* CPC Stash Enable */
#define QM_FQCTRL_FORCESFDR 0x0008 /* High-priority SFDRs */
#define QM_FQCTRL_AVOIDBLOCK 0x0004 /* Don't block active */
#define QM_FQCTRL_HOLDACTIVE 0x0002 /* Hold active in portal */
#define QM_FQCTRL_PREFERINCACHE 0x0001 /* Aggressively cache FQD */
#define QM_FQCTRL_LOCKINCACHE QM_FQCTRL_PREFERINCACHE /* older naming */
/* See "FQD Context_A field used for [...]" */
/* Frame Queue Descriptor (FQD) field 'CONTEXT_A' uses these constants */
#define QM_STASHING_EXCL_ANNOTATION 0x04
#define QM_STASHING_EXCL_DATA 0x02
#define QM_STASHING_EXCL_CTX 0x01
/* See "Intra Class Scheduling" */
/* FQD field 'OAC' (Overhead ACcounting) uses these constants */
#define QM_OAC_ICS 0x2 /* Accounting for Intra-Class Scheduling */
#define QM_OAC_CG 0x1 /* Accounting for Congestion Groups */
 * This struct represents the 32-bit "WR_PARM_[GYR]" parameters in CGR fields
 * and associated commands/responses. The WRED parameters are calculated from
 * these fields as follows;
 *   MaxTH = MA * (2 ^ Mn)
 *   Slope = SA / (2 ^ Sn)
 *   MaxP = 4 * (Pn + 1)
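 * For example (an illustrative calculation): MA = 64 with Mn = 10 gives
 * MaxTH = 64 * 2^10 = 65536 bytes, and Pn = 7 gives MaxP = 4 * (7 + 1) = 32.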
struct qm_cgr_wr_parm {
#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
	u32 SA:7; /* must be between 64-127 */
	u32 SA:7; /* must be between 64-127 */
 * This struct represents the 13-bit "CS_THRES" CGR field. In the corresponding
 * management commands, this is padded to a 16-bit structure field, so that's
 * how we represent it here. The congestion state threshold is calculated from
 * these fields as follows;
 *   CS threshold = TA * (2 ^ Tn)
struct qm_cgr_cs_thres {
#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
 * This identical structure of CGR fields is present in the "Init/Modify CGR"
 * commands and the "Query CGR" result. It's suctioned out here into its own
	struct qm_cgr_wr_parm wr_parm_g;
	struct qm_cgr_wr_parm wr_parm_y;
	struct qm_cgr_wr_parm wr_parm_r;
	u8 wr_en_g; /* boolean, use QM_CGR_EN */
	u8 wr_en_y; /* boolean, use QM_CGR_EN */
	u8 wr_en_r; /* boolean, use QM_CGR_EN */
	u8 cscn_en; /* boolean, use QM_CGR_EN */
#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
	u16 cscn_targ_upd_ctrl; /* use QM_CSCN_TARG_UDP_ */
	u16 cscn_targ_dcp_low; /* CSCN_TARG_DCP low-16bits */
	u16 cscn_targ_dcp_low; /* CSCN_TARG_DCP low-16bits */
	u16 cscn_targ_upd_ctrl; /* use QM_CSCN_TARG_UDP_ */
	u32 cscn_targ; /* use QM_CGR_TARG_* */
	u8 cstd_en; /* boolean, use QM_CGR_EN */
	u8 cs; /* boolean, only used in query response */
	struct qm_cgr_cs_thres cs_thres;
	/* use qm_cgr_cs_thres_set64() */
	u8 mode; /* QMAN_CGR_MODE_FRAME not supported in rev1.0 */
#define QM_CGR_EN 0x01 /* For wr_en_*, cscn_en, cstd_en */
#define QM_CGR_TARG_UDP_CTRL_WRITE_BIT 0x8000 /* value written to portal bit */
#define QM_CGR_TARG_UDP_CTRL_DCP 0x4000 /* 0: SWP, 1: DCP */
#define QM_CGR_TARG_PORTAL(n) (0x80000000 >> (n)) /* s/w portal, 0-9 */
#define QM_CGR_TARG_FMAN0 0x00200000 /* direct-connect portal: fman0 */
#define QM_CGR_TARG_FMAN1 0x00100000 /*                      : fman1 */
/* Convert CGR thresholds to/from "cs_thres" format */
static inline u64 qm_cgr_cs_thres_get64(const struct qm_cgr_cs_thres *th)
{
	return (u64)th->TA << th->Tn;
}
static inline int qm_cgr_cs_thres_set64(struct qm_cgr_cs_thres *th, u64 val,
	if (roundup && oddbit)
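/*
 * Example (sketch, not from the original header): programming a ~1MB
 * congestion-state threshold, assuming the same 'roundup' flag and
 * zero-on-success convention as the taildrop converter above.
 *
 *     struct qm_cgr_cs_thres th;
 *     if (!qm_cgr_cs_thres_set64(&th, 1024 * 1024, 1))
 *         printf("CS threshold = %llu\n",
 *                (unsigned long long)qm_cgr_cs_thres_get64(&th));
 */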
/* See 1.5.8.5.1: "Initialize FQ" */
/* See 1.5.8.5.2: "Query FQ" */
/* See 1.5.8.5.3: "Query FQ Non-Programmable Fields" */
/* See 1.5.8.5.4: "Alter FQ State Commands" */
/* See 1.5.8.6.1: "Initialize/Modify CGR" */
/* See 1.5.8.6.2: "CGR Test Write" */
/* See 1.5.8.6.3: "Query CGR" */
/* See 1.5.8.6.4: "Query Congestion Group State" */
struct qm_mcc_initfq {
	u16 we_mask; /* Write Enable Mask */
	u32 fqid; /* 24-bit */
	u16 count; /* Initialises 'count+1' FQDs */
	struct qm_fqd fqd; /* the FQD fields go here */
struct qm_mcc_queryfq {
	u32 fqid; /* 24-bit */
struct qm_mcc_queryfq_np {
	u32 fqid; /* 24-bit */
struct qm_mcc_alterfq {
	u32 fqid; /* 24-bit */
	u8 count; /* number of consecutive FQID */
	u32 context_b; /* frame queue context b */
struct qm_mcc_initcgr {
	u16 we_mask; /* Write Enable Mask */
	struct __qm_mc_cgr cgr; /* CGR fields */
struct qm_mcc_cgrtestwrite {
	u8 i_bcnt_hi:8; /* high 8-bits of 40-bit "Instant" */
	u32 i_bcnt_lo; /* low 32-bits of 40-bit */
struct qm_mcc_querycgr {
struct qm_mcc_querycongestion {
struct qm_mcc_querywq {
	/* select channel if verb != QUERYWQ_DEDICATED */
	u16 channel_wq; /* ignores wq (3 lsbits) */
#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
	u16 id:13; /* qm_channel */
	u16 id:13; /* qm_channel */
struct qm_mc_command {
	u8 __dont_write_directly__verb;
	struct qm_mcc_initfq initfq;
	struct qm_mcc_queryfq queryfq;
	struct qm_mcc_queryfq_np queryfq_np;
	struct qm_mcc_alterfq alterfq;
	struct qm_mcc_initcgr initcgr;
	struct qm_mcc_cgrtestwrite cgrtestwrite;
	struct qm_mcc_querycgr querycgr;
	struct qm_mcc_querycongestion querycongestion;
	struct qm_mcc_querywq querywq;
/* INITFQ-specific flags */
#define QM_INITFQ_WE_MASK 0x01ff /* 'Write Enable' flags; */
#define QM_INITFQ_WE_OAC 0x0100
#define QM_INITFQ_WE_ORPC 0x0080
#define QM_INITFQ_WE_CGID 0x0040
#define QM_INITFQ_WE_FQCTRL 0x0020
#define QM_INITFQ_WE_DESTWQ 0x0010
#define QM_INITFQ_WE_ICSCRED 0x0008
#define QM_INITFQ_WE_TDTHRESH 0x0004
#define QM_INITFQ_WE_CONTEXTB 0x0002
#define QM_INITFQ_WE_CONTEXTA 0x0001
/* INITCGR/MODIFYCGR-specific flags */
#define QM_CGR_WE_MASK 0x07ff /* 'Write Enable Mask'; */
#define QM_CGR_WE_WR_PARM_G 0x0400
#define QM_CGR_WE_WR_PARM_Y 0x0200
#define QM_CGR_WE_WR_PARM_R 0x0100
#define QM_CGR_WE_WR_EN_G 0x0080
#define QM_CGR_WE_WR_EN_Y 0x0040
#define QM_CGR_WE_WR_EN_R 0x0020
#define QM_CGR_WE_CSCN_EN 0x0010
#define QM_CGR_WE_CSCN_TARG 0x0008
#define QM_CGR_WE_CSTD_EN 0x0004
#define QM_CGR_WE_CS_THRES 0x0002
#define QM_CGR_WE_MODE 0x0001
struct qm_mcr_initfq {
struct qm_mcr_queryfq {
	struct qm_fqd fqd; /* the FQD fields are here */
struct qm_mcr_queryfq_np {
	u8 state; /* QM_MCR_NP_STATE_*** */
#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
	u16 ra1_sfdr; /* QM_MCR_NP_RA1_*** */
	u16 ra2_sfdr; /* QM_MCR_NP_RA2_*** */
	u16 od1_sfdr; /* QM_MCR_NP_OD1_*** */
	u16 od2_sfdr; /* QM_MCR_NP_OD2_*** */
	u16 od3_sfdr; /* QM_MCR_NP_OD3_*** */
	u16 ra1_sfdr; /* QM_MCR_NP_RA1_*** */
	u16 ra2_sfdr; /* QM_MCR_NP_RA2_*** */
	u16 od1_sfdr; /* QM_MCR_NP_OD1_*** */
	u16 od2_sfdr; /* QM_MCR_NP_OD2_*** */
	u16 od3_sfdr; /* QM_MCR_NP_OD3_*** */
struct qm_mcr_alterfq {
	u8 fqs; /* Frame Queue Status */
struct qm_mcr_initcgr {
struct qm_mcr_cgrtestwrite {
	struct __qm_mc_cgr cgr; /* CGR fields */
	u32 i_bcnt_hi:8; /* high 8-bits of 40-bit "Instant" */
	u32 i_bcnt_lo; /* low 32-bits of 40-bit */
	u32 a_bcnt_hi:8; /* high 8-bits of 40-bit "Average" */
	u32 a_bcnt_lo; /* low 32-bits of 40-bit */
	u16 lgt; /* Last Group Tick */
struct qm_mcr_querycgr {
	struct __qm_mc_cgr cgr; /* CGR fields */
#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
	u32 i_bcnt_hi:8; /* high 8-bits of 40-bit "Instant" */
	u32 i_bcnt_lo; /* low 32-bits of 40-bit */
	u32 i_bcnt_lo; /* low 32-bits of 40-bit */
	u32 i_bcnt_hi:8; /* high 8-bits of 40-bit "Instant" */
#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
	u32 a_bcnt_hi:8; /* high 8-bits of 40-bit "Average" */
	u32 a_bcnt_lo; /* low 32-bits of 40-bit */
	u32 a_bcnt_lo; /* low 32-bits of 40-bit */
	u32 a_bcnt_hi:8; /* high 8-bits of 40-bit "Average" */
	u32 cscn_targ_swp[4];
struct __qm_mcr_querycongestion {
struct qm_mcr_querycongestion {
	/* Access this struct using QM_MCR_QUERYCONGESTION() */
	struct __qm_mcr_querycongestion state;
struct qm_mcr_querywq {
	u16 channel_wq; /* ignores wq (3 lsbits) */
#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
	u16 id:13; /* qm_channel */
	u16 id:13; /* qm_channel */
struct qm_mc_result {
	struct qm_mcr_initfq initfq;
	struct qm_mcr_queryfq queryfq;
	struct qm_mcr_queryfq_np queryfq_np;
	struct qm_mcr_alterfq alterfq;
	struct qm_mcr_initcgr initcgr;
	struct qm_mcr_cgrtestwrite cgrtestwrite;
	struct qm_mcr_querycgr querycgr;
	struct qm_mcr_querycongestion querycongestion;
	struct qm_mcr_querywq querywq;
#define QM_MCR_VERB_RRID 0x80
#define QM_MCR_VERB_MASK QM_MCC_VERB_MASK
#define QM_MCR_VERB_INITFQ_PARKED QM_MCC_VERB_INITFQ_PARKED
#define QM_MCR_VERB_INITFQ_SCHED QM_MCC_VERB_INITFQ_SCHED
#define QM_MCR_VERB_QUERYFQ QM_MCC_VERB_QUERYFQ
#define QM_MCR_VERB_QUERYFQ_NP QM_MCC_VERB_QUERYFQ_NP
#define QM_MCR_VERB_QUERYWQ QM_MCC_VERB_QUERYWQ
#define QM_MCR_VERB_QUERYWQ_DEDICATED QM_MCC_VERB_QUERYWQ_DEDICATED
#define QM_MCR_VERB_ALTER_SCHED QM_MCC_VERB_ALTER_SCHED
#define QM_MCR_VERB_ALTER_FE QM_MCC_VERB_ALTER_FE
#define QM_MCR_VERB_ALTER_RETIRE QM_MCC_VERB_ALTER_RETIRE
#define QM_MCR_VERB_ALTER_OOS QM_MCC_VERB_ALTER_OOS
#define QM_MCR_RESULT_NULL 0x00
#define QM_MCR_RESULT_OK 0xf0
#define QM_MCR_RESULT_ERR_FQID 0xf1
#define QM_MCR_RESULT_ERR_FQSTATE 0xf2
#define QM_MCR_RESULT_ERR_NOTEMPTY 0xf3 /* OOS fails if FQ is !empty */
#define QM_MCR_RESULT_ERR_BADCHANNEL 0xf4
#define QM_MCR_RESULT_PENDING 0xf8
#define QM_MCR_RESULT_ERR_BADCOMMAND 0xff
#define QM_MCR_NP_STATE_FE 0x10
#define QM_MCR_NP_STATE_R 0x08
#define QM_MCR_NP_STATE_MASK 0x07 /* Reads FQD::STATE; */
#define QM_MCR_NP_STATE_OOS 0x00
#define QM_MCR_NP_STATE_RETIRED 0x01
#define QM_MCR_NP_STATE_TEN_SCHED 0x02
#define QM_MCR_NP_STATE_TRU_SCHED 0x03
#define QM_MCR_NP_STATE_PARKED 0x04
#define QM_MCR_NP_STATE_ACTIVE 0x05
#define QM_MCR_NP_PTR_MASK 0x07ff /* for RA[12] & OD[123] */
#define QM_MCR_NP_RA1_NRA(v) (((v) >> 14) & 0x3) /* FQD::NRA */
#define QM_MCR_NP_RA2_IT(v) (((v) >> 14) & 0x1) /* FQD::IT */
#define QM_MCR_NP_OD1_NOD(v) (((v) >> 14) & 0x3) /* FQD::NOD */
#define QM_MCR_NP_OD3_NPC(v) (((v) >> 14) & 0x3) /* FQD::NPC */
#define QM_MCR_FQS_ORLPRESENT 0x02 /* ORL fragments to come */
#define QM_MCR_FQS_NOTEMPTY 0x01 /* FQ has enqueued frames */
/* This extracts the state for congestion group 'n' from a query response.
 *   struct qm_mc_result *res = [...];
 *   printf("congestion group %d congestion state: %d\n", cgr,
 *          QM_MCR_QUERYCONGESTION(&res->querycongestion.state, cgr));
#define __CGR_WORD(num) (num >> 5)
#define __CGR_SHIFT(num) (num & 0x1f)
#define __CGR_NUM (sizeof(struct __qm_mcr_querycongestion) << 3)
static inline int QM_MCR_QUERYCONGESTION(struct __qm_mcr_querycongestion *p,
	return p->state[__CGR_WORD(cgr)] & (0x80000000 >> __CGR_SHIFT(cgr));
/* Portal and Frame Queues */
/* Represents a managed portal */
 * This object type represents QMan frame queue descriptors (FQD); it is
 * cacheline-aligned, and initialised by qman_create_fq(). The structure is
 * defined further down.
 * This object type represents a QMan congestion group; it is defined further
 * This enum, and the callback type that returns it, are used when handling
 * dequeued frames via DQRR. Note that for "null" callbacks registered with the
 * portal object (for handling dequeues that do not demux because context_b is
 * NULL), the return value *MUST* be qman_cb_dqrr_consume.
enum qman_cb_dqrr_result {
	/* DQRR entry can be consumed */
	qman_cb_dqrr_consume,
	/* Like _consume, but requests parking - FQ must be held-active */
	/* Does not consume, for DCA mode only. This allows out-of-order
	 * consumes by explicit calls to qman_dca() and/or the use of implicit
	 * DCA via EQCR entries.
	/* Stop processing without consuming this ring entry. Exits the current
	 * qman_p_poll_dqrr() or interrupt-handling, as appropriate. If within
	 * an interrupt handler, the callback would typically call
	 * qman_irqsource_remove(QM_PIRQ_DQRI) before returning this value,
	 * otherwise the interrupt will reassert immediately.
	/* Like qman_cb_dqrr_stop, but consumes the current entry. */
	qman_cb_dqrr_consume_stop
typedef enum qman_cb_dqrr_result (*qman_cb_dqrr)(struct qman_portal *qm,
					const struct qm_dqrr_entry *dqrr);
 * This callback type is used when handling ERNs, FQRNs and FQRLs via MR. They
 * are always consumed after the callback returns.
typedef void (*qman_cb_mr)(struct qman_portal *qm, struct qman_fq *fq,
			const struct qm_mr_entry *msg);
/* This callback type is used when handling DCP ERNs */
typedef void (*qman_cb_dc_ern)(struct qman_portal *qm,
			const struct qm_mr_entry *msg);
 * s/w-visible states. Ie. tentatively scheduled + truly scheduled + active +
 * held-active + held-suspended are just "sched". Things like "retired" will not
 * be assumed until it is complete (ie. QMAN_FQ_STATE_CHANGING is set until
 * then, to indicate it's completing and to gate attempts to retry the retire
 * command). Note, park commands do not set QMAN_FQ_STATE_CHANGING because it's
 * technically impossible in the case of enqueue DCAs (which refer to DQRR ring
 * index rather than the FQ that ring entry corresponds to), so repeated park
 * commands are allowed (if you're silly enough to try) but won't change FQ
 * state, and the resulting park notifications move FQs from "sched" to
enum qman_fq_state {
	qman_fq_state_parked,
	qman_fq_state_sched,
	qman_fq_state_retired
 * Frame queue objects (struct qman_fq) are stored within memory passed to
 * qman_create_fq(), as this allows stashing of caller-provided demux callback
 * pointers at no extra cost to stashing of (driver-internal) FQ state. If the
 * caller wishes to add per-FQ state and have it benefit from dequeue-stashing,
 * (a) extend the qman_fq structure with their state; eg.
 *     // myfq is allocated and driver_fq callbacks filled in;
 *         struct qman_fq base;
 *         int an_extra_field;
 *         [ ... add other fields to be associated with each FQ ...]
 *     } *myfq = some_my_fq_allocator();
 *     struct qman_fq *fq = qman_create_fq(fqid, flags, &myfq->base);
 *     // in a dequeue callback, access extra fields from 'fq' via a cast;
 *     struct my_fq *myfq = (struct my_fq *)fq;
 *     do_something_with(myfq->an_extra_field);
 * (b) when and if configuring the FQ for context stashing, specify how ever
 * many cachelines are required to stash 'struct my_fq', to accelerate not
 * only the QMan driver but the callback as well.
	qman_cb_dqrr dqrr; /* for dequeued frames */
	qman_cb_mr ern; /* for s/w ERNs */
	qman_cb_mr fqs; /* frame-queue state changes */
	/* Caller of qman_create_fq() provides these demux callbacks */
	struct qman_fq_cb cb;
	 * These are internal to the driver, don't touch. In particular, they
	 * may change, be removed, or extended (so you shouldn't rely on
	 * sizeof(qman_fq) being a constant).
	/* DPDK Interface */
	volatile unsigned long flags;
	enum qman_fq_state state;
	struct rb_node node;
#ifdef CONFIG_FSL_QMAN_FQ_LOOKUP
 * This callback type is used when handling congestion group entry/exit.
 * 'congested' is non-zero on congestion-entry, and zero on congestion-exit.
typedef void (*qman_cb_cgr)(struct qman_portal *qm,
			struct qman_cgr *cgr, int congested);
	/* Set these prior to qman_create_cgr() */
	u32 cgrid; /* 0..255, but u32 to allow specials like -1, 256, etc. */
	/* These are private to the driver */
	u16 chan; /* portal channel this object is created on */
	struct list_head node;
/* Flags to qman_create_fq() */
#define QMAN_FQ_FLAG_NO_ENQUEUE 0x00000001 /* can't enqueue */
#define QMAN_FQ_FLAG_NO_MODIFY 0x00000002 /* can only enqueue */
#define QMAN_FQ_FLAG_TO_DCPORTAL 0x00000004 /* consumed by CAAM/PME/Fman */
#define QMAN_FQ_FLAG_LOCKED 0x00000008 /* multi-core locking */
#define QMAN_FQ_FLAG_AS_IS 0x00000010 /* query h/w state */
#define QMAN_FQ_FLAG_DYNAMIC_FQID 0x00000020 /* (de)allocate fqid */
/* Flags to qman_destroy_fq() */
#define QMAN_FQ_DESTROY_PARKED 0x00000001 /* FQ can be parked or OOS */
/* Flags from qman_fq_state() */
#define QMAN_FQ_STATE_CHANGING 0x80000000 /* 'state' is changing */
#define QMAN_FQ_STATE_NE 0x40000000 /* retired FQ isn't empty */
#define QMAN_FQ_STATE_ORL 0x20000000 /* retired FQ has ORL */
#define QMAN_FQ_STATE_BLOCKOOS 0xe0000000 /* if any are set, no OOS */
#define QMAN_FQ_STATE_CGR_EN 0x10000000 /* CGR enabled */
#define QMAN_FQ_STATE_VDQCR 0x08000000 /* being volatile dequeued */
/* Flags to qman_init_fq() */
#define QMAN_INITFQ_FLAG_SCHED 0x00000001 /* schedule rather than park */
#define QMAN_INITFQ_FLAG_LOCAL 0x00000004 /* set dest portal */
/* Flags to qman_enqueue(). NB, the strange numbering is to align with hardware,
 * bit-wise. (NB: the PME API is sensitive to these precise numberings too, so
 * any change here should be audited in PME.)
#define QMAN_ENQUEUE_FLAG_WATCH_CGR 0x00080000 /* watch congestion state */
#define QMAN_ENQUEUE_FLAG_DCA 0x00008000 /* perform enqueue-DCA */
#define QMAN_ENQUEUE_FLAG_DCA_PARK 0x00004000 /* If DCA, requests park */
#define QMAN_ENQUEUE_FLAG_DCA_PTR(p) /* If DCA, p is DQRR entry */ \
		(((u32)(p) << 2) & 0x00000f00)
#define QMAN_ENQUEUE_FLAG_C_GREEN 0x00000000 /* choose one C_*** flag */
#define QMAN_ENQUEUE_FLAG_C_YELLOW 0x00000008
#define QMAN_ENQUEUE_FLAG_C_RED 0x00000010
#define QMAN_ENQUEUE_FLAG_C_OVERRIDE 0x00000018
/* For the ORP-specific qman_enqueue_orp() variant;
 * - this flag indicates "Not Last In Sequence", ie. all but the final fragment
#define QMAN_ENQUEUE_FLAG_NLIS 0x01000000
/* - this flag performs no enqueue but fills in an ORP sequence number that
 * would otherwise block it (eg. if a frame has been dropped).
#define QMAN_ENQUEUE_FLAG_HOLE 0x02000000
/* - this flag performs no enqueue but advances NESN to the given sequence
#define QMAN_ENQUEUE_FLAG_NESN 0x04000000
/* Flags to qman_modify_cgr() */
#define QMAN_CGR_FLAG_USE_INIT 0x00000001
#define QMAN_CGR_MODE_FRAME 0x00000001
 * qman_get_portal_index - get portal configuration index
int qman_get_portal_index(void);
 * qman_affine_channel - return the channel ID of a portal
 * @cpu: the cpu whose affine portal is the subject of the query
 * If @cpu is -1, the affine portal for the current CPU will be used. It is a
 * bug to call this function for any value of @cpu (other than -1) that is not a
 * member of the cpu mask.
u16 qman_affine_channel(int cpu);
 * qman_set_vdq - Issue a volatile dequeue command
 * @fq: Frame Queue on which the volatile dequeue command is issued
 * @num: Number of Frames requested for volatile dequeue
 * This function will issue a volatile dequeue command to the QMAN.
int qman_set_vdq(struct qman_fq *fq, u16 num);
 * qman_dequeue - Get the DQRR entry after volatile dequeue command
 * @fq: Frame Queue on which the volatile dequeue command is issued
 * This function will return the DQRR entry after a volatile dequeue command
 * is issued. It returns NULL when no (further) packet is available on the FQ.
struct qm_dqrr_entry *qman_dequeue(struct qman_fq *fq);
 * qman_dqrr_consume - Consume the DQRR entry after volatile dequeue
 * @fq: Frame Queue on which the volatile dequeue command is issued
 * @dq: DQRR entry to consume. This is the one which is provided by the
 *    'qman_dequeue' command.
 * This will consume the DQRR entry and make it available for the next volatile
void qman_dqrr_consume(struct qman_fq *fq,
			struct qm_dqrr_entry *dq);
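/*
 * Example (an illustrative sketch, not from the original header): draining
 * up to 4 frames from a FQ with the qman_set_vdq()/qman_dequeue()/
 * qman_dqrr_consume() trio above. Assumes qman_set_vdq() returns zero on
 * success; a real loop may need to retry while frames are still in flight.
 *
 *     struct qm_dqrr_entry *dq;
 *     if (!qman_set_vdq(fq, 4)) {
 *         while ((dq = qman_dequeue(fq)) != NULL)
 *             qman_dqrr_consume(fq, dq);
 *     }
 */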
 * qman_poll_dqrr - process DQRR (fast-path) entries
 * @limit: the maximum number of DQRR entries to process
 * Use of this function requires that DQRR processing not be interrupt-driven.
 * Ie. the value returned by qman_irqsource_get() should not include
 * QM_PIRQ_DQRI. If the current CPU is sharing a portal hosted on another CPU,
 * this function will return -EINVAL, otherwise the return value is >=0 and
 * represents the number of DQRR entries processed.
int qman_poll_dqrr(unsigned int limit);
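/*
 * Example (illustrative only): a run-to-completion poll loop on the affine
 * portal, processing up to 16 fast-path entries per pass and letting
 * qman_poll() (below) attend to slow-path work.
 *
 *     for (;;) {
 *         int n = qman_poll_dqrr(16);
 *         if (n < 0)
 *             break; // portal is hosted on another CPU
 *         qman_poll();
 *     }
 */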
 * Dispatcher logic on a cpu can use this to trigger any maintenance of the
 * affine portal. There are two classes of portal processing in question;
 * fast-path (which involves demuxing dequeue ring (DQRR) entries and tracking
 * enqueue ring (EQCR) consumption), and slow-path (which involves EQCR
 * thresholds, congestion state changes, etc). This function does whatever
 * processing is not triggered by interrupts.
 * Note, if DQRR and some slow-path processing are poll-driven (rather than
 * interrupt-driven) then this function uses a heuristic to determine how often
 * to run slow-path processing - as slow-path processing introduces at least a
 * minimum latency each time it is run, whereas fast-path (DQRR) processing is
 * close to zero-cost if there is no work to be done.
void qman_poll(void);
 * qman_stop_dequeues - Stop h/w dequeuing to the s/w portal
 * Disables DQRR processing of the portal. This is reference-counted, so
 * qman_start_dequeues() must be called as many times as qman_stop_dequeues() to
 * truly re-enable dequeuing.
void qman_stop_dequeues(void);
 * qman_start_dequeues - (Re)start h/w dequeuing to the s/w portal
 * Enables DQRR processing of the portal. This is reference-counted, so
 * qman_start_dequeues() must be called as many times as qman_stop_dequeues() to
 * truly re-enable dequeuing.
void qman_start_dequeues(void);
 * qman_static_dequeue_add - Add pool channels to the portal SDQCR
 * @pools: bit-mask of pool channels, using QM_SDQCR_CHANNELS_POOL(n)
 * Adds a set of pool channels to the portal's static dequeue command register
 * (SDQCR). The requested pools are limited to those the portal has dequeue
void qman_static_dequeue_add(u32 pools);
 * qman_static_dequeue_del - Remove pool channels from the portal SDQCR
 * @pools: bit-mask of pool channels, using QM_SDQCR_CHANNELS_POOL(n)
 * Removes a set of pool channels from the portal's static dequeue command
 * register (SDQCR). The requested pools are limited to those the portal has
 * dequeue access to.
void qman_static_dequeue_del(u32 pools);
 * qman_static_dequeue_get - return the portal's current SDQCR
 * Returns the portal's current static dequeue command register (SDQCR). The
 * entire register is returned, so if only the currently-enabled pool channels
 * are desired, mask the return value with QM_SDQCR_CHANNELS_POOL_MASK.
u32 qman_static_dequeue_get(void);
 * qman_dca - Perform a Discrete Consumption Acknowledgment
 * @dq: the DQRR entry to be consumed
 * @park_request: indicates whether the held-active @fq should be parked
 * Only allowed in DCA-mode portals, for DQRR entries whose handler callback had
 * previously returned 'qman_cb_dqrr_defer'. NB, as with the other APIs, this
 * does not take a 'portal' argument but implies the core affine portal from the
 * cpu that is currently executing the function. For reasons of locking, this
 * function must be called from the same CPU as that which processed the DQRR
 * entry in the first place.
void qman_dca(struct qm_dqrr_entry *dq, int park_request);
 * qman_eqcr_is_empty - Determine if portal's EQCR is empty
 * For use in situations where a cpu-affine caller needs to determine when all
 * enqueues for the local portal have been processed by Qman but can't use the
 * QMAN_ENQUEUE_FLAG_WAIT_SYNC flag to do this from the final qman_enqueue().
 * The function forces tracking of EQCR consumption (which normally doesn't
 * happen until enqueue processing needs to find space to put new enqueue
 * commands), and returns zero if the ring still has unprocessed entries,
 * non-zero if it is empty.
int qman_eqcr_is_empty(void);
 * qman_set_dc_ern - Set the handler for DCP enqueue rejection notifications
 * @handler: callback for processing DCP ERNs
 * @affine: whether this handler is specific to the locally affine portal
 * If a hardware block's interface to Qman (ie. its direct-connect portal, or
 * DCP) is configured not to receive enqueue rejections, then any enqueues
 * through that DCP that are rejected will be sent to a given software portal.
 * If @affine is non-zero, then this handler will only be used for DCP ERNs
 * received on the portal affine to the current CPU. If multiple CPUs share a
 * portal and they all call this function, they will be setting the handler for
 * the same portal! If @affine is zero, then this handler will be global to all
 * portals handled by this instance of the driver. Only those portals that do
 * not have their own affine handler will use the global handler.
void qman_set_dc_ern(qman_cb_dc_ern handler, int affine);
 * qman_create_fq - Allocates a FQ
 * @fqid: the index of the FQD to encapsulate, must be "Out of Service"
 * @flags: bit-mask of QMAN_FQ_FLAG_*** options
 * @fq: memory for storing the 'fq', with callbacks filled in
 * Creates a frame queue object for the given @fqid, unless the
 * QMAN_FQ_FLAG_DYNAMIC_FQID flag is set in @flags, in which case a FQID is
 * dynamically allocated (or the function fails if none are available). Once
 * created, the caller should not touch the memory at 'fq' except as extended to
 * adjacent memory for user-defined fields (see the definition of "struct
 * qman_fq" for more info). NO_MODIFY is only intended for enqueuing to
 * pre-existing frame-queues that aren't to be otherwise interfered with; it
 * prevents all other modifications to the frame queue. The TO_DCPORTAL flag
 * causes the driver to honour any contextB modifications requested in the
 * qman_init_fq() API, as this indicates the frame queue will be consumed by a
 * direct-connect portal (PME, CAAM, or Fman). When frame queues are consumed by
 * software portals, the contextB field is controlled by the driver and can't be
 * modified by the caller. If the AS_IS flag is specified, management commands
 * will be used on portal @p to query state for frame queue @fqid and construct
 * a frame queue object based on that, rather than assuming/requiring that it be
int qman_create_fq(u32 fqid, u32 flags, struct qman_fq *fq);
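/*
 * Example (an illustrative sketch): creating a dynamically-allocated FQ whose
 * dequeues are handled by 'my_dqrr_cb', a hypothetical callback of type
 * qman_cb_dqrr. Passing 0 for @fqid assumes its value is unused when the
 * DYNAMIC_FQID flag is set.
 *
 *     struct qman_fq myfq = {
 *         .cb.dqrr = my_dqrr_cb,
 *     };
 *     if (qman_create_fq(0, QMAN_FQ_FLAG_DYNAMIC_FQID, &myfq))
 *         handle_error(); // hypothetical error path
 */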
 * qman_destroy_fq - Deallocates a FQ
 * @fq: the frame queue object to release
 * @flags: bit-mask of QMAN_FQ_FREE_*** options
 * The memory for this frame queue object ('fq' provided in qman_create_fq()) is
 * not deallocated but the caller regains ownership, to do with as desired. The
 * FQ must be in the 'out-of-service' state unless the QMAN_FQ_FREE_PARKED flag
 * is specified, in which case it may also be in the 'parked' state.
void qman_destroy_fq(struct qman_fq *fq, u32 flags);
 * qman_fq_fqid - Queries the frame queue ID of a FQ object
 * @fq: the frame queue object to query
u32 qman_fq_fqid(struct qman_fq *fq);
 * qman_fq_state - Queries the state of a FQ object
 * @fq: the frame queue object to query
 * @state: pointer to state enum to return the FQ scheduling state
 * @flags: pointer to state flags to receive QMAN_FQ_STATE_*** bitmask
 * Queries the state of the FQ object, without performing any h/w commands.
 * This captures the state, as seen by the driver, at the time the function
void qman_fq_state(struct qman_fq *fq, enum qman_fq_state *state, u32 *flags);
 * qman_init_fq - Initialises FQ fields, leaves the FQ "parked" or "scheduled"
 * @fq: the frame queue object to modify, must be 'parked' or new.
 * @flags: bit-mask of QMAN_INITFQ_FLAG_*** options
 * @opts: the FQ-modification settings, as defined in the low-level API
 * The @opts parameter comes from the low-level portal API. Select
 * QMAN_INITFQ_FLAG_SCHED in @flags to cause the frame queue to be scheduled
 * rather than parked. NB, @opts can be NULL.
 * Note that some fields and options within @opts may be ignored or overwritten
 * 1. the 'count' and 'fqid' fields are always ignored (this operation only
 * affects one frame queue: @fq).
 * 2. the QM_INITFQ_WE_CONTEXTB option of the 'we_mask' field and the associated
 * 'fqd' structure's 'context_b' field are sometimes overwritten;
 * - if @fq was not created with QMAN_FQ_FLAG_TO_DCPORTAL, then context_b is
 *   initialised to a value used by the driver for demux.
 * - if context_b is initialised for demux, so is context_a in case stashing
 *   is requested (see item 4).
 * (So caller control of context_b is only possible for TO_DCPORTAL frame queue
 * 3. if @flags contains QMAN_INITFQ_FLAG_LOCAL, the 'fqd' structure's
 * 'dest::channel' field will be overwritten to match the portal used to issue
 * the command. If the WE_DESTWQ write-enable bit had already been set by the
 * caller, the channel workqueue will be left as-is, otherwise the write-enable
 * bit is set and the workqueue is set to a default of 4. If the "LOCAL" flag
 * isn't set, the destination channel/workqueue fields and the write-enable bit
 * 4. if the driver overwrites context_a/b for demux, then if
 * QM_INITFQ_WE_CONTEXTA is set, the driver will only overwrite
 * context_a.address fields and will leave the stashing fields provided by the
 * user alone, otherwise it will zero out the context_a.stashing fields.
int qman_init_fq(struct qman_fq *fq, u32 flags, struct qm_mcc_initfq *opts);
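/*
 * Example (a sketch, not from the original header): scheduling the FQ created
 * above onto a pool channel with tail-drop enabled. 'chan' is a hypothetical
 * pool-channel ID; the 'dest.wq' member name is an assumption (only
 * 'dest::channel' is referenced in the notes above).
 *
 *     struct qm_mcc_initfq opts;
 *     memset(&opts, 0, sizeof(opts));
 *     opts.we_mask = QM_INITFQ_WE_DESTWQ | QM_INITFQ_WE_FQCTRL |
 *                    QM_INITFQ_WE_TDTHRESH;
 *     opts.fqd.dest.channel = chan;
 *     opts.fqd.dest.wq = 3;
 *     opts.fqd.fq_ctrl = QM_FQCTRL_TDE;
 *     qm_fqd_taildrop_set(&opts.fqd.td, 64 * 1024, 1);
 *     if (qman_init_fq(&myfq, QMAN_INITFQ_FLAG_SCHED, &opts))
 *         handle_error();
 */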
 * qman_schedule_fq - Schedules a FQ
 * @fq: the frame queue object to schedule, must be 'parked'
 * Schedules the frame queue, which must be Parked, which takes it to
 * Tentatively-Scheduled or Truly-Scheduled depending on its fill-level.
int qman_schedule_fq(struct qman_fq *fq);
 * qman_retire_fq - Retires a FQ
 * @fq: the frame queue object to retire
 * @flags: FQ flags (as per qman_fq_state) if retirement completes immediately
 * Retires the frame queue. This returns zero if it succeeds immediately, +1 if
 * the retirement was started asynchronously, otherwise it returns negative for
 * failure. When this function returns zero, @flags is set to indicate whether
 * the retired FQ is empty and/or whether it has any ORL fragments (to show up
 * as ERNs). Otherwise the corresponding flags will be known when a subsequent
 * FQRN message shows up on the portal's message ring.
 * NB, if the retirement is asynchronous (the FQ was in the Truly Scheduled or
 * Active state), the completion will be via the message ring as a FQRN - but
 * the corresponding callback may occur before this function returns!! Ie. the
 * caller should be prepared to accept the callback as the function is called,
 * not only once it has returned.
int qman_retire_fq(struct qman_fq *fq, u32 *flags);
 * qman_oos_fq - Puts a FQ "out of service"
 * @fq: the frame queue object to be put out-of-service, must be 'retired'
 * The frame queue must be retired and empty, and if any order restoration list
 * was released as ERNs at the time of retirement, they must all be consumed.
int qman_oos_fq(struct qman_fq *fq);
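/*
 * Example (illustrative): tearing down a FQ. A +1 return means retirement
 * completes asynchronously via a FQRN message, so a real driver would wait
 * for the 'fqs' callback before going out-of-service.
 *
 *     u32 flags;
 *     int err = qman_retire_fq(&myfq, &flags);
 *     if (!err && !(flags & QMAN_FQ_STATE_NE)) {
 *         qman_oos_fq(&myfq);
 *         qman_destroy_fq(&myfq, 0);
 *     }
 */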
 * qman_fq_flow_control - Set the XON/XOFF state of a FQ
 * @fq: the frame queue object to be set to XON/XOFF state, must not be in the
 * 'oos', 'retired' or 'parked' state
 * @xon: boolean to set fq in XON or XOFF state
 * The frame queue should be in Tentatively Scheduled or Truly Scheduled state,
 * otherwise the IFSI interrupt will be asserted.
int qman_fq_flow_control(struct qman_fq *fq, int xon);
 * qman_query_fq - Queries FQD fields (via h/w query command)
 * @fq: the frame queue object to be queried
 * @fqd: storage for the queried FQD fields
int qman_query_fq(struct qman_fq *fq, struct qm_fqd *fqd);
 * qman_query_fq_has_pkts - Queries non-programmable FQD fields and returns '1'
 * if packets are in the frame queue. If there are no packets in the frame
 * queue, '0' is returned.
 * @fq: the frame queue object to be queried
int qman_query_fq_has_pkts(struct qman_fq *fq);
 * qman_query_fq_np - Queries non-programmable FQD fields
 * @fq: the frame queue object to be queried
 * @np: storage for the queried FQD fields
int qman_query_fq_np(struct qman_fq *fq, struct qm_mcr_queryfq_np *np);
 * qman_query_wq - Queries work queue lengths
 * @query_dedicated: If non-zero, query length of WQs in the channel dedicated
 *	to this software portal. Otherwise, query length of WQs in a
 *	channel specified in wq.
 * @wq: storage for the queried WQ lengths. Also specifies the channel
 *	to query if query_dedicated is zero.
int qman_query_wq(u8 query_dedicated, struct qm_mcr_querywq *wq);
 * qman_volatile_dequeue - Issue a volatile dequeue command
 * @fq: the frame queue object to dequeue from
 * @flags: a bit-mask of QMAN_VOLATILE_FLAG_*** options
 * @vdqcr: bit mask of QM_VDQCR_*** options, as per qm_dqrr_vdqcr_set()
 * Attempts to lock access to the portal's VDQCR volatile dequeue functionality.
 * The function will block and sleep if QMAN_VOLATILE_FLAG_WAIT is specified and
 * the VDQCR is already in use, otherwise returns non-zero for failure. If
 * QMAN_VOLATILE_FLAG_FINISH is specified, the function will only return once
 * the VDQCR command has finished executing (ie. once the callback for the last
 * DQRR entry resulting from the VDQCR command has been called). If not using
 * the FINISH flag, completion can be determined either by detecting the
 * presence of the QM_DQRR_STAT_UNSCHEDULED and QM_DQRR_STAT_DQCR_EXPIRED bits
 * in the "stat" field of the "struct qm_dqrr_entry" passed to the FQ's dequeue
 * callback, or by waiting for the QMAN_FQ_STATE_VDQCR bit to disappear from the
 * "flags" retrieved from qman_fq_state().
int qman_volatile_dequeue(struct qman_fq *fq, u32 flags, u32 vdqcr);
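/*
 * Example (sketch): a volatile dequeue of at most 8 frames, composing @vdqcr
 * with the QM_VDQCR_*** macros defined earlier and blocking until the
 * command completes.
 *
 *     u32 vdqcr = QM_VDQCR_PRECEDENCE_VDQCR | QM_VDQCR_NUMFRAMES_SET(8);
 *     if (qman_volatile_dequeue(fq, QMAN_VOLATILE_FLAG_WAIT |
 *                               QMAN_VOLATILE_FLAG_FINISH, vdqcr))
 *         handle_error();
 */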
 * qman_enqueue - Enqueue a frame to a frame queue
 * @fq: the frame queue object to enqueue to
 * @fd: a descriptor of the frame to be enqueued
 * @flags: bit-mask of QMAN_ENQUEUE_FLAG_*** options
 * Fills an entry in the EQCR of portal @qm to enqueue the frame described by
 * @fd. The descriptor details are copied from @fd to the EQCR entry; the 'pid'
 * field is ignored. The return value is non-zero on error, such as ring full
 * (and FLAG_WAIT not specified), congestion avoidance (FLAG_WATCH_CGR
 * specified), etc. If the ring is full and FLAG_WAIT is specified, this
 * function will block. If FLAG_INTERRUPT is set, the EQCI bit of the portal
 * interrupt will assert when Qman consumes the EQCR entry (subject to "status
 * disable", "enable", and "inhibit" registers). If FLAG_DCA is set, Qman will
 * perform an implied "discrete consumption acknowledgment" on the dequeue
 * ring's (DQRR) entry, at the ring index specified by the FLAG_DCA_IDX(x)
 * macro. (As an alternative to issuing explicit DCA actions on DQRR entries,
 * this implicit DCA can delay the release of a "held active" frame queue
 * corresponding to a DQRR entry until Qman consumes the EQCR entry - providing
 * order-preservation semantics in packet-forwarding scenarios.) If FLAG_DCA is
 * set, then FLAG_DCA_PARK can also be set to imply that the DQRR consumption
 * acknowledgment should "park request" the "held active" frame queue. Ie.
 * when the portal eventually releases that frame queue, it will be left in the
 * Parked state rather than Tentatively Scheduled or Truly Scheduled. If the
 * portal is watching congestion groups, the QMAN_ENQUEUE_FLAG_WATCH_CGR flag
 * is requested, and the FQ is a member of a congestion group, then this
 * function returns -EAGAIN if the congestion group is currently congested.
 * Note, this does not eliminate ERNs, as the async interface means we can be
 * sending enqueue commands to an un-congested FQ that becomes congested before
 * the enqueue commands are processed, but it does minimise needless thrashing
 * of an already busy hardware resource by throttling many of the to-be-dropped
 * enqueues "at the source".
int qman_enqueue(struct qman_fq *fq, const struct qm_fd *fd, u32 flags);
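/*
 * Example (an illustrative sketch): enqueuing one contiguous frame.
 * 'buf_iova' and 'len' are hypothetical, and the 'length20'/'offset' member
 * names are assumptions (those fields are elided from this excerpt of
 * struct qm_fd).
 *
 *     struct qm_fd fd;
 *     memset(&fd, 0, sizeof(fd));
 *     qm_fd_addr_set64(&fd, buf_iova);
 *     fd.format = qm_fd_contig;
 *     fd.length20 = len;
 *     fd.offset = 0;
 *     if (qman_enqueue(fq, &fd, 0))
 *         handle_error(); // eg. ring full and FLAG_WAIT not used
 */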
int qman_enqueue_multi(struct qman_fq *fq,
		       const struct qm_fd *fd,
		       int frames_to_send);
typedef int (*qman_cb_precommit)(void *arg);
 * qman_enqueue_orp - Enqueue a frame to a frame queue using an ORP
 * @fq: the frame queue object to enqueue to
 * @fd: a descriptor of the frame to be enqueued
 * @flags: bit-mask of QMAN_ENQUEUE_FLAG_*** options
 * @orp: the frame queue object used as an order restoration point.
 * @orp_seqnum: the sequence number of this frame in the order restoration path
 * Similar to qman_enqueue(), but with the addition of an Order Restoration
 * Point (@orp) and corresponding sequence number (@orp_seqnum) for this
 * enqueue operation to employ order restoration. Each frame queue object acts
 * as an Order Definition Point (ODP) by providing each frame dequeued from it
 * with an incrementing sequence number; this value is generally ignored unless
 * that sequence of dequeued frames will need order restoration later. Each
 * frame queue object also encapsulates an Order Restoration Point (ORP), which
 * is a re-assembly context for re-ordering frames relative to their sequence
 * numbers as they are enqueued. The ORP does not have to be within the frame
 * queue that receives the enqueued frame; in fact it is usually the frame
 * queue from which the frames were originally dequeued. For the purposes of
 * order restoration, multiple frames (or "fragments") can be enqueued for a
 * single sequence number by setting the QMAN_ENQUEUE_FLAG_NLIS flag for all
 * enqueues except the final fragment of a given sequence number. Ordering
 * between sequence numbers is guaranteed, even if fragments of different
 * sequence numbers are interlaced with one another. Fragments of the same
 * sequence number will retain the order in which they are enqueued. If no
 * enqueue is to be performed, QMAN_ENQUEUE_FLAG_HOLE indicates that the given
 * sequence number is to be "skipped" by the ORP logic (eg. if a frame has been
 * dropped from a sequence), or QMAN_ENQUEUE_FLAG_NESN indicates that the given
 * sequence number should become the ORP's "Next Expected Sequence Number".
 * Side note: a frame queue object can be used purely as an ORP, without
 * carrying any frames at all. Care should be taken not to deallocate a frame
 * queue object that is being actively used as an ORP, as a future allocation
 * of the frame queue object may start using the internal ORP before the
 * previous use has finished.
int qman_enqueue_orp(struct qman_fq *fq, const struct qm_fd *fd, u32 flags,
		     struct qman_fq *orp, u16 orp_seqnum);
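/*
 * Example (sketch): restoring order for a two-fragment sequence. Both calls
 * carry the sequence number assigned at the original dequeue; every fragment
 * except the last sets QMAN_ENQUEUE_FLAG_NLIS. Whether a NULL @fd is
 * accepted with FLAG_HOLE is an assumption.
 *
 *     qman_enqueue_orp(fq, &frag0, QMAN_ENQUEUE_FLAG_NLIS, orp_fq, seqnum);
 *     qman_enqueue_orp(fq, &frag1, 0, orp_fq, seqnum);
 *     // skip a dropped sequence number:
 *     qman_enqueue_orp(fq, NULL, QMAN_ENQUEUE_FLAG_HOLE, orp_fq, seqnum);
 */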
1757 * qman_alloc_fqid_range - Allocate a contiguous range of FQIDs
1758 * @result: is set by the API to the base FQID of the allocated range
1759 * @count: the number of FQIDs required
1760 * @align: required alignment of the allocated range
1761 * @partial: non-zero if the API can return fewer than @count FQIDs
1763 * Returns the number of frame queues allocated, or a negative error code. If
1764 * @partial is non zero, the allocation request may return a smaller range of
1765 * FQs than requested (though alignment will be as requested). If @partial is
1766 * zero, the return value will either be 'count' or negative.
1768 int qman_alloc_fqid_range(u32 *result, u32 count, u32 align, int partial);
1769 static inline int qman_alloc_fqid(u32 *result)
1771 int ret = qman_alloc_fqid_range(result, 1, 0, 0);
1773 return (ret > 0) ? 0 : ret;
1777 * qman_release_fqid_range - Release the specified range of frame queue IDs
1778 * @fqid: the base FQID of the range to deallocate
1779 * @count: the number of FQIDs in the range
1781 * This function can also be used to seed the allocator with ranges of FQIDs
1782 * that it can subsequently allocate from.
1784 void qman_release_fqid_range(u32 fqid, unsigned int count);
1785 static inline void qman_release_fqid(u32 fqid)
1787 qman_release_fqid_range(fqid, 1);
1790 void qman_seed_fqid_range(u32 fqid, unsigned int count);
1792 int qman_shutdown_fq(u32 fqid);
1795 * qman_reserve_fqid_range - Reserve the specified range of frame queue IDs
1796 * @fqid: the base FQID of the range to deallocate
1797 * @count: the number of FQIDs in the range
1799 int qman_reserve_fqid_range(u32 fqid, unsigned int count);
1800 static inline int qman_reserve_fqid(u32 fqid)
1802 return qman_reserve_fqid_range(fqid, 1);
1805 /* Pool-channel management */
1807 * qman_alloc_pool_range - Allocate a contiguous range of pool-channel IDs
1808 * @result: is set by the API to the base pool-channel ID of the allocated range
1809 * @count: the number of pool-channel IDs required
1810 * @align: required alignment of the allocated range
1811 * @partial: non-zero if the API can return fewer than @count
1813 * Returns the number of pool-channel IDs allocated, or a negative error code.
1814 * If @partial is non zero, the allocation request may return a smaller range of
1815 * than requested (though alignment will be as requested). If @partial is zero,
1816 * the return value will either be 'count' or negative.
1818 int qman_alloc_pool_range(u32 *result, u32 count, u32 align, int partial);
static inline int qman_alloc_pool(u32 *result)
{
	int ret = qman_alloc_pool_range(result, 1, 0, 0);

	return (ret > 0) ? 0 : ret;
}
/**
 * qman_release_pool_range - Release the specified range of pool-channel IDs
 * @id: the base pool-channel ID of the range to deallocate
 * @count: the number of pool-channel IDs in the range
 */
void qman_release_pool_range(u32 id, unsigned int count);
static inline void qman_release_pool(u32 id)
{
	qman_release_pool_range(id, 1);
}
/**
 * qman_reserve_pool_range - Reserve the specified range of pool-channel IDs
 * @id: the base pool-channel ID of the range to reserve
 * @count: the number of pool-channel IDs in the range
 */
int qman_reserve_pool_range(u32 id, unsigned int count);
static inline int qman_reserve_pool(u32 id)
{
	return qman_reserve_pool_range(id, 1);
}
void qman_seed_pool_range(u32 id, unsigned int count);
/* CGR management */
/* -------------- */
/**
 * qman_create_cgr - Register a congestion group object
 * @cgr: the 'cgr' object, with fields filled in
 * @flags: QMAN_CGR_FLAG_* values
 * @opts: optional state of CGR settings
 *
 * Registers this object to receive congestion entry/exit callbacks on the
 * portal affine to the cpu on which this API is executed. If @opts is NULL
 * then only the callback (cgr->cb) function is registered. If @flags contains
 * QMAN_CGR_FLAG_USE_INIT, then an init hw command (which will reset any
 * unspecified parameters) will be used rather than a modify hw command (which
 * only modifies the specified parameters).
 */
int qman_create_cgr(struct qman_cgr *cgr, u32 flags,
		    struct qm_mcc_initcgr *opts);
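
/*
 * Illustrative sketch: register a CGR with a callback only (NULL @opts). The
 * helper and callback names are hypothetical; 'cgrid' is assumed to have been
 * obtained via the CGR ID allocator declared later in this file. Passing
 * QMAN_CGR_FLAG_USE_INIT together with a populated @opts would instead reset
 * and initialise the hardware CGR state.
 */
static inline void example_cgr_cb(struct qman_portal *qm,
				  struct qman_cgr *cgr, int congested)
{
	/* React to congestion entry (congested != 0) or exit (congested == 0) */
}

static inline int example_register_cgr(struct qman_cgr *cgr, u32 cgrid)
{
	cgr->cgrid = cgrid;
	cgr->cb = example_cgr_cb;
	return qman_create_cgr(cgr, 0, NULL);
}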
/**
 * qman_create_cgr_to_dcp - Register a congestion group object to DCP portal
 * @cgr: the 'cgr' object, with fields filled in
 * @flags: QMAN_CGR_FLAG_* values
 * @dcp_portal: the DCP portal to which the cgr object is registered
 * @opts: optional state of CGR settings
 */
int qman_create_cgr_to_dcp(struct qman_cgr *cgr, u32 flags, u16 dcp_portal,
			   struct qm_mcc_initcgr *opts);
/**
 * qman_delete_cgr - Deregisters a congestion group object
 * @cgr: the 'cgr' object to deregister
 *
 * "Unplugs" this CGR object from the portal affine to the cpu on which this API
 * is executed. This must be executed on the same affine portal on which it was
 * created.
 */
int qman_delete_cgr(struct qman_cgr *cgr);
/**
 * qman_modify_cgr - Modify CGR fields
 * @cgr: the 'cgr' object to modify
 * @flags: QMAN_CGR_FLAG_* values
 * @opts: the CGR-modification settings
 *
 * The @opts parameter comes from the low-level portal API, and can be NULL.
 * Note that some fields and options within @opts may be ignored or overwritten
 * by the driver, in particular the 'cgrid' field is ignored (this operation
 * only affects the given CGR object). If @flags contains
 * QMAN_CGR_FLAG_USE_INIT, then an init hw command (which will reset any
 * unspecified parameters) will be used rather than a modify hw command (which
 * only modifies the specified parameters).
 */
int qman_modify_cgr(struct qman_cgr *cgr, u32 flags,
		    struct qm_mcc_initcgr *opts);
/**
 * qman_query_cgr - Queries CGR fields
 * @cgr: the 'cgr' object to query
 * @result: storage for the queried congestion group record
 */
int qman_query_cgr(struct qman_cgr *cgr, struct qm_mcr_querycgr *result);
/**
 * qman_query_congestion - Queries the state of all congestion groups
 * @congestion: storage for the queried state of all congestion groups
 */
int qman_query_congestion(struct qm_mcr_querycongestion *congestion);
/**
 * qman_alloc_cgrid_range - Allocate a contiguous range of CGR IDs
 * @result: is set by the API to the base CGR ID of the allocated range
 * @count: the number of CGR IDs required
 * @align: required alignment of the allocated range
 * @partial: non-zero if the API can return fewer than @count CGR IDs
 *
 * Returns the number of CGR IDs allocated, or a negative error code.
 * If @partial is non-zero, the allocation request may return a smaller range
 * than requested (though alignment will be as requested). If @partial is zero,
 * the return value will either be 'count' or negative.
 */
int qman_alloc_cgrid_range(u32 *result, u32 count, u32 align, int partial);
static inline int qman_alloc_cgrid(u32 *result)
{
	int ret = qman_alloc_cgrid_range(result, 1, 0, 0);

	return (ret > 0) ? 0 : ret;
}
/**
 * qman_release_cgrid_range - Release the specified range of CGR IDs
 * @id: the base CGR ID of the range to deallocate
 * @count: the number of CGR IDs in the range
 */
void qman_release_cgrid_range(u32 id, unsigned int count);
static inline void qman_release_cgrid(u32 id)
{
	qman_release_cgrid_range(id, 1);
}
/**
 * qman_reserve_cgrid_range - Reserve the specified range of CGR IDs
 * @id: the base CGR ID of the range to reserve
 * @count: the number of CGR IDs in the range
 */
int qman_reserve_cgrid_range(u32 id, unsigned int count);
static inline int qman_reserve_cgrid(u32 id)
{
	return qman_reserve_cgrid_range(id, 1);
}
void qman_seed_cgrid_range(u32 id, unsigned int count);
/**
 * qman_poll_fq_for_init - Check if an FQ has been initialised from OOS
 * @fq: the FQ object whose FQID will be initialised by other s/w
 *
 * In many situations, an FQID is provided for communication between s/w
 * entities, and whilst the consumer is responsible for initialising and
 * scheduling the FQ, the producer(s) generally create a wrapper FQ object and
 * only call qman_enqueue() on it (no FQ initialisation, scheduling, etc). Ie;
 *     qman_create_fq(..., QMAN_FQ_FLAG_NO_MODIFY, ...);
 * However, data cannot be enqueued to the FQ until it is initialised out of
 * the OOS state - this function polls for that condition. It is particularly
 * useful for users of IPC functions - each endpoint's Rx FQ is the other
 * endpoint's Tx FQ, so each side can initialise and schedule their Rx FQ object
 * and then use this API on the (NO_MODIFY) Tx FQ object in order to
 * synchronise. The function returns zero for success, +1 if the FQ is still in
 * the OOS state, or negative if there was an error.
 */
static inline int qman_poll_fq_for_init(struct qman_fq *fq)
{
	struct qm_mcr_queryfq_np np;
	int err;

	err = qman_query_fq_np(fq, &np);
	if (err)
		return err;
	if ((np.state & QM_MCR_NP_STATE_MASK) == QM_MCR_NP_STATE_OOS)
		return 1;
	return 0;
}
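
/*
 * Illustrative sketch: block until the peer endpoint initialises our
 * (NO_MODIFY) Tx FQ out of the OOS state. The helper name is hypothetical; a
 * real caller would back off or sleep between polls rather than spin.
 */
static inline int example_wait_for_peer_fq(struct qman_fq *tx_fq)
{
	int ret;

	/* +1 means the FQ is still OOS; keep polling */
	while ((ret = qman_poll_fq_for_init(tx_fq)) == 1)
		;
	return ret; /* 0 on success, negative on error */
}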
#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
#define cpu_to_hw_sg(x) (x)
#define hw_sg_to_cpu(x) (x)
#else
#define cpu_to_hw_sg(x) __cpu_to_hw_sg(x)
#define hw_sg_to_cpu(x) __hw_sg_to_cpu(x)

/* Convert an S/G entry from CPU byte order to hardware (big-endian) order */
static inline void __cpu_to_hw_sg(struct qm_sg_entry *sgentry)
{
	sgentry->opaque = cpu_to_be64(sgentry->opaque);
	sgentry->val = cpu_to_be32(sgentry->val);
	sgentry->val_off = cpu_to_be16(sgentry->val_off);
}

/* Convert an S/G entry from hardware (big-endian) order to CPU byte order */
static inline void __hw_sg_to_cpu(struct qm_sg_entry *sgentry)
{
	sgentry->opaque = be64_to_cpu(sgentry->opaque);
	sgentry->val = be32_to_cpu(sgentry->val);
	sgentry->val_off = be16_to_cpu(sgentry->val_off);
}
#endif
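
/*
 * Illustrative sketch: QMan reads S/G tables in big-endian layout, so each
 * entry filled in CPU byte order must be converted before the table is handed
 * to hardware (the conversion is a no-op on big-endian CPUs). The helper name
 * is hypothetical.
 */
static inline void example_sg_table_to_hw(struct qm_sg_entry *tbl, int n)
{
	int i;

	for (i = 0; i < n; i++)
		cpu_to_hw_sg(&tbl[i]);
}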
#endif /* __FSL_QMAN_H */