 * This file is provided under a dual BSD/GPLv2 license. When using or
 * redistributing this file, you may do so under either license.
 * Copyright 2008-2012 Freescale Semiconductor, Inc.
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 * * Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * * Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * * Neither the name of the above-listed copyright holders nor the
 *   names of any contributors may be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 * ALTERNATIVELY, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") as published by the Free Software
 * Foundation, either version 2 of that License or (at your option) any
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
#include <dpaa_rbtree.h>

/* Last updated for v00.800 of the BG */

/* Hardware constants */
#define QM_CHANNEL_SWPORTAL0 0
#define QMAN_CHANNEL_POOL1 0x21
#define QMAN_CHANNEL_CAAM 0x80
#define QMAN_CHANNEL_PME 0xa0
#define QMAN_CHANNEL_POOL1_REV3 0x401
#define QMAN_CHANNEL_CAAM_REV3 0x840
#define QMAN_CHANNEL_PME_REV3 0x860
extern u16 qm_channel_pool1;
extern u16 qm_channel_caam;
extern u16 qm_channel_pme;

	qm_dc_portal_fman0 = 0,
	qm_dc_portal_fman1 = 1,
	qm_dc_portal_caam = 2,

/* Portal processing (interrupt) sources */
#define QM_PIRQ_CCSCI 0x00200000 /* CEETM Congestion State Change */
#define QM_PIRQ_CSCI 0x00100000 /* Congestion State Change */
#define QM_PIRQ_EQCI 0x00080000 /* Enqueue Command Committed */
#define QM_PIRQ_EQRI 0x00040000 /* EQCR Ring (below threshold) */
#define QM_PIRQ_DQRI 0x00020000 /* DQRR Ring (non-empty) */
#define QM_PIRQ_MRI 0x00010000 /* MR Ring (non-empty) */
 * This mask contains all the interrupt sources that need handling except DQRI,
 * i.e. sources that, if present, should trigger slow-path processing.
#define QM_PIRQ_SLOW (QM_PIRQ_CSCI | QM_PIRQ_EQCI | QM_PIRQ_EQRI | \
			QM_PIRQ_MRI | QM_PIRQ_CCSCI)
/* For qman_static_dequeue_*** APIs */
#define QM_SDQCR_CHANNELS_POOL_MASK 0x00007fff
#define QM_SDQCR_CHANNELS_POOL(n) (0x00008000 >> (n))
/* for conversion from n of qm_channel */
static inline u32 QM_SDQCR_CHANNELS_POOL_CONV(u16 channel)
	return QM_SDQCR_CHANNELS_POOL(channel + 1 - qm_channel_pool1);
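/*
 * Usage sketch (illustrative, not part of the original header): building an
 * SDQCR pool-channel mask from a channel number. A channel of
 * (qm_channel_pool1 + 2) converts to pool index 3, so the line below is
 * equivalent to QM_SDQCR_CHANNELS_POOL(3).
 *
 *	u32 pools = QM_SDQCR_CHANNELS_POOL_CONV(qm_channel_pool1 + 2);
 */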
/* For qman_volatile_dequeue(); Choose one PRECEDENCE. EXACT is optional. Use
 * NUMFRAMES(n) (6-bit) or NUMFRAMES_TILLEMPTY to fill in the frame-count. Use
 * FQID(n) to fill in the frame queue ID.
#define QM_VDQCR_PRECEDENCE_VDQCR 0x0
#define QM_VDQCR_PRECEDENCE_SDQCR 0x80000000
#define QM_VDQCR_EXACT 0x40000000
#define QM_VDQCR_NUMFRAMES_MASK 0x3f000000
#define QM_VDQCR_NUMFRAMES_SET(n) (((n) & 0x3f) << 24)
#define QM_VDQCR_NUMFRAMES_GET(n) (((n) >> 24) & 0x3f)
#define QM_VDQCR_NUMFRAMES_TILLEMPTY QM_VDQCR_NUMFRAMES_SET(0)
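/*
 * Usage sketch (illustrative, not part of the original header): a volatile
 * dequeue command word for exactly three frames, taking precedence over any
 * active SDQCR. The frame queue ID bits would be OR'd in via the FQID(n)
 * helper mentioned above (its definition is not shown in this excerpt).
 *
 *	u32 vdqcr = QM_VDQCR_PRECEDENCE_VDQCR |
 *		    QM_VDQCR_EXACT |
 *		    QM_VDQCR_NUMFRAMES_SET(3);
 */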
/* --- QMan data structures (and associated constants) --- */

/* Represents s/w corenet portal mapped data structures */
struct qm_eqcr_entry; /* EQCR (EnQueue Command Ring) entries */
struct qm_dqrr_entry; /* DQRR (DeQueue Response Ring) entries */
struct qm_mr_entry; /* MR (Message Ring) entries */
struct qm_mc_command; /* MC (Management Command) command */
struct qm_mc_result; /* MC result */

#define QM_FD_FORMAT_SG 0x4
#define QM_FD_FORMAT_LONG 0x2
#define QM_FD_FORMAT_COMPOUND 0x1
	 * 'contig' implies a contiguous buffer, whereas 'sg' implies a
	 * scatter-gather table. 'big' implies a 29-bit length with no offset
	 * field, otherwise length is 20-bit and offset is 9-bit. 'compound'
	 * implies a s/g-like table, where each entry itself represents a frame
	 * (contiguous or scatter-gather) and the 29-bit "length" is
	 * interpreted purely for congestion calculations, i.e. a "congestion
	 * weight".
	qm_fd_contig_big = QM_FD_FORMAT_LONG,
	qm_fd_sg = QM_FD_FORMAT_SG,
	qm_fd_sg_big = QM_FD_FORMAT_SG | QM_FD_FORMAT_LONG,
	qm_fd_compound = QM_FD_FORMAT_COMPOUND

/* Capitalised versions are un-typed but can be used in static expressions */
#define QM_FD_CONTIG 0
#define QM_FD_CONTIG_BIG QM_FD_FORMAT_LONG
#define QM_FD_SG QM_FD_FORMAT_SG
#define QM_FD_SG_BIG (QM_FD_FORMAT_SG | QM_FD_FORMAT_LONG)
#define QM_FD_COMPOUND QM_FD_FORMAT_COMPOUND
141 /* "Frame Descriptor (FD)" */
145 #if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
146 u8 dd:2; /* dynamic debug */
148 u8 bpid:8; /* Buffer Pool ID */
151 u8 addr_hi; /* high 8-bits of 40-bit address */
152 u32 addr_lo; /* low 32-bits of 40-bit address */
155 u8 dd:2; /* dynamic debug */
156 u8 bpid:8; /* Buffer Pool ID */
159 u8 addr_hi; /* high 8-bits of 40-bit address */
160 u32 addr_lo; /* low 32-bits of 40-bit address */
165 /* More efficient address accessor */
170 /* The 'format' field indicates the interpretation of the remaining 29
171 * bits of the 32-bit word. For packing reasons, it is duplicated in the
172 * other union elements. Note, union'd structs are difficult to use with
173 * static initialisation under gcc, in which case use the "opaque" form
174 * with one of the macros.
177 /* For easier/faster copying of this part of the fd (eg. from a
178 * DQRR entry to an EQCR entry) copy 'opaque'
181 /* If 'format' is _contig or _sg, 20b length and 9b offset */
183 #if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
184 enum qm_fd_format format:3;
190 enum qm_fd_format format:3;
193 /* If 'format' is _contig_big or _sg_big, 29b length */
195 #if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
196 enum qm_fd_format _format1:3;
200 enum qm_fd_format _format1:3;
203 /* If 'format' is _compound, 29b "congestion weight" */
205 #if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
206 enum qm_fd_format _format2:3;
210 enum qm_fd_format _format2:3;
218 } __attribute__((aligned(8)));
219 #define QM_FD_DD_NULL 0x00
220 #define QM_FD_PID_MASK 0x3f
221 static inline u64 qm_fd_addr_get64(const struct qm_fd *fd)
226 static inline dma_addr_t qm_fd_addr(const struct qm_fd *fd)
228 return (dma_addr_t)fd->addr;
231 /* Macro, so we compile better if 'v' isn't always 64-bit */
232 #define qm_fd_addr_set64(fd, v) \
234 struct qm_fd *__fd931 = (fd); \
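/*
 * Example sketch (not part of the original header): populating a contiguous
 * FD. The 'format', 'offset' and 'length20' field names are assumptions based
 * on the comments above; their declarations are elided in this excerpt, as
 * are the 'buf_dma', 'headroom' and 'len' values used for illustration.
 *
 *	struct qm_fd fd = { 0 };
 *
 *	qm_fd_addr_set64(&fd, buf_dma);	// 40-bit buffer address
 *	fd.format = qm_fd_contig;	// 20-bit length + 9-bit offset form
 *	fd.offset = headroom;
 *	fd.length20 = len;
 */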
/* Scatter/Gather table entry */
#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
	u8 addr_hi; /* high 8-bits of 40-bit address */
	u32 addr_lo; /* low 32-bits of 40-bit address */
	u32 addr_lo; /* low 32-bits of 40-bit address */
	u8 addr_hi; /* high 8-bits of 40-bit address */
#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
	u32 extension:1; /* Extension bit */
	u32 final:1; /* Final bit */
	u32 final:1; /* Final bit */
	u32 extension:1; /* Extension bit */
#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
static inline u64 qm_sg_entry_get64(const struct qm_sg_entry *sg)
static inline dma_addr_t qm_sg_addr(const struct qm_sg_entry *sg)
	return (dma_addr_t)sg->addr;
/* Macro, so we compile better if 'v' isn't always 64-bit */
#define qm_sg_entry_set64(sg, v) \
	struct qm_sg_entry *__sg931 = (sg); \
/* See 1.5.8.1: "Enqueue Command" */
struct qm_eqcr_entry {
	u8 __dont_write_directly__verb;
	u32 orp; /* 24-bit */
	u32 fqid; /* 24-bit */

/* "Frame Dequeue Response" */
struct qm_dqrr_entry {
	u16 seqnum; /* 15-bit */
	u32 fqid; /* 24-bit */

#define QM_DQRR_VERB_VBIT 0x80
#define QM_DQRR_VERB_MASK 0x7f /* where the verb contains; */
#define QM_DQRR_VERB_FRAME_DEQUEUE 0x60 /* "this format" */
#define QM_DQRR_STAT_FQ_EMPTY 0x80 /* FQ empty */
#define QM_DQRR_STAT_FQ_HELDACTIVE 0x40 /* FQ held active */
#define QM_DQRR_STAT_FQ_FORCEELIGIBLE 0x20 /* FQ was force-eligible'd */
#define QM_DQRR_STAT_FD_VALID 0x10 /* has a non-NULL FD */
#define QM_DQRR_STAT_UNSCHEDULED 0x02 /* Unscheduled dequeue */
#define QM_DQRR_STAT_DQCR_EXPIRED 0x01 /* VDQCR or PDQCR expired */

/* "ERN Message Response" */
/* "FQ State Change Notification" */
	u8 rc; /* Rejection Code */
	u32 fqid; /* 24-bit */
#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
	u8 colour:2; /* See QM_MR_DCERN_COLOUR_* */
	enum qm_dc_portal portal:2;
	enum qm_dc_portal portal:3;
	u8 colour:2; /* See QM_MR_DCERN_COLOUR_* */
	u8 rc; /* Rejection Code */
	u32 fqid; /* 24-bit */
	u8 fqs; /* Frame Queue Status */
	u32 fqid; /* 24-bit */
	} __packed fq; /* FQRN/FQRNI/FQRL/FQPN */
#define QM_MR_VERB_VBIT 0x80
 * ERNs originating from direct-connect portals ("dcern") use 0x20 as a verb
 * which would be invalid as a s/w enqueue verb. A s/w ERN can be distinguished
 * from the other MR types by noting if the 0x20 bit is unset.
#define QM_MR_VERB_TYPE_MASK 0x27
#define QM_MR_VERB_DC_ERN 0x20
#define QM_MR_VERB_FQRN 0x21
#define QM_MR_VERB_FQRNI 0x22
#define QM_MR_VERB_FQRL 0x23
#define QM_MR_VERB_FQPN 0x24
#define QM_MR_RC_MASK 0xf0 /* contains one of; */
#define QM_MR_RC_CGR_TAILDROP 0x00
#define QM_MR_RC_WRED 0x10
#define QM_MR_RC_ERROR 0x20
#define QM_MR_RC_ORPWINDOW_EARLY 0x30
#define QM_MR_RC_ORPWINDOW_LATE 0x40
#define QM_MR_RC_FQ_TAILDROP 0x50
#define QM_MR_RC_ORPWINDOW_RETIRED 0x60
#define QM_MR_RC_ORP_ZERO 0x70
#define QM_MR_FQS_ORLPRESENT 0x02 /* ORL fragments to come */
#define QM_MR_FQS_NOTEMPTY 0x01 /* FQ has enqueued frames */
#define QM_MR_DCERN_COLOUR_GREEN 0x00
#define QM_MR_DCERN_COLOUR_YELLOW 0x01
#define QM_MR_DCERN_COLOUR_RED 0x02
#define QM_MR_DCERN_COLOUR_OVERRIDE 0x03
 * An identical structure of FQD fields is present in the "Init FQ" command and
 * the "Query FQ" result; it's suctioned out into the "struct qm_fqd" type.
 * Within that, the 'stashing' and 'taildrop' pieces are also factored out; the
 * latter has two inlines to assist with converting to/from the mant+exp
 * representation.
struct qm_fqd_stashing {
	/* See QM_STASHING_EXCL_<...> */
#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
	/* Numbers of cachelines */

struct qm_fqd_taildrop {
#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__

	/* "Overhead Accounting Control", see QM_OAC_<...> */
#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
	u8 oac:2; /* "Overhead Accounting Control" */
	u8 oac:2; /* "Overhead Accounting Control" */
	/* Two's-complement value (-128 to +127) */
	signed char oal; /* "Overhead Accounting Length" */
#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
	u16 fq_ctrl; /* See QM_FQCTRL_<...> */
#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
	u16 channel:13; /* qm_channel */
	u16 channel:13; /* qm_channel */
#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
	 * For "Initialize Frame Queue" commands, the write-enable mask
	 * determines whether 'td' or 'oac_init' is observed. For query
	 * commands, this field is always 'td', and 'oac_query' (below) reflects
	 * the Overhead ACcounting values.
	struct qm_fqd_taildrop td;
	struct qm_fqd_oac oac_init;
	/* Treat it as 64-bit opaque */
#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
	/* Treat it as s/w portal stashing config */
	/* see "FQD Context_A field used for [...]" */
#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
	struct qm_fqd_stashing stashing;
	 * 48-bit address of FQ context to
	 * stash, must be cacheline-aligned
	struct qm_fqd_stashing stashing;
	struct qm_fqd_oac oac_query;
/* 64-bit converters for context_hi/lo */
static inline u64 qm_fqd_stashing_get64(const struct qm_fqd *fqd)
	return ((u64)fqd->context_a.context_hi << 32) |
		(u64)fqd->context_a.context_lo;

static inline dma_addr_t qm_fqd_stashing_addr(const struct qm_fqd *fqd)
	return (dma_addr_t)qm_fqd_stashing_get64(fqd);

static inline u64 qm_fqd_context_a_get64(const struct qm_fqd *fqd)
	return ((u64)fqd->context_a.hi << 32) |
		(u64)fqd->context_a.lo;

static inline void qm_fqd_stashing_set64(struct qm_fqd *fqd, u64 addr)
	fqd->context_a.context_hi = upper_32_bits(addr);
	fqd->context_a.context_lo = lower_32_bits(addr);

static inline void qm_fqd_context_a_set64(struct qm_fqd *fqd, u64 addr)
	fqd->context_a.hi = upper_32_bits(addr);
	fqd->context_a.lo = lower_32_bits(addr);
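/*
 * Usage sketch (illustrative, not part of the original header): pointing
 * context_a at an FQ context to be stashed. 'fqd' is a struct qm_fqd being
 * prepared for an Init FQ command and 'ctx_dma' is an assumed,
 * cacheline-aligned DMA address (see the 48-bit address note above).
 *
 *	qm_fqd_stashing_set64(&fqd, ctx_dma);
 */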
/* convert a threshold value into mant+exp representation */
static inline int qm_fqd_taildrop_set(struct qm_fqd_taildrop *td, u32 val,
	if (val > 0xe0000000)
	if (roundup && oddbit)

/* and the other direction */
static inline u32 qm_fqd_taildrop_get(const struct qm_fqd_taildrop *td)
	return (u32)td->mant << td->exp;
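/*
 * Worked example (illustrative, not part of the original header, assuming the
 * usual 8-bit mantissa): requesting a threshold of 0x1234 via
 * qm_fqd_taildrop_set(&td, 0x1234, roundup) stores mant=0x91, exp=5 when
 * rounding down (0x91 << 5 == 0x1220) and mant=0x92, exp=5 when rounding up
 * (0x92 << 5 == 0x1240); qm_fqd_taildrop_get() then returns the quantised
 * value, not the original request.
 */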
/* See "Frame Queue Descriptor (FQD)" */
/* Frame Queue Descriptor (FQD) field 'fq_ctrl' uses these constants */
#define QM_FQCTRL_MASK 0x07ff /* 'fq_ctrl' flags; */
#define QM_FQCTRL_CGE 0x0400 /* Congestion Group Enable */
#define QM_FQCTRL_TDE 0x0200 /* Tail-Drop Enable */
#define QM_FQCTRL_ORP 0x0100 /* ORP Enable */
#define QM_FQCTRL_CTXASTASHING 0x0080 /* Context-A stashing */
#define QM_FQCTRL_CPCSTASH 0x0040 /* CPC Stash Enable */
#define QM_FQCTRL_FORCESFDR 0x0008 /* High-priority SFDRs */
#define QM_FQCTRL_AVOIDBLOCK 0x0004 /* Don't block active */
#define QM_FQCTRL_HOLDACTIVE 0x0002 /* Hold active in portal */
#define QM_FQCTRL_PREFERINCACHE 0x0001 /* Aggressively cache FQD */
#define QM_FQCTRL_LOCKINCACHE QM_FQCTRL_PREFERINCACHE /* older naming */

/* See "FQD Context_A field used for [...]" */
/* Frame Queue Descriptor (FQD) field 'CONTEXT_A' uses these constants */
#define QM_STASHING_EXCL_ANNOTATION 0x04
#define QM_STASHING_EXCL_DATA 0x02
#define QM_STASHING_EXCL_CTX 0x01

/* See "Intra Class Scheduling" */
/* FQD field 'OAC' (Overhead ACcounting) uses these constants */
#define QM_OAC_ICS 0x2 /* Accounting for Intra-Class Scheduling */
#define QM_OAC_CG 0x1 /* Accounting for Congestion Groups */
 * This struct represents the 32-bit "WR_PARM_[GYR]" parameters in CGR fields
 * and associated commands/responses. The WRED parameters are calculated from
 * these fields as follows;
 *   MaxTH = MA * (2 ^ Mn)
 *   Slope = SA / (2 ^ Sn)
 *   MaxP = 4 * (Pn + 1)
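 *
 * For illustration (example values, not from the header): MA = 64, Mn = 10
 * gives MaxTH = 64 * 2^10 = 65536, counted in bytes or frames depending on
 * the CGR accounting mode.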
struct qm_cgr_wr_parm {
#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
	u32 SA:7; /* must be between 64-127 */
	u32 SA:7; /* must be between 64-127 */

 * This struct represents the 13-bit "CS_THRES" CGR field. In the corresponding
 * management commands, this is padded to a 16-bit structure field, so that's
 * how we represent it here. The congestion state threshold is calculated from
 * these fields as follows;
 *   CS threshold = TA * (2 ^ Tn)
struct qm_cgr_cs_thres {
#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
 * This identical structure of CGR fields is present in the "Init/Modify CGR"
 * commands and the "Query CGR" result. It's suctioned out here into its own
 * struct.
	struct qm_cgr_wr_parm wr_parm_g;
	struct qm_cgr_wr_parm wr_parm_y;
	struct qm_cgr_wr_parm wr_parm_r;
	u8 wr_en_g; /* boolean, use QM_CGR_EN */
	u8 wr_en_y; /* boolean, use QM_CGR_EN */
	u8 wr_en_r; /* boolean, use QM_CGR_EN */
	u8 cscn_en; /* boolean, use QM_CGR_EN */
#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
	u16 cscn_targ_upd_ctrl; /* use QM_CSCN_TARG_UDP_ */
	u16 cscn_targ_dcp_low; /* CSCN_TARG_DCP low-16bits */
	u16 cscn_targ_dcp_low; /* CSCN_TARG_DCP low-16bits */
	u16 cscn_targ_upd_ctrl; /* use QM_CSCN_TARG_UDP_ */
	u32 cscn_targ; /* use QM_CGR_TARG_* */
	u8 cstd_en; /* boolean, use QM_CGR_EN */
	u8 cs; /* boolean, only used in query response */
	struct qm_cgr_cs_thres cs_thres;
	/* use qm_cgr_cs_thres_set64() */
	u8 mode; /* QMAN_CGR_MODE_FRAME not supported in rev1.0 */

#define QM_CGR_EN 0x01 /* For wr_en_*, cscn_en, cstd_en */
#define QM_CGR_TARG_UDP_CTRL_WRITE_BIT 0x8000 /* value written to portal bit */
#define QM_CGR_TARG_UDP_CTRL_DCP 0x4000 /* 0: SWP, 1: DCP */
#define QM_CGR_TARG_PORTAL(n) (0x80000000 >> (n)) /* s/w portal, 0-9 */
#define QM_CGR_TARG_FMAN0 0x00200000 /* direct-connect portal: fman0 */
#define QM_CGR_TARG_FMAN1 0x00100000 /*                      : fman1 */

/* Convert CGR thresholds to/from "cs_thres" format */
static inline u64 qm_cgr_cs_thres_get64(const struct qm_cgr_cs_thres *th)
	return (u64)th->TA << th->Tn;

static inline int qm_cgr_cs_thres_set64(struct qm_cgr_cs_thres *th, u64 val,
	if (roundup && oddbit)
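/*
 * Example sketch (illustrative, not part of the original header): a
 * congestion-state threshold of 0x10000 stores TA = 0x80, Tn = 9
 * (0x80 << 9 == 0x10000), and qm_cgr_cs_thres_get64() returns the quantised
 * threshold. The truncated trailing parameter above is assumed to be a
 * 'roundup' flag, mirroring qm_fqd_taildrop_set().
 *
 *	struct qm_cgr_cs_thres th;
 *
 *	qm_cgr_cs_thres_set64(&th, 0x10000, 0);
 */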
/* See 1.5.8.5.1: "Initialize FQ" */
/* See 1.5.8.5.2: "Query FQ" */
/* See 1.5.8.5.3: "Query FQ Non-Programmable Fields" */
/* See 1.5.8.5.4: "Alter FQ State Commands" */
/* See 1.5.8.6.1: "Initialize/Modify CGR" */
/* See 1.5.8.6.2: "CGR Test Write" */
/* See 1.5.8.6.3: "Query CGR" */
/* See 1.5.8.6.4: "Query Congestion Group State" */
struct qm_mcc_initfq {
	u16 we_mask; /* Write Enable Mask */
	u32 fqid; /* 24-bit */
	u16 count; /* Initialises 'count+1' FQDs */
	struct qm_fqd fqd; /* the FQD fields go here */

struct qm_mcc_queryfq {
	u32 fqid; /* 24-bit */

struct qm_mcc_queryfq_np {
	u32 fqid; /* 24-bit */

struct qm_mcc_alterfq {
	u32 fqid; /* 24-bit */
	u8 count; /* number of consecutive FQID */
	u32 context_b; /* frame queue context b */

struct qm_mcc_initcgr {
	u16 we_mask; /* Write Enable Mask */
	struct __qm_mc_cgr cgr; /* CGR fields */

struct qm_mcc_cgrtestwrite {
	u8 i_bcnt_hi:8; /* high 8-bits of 40-bit "Instant" */
	u32 i_bcnt_lo; /* low 32-bits of 40-bit */

struct qm_mcc_querycgr {

struct qm_mcc_querycongestion {

struct qm_mcc_querywq {
	/* select channel if verb != QUERYWQ_DEDICATED */
	u16 channel_wq; /* ignores wq (3 lsbits) */
#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
	u16 id:13; /* qm_channel */
	u16 id:13; /* qm_channel */

struct qm_mc_command {
	u8 __dont_write_directly__verb;
	struct qm_mcc_initfq initfq;
	struct qm_mcc_queryfq queryfq;
	struct qm_mcc_queryfq_np queryfq_np;
	struct qm_mcc_alterfq alterfq;
	struct qm_mcc_initcgr initcgr;
	struct qm_mcc_cgrtestwrite cgrtestwrite;
	struct qm_mcc_querycgr querycgr;
	struct qm_mcc_querycongestion querycongestion;
	struct qm_mcc_querywq querywq;

/* INITFQ-specific flags */
#define QM_INITFQ_WE_MASK 0x01ff /* 'Write Enable' flags; */
#define QM_INITFQ_WE_OAC 0x0100
#define QM_INITFQ_WE_ORPC 0x0080
#define QM_INITFQ_WE_CGID 0x0040
#define QM_INITFQ_WE_FQCTRL 0x0020
#define QM_INITFQ_WE_DESTWQ 0x0010
#define QM_INITFQ_WE_ICSCRED 0x0008
#define QM_INITFQ_WE_TDTHRESH 0x0004
#define QM_INITFQ_WE_CONTEXTB 0x0002
#define QM_INITFQ_WE_CONTEXTA 0x0001
/* INITCGR/MODIFYCGR-specific flags */
#define QM_CGR_WE_MASK 0x07ff /* 'Write Enable Mask'; */
#define QM_CGR_WE_WR_PARM_G 0x0400
#define QM_CGR_WE_WR_PARM_Y 0x0200
#define QM_CGR_WE_WR_PARM_R 0x0100
#define QM_CGR_WE_WR_EN_G 0x0080
#define QM_CGR_WE_WR_EN_Y 0x0040
#define QM_CGR_WE_WR_EN_R 0x0020
#define QM_CGR_WE_CSCN_EN 0x0010
#define QM_CGR_WE_CSCN_TARG 0x0008
#define QM_CGR_WE_CSTD_EN 0x0004
#define QM_CGR_WE_CS_THRES 0x0002
#define QM_CGR_WE_MODE 0x0001
struct qm_mcr_initfq {

struct qm_mcr_queryfq {
	struct qm_fqd fqd; /* the FQD fields are here */

struct qm_mcr_queryfq_np {
	u8 state; /* QM_MCR_NP_STATE_*** */
#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
	u16 ra1_sfdr; /* QM_MCR_NP_RA1_*** */
	u16 ra2_sfdr; /* QM_MCR_NP_RA2_*** */
	u16 od1_sfdr; /* QM_MCR_NP_OD1_*** */
	u16 od2_sfdr; /* QM_MCR_NP_OD2_*** */
	u16 od3_sfdr; /* QM_MCR_NP_OD3_*** */
	u16 ra1_sfdr; /* QM_MCR_NP_RA1_*** */
	u16 ra2_sfdr; /* QM_MCR_NP_RA2_*** */
	u16 od1_sfdr; /* QM_MCR_NP_OD1_*** */
	u16 od2_sfdr; /* QM_MCR_NP_OD2_*** */
	u16 od3_sfdr; /* QM_MCR_NP_OD3_*** */

struct qm_mcr_alterfq {
	u8 fqs; /* Frame Queue Status */

struct qm_mcr_initcgr {

struct qm_mcr_cgrtestwrite {
	struct __qm_mc_cgr cgr; /* CGR fields */
	u32 i_bcnt_hi:8; /* high 8-bits of 40-bit "Instant" */
	u32 i_bcnt_lo; /* low 32-bits of 40-bit */
	u32 a_bcnt_hi:8; /* high 8-bits of 40-bit "Average" */
	u32 a_bcnt_lo; /* low 32-bits of 40-bit */
	u16 lgt; /* Last Group Tick */

struct qm_mcr_querycgr {
	struct __qm_mc_cgr cgr; /* CGR fields */
#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
	u32 i_bcnt_hi:8; /* high 8-bits of 40-bit "Instant" */
	u32 i_bcnt_lo; /* low 32-bits of 40-bit */
	u32 i_bcnt_lo; /* low 32-bits of 40-bit */
	u32 i_bcnt_hi:8; /* high 8-bits of 40-bit "Instant" */
#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
	u32 a_bcnt_hi:8; /* high 8-bits of 40-bit "Average" */
	u32 a_bcnt_lo; /* low 32-bits of 40-bit */
	u32 a_bcnt_lo; /* low 32-bits of 40-bit */
	u32 a_bcnt_hi:8; /* high 8-bits of 40-bit "Average" */
	u32 cscn_targ_swp[4];

struct __qm_mcr_querycongestion {

struct qm_mcr_querycongestion {
	/* Access this struct using QM_MCR_QUERYCONGESTION() */
	struct __qm_mcr_querycongestion state;

struct qm_mcr_querywq {
	u16 channel_wq; /* ignores wq (3 lsbits) */
#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
	u16 id:13; /* qm_channel */
	u16 id:13; /* qm_channel */

struct qm_mc_result {
	struct qm_mcr_initfq initfq;
	struct qm_mcr_queryfq queryfq;
	struct qm_mcr_queryfq_np queryfq_np;
	struct qm_mcr_alterfq alterfq;
	struct qm_mcr_initcgr initcgr;
	struct qm_mcr_cgrtestwrite cgrtestwrite;
	struct qm_mcr_querycgr querycgr;
	struct qm_mcr_querycongestion querycongestion;
	struct qm_mcr_querywq querywq;

#define QM_MCR_VERB_RRID 0x80
#define QM_MCR_VERB_MASK QM_MCC_VERB_MASK
#define QM_MCR_VERB_INITFQ_PARKED QM_MCC_VERB_INITFQ_PARKED
#define QM_MCR_VERB_INITFQ_SCHED QM_MCC_VERB_INITFQ_SCHED
#define QM_MCR_VERB_QUERYFQ QM_MCC_VERB_QUERYFQ
#define QM_MCR_VERB_QUERYFQ_NP QM_MCC_VERB_QUERYFQ_NP
#define QM_MCR_VERB_QUERYWQ QM_MCC_VERB_QUERYWQ
#define QM_MCR_VERB_QUERYWQ_DEDICATED QM_MCC_VERB_QUERYWQ_DEDICATED
#define QM_MCR_VERB_ALTER_SCHED QM_MCC_VERB_ALTER_SCHED
#define QM_MCR_VERB_ALTER_FE QM_MCC_VERB_ALTER_FE
#define QM_MCR_VERB_ALTER_RETIRE QM_MCC_VERB_ALTER_RETIRE
#define QM_MCR_VERB_ALTER_OOS QM_MCC_VERB_ALTER_OOS
#define QM_MCR_RESULT_NULL 0x00
#define QM_MCR_RESULT_OK 0xf0
#define QM_MCR_RESULT_ERR_FQID 0xf1
#define QM_MCR_RESULT_ERR_FQSTATE 0xf2
#define QM_MCR_RESULT_ERR_NOTEMPTY 0xf3 /* OOS fails if FQ is !empty */
#define QM_MCR_RESULT_ERR_BADCHANNEL 0xf4
#define QM_MCR_RESULT_PENDING 0xf8
#define QM_MCR_RESULT_ERR_BADCOMMAND 0xff
#define QM_MCR_NP_STATE_FE 0x10
#define QM_MCR_NP_STATE_R 0x08
#define QM_MCR_NP_STATE_MASK 0x07 /* Reads FQD::STATE; */
#define QM_MCR_NP_STATE_OOS 0x00
#define QM_MCR_NP_STATE_RETIRED 0x01
#define QM_MCR_NP_STATE_TEN_SCHED 0x02
#define QM_MCR_NP_STATE_TRU_SCHED 0x03
#define QM_MCR_NP_STATE_PARKED 0x04
#define QM_MCR_NP_STATE_ACTIVE 0x05
#define QM_MCR_NP_PTR_MASK 0x07ff /* for RA[12] & OD[123] */
#define QM_MCR_NP_RA1_NRA(v) (((v) >> 14) & 0x3) /* FQD::NRA */
#define QM_MCR_NP_RA2_IT(v) (((v) >> 14) & 0x1) /* FQD::IT */
#define QM_MCR_NP_OD1_NOD(v) (((v) >> 14) & 0x3) /* FQD::NOD */
#define QM_MCR_NP_OD3_NPC(v) (((v) >> 14) & 0x3) /* FQD::NPC */
#define QM_MCR_FQS_ORLPRESENT 0x02 /* ORL fragments to come */
#define QM_MCR_FQS_NOTEMPTY 0x01 /* FQ has enqueued frames */
/* This extracts the state for congestion group 'n' from a query response.
 *	struct qm_mc_result *res = [...];
 *	printf("congestion group %d congestion state: %d\n", cgr,
 *		QM_MCR_QUERYCONGESTION(&res->querycongestion.state, cgr));
#define __CGR_WORD(num) (num >> 5)
#define __CGR_SHIFT(num) (num & 0x1f)
#define __CGR_NUM (sizeof(struct __qm_mcr_querycongestion) << 3)
static inline int QM_MCR_QUERYCONGESTION(struct __qm_mcr_querycongestion *p,
	return p->state[__CGR_WORD(cgr)] & (0x80000000 >> __CGR_SHIFT(cgr));
/* Portal and Frame Queues */
/* Represents a managed portal */
 * This object type represents QMan frame queue descriptors (FQD); it is
 * cacheline-aligned and initialised by qman_create_fq(). The structure is
 * defined further down.

 * This object type represents a QMan congestion group; it is defined further
 * This enum, and the callback type that returns it, are used when handling
 * dequeued frames via DQRR. Note that for "null" callbacks registered with the
 * portal object (for handling dequeues that do not demux because context_b is
 * NULL), the return value *MUST* be qman_cb_dqrr_consume.
enum qman_cb_dqrr_result {
	/* DQRR entry can be consumed */
	qman_cb_dqrr_consume,
	/* Like _consume, but requests parking - FQ must be held-active */
	/* Does not consume, for DCA mode only. This allows out-of-order
	 * consumes by explicit calls to qman_dca() and/or the use of implicit
	 * DCA via EQCR entries.
	 * Stop processing without consuming this ring entry. Exits the current
	 * qman_p_poll_dqrr() or interrupt-handling, as appropriate. If within
	 * an interrupt handler, the callback would typically call
	 * qman_irqsource_remove(QM_PIRQ_DQRI) before returning this value,
	 * otherwise the interrupt will reassert immediately.
	/* Like qman_cb_dqrr_stop, but consumes the current entry. */
	qman_cb_dqrr_consume_stop

typedef enum qman_cb_dqrr_result (*qman_cb_dqrr)(struct qman_portal *qm,
			const struct qm_dqrr_entry *dqrr);
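/*
 * Example sketch (not part of the original header): a minimal DQRR callback
 * that consumes every frame. 'process_rx_fd' is an illustrative application
 * helper; the middle 'fq' parameter and the DQRR entry's 'fd' member are
 * assumed from the full definitions, which are elided in this excerpt.
 *
 *	static enum qman_cb_dqrr_result
 *	my_dqrr_cb(struct qman_portal *qm, struct qman_fq *fq,
 *		   const struct qm_dqrr_entry *dqrr)
 *	{
 *		process_rx_fd(&dqrr->fd);
 *		return qman_cb_dqrr_consume;
 *	}
 */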
 * This callback type is used when handling ERNs, FQRNs and FQRLs via MR. They
 * are always consumed after the callback returns.
typedef void (*qman_cb_mr)(struct qman_portal *qm, struct qman_fq *fq,
			const struct qm_mr_entry *msg);

/* This callback type is used when handling DCP ERNs */
typedef void (*qman_cb_dc_ern)(struct qman_portal *qm,
			const struct qm_mr_entry *msg);
 * s/w-visible states. I.e. tentatively scheduled + truly scheduled + active +
 * held-active + held-suspended are just "sched". Things like "retired" will
 * not be assumed until the retirement is complete (i.e. QMAN_FQ_STATE_CHANGING
 * is set until then, to indicate it's completing and to gate attempts to retry
 * the retire command). Note, park commands do not set QMAN_FQ_STATE_CHANGING
 * because it's technically impossible in the case of enqueue DCAs (which refer
 * to a DQRR ring index rather than the FQ that the ring entry corresponds to),
 * so repeated park commands are allowed (if you're silly enough to try) but
 * won't change FQ state, and the resulting park notifications move FQs from
 * "sched" to "parked".
enum qman_fq_state {
	qman_fq_state_parked,
	qman_fq_state_sched,
	qman_fq_state_retired
 * Frame queue objects (struct qman_fq) are stored within memory passed to
 * qman_create_fq(), as this allows stashing of caller-provided demux callback
 * pointers at no extra cost to stashing of (driver-internal) FQ state. If the
 * caller wishes to add per-FQ state and have it benefit from dequeue-stashing,
 * (a) extend the qman_fq structure with their state; eg.
 *
 *	// myfq is allocated and driver_fq callbacks filled in;
 *		struct qman_fq base;
 *		int an_extra_field;
 *		[ ... add other fields to be associated with each FQ ...]
 *	} *myfq = some_my_fq_allocator();
 *	struct qman_fq *fq = qman_create_fq(fqid, flags, &myfq->base);
 *
 *	// in a dequeue callback, access extra fields from 'fq' via a cast;
 *	struct my_fq *myfq = (struct my_fq *)fq;
 *	do_something_with(myfq->an_extra_field);
 *
 * (b) when and if configuring the FQ for context stashing, specify how ever
 * many cachelines are required to stash 'struct my_fq', to accelerate not
 * only the QMan driver but the callback as well.
	qman_cb_dqrr dqrr; /* for dequeued frames */
	qman_cb_mr ern; /* for s/w ERNs */
	qman_cb_mr fqs; /* frame-queue state changes */

	/* Caller of qman_create_fq() provides these demux callbacks */
	struct qman_fq_cb cb;
	 * These are internal to the driver, don't touch. In particular, they
	 * may change, be removed, or extended (so you shouldn't rely on
	 * sizeof(qman_fq) being a constant).
	/* DPDK Interface */
	volatile unsigned long flags;
	enum qman_fq_state state;
	struct rb_node node;
 * This callback type is used when handling congestion group entry/exit.
 * 'congested' is non-zero on congestion-entry, and zero on congestion-exit.
typedef void (*qman_cb_cgr)(struct qman_portal *qm,
			struct qman_cgr *cgr, int congested);
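/*
 * Example sketch (not part of the original header): a congestion callback
 * that records the current state in caller-owned context. 'struct my_ctx' is
 * illustrative and is assumed to embed a 'struct qman_cgr cgr' member;
 * container_of() is the usual kernel/DPDK helper, not defined in this file.
 *
 *	static void my_cgr_cb(struct qman_portal *qm, struct qman_cgr *cgr,
 *			      int congested)
 *	{
 *		struct my_ctx *ctx = container_of(cgr, struct my_ctx, cgr);
 *
 *		ctx->congested = congested;
 *	}
 */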
	/* Set these prior to qman_create_cgr() */
	u32 cgrid; /* 0..255, but u32 to allow specials like -1, 256, etc. */

	/* These are private to the driver */
	u16 chan; /* portal channel this object is created on */
	struct list_head node;

#endif /* __FSL_QMAN_H */