/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0)
 * Copyright 2008-2012 Freescale Semiconductor, Inc.
 */

#include <dpaa_rbtree.h>
#include <rte_eventdev.h>

/* FQ lookups (turn this on for 64bit user-space) */
#if (__WORDSIZE == 64)
#define CONFIG_FSL_QMAN_FQ_LOOKUP
/* if FQ lookups are supported, this controls the number of initialised,
 * s/w-consumed FQs that can be supported at any one time.
 */
#define CONFIG_FSL_QMAN_FQ_LOOKUP_MAX (32 * 1024)
#endif
/* Last updated for v00.800 of the BG */

/* Hardware constants */
#define QM_CHANNEL_SWPORTAL0 0
#define QMAN_CHANNEL_POOL1 0x21
#define QMAN_CHANNEL_CAAM 0x80
#define QMAN_CHANNEL_PME 0xa0
#define QMAN_CHANNEL_POOL1_REV3 0x401
#define QMAN_CHANNEL_CAAM_REV3 0x840
#define QMAN_CHANNEL_PME_REV3 0x860
extern u16 qm_channel_pool1;
extern u16 qm_channel_caam;
extern u16 qm_channel_pme;

enum qm_dc_portal {
	qm_dc_portal_fman0 = 0,
	qm_dc_portal_fman1 = 1,
	qm_dc_portal_caam = 2,
	qm_dc_portal_pme = 3
};
/* Portal processing (interrupt) sources */
#define QM_PIRQ_CCSCI 0x00200000 /* CEETM Congestion State Change */
#define QM_PIRQ_CSCI 0x00100000 /* Congestion State Change */
#define QM_PIRQ_EQCI 0x00080000 /* Enqueue Command Committed */
#define QM_PIRQ_EQRI 0x00040000 /* EQCR Ring (below threshold) */
#define QM_PIRQ_DQRI 0x00020000 /* DQRR Ring (non-empty) */
#define QM_PIRQ_MRI 0x00010000 /* MR Ring (non-empty) */
/*
 * This mask contains all the interrupt sources that need handling except DQRI,
 * ie. that if present should trigger slow-path processing.
 */
#define QM_PIRQ_SLOW (QM_PIRQ_CSCI | QM_PIRQ_EQCI | QM_PIRQ_EQRI | \
		      QM_PIRQ_MRI | QM_PIRQ_CCSCI)
/* For qman_static_dequeue_*** APIs */
#define QM_SDQCR_CHANNELS_POOL_MASK 0x00007fff
#define QM_SDQCR_CHANNELS_POOL(n) (0x00008000 >> (n))
/* for conversion from n of qm_channel */
static inline u32 QM_SDQCR_CHANNELS_POOL_CONV(u16 channel)
{
	return QM_SDQCR_CHANNELS_POOL(channel + 1 - qm_channel_pool1);
}
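/*
 * Illustrative usage sketch (not part of the original header): adding a pool
 * channel to a portal's static dequeue command register by converting the
 * qm_channel value first. 'portal' is a hypothetical affine portal pointer.
 *
 *	struct qman_portal *portal = [...];
 *	qman_static_dequeue_add(QM_SDQCR_CHANNELS_POOL_CONV(qm_channel_pool1),
 *				portal);
 */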
/* For qman_volatile_dequeue(); Choose one PRECEDENCE. EXACT is optional. Use
 * NUMFRAMES(n) (6-bit) or NUMFRAMES_TILLEMPTY to fill in the frame-count. Use
 * FQID(n) to fill in the frame queue ID.
 */
#define QM_VDQCR_PRECEDENCE_VDQCR 0x0
#define QM_VDQCR_PRECEDENCE_SDQCR 0x80000000
#define QM_VDQCR_EXACT 0x40000000
#define QM_VDQCR_NUMFRAMES_MASK 0x3f000000
#define QM_VDQCR_NUMFRAMES_SET(n) (((n) & 0x3f) << 24)
#define QM_VDQCR_NUMFRAMES_GET(n) (((n) >> 24) & 0x3f)
#define QM_VDQCR_NUMFRAMES_TILLEMPTY QM_VDQCR_NUMFRAMES_SET(0)
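/*
 * Illustrative sketch (not part of the original header): composing a VDQCR
 * word for an exact, eight-frame volatile dequeue of a hypothetical 'fq'.
 *
 *	u32 vdqcr = QM_VDQCR_PRECEDENCE_VDQCR | QM_VDQCR_EXACT |
 *		    QM_VDQCR_NUMFRAMES_SET(8);
 *	int err = qman_volatile_dequeue(fq, 0, vdqcr);
 */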
/* --- QMan data structures (and associated constants) --- */

/* Represents s/w corenet portal mapped data structures */
struct qm_eqcr_entry;	/* EQCR (EnQueue Command Ring) entries */
struct qm_dqrr_entry;	/* DQRR (DeQueue Response Ring) entries */
struct qm_mr_entry;	/* MR (Message Ring) entries */
struct qm_mc_command;	/* MC (Management Command) command */
struct qm_mc_result;	/* MC result */

#define QM_FD_FORMAT_SG 0x4
#define QM_FD_FORMAT_LONG 0x2
#define QM_FD_FORMAT_COMPOUND 0x1
enum qm_fd_format {
	/*
	 * 'contig' implies a contiguous buffer, whereas 'sg' implies a
	 * scatter-gather table. 'big' implies a 29-bit length with no offset
	 * field, otherwise length is 20-bit and offset is 9-bit. 'compound'
	 * implies a s/g-like table, where each entry itself represents a frame
	 * (contiguous or scatter-gather) and the 29-bit "length" is
	 * interpreted purely for congestion calculations, ie. a "congestion
	 * weight".
	 */
	qm_fd_contig = 0,
	qm_fd_contig_big = QM_FD_FORMAT_LONG,
	qm_fd_sg = QM_FD_FORMAT_SG,
	qm_fd_sg_big = QM_FD_FORMAT_SG | QM_FD_FORMAT_LONG,
	qm_fd_compound = QM_FD_FORMAT_COMPOUND
};

/* Capitalised versions are un-typed but can be used in static expressions */
#define QM_FD_CONTIG 0
#define QM_FD_CONTIG_BIG QM_FD_FORMAT_LONG
#define QM_FD_SG QM_FD_FORMAT_SG
#define QM_FD_SG_BIG (QM_FD_FORMAT_SG | QM_FD_FORMAT_LONG)
#define QM_FD_COMPOUND QM_FD_FORMAT_COMPOUND
119 /* "Frame Descriptor (FD)" */
123 #if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
124 u8 dd:2; /* dynamic debug */
126 u8 bpid:8; /* Buffer Pool ID */
129 u8 addr_hi; /* high 8-bits of 40-bit address */
130 u32 addr_lo; /* low 32-bits of 40-bit address */
133 u8 dd:2; /* dynamic debug */
134 u8 bpid:8; /* Buffer Pool ID */
137 u8 addr_hi; /* high 8-bits of 40-bit address */
138 u32 addr_lo; /* low 32-bits of 40-bit address */
143 /* More efficient address accessor */
148 /* The 'format' field indicates the interpretation of the remaining 29
149 * bits of the 32-bit word. For packing reasons, it is duplicated in the
150 * other union elements. Note, union'd structs are difficult to use with
151 * static initialisation under gcc, in which case use the "opaque" form
152 * with one of the macros.
155 /* For easier/faster copying of this part of the fd (eg. from a
156 * DQRR entry to an EQCR entry) copy 'opaque'
	/* If 'format' is _contig or _sg, 20b length and 9b offset */
#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
	enum qm_fd_format format:3;
#else
	enum qm_fd_format format:3;
#endif
	/* If 'format' is _contig_big or _sg_big, 29b length */
#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
	enum qm_fd_format _format1:3;
#else
	enum qm_fd_format _format1:3;
#endif
	/* If 'format' is _compound, 29b "congestion weight" */
#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
	enum qm_fd_format _format2:3;
#else
	enum qm_fd_format _format2:3;
#endif
} __attribute__((aligned(8)));
#define QM_FD_DD_NULL 0x00
#define QM_FD_PID_MASK 0x3f
static inline u64 qm_fd_addr_get64(const struct qm_fd *fd)
{
	return fd->addr;
}

static inline dma_addr_t qm_fd_addr(const struct qm_fd *fd)
{
	return (dma_addr_t)fd->addr;
}

/* Macro, so we compile better if 'v' isn't always 64-bit */
#define qm_fd_addr_set64(fd, v) \
	do { \
		struct qm_fd *__fd931 = (fd); \
		__fd931->addr = v; \
	} while (0)
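/*
 * Illustrative sketch (not part of the original header): setting and reading
 * back the 40-bit frame address. 'phys' is a hypothetical IOVA.
 *
 *	struct qm_fd fd;
 *	u64 phys = [...];
 *	qm_fd_addr_set64(&fd, phys);
 *	u64 readback = qm_fd_addr_get64(&fd); // phys truncated to 40 bits
 */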
/* Scatter/Gather table entry */
#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
	u8 addr_hi;	/* high 8-bits of 40-bit address */
	u32 addr_lo;	/* low 32-bits of 40-bit address */
#else
	u32 addr_lo;	/* low 32-bits of 40-bit address */
	u8 addr_hi;	/* high 8-bits of 40-bit address */
#endif
#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
	u32 extension:1;	/* Extension bit */
	u32 final:1;	/* Final bit */
#else
	u32 final:1;	/* Final bit */
	u32 extension:1;	/* Extension bit */
#endif
#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__

static inline u64 qm_sg_entry_get64(const struct qm_sg_entry *sg)
{
	return sg->addr;
}

static inline dma_addr_t qm_sg_addr(const struct qm_sg_entry *sg)
{
	return (dma_addr_t)sg->addr;
}

/* Macro, so we compile better if 'v' isn't always 64-bit */
#define qm_sg_entry_set64(sg, v) \
	do { \
		struct qm_sg_entry *__sg931 = (sg); \
		__sg931->addr = v; \
	} while (0)
/* See 1.5.8.1: "Enqueue Command" */
struct __rte_aligned(8) qm_eqcr_entry {
	u8 __dont_write_directly__verb;
	u32 orp;	/* 24-bit */
	u32 fqid;	/* 24-bit */
	struct qm_fd fd;	/* this has alignment 8 */

/* "Frame Dequeue Response" */
struct __rte_aligned(8) qm_dqrr_entry {
	u16 seqnum;	/* 15-bit */
	u32 fqid;	/* 24-bit */
	struct qm_fd fd;	/* this has alignment 8 */

#define QM_DQRR_VERB_VBIT 0x80
#define QM_DQRR_VERB_MASK 0x7f /* where the verb contains; */
#define QM_DQRR_VERB_FRAME_DEQUEUE 0x60 /* "this format" */
#define QM_DQRR_STAT_FQ_EMPTY 0x80 /* FQ empty */
#define QM_DQRR_STAT_FQ_HELDACTIVE 0x40 /* FQ held active */
#define QM_DQRR_STAT_FQ_FORCEELIGIBLE 0x20 /* FQ was force-eligible'd */
#define QM_DQRR_STAT_FD_VALID 0x10 /* has a non-NULL FD */
#define QM_DQRR_STAT_UNSCHEDULED 0x02 /* Unscheduled dequeue */
#define QM_DQRR_STAT_DQCR_EXPIRED 0x01 /* VDQCR or PDQCR expired */
324 /* "ERN Message Response" */
325 /* "FQ State Change Notification" */
332 u8 rc; /* Rejection Code */
334 u32 fqid; /* 24-bit */
336 struct qm_fd fd; /* this has alignment 8 */
337 } __packed __rte_aligned(8) ern;
340 #if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
341 u8 colour:2; /* See QM_MR_DCERN_COLOUR_* */
343 enum qm_dc_portal portal:2;
345 enum qm_dc_portal portal:3;
347 u8 colour:2; /* See QM_MR_DCERN_COLOUR_* */
350 u8 rc; /* Rejection Code */
352 u32 fqid; /* 24-bit */
354 struct qm_fd fd; /* this has alignment 8 */
355 } __packed __rte_aligned(8) dcern;
358 u8 fqs; /* Frame Queue Status */
360 u32 fqid; /* 24-bit */
363 } __packed __rte_aligned(8) fq; /* FQRN/FQRNI/FQRL/FQPN */
366 } __packed __rte_aligned(8);
#define QM_MR_VERB_VBIT 0x80
/*
 * ERNs originating from direct-connect portals ("dcern") use 0x20 as a verb
 * which would be invalid as a s/w enqueue verb. A s/w ERN can be distinguished
 * from the other MR types by noting if the 0x20 bit is unset.
 */
#define QM_MR_VERB_TYPE_MASK 0x27
#define QM_MR_VERB_DC_ERN 0x20
#define QM_MR_VERB_FQRN 0x21
#define QM_MR_VERB_FQRNI 0x22
#define QM_MR_VERB_FQRL 0x23
#define QM_MR_VERB_FQPN 0x24
#define QM_MR_RC_MASK 0xf0 /* contains one of; */
#define QM_MR_RC_CGR_TAILDROP 0x00
#define QM_MR_RC_WRED 0x10
#define QM_MR_RC_ERROR 0x20
#define QM_MR_RC_ORPWINDOW_EARLY 0x30
#define QM_MR_RC_ORPWINDOW_LATE 0x40
#define QM_MR_RC_FQ_TAILDROP 0x50
#define QM_MR_RC_ORPWINDOW_RETIRED 0x60
#define QM_MR_RC_ORP_ZERO 0x70
#define QM_MR_FQS_ORLPRESENT 0x02 /* ORL fragments to come */
#define QM_MR_FQS_NOTEMPTY 0x01 /* FQ has enqueued frames */
#define QM_MR_DCERN_COLOUR_GREEN 0x00
#define QM_MR_DCERN_COLOUR_YELLOW 0x01
#define QM_MR_DCERN_COLOUR_RED 0x02
#define QM_MR_DCERN_COLOUR_OVERRIDE 0x03
/*
 * An identical structure of FQD fields is present in the "Init FQ" command and
 * the "Query FQ" result, it's suctioned out into the "struct qm_fqd" type.
 * Within that, the 'stashing' and 'taildrop' pieces are also factored out, the
 * latter has two inlines to assist with converting to/from the mant+exp
 * representation.
 */
struct qm_fqd_stashing {
	/* See QM_STASHING_EXCL_<...> */
#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
	/* Numbers of cachelines */
struct qm_fqd_taildrop {
#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
	/* "Overhead Accounting Control", see QM_OAC_<...> */
#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
	u8 oac:2;	/* "Overhead Accounting Control" */
#else
	u8 oac:2;	/* "Overhead Accounting Control" */
#endif
	/* Two's-complement value (-128 to +127) */
	signed char oal;	/* "Overhead Accounting Length" */
#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
	u16 fq_ctrl;	/* See QM_FQCTRL_<...> */
#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
	u16 channel:13;	/* qm_channel */
#else
	u16 channel:13;	/* qm_channel */
#endif
#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
	/*
	 * For "Initialize Frame Queue" commands, the write-enable mask
	 * determines whether 'td' or 'oac_init' is observed. For query
	 * commands, this field is always 'td', and 'oac_query' (below) reflects
	 * the Overhead ACcounting values.
	 */
	struct qm_fqd_taildrop td;
	struct qm_fqd_oac oac_init;
	/* Treat it as 64-bit opaque */
#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
	/* Treat it as s/w portal stashing config */
	/* see "FQD Context_A field used for [...]" */
#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
	struct qm_fqd_stashing stashing;
	/*
	 * 48-bit address of FQ context to
	 * stash, must be cacheline-aligned
	 */
	struct qm_fqd_stashing stashing;
	struct qm_fqd_oac oac_query;
/* 64-bit converters for context_hi/lo */
static inline u64 qm_fqd_stashing_get64(const struct qm_fqd *fqd)
{
	return ((u64)fqd->context_a.context_hi << 32) |
		(u64)fqd->context_a.context_lo;
}

static inline dma_addr_t qm_fqd_stashing_addr(const struct qm_fqd *fqd)
{
	return (dma_addr_t)qm_fqd_stashing_get64(fqd);
}

static inline u64 qm_fqd_context_a_get64(const struct qm_fqd *fqd)
{
	return ((u64)fqd->context_a.hi << 32) |
		(u64)fqd->context_a.lo;
}

static inline void qm_fqd_stashing_set64(struct qm_fqd *fqd, u64 addr)
{
	fqd->context_a.context_hi = upper_32_bits(addr);
	fqd->context_a.context_lo = lower_32_bits(addr);
}

static inline void qm_fqd_context_a_set64(struct qm_fqd *fqd, u64 addr)
{
	fqd->context_a.hi = upper_32_bits(addr);
	fqd->context_a.lo = lower_32_bits(addr);
}
/* convert a threshold value into mant+exp representation */
static inline int qm_fqd_taildrop_set(struct qm_fqd_taildrop *td, u32 val,
				      int roundup)
{
	u32 e = 0;
	int oddbit = 0;

	if (val > 0xe0000000)
		return -ERANGE;
	while (val > 0xff) {
		oddbit = val & 1;
		val >>= 1;
		e++;
		if (roundup && oddbit)
			val++;
	}
	td->exp = e;
	td->mant = val;
	return 0;
}

/* and the other direction */
static inline u32 qm_fqd_taildrop_get(const struct qm_fqd_taildrop *td)
{
	return (u32)td->mant << td->exp;
}
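/*
 * Illustrative sketch (not part of the original header): a threshold of
 * 0x12345 cannot be represented exactly as mant (<= 0xff) * 2^exp, so the
 * setter quantises it; reading it back shows the effective value.
 *
 *	struct qm_fqd_taildrop td;
 *	qm_fqd_taildrop_set(&td, 0x12345, 1);	  // round up
 *	u32 effective = qm_fqd_taildrop_get(&td); // >= 0x12345, mant * 2^exp
 */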
/* See "Frame Queue Descriptor (FQD)" */
/* Frame Queue Descriptor (FQD) field 'fq_ctrl' uses these constants */
#define QM_FQCTRL_MASK 0x07ff /* 'fq_ctrl' flags; */
#define QM_FQCTRL_CGE 0x0400 /* Congestion Group Enable */
#define QM_FQCTRL_TDE 0x0200 /* Tail-Drop Enable */
#define QM_FQCTRL_ORP 0x0100 /* ORP Enable */
#define QM_FQCTRL_CTXASTASHING 0x0080 /* Context-A stashing */
#define QM_FQCTRL_CPCSTASH 0x0040 /* CPC Stash Enable */
#define QM_FQCTRL_FORCESFDR 0x0008 /* High-priority SFDRs */
#define QM_FQCTRL_AVOIDBLOCK 0x0004 /* Don't block active */
#define QM_FQCTRL_HOLDACTIVE 0x0002 /* Hold active in portal */
#define QM_FQCTRL_PREFERINCACHE 0x0001 /* Aggressively cache FQD */
#define QM_FQCTRL_LOCKINCACHE QM_FQCTRL_PREFERINCACHE /* older naming */

/* See "FQD Context_A field used for [...]" */
/* Frame Queue Descriptor (FQD) field 'CONTEXT_A' uses these constants */
#define QM_STASHING_EXCL_ANNOTATION 0x04
#define QM_STASHING_EXCL_DATA 0x02
#define QM_STASHING_EXCL_CTX 0x01

/* See "Intra Class Scheduling" */
/* FQD field 'OAC' (Overhead ACcounting) uses these constants */
#define QM_OAC_ICS 0x2 /* Accounting for Intra-Class Scheduling */
#define QM_OAC_CG 0x1 /* Accounting for Congestion Groups */
/*
 * This struct represents the 32-bit "WR_PARM_[GYR]" parameters in CGR fields
 * and associated commands/responses. The WRED parameters are calculated from
 * these fields as follows;
 *   MaxTH = MA * (2 ^ Mn)
 *   Slope = SA / (2 ^ Sn)
 *   MaxP = 4 * (Pn + 1)
 */
struct qm_cgr_wr_parm {
#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
	u32 SA:7;	/* must be between 64-127 */
#else
	u32 SA:7;	/* must be between 64-127 */
#endif
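/*
 * Worked example (not part of the original header), applying the formulas
 * above: MA = 64 with Mn = 10 gives MaxTH = 64 * 2^10 = 65536 bytes;
 * SA = 64 with Sn = 20 gives Slope = 64 / 2^20 ~= 0.000061; and Pn = 15
 * gives MaxP = 4 * (15 + 1) = 64.
 */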
/*
 * This struct represents the 13-bit "CS_THRES" CGR field. In the corresponding
 * management commands, this is padded to a 16-bit structure field, so that's
 * how we represent it here. The congestion state threshold is calculated from
 * these fields as follows;
 *   CS threshold = TA * (2 ^ Tn)
 */
struct qm_cgr_cs_thres {
#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
/*
 * This identical structure of CGR fields is present in the "Init/Modify CGR"
 * commands and the "Query CGR" result. It's suctioned out here into its own
 * struct.
 */
	struct qm_cgr_wr_parm wr_parm_g;
	struct qm_cgr_wr_parm wr_parm_y;
	struct qm_cgr_wr_parm wr_parm_r;
	u8 wr_en_g;	/* boolean, use QM_CGR_EN */
	u8 wr_en_y;	/* boolean, use QM_CGR_EN */
	u8 wr_en_r;	/* boolean, use QM_CGR_EN */
	u8 cscn_en;	/* boolean, use QM_CGR_EN */
#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
	u16 cscn_targ_upd_ctrl;	/* use QM_CSCN_TARG_UDP_ */
	u16 cscn_targ_dcp_low;	/* CSCN_TARG_DCP low-16bits */
#else
	u16 cscn_targ_dcp_low;	/* CSCN_TARG_DCP low-16bits */
	u16 cscn_targ_upd_ctrl;	/* use QM_CSCN_TARG_UDP_ */
#endif
	u32 cscn_targ;	/* use QM_CGR_TARG_* */
	u8 cstd_en;	/* boolean, use QM_CGR_EN */
	u8 cs;	/* boolean, only used in query response */
	struct qm_cgr_cs_thres cs_thres;	/* use qm_cgr_cs_thres_set64() */
	u8 mode;	/* QMAN_CGR_MODE_FRAME not supported in rev1.0 */
#define QM_CGR_EN 0x01 /* For wr_en_*, cscn_en, cstd_en */
#define QM_CGR_TARG_UDP_CTRL_WRITE_BIT 0x8000 /* value written to portal bit */
#define QM_CGR_TARG_UDP_CTRL_DCP 0x4000 /* 0: SWP, 1: DCP */
#define QM_CGR_TARG_PORTAL(n) (0x80000000 >> (n)) /* s/w portal, 0-9 */
#define QM_CGR_TARG_FMAN0 0x00200000 /* direct-connect portal: fman0 */
#define QM_CGR_TARG_FMAN1 0x00100000 /*                      : fman1 */
/* Convert CGR thresholds to/from "cs_thres" format */
static inline u64 qm_cgr_cs_thres_get64(const struct qm_cgr_cs_thres *th)
{
	return (u64)th->TA << th->Tn;
}

static inline int qm_cgr_cs_thres_set64(struct qm_cgr_cs_thres *th, u64 val,
					int roundup)
{
	u32 e = 0;
	int oddbit = 0;

	while (val > 0xff) {
		oddbit = val & 1;
		val >>= 1;
		e++;
		if (roundup && oddbit)
			val++;
	}
	th->Tn = e;
	th->TA = val;
	return 0;
}
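/*
 * Illustrative sketch (not part of the original header): quantising a 1MB
 * congestion threshold into TA * 2^Tn form and reading back the value the
 * hardware will use.
 *
 *	struct qm_cgr_cs_thres th;
 *	qm_cgr_cs_thres_set64(&th, 1024 * 1024, 0);
 *	u64 effective = qm_cgr_cs_thres_get64(&th); // 128 << 13, exactly 1MB
 */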
/* See 1.5.8.5.1: "Initialize FQ" */
/* See 1.5.8.5.2: "Query FQ" */
/* See 1.5.8.5.3: "Query FQ Non-Programmable Fields" */
/* See 1.5.8.5.4: "Alter FQ State Commands" */
/* See 1.5.8.6.1: "Initialize/Modify CGR" */
/* See 1.5.8.6.2: "CGR Test Write" */
/* See 1.5.8.6.3: "Query CGR" */
/* See 1.5.8.6.4: "Query Congestion Group State" */
struct qm_mcc_initfq {
	u16 we_mask;	/* Write Enable Mask */
	u32 fqid;	/* 24-bit */
	u16 count;	/* Initialises 'count+1' FQDs */
	struct qm_fqd fqd;	/* the FQD fields go here */

struct qm_mcc_queryfq {
	u32 fqid;	/* 24-bit */

struct qm_mcc_queryfq_np {
	u32 fqid;	/* 24-bit */

struct qm_mcc_alterfq {
	u32 fqid;	/* 24-bit */
	u8 count;	/* number of consecutive FQID */
	u32 context_b;	/* frame queue context b */

struct qm_mcc_initcgr {
	u16 we_mask;	/* Write Enable Mask */
	struct __qm_mc_cgr cgr;	/* CGR fields */

struct qm_mcc_cgrtestwrite {
	u8 i_bcnt_hi:8;	/* high 8-bits of 40-bit "Instant" */
	u32 i_bcnt_lo;	/* low 32-bits of 40-bit */

struct qm_mcc_querycgr {

struct qm_mcc_querycongestion {

struct qm_mcc_querywq {
	/* select channel if verb != QUERYWQ_DEDICATED */
	u16 channel_wq;	/* ignores wq (3 lsbits) */
#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
	u16 id:13;	/* qm_channel */
#else
	u16 id:13;	/* qm_channel */
#endif

struct qm_mc_command {
	u8 __dont_write_directly__verb;
	union {
		struct qm_mcc_initfq initfq;
		struct qm_mcc_queryfq queryfq;
		struct qm_mcc_queryfq_np queryfq_np;
		struct qm_mcc_alterfq alterfq;
		struct qm_mcc_initcgr initcgr;
		struct qm_mcc_cgrtestwrite cgrtestwrite;
		struct qm_mcc_querycgr querycgr;
		struct qm_mcc_querycongestion querycongestion;
		struct qm_mcc_querywq querywq;
	};
} __packed;
/* INITFQ-specific flags */
#define QM_INITFQ_WE_MASK 0x01ff /* 'Write Enable' flags; */
#define QM_INITFQ_WE_OAC 0x0100
#define QM_INITFQ_WE_ORPC 0x0080
#define QM_INITFQ_WE_CGID 0x0040
#define QM_INITFQ_WE_FQCTRL 0x0020
#define QM_INITFQ_WE_DESTWQ 0x0010
#define QM_INITFQ_WE_ICSCRED 0x0008
#define QM_INITFQ_WE_TDTHRESH 0x0004
#define QM_INITFQ_WE_CONTEXTB 0x0002
#define QM_INITFQ_WE_CONTEXTA 0x0001
/* INITCGR/MODIFYCGR-specific flags */
#define QM_CGR_WE_MASK 0x07ff /* 'Write Enable Mask'; */
#define QM_CGR_WE_WR_PARM_G 0x0400
#define QM_CGR_WE_WR_PARM_Y 0x0200
#define QM_CGR_WE_WR_PARM_R 0x0100
#define QM_CGR_WE_WR_EN_G 0x0080
#define QM_CGR_WE_WR_EN_Y 0x0040
#define QM_CGR_WE_WR_EN_R 0x0020
#define QM_CGR_WE_CSCN_EN 0x0010
#define QM_CGR_WE_CSCN_TARG 0x0008
#define QM_CGR_WE_CSTD_EN 0x0004
#define QM_CGR_WE_CS_THRES 0x0002
#define QM_CGR_WE_MODE 0x0001
struct qm_mcr_initfq {

struct qm_mcr_queryfq {
	struct qm_fqd fqd;	/* the FQD fields are here */

struct qm_mcr_queryfq_np {
	u8 state;	/* QM_MCR_NP_STATE_*** */
#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
	u16 ra1_sfdr;	/* QM_MCR_NP_RA1_*** */
	u16 ra2_sfdr;	/* QM_MCR_NP_RA2_*** */
	u16 od1_sfdr;	/* QM_MCR_NP_OD1_*** */
	u16 od2_sfdr;	/* QM_MCR_NP_OD2_*** */
	u16 od3_sfdr;	/* QM_MCR_NP_OD3_*** */
#else
	u16 ra1_sfdr;	/* QM_MCR_NP_RA1_*** */
	u16 ra2_sfdr;	/* QM_MCR_NP_RA2_*** */
	u16 od1_sfdr;	/* QM_MCR_NP_OD1_*** */
	u16 od2_sfdr;	/* QM_MCR_NP_OD2_*** */
	u16 od3_sfdr;	/* QM_MCR_NP_OD3_*** */
#endif

struct qm_mcr_alterfq {
	u8 fqs;	/* Frame Queue Status */

struct qm_mcr_initcgr {

struct qm_mcr_cgrtestwrite {
	struct __qm_mc_cgr cgr;	/* CGR fields */
	u32 i_bcnt_hi:8;	/* high 8-bits of 40-bit "Instant" */
	u32 i_bcnt_lo;	/* low 32-bits of 40-bit */
	u32 a_bcnt_hi:8;	/* high 8-bits of 40-bit "Average" */
	u32 a_bcnt_lo;	/* low 32-bits of 40-bit */
	u16 lgt;	/* Last Group Tick */
struct qm_mcr_querycgr {
	struct __qm_mc_cgr cgr;	/* CGR fields */
#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
	u32 i_bcnt_hi:8;	/* high 8-bits of 40-bit "Instant" */
	u32 i_bcnt_lo;	/* low 32-bits of 40-bit */
#else
	u32 i_bcnt_lo;	/* low 32-bits of 40-bit */
	u32 i_bcnt_hi:8;	/* high 8-bits of 40-bit "Instant" */
#endif
#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
	u32 a_bcnt_hi:8;	/* high 8-bits of 40-bit "Average" */
	u32 a_bcnt_lo;	/* low 32-bits of 40-bit */
#else
	u32 a_bcnt_lo;	/* low 32-bits of 40-bit */
	u32 a_bcnt_hi:8;	/* high 8-bits of 40-bit "Average" */
#endif
	u32 cscn_targ_swp[4];

struct __qm_mcr_querycongestion {

struct qm_mcr_querycongestion {
	/* Access this struct using QM_MCR_QUERYCONGESTION() */
	struct __qm_mcr_querycongestion state;

struct qm_mcr_querywq {
	u16 channel_wq;	/* ignores wq (3 lsbits) */
#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
	u16 id:13;	/* qm_channel */
#else
	u16 id:13;	/* qm_channel */
#endif
struct qm_mc_result {
	union {
		struct qm_mcr_initfq initfq;
		struct qm_mcr_queryfq queryfq;
		struct qm_mcr_queryfq_np queryfq_np;
		struct qm_mcr_alterfq alterfq;
		struct qm_mcr_initcgr initcgr;
		struct qm_mcr_cgrtestwrite cgrtestwrite;
		struct qm_mcr_querycgr querycgr;
		struct qm_mcr_querycongestion querycongestion;
		struct qm_mcr_querywq querywq;
	};
} __packed;
#define QM_MCR_VERB_RRID 0x80
#define QM_MCR_VERB_MASK QM_MCC_VERB_MASK
#define QM_MCR_VERB_INITFQ_PARKED QM_MCC_VERB_INITFQ_PARKED
#define QM_MCR_VERB_INITFQ_SCHED QM_MCC_VERB_INITFQ_SCHED
#define QM_MCR_VERB_QUERYFQ QM_MCC_VERB_QUERYFQ
#define QM_MCR_VERB_QUERYFQ_NP QM_MCC_VERB_QUERYFQ_NP
#define QM_MCR_VERB_QUERYWQ QM_MCC_VERB_QUERYWQ
#define QM_MCR_VERB_QUERYWQ_DEDICATED QM_MCC_VERB_QUERYWQ_DEDICATED
#define QM_MCR_VERB_ALTER_SCHED QM_MCC_VERB_ALTER_SCHED
#define QM_MCR_VERB_ALTER_FE QM_MCC_VERB_ALTER_FE
#define QM_MCR_VERB_ALTER_RETIRE QM_MCC_VERB_ALTER_RETIRE
#define QM_MCR_VERB_ALTER_OOS QM_MCC_VERB_ALTER_OOS
#define QM_MCR_RESULT_NULL 0x00
#define QM_MCR_RESULT_OK 0xf0
#define QM_MCR_RESULT_ERR_FQID 0xf1
#define QM_MCR_RESULT_ERR_FQSTATE 0xf2
#define QM_MCR_RESULT_ERR_NOTEMPTY 0xf3 /* OOS fails if FQ is !empty */
#define QM_MCR_RESULT_ERR_BADCHANNEL 0xf4
#define QM_MCR_RESULT_PENDING 0xf8
#define QM_MCR_RESULT_ERR_BADCOMMAND 0xff
#define QM_MCR_NP_STATE_FE 0x10
#define QM_MCR_NP_STATE_R 0x08
#define QM_MCR_NP_STATE_MASK 0x07 /* Reads FQD::STATE; */
#define QM_MCR_NP_STATE_OOS 0x00
#define QM_MCR_NP_STATE_RETIRED 0x01
#define QM_MCR_NP_STATE_TEN_SCHED 0x02
#define QM_MCR_NP_STATE_TRU_SCHED 0x03
#define QM_MCR_NP_STATE_PARKED 0x04
#define QM_MCR_NP_STATE_ACTIVE 0x05
#define QM_MCR_NP_PTR_MASK 0x07ff /* for RA[12] & OD[123] */
#define QM_MCR_NP_RA1_NRA(v) (((v) >> 14) & 0x3) /* FQD::NRA */
#define QM_MCR_NP_RA2_IT(v) (((v) >> 14) & 0x1) /* FQD::IT */
#define QM_MCR_NP_OD1_NOD(v) (((v) >> 14) & 0x3) /* FQD::NOD */
#define QM_MCR_NP_OD3_NPC(v) (((v) >> 14) & 0x3) /* FQD::NPC */
#define QM_MCR_FQS_ORLPRESENT 0x02 /* ORL fragments to come */
#define QM_MCR_FQS_NOTEMPTY 0x01 /* FQ has enqueued frames */
/* This extracts the state for congestion group 'n' from a query response.
 * Eg.
 *	struct qm_mc_result *res = [...];
 *	printf("congestion group %d congestion state: %d\n", cgr,
 *		QM_MCR_QUERYCONGESTION(&res->querycongestion.state, cgr));
 */
#define __CGR_WORD(num) (num >> 5)
#define __CGR_SHIFT(num) (num & 0x1f)
#define __CGR_NUM (sizeof(struct __qm_mcr_querycongestion) << 3)
static inline int QM_MCR_QUERYCONGESTION(struct __qm_mcr_querycongestion *p,
					 u8 cgr)
{
	return p->state[__CGR_WORD(cgr)] & (0x80000000 >> __CGR_SHIFT(cgr));
}
/* Portal and Frame Queues */
/* Represents a managed portal */
struct qman_portal;

/*
 * This object type represents QMan frame queue descriptors (FQD), it is
 * cacheline-aligned, and initialised by qman_create_fq(). The structure is
 * defined further down.
 */
struct qman_fq;

/*
 * This object type represents a QMan congestion group, it is defined further
 * down.
 */
struct qman_cgr;

/*
 * This enum, and the callback type that returns it, are used when handling
 * dequeued frames via DQRR. Note that for "null" callbacks registered with the
 * portal object (for handling dequeues that do not demux because context_b is
 * NULL), the return value *MUST* be qman_cb_dqrr_consume.
 */
enum qman_cb_dqrr_result {
	/* DQRR entry can be consumed */
	qman_cb_dqrr_consume,
	/* Like _consume, but requests parking - FQ must be held-active */
	qman_cb_dqrr_park,
	/* Does not consume, for DCA mode only. This allows out-of-order
	 * consumes by explicit calls to qman_dca() and/or the use of implicit
	 * DCA via EQCR entries.
	 */
	qman_cb_dqrr_defer,
	/*
	 * Stop processing without consuming this ring entry. Exits the current
	 * qman_p_poll_dqrr() or interrupt-handling, as appropriate. If within
	 * an interrupt handler, the callback would typically call
	 * qman_irqsource_remove(QM_PIRQ_DQRI) before returning this value,
	 * otherwise the interrupt will reassert immediately.
	 */
	qman_cb_dqrr_stop,
	/* Like qman_cb_dqrr_stop, but consumes the current entry. */
	qman_cb_dqrr_consume_stop
};
typedef enum qman_cb_dqrr_result (*qman_cb_dqrr)(struct qman_portal *qm,
					struct qman_fq *fq,
					const struct qm_dqrr_entry *dqrr);

typedef enum qman_cb_dqrr_result (*qman_dpdk_cb_dqrr)(void *event,
					struct qman_portal *qm,
					struct qman_fq *fq,
					const struct qm_dqrr_entry *dqrr,
					void **bufs);

/* This callback type is used when handling buffers in dpdk pull mode */
typedef void (*qman_dpdk_pull_cb_dqrr)(struct qman_fq **fq,
					struct qm_dqrr_entry **dqrr,
					void **bufs,
					int num_bufs);

typedef void (*qman_dpdk_cb_prepare)(struct qm_dqrr_entry *dq, void **bufs);

/*
 * This callback type is used when handling ERNs, FQRNs and FQRLs via MR. They
 * are always consumed after the callback returns.
 */
typedef void (*qman_cb_mr)(struct qman_portal *qm, struct qman_fq *fq,
			   const struct qm_mr_entry *msg);

/* This callback type is used when handling DCP ERNs */
typedef void (*qman_cb_dc_ern)(struct qman_portal *qm,
			       const struct qm_mr_entry *msg);
/*
 * s/w-visible states. Ie. tentatively scheduled + truly scheduled + active +
 * held-active + held-suspended are just "sched". Things like "retired" will not
 * be assumed until it is complete (ie. QMAN_FQ_STATE_CHANGING is set until
 * then, to indicate it's completing and to gate attempts to retry the retire
 * command). Note, park commands do not set QMAN_FQ_STATE_CHANGING because it's
 * technically impossible in the case of enqueue DCAs (which refer to DQRR ring
 * index rather than the FQ that ring entry corresponds to), so repeated park
 * commands are allowed (if you're silly enough to try) but won't change FQ
 * state, and the resulting park notifications move FQs from "sched" to
 * "parked".
 */
enum qman_fq_state {
	qman_fq_state_oos,
	qman_fq_state_parked,
	qman_fq_state_sched,
	qman_fq_state_retired
};
/*
 * Frame queue objects (struct qman_fq) are stored within memory passed to
 * qman_create_fq(), as this allows stashing of caller-provided demux callback
 * pointers at no extra cost to stashing of (driver-internal) FQ state. If the
 * caller wishes to add per-FQ state and have it benefit from dequeue-stashing,
 * they should;
 *
 * (a) extend the qman_fq structure with their state; eg.
 *
 *	// myfq is allocated and driver_fq callbacks filled in;
 *	struct my_fq {
 *		struct qman_fq base;
 *		int an_extra_field;
 *		[ ... add other fields to be associated with each FQ ...]
 *	} *myfq = some_my_fq_allocator();
 *	struct qman_fq *fq = qman_create_fq(fqid, flags, &myfq->base);
 *
 *	// in a dequeue callback, access extra fields from 'fq' via a cast;
 *	struct my_fq *myfq = (struct my_fq *)fq;
 *	do_something_with(myfq->an_extra_field);
 *
 * (b) when and if configuring the FQ for context stashing, specify how ever
 *     many cachelines are required to stash 'struct my_fq', to accelerate not
 *     only the QMan driver but the callback as well.
 */

struct qman_fq_cb {
	union { /* for dequeued frames */
		qman_dpdk_cb_dqrr dqrr_dpdk_cb;
		qman_dpdk_pull_cb_dqrr dqrr_dpdk_pull_cb;
		qman_cb_dqrr dqrr;
	};
	qman_dpdk_cb_prepare dqrr_prepare;
	qman_cb_mr ern;	/* for s/w ERNs */
	qman_cb_mr fqs;	/* frame-queue state changes */
};

struct qman_fq {
	/* Caller of qman_create_fq() provides these demux callbacks */
	struct qman_fq_cb cb;
	/* DPDK Interface */
	struct rte_event ev;
	/* affined portal in case of static queue */
	struct qman_portal *qp;
	struct dpaa_bp_info *bp_array;
	volatile unsigned long flags;
	enum qman_fq_state state;
	struct rb_node node;
#ifdef CONFIG_FSL_QMAN_FQ_LOOKUP
	void **qman_fq_lookup_table;
#endif
};
/*
 * This callback type is used when handling congestion group entry/exit.
 * 'congested' is non-zero on congestion-entry, and zero on congestion-exit.
 */
typedef void (*qman_cb_cgr)(struct qman_portal *qm,
			    struct qman_cgr *cgr, int congested);
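/*
 * Illustrative sketch (not part of the original header): a minimal congestion
 * callback matching the qman_cb_cgr type above.
 *
 *	static void my_cscn_cb(struct qman_portal *qm, struct qman_cgr *cgr,
 *			       int congested)
 *	{
 *		printf("CGR %u %s congestion\n", cgr->cgrid,
 *		       congested ? "entered" : "exited");
 *	}
 */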
struct qman_cgr {
	/* Set these prior to qman_create_cgr() */
	u32 cgrid;	/* 0..255, but u32 to allow specials like -1, 256, etc.*/
	qman_cb_cgr cb;
	/* These are private to the driver */
	u16 chan;	/* portal channel this object is created on */
	struct list_head node;
};
/* Flags to qman_create_fq() */
#define QMAN_FQ_FLAG_NO_ENQUEUE 0x00000001 /* can't enqueue */
#define QMAN_FQ_FLAG_NO_MODIFY 0x00000002 /* can only enqueue */
#define QMAN_FQ_FLAG_TO_DCPORTAL 0x00000004 /* consumed by CAAM/PME/Fman */
#define QMAN_FQ_FLAG_LOCKED 0x00000008 /* multi-core locking */
#define QMAN_FQ_FLAG_AS_IS 0x00000010 /* query h/w state */
#define QMAN_FQ_FLAG_DYNAMIC_FQID 0x00000020 /* (de)allocate fqid */

/* Flags to qman_destroy_fq() */
#define QMAN_FQ_DESTROY_PARKED 0x00000001 /* FQ can be parked or OOS */

/* Flags from qman_fq_state() */
#define QMAN_FQ_STATE_CHANGING 0x80000000 /* 'state' is changing */
#define QMAN_FQ_STATE_NE 0x40000000 /* retired FQ isn't empty */
#define QMAN_FQ_STATE_ORL 0x20000000 /* retired FQ has ORL */
#define QMAN_FQ_STATE_BLOCKOOS 0xe0000000 /* if any are set, no OOS */
#define QMAN_FQ_STATE_CGR_EN 0x10000000 /* CGR enabled */
#define QMAN_FQ_STATE_VDQCR 0x08000000 /* being volatile dequeued */
/* Flags to qman_init_fq() */
#define QMAN_INITFQ_FLAG_SCHED 0x00000001 /* schedule rather than park */
#define QMAN_INITFQ_FLAG_LOCAL 0x00000004 /* set dest portal */

/* Flags to qman_enqueue(). NB, the strange numbering is to align with hardware,
 * bit-wise. (NB: the PME API is sensitive to these precise numberings too, so
 * any change here should be audited in PME.)
 */
#define QMAN_ENQUEUE_FLAG_WATCH_CGR 0x00080000 /* watch congestion state */
#define QMAN_ENQUEUE_FLAG_DCA 0x00008000 /* perform enqueue-DCA */
#define QMAN_ENQUEUE_FLAG_DCA_PARK 0x00004000 /* If DCA, requests park */
#define QMAN_ENQUEUE_FLAG_DCA_PTR(p) /* If DCA, p is DQRR entry */ \
		(((u32)(p) << 2) & 0x00000f00)
#define QMAN_ENQUEUE_FLAG_C_GREEN 0x00000000 /* choose one C_*** flag */
#define QMAN_ENQUEUE_FLAG_C_YELLOW 0x00000008
#define QMAN_ENQUEUE_FLAG_C_RED 0x00000010
#define QMAN_ENQUEUE_FLAG_C_OVERRIDE 0x00000018
/* For the ORP-specific qman_enqueue_orp() variant;
 * - this flag indicates "Not Last In Sequence", ie. all but the final fragment
 *   of a given sequence.
 */
#define QMAN_ENQUEUE_FLAG_NLIS 0x01000000
/* - this flag performs no enqueue but fills in an ORP sequence number that
 * would otherwise block it (eg. if a frame has been dropped).
 */
#define QMAN_ENQUEUE_FLAG_HOLE 0x02000000
/* - this flag performs no enqueue but advances NESN to the given sequence
 * number.
 */
#define QMAN_ENQUEUE_FLAG_NESN 0x04000000

/* Flags to qman_modify_cgr() */
#define QMAN_CGR_FLAG_USE_INIT 0x00000001
#define QMAN_CGR_MODE_FRAME 0x00000001

#ifdef CONFIG_FSL_QMAN_FQ_LOOKUP
void qman_set_fq_lookup_table(void **table);
#endif
/**
 * qman_get_portal_index - get portal configuration index
 */
int qman_get_portal_index(void);

u32 qman_portal_dequeue(struct rte_event ev[], unsigned int poll_limit,
			void **bufs, struct qman_portal *q);

/**
 * qman_irqsource_add - add processing sources to be interrupt-driven
 * @bits: bitmask of QM_PIRQ_**I processing sources
 *
 * Adds processing sources that should be interrupt-driven (rather than
 * processed via qman_poll_***() functions). Returns zero for success, or
 * -EINVAL if the current CPU is sharing a portal hosted on another CPU.
 */
int qman_irqsource_add(u32 bits);

/**
 * qman_irqsource_remove - remove processing sources from being interrupt-driven
 * @bits: bitmask of QM_PIRQ_**I processing sources
 *
 * Removes processing sources from being interrupt-driven, so that they will
 * instead be processed via qman_poll_***() functions. Returns zero for success,
 * or -EINVAL if the current CPU is sharing a portal hosted on another CPU.
 */
int qman_irqsource_remove(u32 bits);
/**
 * qman_affine_channel - return the channel ID of a portal
 * @cpu: the cpu whose affine portal is the subject of the query
 *
 * If @cpu is -1, the affine portal for the current CPU will be used. It is a
 * bug to call this function for any value of @cpu (other than -1) that is not a
 * member of the cpu mask.
 */
u16 qman_affine_channel(int cpu);

unsigned int qman_portal_poll_rx(unsigned int poll_limit,
				 void **bufs, struct qman_portal *q);
/**
 * qman_set_vdq - Issue a volatile dequeue command
 * @fq: Frame Queue on which the volatile dequeue command is issued
 * @num: Number of Frames requested for volatile dequeue
 * @vdqcr_flags: QM_VDQCR_EXACT flag for the VDQCR command
 *
 * This function will issue a volatile dequeue command to the QMAN.
 */
int qman_set_vdq(struct qman_fq *fq, u16 num, uint32_t vdqcr_flags);

/**
 * qman_dequeue - Get the DQRR entry after volatile dequeue command
 * @fq: Frame Queue on which the volatile dequeue command is issued
 *
 * This function will return the DQRR entry after a volatile dequeue command
 * is issued. It will return NULL once there is no packet available on
 * the DQRR.
 */
struct qm_dqrr_entry *qman_dequeue(struct qman_fq *fq);

/**
 * qman_dqrr_consume - Consume the DQRR entry after volatile dequeue
 * @fq: Frame Queue on which the volatile dequeue command is issued
 * @dq: DQRR entry to consume. This is the one which is provided by the
 * 'qman_dequeue' command.
 *
 * This will consume the DQRR entry and make it available for the next
 * volatile dequeue.
 */
void qman_dqrr_consume(struct qman_fq *fq,
		       struct qm_dqrr_entry *dq);
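/*
 * Illustrative sketch (not part of the original header): a pull-mode loop
 * built from the three calls above, assuming 'fq' is an already-created frame
 * queue and that qman_set_vdq() returns zero on success.
 *
 *	struct qm_dqrr_entry *dq;
 *	if (!qman_set_vdq(fq, 4, QM_VDQCR_EXACT)) {
 *		while ((dq = qman_dequeue(fq)) != NULL) {
 *			// process dq->fd here
 *			qman_dqrr_consume(fq, dq);
 *		}
 *	}
 */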
/**
 * qman_poll_dqrr - process DQRR (fast-path) entries
 * @limit: the maximum number of DQRR entries to process
 *
 * Use of this function requires that DQRR processing not be interrupt-driven.
 * Ie. the value returned by qman_irqsource_get() should not include
 * QM_PIRQ_DQRI. If the current CPU is sharing a portal hosted on another CPU,
 * this function will return -EINVAL, otherwise the return value is >=0 and
 * represents the number of DQRR entries processed.
 */
int qman_poll_dqrr(unsigned int limit);

/**
 * qman_poll - process any portal processing not triggered by interrupts
 *
 * Dispatcher logic on a cpu can use this to trigger any maintenance of the
 * affine portal. There are two classes of portal processing in question;
 * fast-path (which involves demuxing dequeue ring (DQRR) entries and tracking
 * enqueue ring (EQCR) consumption), and slow-path (which involves EQCR
 * thresholds, congestion state changes, etc). This function does whatever
 * processing is not triggered by interrupts.
 *
 * Note, if DQRR and some slow-path processing are poll-driven (rather than
 * interrupt-driven) then this function uses a heuristic to determine how often
 * to run slow-path processing - as slow-path processing introduces at least a
 * minimum latency each time it is run, whereas fast-path (DQRR) processing is
 * close to zero-cost if there is no work to be done.
 */
void qman_poll(void);
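/*
 * Illustrative sketch (not part of the original header): a polled
 * run-to-completion loop on the affine portal, mixing fast-path DQRR
 * processing with the slow-path heuristic described above.
 *
 *	for (;;) {
 *		int n = qman_poll_dqrr(16);	// up to 16 DQRR entries
 *		qman_poll();			// occasional slow-path work
 *		if (n <= 0)
 *			rte_pause();		// nothing dequeued this round
 *	}
 */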
/**
 * qman_stop_dequeues - Stop h/w dequeuing to the s/w portal
 *
 * Disables DQRR processing of the portal. This is reference-counted, so
 * qman_start_dequeues() must be called as many times as qman_stop_dequeues() to
 * truly re-enable dequeuing.
 */
void qman_stop_dequeues(void);

/**
 * qman_start_dequeues - (Re)start h/w dequeuing to the s/w portal
 *
 * Enables DQRR processing of the portal. This is reference-counted, so
 * qman_start_dequeues() must be called as many times as qman_stop_dequeues() to
 * truly re-enable dequeuing.
 */
void qman_start_dequeues(void);

/**
 * qman_static_dequeue_add - Add pool channels to the portal SDQCR
 * @pools: bit-mask of pool channels, using QM_SDQCR_CHANNELS_POOL(n)
 *
 * Adds a set of pool channels to the portal's static dequeue command register
 * (SDQCR). The requested pools are limited to those the portal has dequeue
 * access to.
 */
void qman_static_dequeue_add(u32 pools, struct qman_portal *qm);

/**
 * qman_static_dequeue_del - Remove pool channels from the portal SDQCR
 * @pools: bit-mask of pool channels, using QM_SDQCR_CHANNELS_POOL(n)
 *
 * Removes a set of pool channels from the portal's static dequeue command
 * register (SDQCR). The requested pools are limited to those the portal has
 * dequeue access to.
 */
void qman_static_dequeue_del(u32 pools, struct qman_portal *qp);

/**
 * qman_static_dequeue_get - return the portal's current SDQCR
 *
 * Returns the portal's current static dequeue command register (SDQCR). The
 * entire register is returned, so if only the currently-enabled pool channels
 * are desired, mask the return value with QM_SDQCR_CHANNELS_POOL_MASK.
 */
u32 qman_static_dequeue_get(struct qman_portal *qp);
/**
 * qman_dca - Perform a Discrete Consumption Acknowledgment
 * @dq: the DQRR entry to be consumed
 * @park_request: indicates whether the held-active @fq should be parked
 *
 * Only allowed in DCA-mode portals, for DQRR entries whose handler callback had
 * previously returned 'qman_cb_dqrr_defer'. NB, as with the other APIs, this
 * does not take a 'portal' argument but implies the core affine portal from the
 * cpu that is currently executing the function. For reasons of locking, this
 * function must be called from the same CPU as that which processed the DQRR
 * entry in the first place.
 */
void qman_dca(const struct qm_dqrr_entry *dq, int park_request);

/**
 * qman_dca_index - Perform a Discrete Consumption Acknowledgment
 * @index: the DQRR index to be consumed
 * @park_request: indicates whether the held-active @fq should be parked
 *
 * Only allowed in DCA-mode portals, for DQRR entries whose handler callback had
 * previously returned 'qman_cb_dqrr_defer'. NB, as with the other APIs, this
 * does not take a 'portal' argument but implies the core affine portal from the
 * cpu that is currently executing the function. For reasons of locking, this
 * function must be called from the same CPU as that which processed the DQRR
 * entry in the first place.
 */
void qman_dca_index(u8 index, int park_request);

/**
 * qman_eqcr_is_empty - Determine if portal's EQCR is empty
 *
 * For use in situations where a cpu-affine caller needs to determine when all
 * enqueues for the local portal have been processed by Qman but can't use the
 * QMAN_ENQUEUE_FLAG_WAIT_SYNC flag to do this from the final qman_enqueue().
 * The function forces tracking of EQCR consumption (which normally doesn't
 * happen until enqueue processing needs to find space to put new enqueue
 * commands), and returns zero if the ring still has unprocessed entries,
 * non-zero if it is empty.
 */
int qman_eqcr_is_empty(void);
/**
 * qman_set_dc_ern - Set the handler for DCP enqueue rejection notifications
 * @handler: callback for processing DCP ERNs
 * @affine: whether this handler is specific to the locally affine portal
 *
 * If a hardware block's interface to Qman (ie. its direct-connect portal, or
 * DCP) is configured not to receive enqueue rejections, then any enqueues
 * through that DCP that are rejected will be sent to a given software portal.
 * If @affine is non-zero, then this handler will only be used for DCP ERNs
 * received on the portal affine to the current CPU. If multiple CPUs share a
 * portal and they all call this function, they will be setting the handler for
 * the same portal! If @affine is zero, then this handler will be global to all
 * portals handled by this instance of the driver. Only those portals that do
 * not have their own affine handler will use the global handler.
 */
void qman_set_dc_ern(qman_cb_dc_ern handler, int affine);
/**
 * qman_create_fq - Allocates a FQ
 * @fqid: the index of the FQD to encapsulate, must be "Out of Service"
 * @flags: bit-mask of QMAN_FQ_FLAG_*** options
 * @fq: memory for storing the 'fq', with callbacks filled in
 *
 * Creates a frame queue object for the given @fqid, unless the
 * QMAN_FQ_FLAG_DYNAMIC_FQID flag is set in @flags, in which case a FQID is
 * dynamically allocated (or the function fails if none are available). Once
 * created, the caller should not touch the memory at 'fq' except as extended to
 * adjacent memory for user-defined fields (see the definition of "struct
 * qman_fq" for more info). NO_MODIFY is only intended for enqueuing to
 * pre-existing frame-queues that aren't to be otherwise interfered with, it
 * prevents all other modifications to the frame queue. The TO_DCPORTAL flag
 * causes the driver to honour any contextB modifications requested in the
 * qm_init_fq() API, as this indicates the frame queue will be consumed by a
 * direct-connect portal (PME, CAAM, or Fman). When frame queues are consumed by
 * software portals, the contextB field is controlled by the driver and can't be
 * modified by the caller. If the AS_IS flag is specified, management commands
 * will be used on portal @p to query state for frame queue @fqid and construct
 * a frame queue object based on that, rather than assuming/requiring that it be
 * Out of Service.
 */
int qman_create_fq(u32 fqid, u32 flags, struct qman_fq *fq);
/**
 * qman_destroy_fq - Deallocates a FQ
 * @fq: the frame queue object to release
 * @flags: bit-mask of QMAN_FQ_FREE_*** options
 *
 * The memory for this frame queue object ('fq' provided in qman_create_fq()) is
 * not deallocated but the caller regains ownership, to do with as desired. The
 * FQ must be in the 'out-of-service' state unless the QMAN_FQ_FREE_PARKED flag
 * is specified, in which case it may also be in the 'parked' state.
 */
void qman_destroy_fq(struct qman_fq *fq, u32 flags);

/**
 * qman_fq_fqid - Queries the frame queue ID of a FQ object
 * @fq: the frame queue object to query
 */
u32 qman_fq_fqid(struct qman_fq *fq);

/**
 * qman_fq_state - Queries the state of a FQ object
 * @fq: the frame queue object to query
 * @state: pointer to state enum to return the FQ scheduling state
 * @flags: pointer to state flags to receive QMAN_FQ_STATE_*** bitmask
 *
 * Queries the state of the FQ object, without performing any h/w commands.
 * This captures the state, as seen by the driver, at the time the function
 * executes.
 */
void qman_fq_state(struct qman_fq *fq, enum qman_fq_state *state, u32 *flags);
/**
 * qman_init_fq - Initialises FQ fields, leaves the FQ "parked" or "scheduled"
 * @fq: the frame queue object to modify, must be 'parked' or new.
 * @flags: bit-mask of QMAN_INITFQ_FLAG_*** options
 * @opts: the FQ-modification settings, as defined in the low-level API
 *
 * The @opts parameter comes from the low-level portal API. Select
 * QMAN_INITFQ_FLAG_SCHED in @flags to cause the frame queue to be scheduled
 * rather than parked. NB, @opts can be NULL.
 *
 * Note that some fields and options within @opts may be ignored or overwritten
 * by the driver;
 * 1. the 'count' and 'fqid' fields are always ignored (this operation only
 * affects one frame queue: @fq).
 * 2. the QM_INITFQ_WE_CONTEXTB option of the 'we_mask' field and the associated
 * 'fqd' structure's 'context_b' field are sometimes overwritten;
 * - if @fq was not created with QMAN_FQ_FLAG_TO_DCPORTAL, then context_b is
 * initialised to a value used by the driver for demux.
 * - if context_b is initialised for demux, so is context_a in case stashing
 * is requested (see item 4).
 * (So caller control of context_b is only possible for TO_DCPORTAL frame queue
 * objects.)
 * 3. if @flags contains QMAN_INITFQ_FLAG_LOCAL, the 'fqd' structure's
 * 'dest::channel' field will be overwritten to match the portal used to issue
 * the command. If the WE_DESTWQ write-enable bit had already been set by the
 * caller, the channel workqueue will be left as-is, otherwise the write-enable
 * bit is set and the workqueue is set to a default of 4. If the "LOCAL" flag
 * isn't set, the destination channel/workqueue fields and the write-enable bit
 * are left as-is.
 * 4. if the driver overwrites context_a/b for demux, then if
 * QM_INITFQ_WE_CONTEXTA is set, the driver will only overwrite
 * context_a.address fields and will leave the stashing fields provided by the
 * user alone, otherwise it will zero out the context_a.stashing fields.
 */
int qman_init_fq(struct qman_fq *fq, u32 flags, struct qm_mcc_initfq *opts);
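/*
 * Illustrative sketch (not part of the original header): creating a frame
 * queue with a dynamically allocated FQID and scheduling it locally.
 * 'my_dqrr_cb' and 'some_fq_allocator' are hypothetical; the destination
 * channel/workqueue setup is left to the LOCAL flag.
 *
 *	struct qm_mcc_initfq opts;
 *	struct qman_fq *fq = some_fq_allocator();
 *	fq->cb.dqrr = my_dqrr_cb;
 *	if (qman_create_fq(0, QMAN_FQ_FLAG_DYNAMIC_FQID, fq))
 *		return;
 *	memset(&opts, 0, sizeof(opts));
 *	opts.we_mask = QM_INITFQ_WE_FQCTRL;
 *	opts.fqd.fq_ctrl = QM_FQCTRL_HOLDACTIVE;
 *	qman_init_fq(fq, QMAN_INITFQ_FLAG_SCHED | QMAN_INITFQ_FLAG_LOCAL,
 *		     &opts);
 */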
/**
 * qman_schedule_fq - Schedules a FQ
 * @fq: the frame queue object to schedule, must be 'parked'
 *
 * Schedules the frame queue, which must be Parked, which takes it to
 * Tentatively-Scheduled or Truly-Scheduled depending on its fill-level.
 */
int qman_schedule_fq(struct qman_fq *fq);

/**
 * qman_retire_fq - Retires a FQ
 * @fq: the frame queue object to retire
 * @flags: FQ flags (as per qman_fq_state) if retirement completes immediately
 *
 * Retires the frame queue. This returns zero if it succeeds immediately, +1 if
 * the retirement was started asynchronously, otherwise it returns negative for
 * failure. When this function returns zero, @flags is set to indicate whether
 * the retired FQ is empty and/or whether it has any ORL fragments (to show up
 * as ERNs). Otherwise the corresponding flags will be known when a subsequent
 * FQRN message shows up on the portal's message ring.
 *
 * NB, if the retirement is asynchronous (the FQ was in the Truly Scheduled or
 * Active state), the completion will be via the message ring as a FQRN - but
 * the corresponding callback may occur before this function returns!! Ie. the
 * caller should be prepared to accept the callback as the function is called,
 * not only once it has returned.
 */
int qman_retire_fq(struct qman_fq *fq, u32 *flags);

/**
 * qman_oos_fq - Puts a FQ "out of service"
 * @fq: the frame queue object to be put out-of-service, must be 'retired'
 *
 * The frame queue must be retired and empty, and if any order restoration list
 * was released as ERNs at the time of retirement, they must all be consumed.
 */
int qman_oos_fq(struct qman_fq *fq);
/**
 * qman_fq_flow_control - Set the XON/XOFF state of a FQ
 * @fq: the frame queue object to be set to XON/XOFF state; must not be in the
 * 'oos', 'retired' or 'parked' state
 * @xon: boolean to set fq in XON or XOFF state
 *
 * The frame queue should be in the Tentatively Scheduled or Truly Scheduled
 * state, otherwise the IFSI interrupt will be asserted.
 */
int qman_fq_flow_control(struct qman_fq *fq, int xon);

/**
 * qman_query_fq - Queries FQD fields (via h/w query command)
 * @fq: the frame queue object to be queried
 * @fqd: storage for the queried FQD fields
 */
int qman_query_fq(struct qman_fq *fq, struct qm_fqd *fqd);
/**
 * qman_query_fq_has_pkts - Queries non-programmable FQD fields and returns '1'
 * if packets are in the frame queue, or '0' if the frame queue is empty.
 * @fq: the frame queue object to be queried
 */
int qman_query_fq_has_pkts(struct qman_fq *fq);

/**
 * qman_query_fq_np - Queries non-programmable FQD fields
 * @fq: the frame queue object to be queried
 * @np: storage for the queried FQD fields
 */
int qman_query_fq_np(struct qman_fq *fq, struct qm_mcr_queryfq_np *np);

/**
 * qman_query_fq_frm_cnt - Queries fq frame count
 * @fq: the frame queue object to be queried
 * @frm_cnt: number of frames in the queue
 */
int qman_query_fq_frm_cnt(struct qman_fq *fq, u32 *frm_cnt);

/**
 * qman_query_wq - Queries work queue lengths
 * @query_dedicated: If non-zero, query length of WQs in the channel dedicated
 * to this software portal. Otherwise, query length of WQs in a
 * channel specified in wq.
 * @wq: storage for the queried WQs lengths. Also specifies the channel to
 * query if query_dedicated is zero.
 */
int qman_query_wq(u8 query_dedicated, struct qm_mcr_querywq *wq);
/**
 * qman_volatile_dequeue - Issue a volatile dequeue command
 * @fq: the frame queue object to dequeue from
 * @flags: a bit-mask of QMAN_VOLATILE_FLAG_*** options
 * @vdqcr: bit mask of QM_VDQCR_*** options, as per qm_dqrr_vdqcr_set()
 *
 * Attempts to lock access to the portal's VDQCR volatile dequeue functionality.
 * The function will block and sleep if QMAN_VOLATILE_FLAG_WAIT is specified and
 * the VDQCR is already in use, otherwise returns non-zero for failure. If
 * QMAN_VOLATILE_FLAG_FINISH is specified, the function will only return once
 * the VDQCR command has finished executing (ie. once the callback for the last
 * DQRR entry resulting from the VDQCR command has been called). If not using
 * the FINISH flag, completion can be determined either by detecting the
 * presence of the QM_DQRR_STAT_UNSCHEDULED and QM_DQRR_STAT_DQCR_EXPIRED bits
 * in the "stat" field of the "struct qm_dqrr_entry" passed to the FQ's dequeue
 * callback, or by waiting for the QMAN_FQ_STATE_VDQCR bit to disappear from the
 * "flags" retrieved from qman_fq_state().
 */
int qman_volatile_dequeue(struct qman_fq *fq, u32 flags, u32 vdqcr);
/**
 * qman_enqueue - Enqueue a frame to a frame queue
 * @fq: the frame queue object to enqueue to
 * @fd: a descriptor of the frame to be enqueued
 * @flags: bit-mask of QMAN_ENQUEUE_FLAG_*** options
 *
 * Fills an entry in the EQCR of portal @qm to enqueue the frame described by
 * @fd. The descriptor details are copied from @fd to the EQCR entry, the 'pid'
 * field is ignored. The return value is non-zero on error, such as ring full
 * (and FLAG_WAIT not specified), congestion avoidance (FLAG_WATCH_CGR
 * specified), etc. If the ring is full and FLAG_WAIT is specified, this
 * function will block. If FLAG_INTERRUPT is set, the EQCI bit of the portal
 * interrupt will assert when Qman consumes the EQCR entry (subject to "status
 * disable", "enable", and "inhibit" registers). If FLAG_DCA is set, Qman will
 * perform an implied "discrete consumption acknowledgment" on the dequeue
 * ring's (DQRR) entry, at the ring index specified by the FLAG_DCA_IDX(x)
 * macro. (As an alternative to issuing explicit DCA actions on DQRR entries,
 * this implicit DCA can delay the release of a "held active" frame queue
 * corresponding to a DQRR entry until Qman consumes the EQCR entry - providing
 * order-preservation semantics in packet-forwarding scenarios.) If FLAG_DCA is
 * set, then FLAG_DCA_PARK can also be set to imply that the DQRR consumption
 * acknowledgment should "park request" the "held active" frame queue. Ie.
 * when the portal eventually releases that frame queue, it will be left in the
 * Parked state rather than Tentatively Scheduled or Truly Scheduled. If the
 * portal is watching congestion groups, the QMAN_ENQUEUE_FLAG_WATCH_CGR flag
 * is requested, and the FQ is a member of a congestion group, then this
 * function returns -EAGAIN if the congestion group is currently congested.
 * Note, this does not eliminate ERNs, as the async interface means we can be
 * sending enqueue commands to an un-congested FQ that becomes congested before
 * the enqueue commands are processed, but it does minimise needless thrashing
 * of an already busy hardware resource by throttling many of the to-be-dropped
 * enqueues "at the source".
 */
int qman_enqueue(struct qman_fq *fq, const struct qm_fd *fd, u32 flags);
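/*
 * Illustrative sketch (not part of the original header): a congestion-aware
 * enqueue, assuming -EAGAIN is only returned for the congested case described
 * above.
 *
 *	int ret = qman_enqueue(fq, &fd, QMAN_ENQUEUE_FLAG_WATCH_CGR);
 *	if (ret == -EAGAIN) {
 *		// congestion group congested: drop or retry later
 *	} else if (ret) {
 *		// other failure, eg. EQCR full (FLAG_WAIT not specified)
 *	}
 */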
int qman_enqueue_multi(struct qman_fq *fq, const struct qm_fd *fd, u32 *flags,
		       int frames_to_send);

/**
 * qman_enqueue_multi_fq - Enqueue multiple frames to their respective frame
 * queues
 * @fq[]: Array of frame queue objects to enqueue to
 * @fd: pointer to first descriptor of frame to be enqueued
 * @frames_to_send: number of frames to be sent.
 *
 * This API is similar to qman_enqueue_multi(), but it takes fd which needs
 * to be processed by different frame queues.
 */
int
qman_enqueue_multi_fq(struct qman_fq *fq[], const struct qm_fd *fd,
		      int frames_to_send);

typedef int (*qman_cb_precommit) (void *arg);
/**
 * qman_enqueue_orp - Enqueue a frame to a frame queue using an ORP
 * @fq: the frame queue object to enqueue to
 * @fd: a descriptor of the frame to be enqueued
 * @flags: bit-mask of QMAN_ENQUEUE_FLAG_*** options
 * @orp: the frame queue object used as an order restoration point.
 * @orp_seqnum: the sequence number of this frame in the order restoration path
 *
 * Similar to qman_enqueue(), but with the addition of an Order Restoration
 * Point (@orp) and corresponding sequence number (@orp_seqnum) for this
 * enqueue operation to employ order restoration. Each frame queue object acts
 * as an Order Definition Point (ODP) by providing each frame dequeued from it
 * with an incrementing sequence number, this value is generally ignored unless
 * that sequence of dequeued frames will need order restoration later. Each
 * frame queue object also encapsulates an Order Restoration Point (ORP), which
 * is a re-assembly context for re-ordering frames relative to their sequence
 * numbers as they are enqueued. The ORP does not have to be within the frame
 * queue that receives the enqueued frame, in fact it is usually the frame
 * queue from which the frames were originally dequeued. For the purposes of
 * order restoration, multiple frames (or "fragments") can be enqueued for a
 * single sequence number by setting the QMAN_ENQUEUE_FLAG_NLIS flag for all
 * enqueues except the final fragment of a given sequence number. Ordering
 * between sequence numbers is guaranteed, even if fragments of different
 * sequence numbers are interlaced with one another. Fragments of the same
 * sequence number will retain the order in which they are enqueued. If no
 * enqueue is to be performed, QMAN_ENQUEUE_FLAG_HOLE indicates that the given
 * sequence number is to be "skipped" by the ORP logic (eg. if a frame has been
 * dropped from a sequence), or QMAN_ENQUEUE_FLAG_NESN indicates that the given
 * sequence number should become the ORP's "Next Expected Sequence Number".
 *
 * Side note: a frame queue object can be used purely as an ORP, without
 * carrying any frames at all. Care should be taken not to deallocate a frame
 * queue object that is being actively used as an ORP, as a future allocation
 * of the frame queue object may start using the internal ORP before the
 * previous use has finished.
 */
int qman_enqueue_orp(struct qman_fq *fq, const struct qm_fd *fd, u32 flags,
		     struct qman_fq *orp, u16 orp_seqnum);
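/*
 * Illustrative sketch (not part of the original header): restoring order for
 * sequence number 'seq', enqueued as two fragments. Whether a NULL @fd is
 * accepted with FLAG_HOLE is an assumption here.
 *
 *	qman_enqueue_orp(dest_fq, &frag0, QMAN_ENQUEUE_FLAG_NLIS, orp_fq, seq);
 *	qman_enqueue_orp(dest_fq, &frag1, 0, orp_fq, seq);
 *	// skip a sequence number whose frame was dropped:
 *	qman_enqueue_orp(dest_fq, NULL, QMAN_ENQUEUE_FLAG_HOLE, orp_fq,
 *			 seq + 1);
 */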
/**
 * qman_alloc_fqid_range - Allocate a contiguous range of FQIDs
 * @result: is set by the API to the base FQID of the allocated range
 * @count: the number of FQIDs required
 * @align: required alignment of the allocated range
 * @partial: non-zero if the API can return fewer than @count FQIDs
 *
 * Returns the number of frame queues allocated, or a negative error code. If
 * @partial is non zero, the allocation request may return a smaller range of
 * FQs than requested (though alignment will be as requested). If @partial is
 * zero, the return value will either be 'count' or negative.
 */
int qman_alloc_fqid_range(u32 *result, u32 count, u32 align, int partial);
static inline int qman_alloc_fqid(u32 *result)
{
	int ret = qman_alloc_fqid_range(result, 1, 0, 0);

	return (ret > 0) ? 0 : ret;
}
/**
 * qman_release_fqid_range - Release the specified range of frame queue IDs
 * @fqid: the base FQID of the range to deallocate
 * @count: the number of FQIDs in the range
 *
 * This function can also be used to seed the allocator with ranges of FQIDs
 * that it can subsequently allocate from.
 */
void qman_release_fqid_range(u32 fqid, unsigned int count);
static inline void qman_release_fqid(u32 fqid)
{
	qman_release_fqid_range(fqid, 1);
}
void qman_seed_fqid_range(u32 fqid, unsigned int count);
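
/*
 * Illustrative sketch: seed the allocator with a board-specific FQID range
 * (using qman_seed_fqid_range() above) and then carve an aligned block out
 * of it. The numeric values and the helper name are hypothetical.
 */
static inline int example_fqid_setup(u32 *base)
{
	int ret;

	/* Make FQIDs 0x400..0x7ff available to the allocator */
	qman_seed_fqid_range(0x400, 0x400);
	/* Request 64 contiguous FQIDs, 64-aligned; fail rather than accept
	 * a partial range (@partial == 0).
	 */
	ret = qman_alloc_fqid_range(base, 64, 64, 0);
	return (ret < 0) ? ret : 0;
}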
int qman_shutdown_fq(u32 fqid);
/**
 * qman_reserve_fqid_range - Reserve the specified range of frame queue IDs
 * @fqid: the base FQID of the range to reserve
 * @count: the number of FQIDs in the range
 */
int qman_reserve_fqid_range(u32 fqid, unsigned int count);
static inline int qman_reserve_fqid(u32 fqid)
{
	return qman_reserve_fqid_range(fqid, 1);
}
/* Pool-channel management */
/**
 * qman_alloc_pool_range - Allocate a contiguous range of pool-channel IDs
 * @result: is set by the API to the base pool-channel ID of the allocated range
 * @count: the number of pool-channel IDs required
 * @align: required alignment of the allocated range
 * @partial: non-zero if the API can return fewer than @count
 *
 * Returns the number of pool-channel IDs allocated, or a negative error code.
 * If @partial is non-zero, the allocation request may return a smaller range
 * than requested (though alignment will be as requested). If @partial is zero,
 * the return value will either be 'count' or negative.
 */
int qman_alloc_pool_range(u32 *result, u32 count, u32 align, int partial);
static inline int qman_alloc_pool(u32 *result)
{
	int ret = qman_alloc_pool_range(result, 1, 0, 0);

	return (ret > 0) ? 0 : ret;
}
/**
 * qman_release_pool_range - Release the specified range of pool-channel IDs
 * @id: the base pool-channel ID of the range to deallocate
 * @count: the number of pool-channel IDs in the range
 */
void qman_release_pool_range(u32 id, unsigned int count);
static inline void qman_release_pool(u32 id)
{
	qman_release_pool_range(id, 1);
}
/**
 * qman_reserve_pool_range - Reserve the specified range of pool-channel IDs
 * @id: the base pool-channel ID of the range to reserve
 * @count: the number of pool-channel IDs in the range
 */
int qman_reserve_pool_range(u32 id, unsigned int count);
static inline int qman_reserve_pool(u32 id)
{
	return qman_reserve_pool_range(id, 1);
}
void qman_seed_pool_range(u32 id, unsigned int count);

/* CGR management */
/* -------------- */
/**
 * qman_create_cgr - Register a congestion group object
 * @cgr: the 'cgr' object, with fields filled in
 * @flags: QMAN_CGR_FLAG_* values
 * @opts: optional state of CGR settings
 *
 * Registers this object to receive congestion entry/exit callbacks on the
 * portal affine to the cpu on which this API is executed. If @opts is NULL
 * then only the callback (cgr->cb) function is registered. If @flags contains
 * QMAN_CGR_FLAG_USE_INIT, then an init hw command (which will reset any
 * unspecified parameters) will be used rather than a modify hw command (which
 * only modifies the specified parameters).
 */
int qman_create_cgr(struct qman_cgr *cgr, u32 flags,
		    struct qm_mcc_initcgr *opts);
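
/*
 * Illustrative sketch: register for congestion state change notifications on
 * the current portal, relying on the callback-only (@opts == NULL) behaviour
 * documented above. The helper and callback names are hypothetical; the
 * callback signature follows the qman_cb_cgr typedef.
 */
static inline void example_cscn_cb(struct qman_portal *qm,
				   struct qman_cgr *cgr, int congested)
{
	/* congested != 0 on congestion entry, 0 on congestion exit */
	(void)qm;
	(void)cgr;
	(void)congested;
}

static inline int example_watch_cgr(struct qman_cgr *cgr, u32 cgrid)
{
	cgr->cgrid = cgrid;	/* e.g. obtained from qman_alloc_cgrid() */
	cgr->cb = example_cscn_cb;
	/* opts == NULL: register the callback only, leave CGR state as-is */
	return qman_create_cgr(cgr, 0, NULL);
}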
/**
 * qman_create_cgr_to_dcp - Register a congestion group object to DCP portal
 * @cgr: the 'cgr' object, with fields filled in
 * @flags: QMAN_CGR_FLAG_* values
 * @dcp_portal: the DCP portal to which the cgr object is registered
 * @opts: optional state of CGR settings
 */
int qman_create_cgr_to_dcp(struct qman_cgr *cgr, u32 flags, u16 dcp_portal,
			   struct qm_mcc_initcgr *opts);
/**
 * qman_delete_cgr - Deregisters a congestion group object
 * @cgr: the 'cgr' object to deregister
 *
 * "Unplugs" this CGR object from the portal affine to the cpu on which this
 * API is executed. This must be executed on the same affine portal on which
 * it was created.
 */
int qman_delete_cgr(struct qman_cgr *cgr);
/**
 * qman_modify_cgr - Modify CGR fields
 * @cgr: the 'cgr' object to modify
 * @flags: QMAN_CGR_FLAG_* values
 * @opts: the CGR-modification settings
 *
 * The @opts parameter comes from the low-level portal API, and can be NULL.
 * Note that some fields and options within @opts may be ignored or overwritten
 * by the driver, in particular the 'cgrid' field is ignored (this operation
 * only affects the given CGR object). If @flags contains
 * QMAN_CGR_FLAG_USE_INIT, then an init hw command (which will reset any
 * unspecified parameters) will be used rather than a modify hw command (which
 * only modifies the specified parameters).
 */
int qman_modify_cgr(struct qman_cgr *cgr, u32 flags,
		    struct qm_mcc_initcgr *opts);
/**
 * qman_query_cgr - Queries CGR fields
 * @cgr: the 'cgr' object to query
 * @result: storage for the queried congestion group record
 */
int qman_query_cgr(struct qman_cgr *cgr, struct qm_mcr_querycgr *result);
/**
 * qman_query_congestion - Queries the state of all congestion groups
 * @congestion: storage for the queried state of all congestion groups
 */
int qman_query_congestion(struct qm_mcr_querycongestion *congestion);
/**
 * qman_alloc_cgrid_range - Allocate a contiguous range of CGR IDs
 * @result: is set by the API to the base CGR ID of the allocated range
 * @count: the number of CGR IDs required
 * @align: required alignment of the allocated range
 * @partial: non-zero if the API can return fewer than @count
 *
 * Returns the number of CGR IDs allocated, or a negative error code.
 * If @partial is non-zero, the allocation request may return a smaller range
 * than requested (though alignment will be as requested). If @partial is zero,
 * the return value will either be 'count' or negative.
 */
int qman_alloc_cgrid_range(u32 *result, u32 count, u32 align, int partial);
static inline int qman_alloc_cgrid(u32 *result)
{
	int ret = qman_alloc_cgrid_range(result, 1, 0, 0);

	return (ret > 0) ? 0 : ret;
}
/**
 * qman_release_cgrid_range - Release the specified range of CGR IDs
 * @id: the base CGR ID of the range to deallocate
 * @count: the number of CGR IDs in the range
 */
void qman_release_cgrid_range(u32 id, unsigned int count);
static inline void qman_release_cgrid(u32 id)
{
	qman_release_cgrid_range(id, 1);
}
/**
 * qman_reserve_cgrid_range - Reserve the specified range of CGR IDs
 * @id: the base CGR ID of the range to reserve
 * @count: the number of CGR IDs in the range
 */
int qman_reserve_cgrid_range(u32 id, unsigned int count);
static inline int qman_reserve_cgrid(u32 id)
{
	return qman_reserve_cgrid_range(id, 1);
}
void qman_seed_cgrid_range(u32 id, unsigned int count);
/**
 * qman_poll_fq_for_init - Check if an FQ has been initialised from OOS
 * @fq: the (NO_MODIFY) FQ object for the FQID that will be initialised by
 * other s/w
 *
 * In many situations, an FQID is provided for communication between s/w
 * entities, and whilst the consumer is responsible for initialising and
 * scheduling the FQ, the producer(s) generally create a wrapper FQ object
 * and only call qman_enqueue() (no FQ initialisation, scheduling, etc), ie.:
 *     qman_create_fq(..., QMAN_FQ_FLAG_NO_MODIFY, ...);
 * However, data cannot be enqueued to the FQ until it is initialised out of
 * the OOS state - this function polls for that condition. It is particularly
 * useful for users of IPC functions - each endpoint's Rx FQ is the other
 * endpoint's Tx FQ, so each side can initialise and schedule their Rx FQ
 * object and then use this API on the (NO_MODIFY) Tx FQ object in order to
 * synchronise. The function returns zero for success, +1 if the FQ is still
 * in the OOS state, or negative if there was an error.
 */
static inline int qman_poll_fq_for_init(struct qman_fq *fq)
{
	struct qm_mcr_queryfq_np np;
	int err;

	err = qman_query_fq_np(fq, &np);
	if (err)
		return err;
	if ((np.state & QM_MCR_NP_STATE_MASK) == QM_MCR_NP_STATE_OOS)
		return 1;
	return 0;
}
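
/*
 * Illustrative sketch of the IPC pattern described above: the producer wraps
 * the peer-owned FQID in a NO_MODIFY object and busy-polls until the peer has
 * initialised it out of OOS. 'tx_fqid' and the helper name are hypothetical.
 */
static inline int example_wait_for_peer_fq(u32 tx_fqid, struct qman_fq *tx_fq)
{
	int ret = qman_create_fq(tx_fqid, QMAN_FQ_FLAG_NO_MODIFY, tx_fq);

	if (ret)
		return ret;
	do {
		ret = qman_poll_fq_for_init(tx_fq);
	} while (ret == 1);	/* still OOS; keep polling */
	return ret;		/* 0 on success, negative on error */
}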
#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
/* The hardware S/G format is big-endian, so no conversion is needed */
#define cpu_to_hw_sg(x)
#define hw_sg_to_cpu(x)
#else
#define cpu_to_hw_sg(x)	__cpu_to_hw_sg(x)
#define hw_sg_to_cpu(x)	__hw_sg_to_cpu(x)

static inline void __cpu_to_hw_sg(struct qm_sg_entry *sgentry)
{
	sgentry->opaque = cpu_to_be64(sgentry->opaque);
	sgentry->val = cpu_to_be32(sgentry->val);
	sgentry->val_off = cpu_to_be16(sgentry->val_off);
}

static inline void __hw_sg_to_cpu(struct qm_sg_entry *sgentry)
{
	sgentry->opaque = be64_to_cpu(sgentry->opaque);
	sgentry->val = be32_to_cpu(sgentry->val);
	sgentry->val_off = be16_to_cpu(sgentry->val_off);
}
#endif /* __BYTE_ORDER__ */
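
/*
 * Illustrative sketch: build a single-entry S/G table in CPU byte order and
 * convert it to the hardware (big-endian) layout before referencing it from
 * a frame descriptor. The field names follow struct qm_sg_entry; 'buf_iova'
 * and 'len' are hypothetical.
 */
static inline void example_prep_sg(struct qm_sg_entry *sg, u64 buf_iova,
				   u32 len)
{
	*sg = (struct qm_sg_entry){ 0 };
	sg->addr = buf_iova;	/* 40-bit I/O virtual address of the buffer */
	sg->length = len;	/* 30-bit length */
	sg->final = 1;		/* last (and only) entry of the table */
	cpu_to_hw_sg(sg);	/* no-op when the host is big-endian */
}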
#endif /* __FSL_QMAN_H */