/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright (C) 2014 Freescale Semiconductor, Inc.
 */
6 #ifndef _FSL_QBMAN_PORTAL_H
7 #define _FSL_QBMAN_PORTAL_H
9 #include <fsl_qbman_base.h>
/**
 * DOC - QBMan portal APIs to implement the following functions:
 * - Initialize and destroy Software portal object.
 * - Read and write Software portal interrupt registers.
 * - Enqueue, including setting the enqueue descriptor, and issuing enqueue
 *   command.
 * - Dequeue, including setting the dequeue descriptor, issuing dequeue command,
 *   parsing the dequeue response in DQRR and memory, parsing the state change
 *   notifications.
 * - Release, including setting the release descriptor, and issuing the buffer
 *   release command.
 * - Acquire, acquire the buffer from the given buffer pool.
 * - Channel management, enable/disable CDAN with or without context.
 */
/**
 * qbman_swp_init() - Create a functional object representing the given
 * QBMan portal descriptor.
 * @d: the given qbman swp descriptor
 *
 * Return qbman_swp portal object for success, NULL if the object cannot
 * be created.
 */
struct qbman_swp *qbman_swp_init(const struct qbman_swp_desc *d);

/**
 * qbman_swp_finish() - Destroy the functional object representing the given
 * QBMan portal descriptor.
 * @p: the qbman_swp object to be destroyed.
 */
void qbman_swp_finish(struct qbman_swp *p);

/**
 * qbman_swp_get_desc() - Get the descriptor of the given portal object.
 * @p: the given portal object.
 *
 * Return the descriptor for this portal.
 */
const struct qbman_swp_desc *qbman_swp_get_desc(struct qbman_swp *p);
/* Bit masks for the software portal interrupt sources; used with the
 * SWP interrupt get/set APIs declared in this header.
 */
/* EQCR ring interrupt */
#define QBMAN_SWP_INTERRUPT_EQRI ((uint32_t)0x00000001)
/* Enqueue command dispatched interrupt */
#define QBMAN_SWP_INTERRUPT_EQDI ((uint32_t)0x00000002)
/* DQRR non-empty interrupt */
#define QBMAN_SWP_INTERRUPT_DQRI ((uint32_t)0x00000004)
/* RCR ring interrupt */
#define QBMAN_SWP_INTERRUPT_RCRI ((uint32_t)0x00000008)
/* Release command dispatched interrupt */
#define QBMAN_SWP_INTERRUPT_RCDI ((uint32_t)0x00000010)
/* Volatile dequeue command interrupt */
#define QBMAN_SWP_INTERRUPT_VDCI ((uint32_t)0x00000020)
/**
 * qbman_swp_interrupt_get_vanish() - Get the data in software portal
 * interrupt status disable register.
 * @p: the given software portal object.
 *
 * Return the settings in SWP_ISDR register.
 */
uint32_t qbman_swp_interrupt_get_vanish(struct qbman_swp *p);

/**
 * qbman_swp_interrupt_set_vanish() - Set the data in software portal
 * interrupt status disable register.
 * @p: the given software portal object.
 * @mask: The value to set in SWP_ISDR register.
 */
void qbman_swp_interrupt_set_vanish(struct qbman_swp *p, uint32_t mask);

/**
 * qbman_swp_interrupt_read_status() - Get the data in software portal
 * interrupt status register.
 * @p: the given software portal object.
 *
 * Return the settings in SWP_ISR register.
 */
uint32_t qbman_swp_interrupt_read_status(struct qbman_swp *p);

/**
 * qbman_swp_interrupt_clear_status() - Set the data in software portal
 * interrupt status register.
 * @p: the given software portal object.
 * @mask: The value to set in SWP_ISR register.
 */
void qbman_swp_interrupt_clear_status(struct qbman_swp *p, uint32_t mask);

/**
 * qbman_swp_dqrr_thrshld_read_status() - Get the data in software portal
 * DQRR interrupt threshold register.
 * @p: the given software portal object.
 */
uint32_t qbman_swp_dqrr_thrshld_read_status(struct qbman_swp *p);

/**
 * qbman_swp_dqrr_thrshld_write() - Set the data in software portal
 * DQRR interrupt threshold register.
 * @p: the given software portal object.
 * @mask: The value to set in SWP_DQRR_ITR register.
 */
void qbman_swp_dqrr_thrshld_write(struct qbman_swp *p, uint32_t mask);

/**
 * qbman_swp_intr_timeout_read_status() - Get the data in software portal
 * Interrupt Time-Out period register.
 * @p: the given software portal object.
 */
uint32_t qbman_swp_intr_timeout_read_status(struct qbman_swp *p);

/**
 * qbman_swp_intr_timeout_write() - Set the data in software portal
 * Interrupt Time-Out period register.
 * @p: the given software portal object.
 * @mask: The value to set in SWP_ITPR register.
 */
void qbman_swp_intr_timeout_write(struct qbman_swp *p, uint32_t mask);

/**
 * qbman_swp_interrupt_get_trigger() - Get the data in software portal
 * interrupt enable register.
 * @p: the given software portal object.
 *
 * Return the settings in SWP_IER register.
 */
uint32_t qbman_swp_interrupt_get_trigger(struct qbman_swp *p);

/**
 * qbman_swp_interrupt_set_trigger() - Set the data in software portal
 * interrupt enable register.
 * @p: the given software portal object.
 * @mask: The value to set in SWP_IER register.
 */
void qbman_swp_interrupt_set_trigger(struct qbman_swp *p, uint32_t mask);

/**
 * qbman_swp_interrupt_get_inhibit() - Get the data in software portal
 * interrupt inhibit register.
 * @p: the given software portal object.
 *
 * Return the settings in SWP_IIR register.
 */
int qbman_swp_interrupt_get_inhibit(struct qbman_swp *p);

/**
 * qbman_swp_interrupt_set_inhibit() - Set the data in software portal
 * interrupt inhibit register.
 * @p: the given software portal object.
 * @inhibit: The value to set in SWP_IIR register.
 */
void qbman_swp_interrupt_set_inhibit(struct qbman_swp *p, int inhibit);
/**
 * struct qbman_result - structure for qbman dequeue response and/or
 * notification.
 * @donot_manipulate_directly: the 16 32bit data to represent the whole
 * possible qbman dequeue result.
 */
struct qbman_result {
	/* NOTE(review): additional members and the closing brace of this
	 * structure are not visible in this view -- the definition appears
	 * truncated; verify against the full header.
	 */
	uint8_t reserved[63];
/* A DQRI interrupt can be generated when there are dequeue results on the
 * portal's DQRR (this mechanism does not deal with "pull" dequeues to
 * user-supplied 'storage' addresses). There are two parameters to this
 * interrupt source, one is a threshold and the other is a timeout. The
 * interrupt will fire if either the fill-level of the ring exceeds 'thresh',
 * or if the ring has been non-empty for longer than 'timeout' nanoseconds.
 * For timeout, an approximation to the desired nanosecond-granularity value is
 * made, so there are get and set APIs to allow the user to see what actual
 * timeout is set (compared to the timeout that was requested).
 */
int qbman_swp_dequeue_thresh(struct qbman_swp *s, unsigned int thresh);
int qbman_swp_dequeue_set_timeout(struct qbman_swp *s, unsigned int timeout);
int qbman_swp_dequeue_get_timeout(struct qbman_swp *s, unsigned int *timeout);

/* ------------------- */
/* Push-mode dequeuing */
/* ------------------- */

/* The user of a portal can enable and disable push-mode dequeuing of up to 16
 * channels independently. It does not specify this toggling by channel IDs, but
 * rather by specifying the index (from 0 to 15) that has been mapped to the
 * desired channel.
 */

/**
 * qbman_swp_push_get() - Get the push dequeue setup.
 * @s: the software portal object.
 * @channel_idx: the channel index to query.
 * @enabled: returned boolean to show whether the push dequeue is enabled for
 * the given channel.
 */
void qbman_swp_push_get(struct qbman_swp *s, uint8_t channel_idx, int *enabled);

/**
 * qbman_swp_push_set() - Enable or disable push dequeue.
 * @s: the software portal object.
 * @channel_idx: the channel index.
 * @enable: enable or disable push dequeue.
 *
 * The user of a portal can enable and disable push-mode dequeuing of up to 16
 * channels independently. It does not specify this toggling by channel IDs, but
 * rather by specifying the index (from 0 to 15) that has been mapped to the
 * desired channel.
 */
void qbman_swp_push_set(struct qbman_swp *s, uint8_t channel_idx, int enable);
256 /* ------------------- */
257 /* Pull-mode dequeuing */
258 /* ------------------- */
/**
 * struct qbman_pull_desc - the structure for pull dequeue descriptor
 */
struct qbman_pull_desc {
	uint32_t donot_manipulate_directly[16];
	/* NOTE(review): intermediate members and the closing brace of this
	 * structure are not visible in this view -- the definition appears
	 * truncated; verify against the full header.
	 */
	uint64_t rsp_addr_virt;
/**
 * enum qbman_pull_type_e - the type (precedence) used by a pull dequeue
 * command.
 */
enum qbman_pull_type_e {
	/* dequeue with priority precedence, respect intra-class scheduling */
	qbman_pull_type_prio = 1,
	/* dequeue with active FQ precedence, respect ICS */
	qbman_pull_type_active,
	/* dequeue with active FQ precedence, no ICS */
	qbman_pull_type_active_noics
};
/**
 * qbman_pull_desc_clear() - Clear the contents of a descriptor to
 * default/starting state.
 * @d: the pull dequeue descriptor to be cleared.
 */
void qbman_pull_desc_clear(struct qbman_pull_desc *d);
/**
 * qbman_pull_desc_set_storage() - Set the pull dequeue storage
 * @d: the pull dequeue descriptor to be set.
 * @storage: the pointer of the memory to store the dequeue result.
 * @storage_phys: the physical address of the storage memory.
 * @stash: to indicate whether write allocate is enabled.
 *
 * If not called, or if called with 'storage' as NULL, the result pull dequeues
 * will produce results to DQRR. If 'storage' is non-NULL, then results are
 * produced to the given memory location (using the physical/DMA address which
 * the caller provides in 'storage_phys'), and 'stash' controls whether or not
 * those writes to main-memory express a cache-warming attribute.
 */
/* NOTE(review): the trailing 'stash' parameter was missing in this view and is
 * reconstructed from the @stash documentation; confirm against the full header.
 */
void qbman_pull_desc_set_storage(struct qbman_pull_desc *d,
				 struct qbman_result *storage,
				 uint64_t storage_phys,
				 int stash);
/**
 * qbman_pull_desc_set_numframes() - Set the number of frames to be dequeued.
 * @d: the pull dequeue descriptor to be set.
 * @numframes: number of frames to be set, must be between 1 and 16, inclusive.
 */
/* NOTE(review): the trailing 'numframes' parameter was missing in this view
 * and is reconstructed from the @numframes documentation; confirm the exact
 * type against the full header.
 */
void qbman_pull_desc_set_numframes(struct qbman_pull_desc *d,
				   uint8_t numframes);
/**
 * qbman_pull_desc_set_token() - Set dequeue token for pull command
 * @d: the dequeue descriptor
 * @token: the token to be set
 *
 * token is the value that shows up in the dequeue response that can be used to
 * detect when the results have been published. The easiest technique is to zero
 * result "storage" before issuing a dequeue, and use any non-zero 'token' value
 * to detect when the result has been written.
 */
void qbman_pull_desc_set_token(struct qbman_pull_desc *d, uint8_t token);
/* Exactly one of the following descriptor "actions" should be set. (Calling any
 * one of these will replace the effect of any prior call to one of these.)
 * - pull dequeue from the given frame queue (FQ)
 * - pull dequeue from any FQ in the given work queue (WQ)
 * - pull dequeue from any FQ in any WQ in the given channel
 */

/**
 * qbman_pull_desc_set_fq() - Set fqid from which the dequeue command dequeues.
 * @d: the pull dequeue descriptor to be set.
 * @fqid: the frame queue index of the given FQ.
 */
void qbman_pull_desc_set_fq(struct qbman_pull_desc *d, uint32_t fqid);

/**
 * qbman_pull_desc_set_wq() - Set wqid from which the dequeue command dequeues.
 * @d: the pull dequeue descriptor to be set.
 * @wqid: composed of channel id and wqid within the channel.
 * @dct: the dequeue command type.
 */
void qbman_pull_desc_set_wq(struct qbman_pull_desc *d, uint32_t wqid,
			    enum qbman_pull_type_e dct);

/**
 * qbman_pull_desc_set_channel() - Set channelid from which the dequeue command
 * dequeues.
 * @d: the pull dequeue descriptor to be set.
 * @chid: the channel id to be dequeued.
 * @dct: the dequeue command type.
 */
void qbman_pull_desc_set_channel(struct qbman_pull_desc *d, uint32_t chid,
				 enum qbman_pull_type_e dct);

/**
 * qbman_swp_pull() - Issue the pull dequeue command
 * @s: the software portal object.
 * @d: the software portal descriptor which has been configured with
 * the set of qbman_pull_desc_set_*() calls.
 *
 * Return 0 for success, and -EBUSY if the software portal is not ready
 * to do pull dequeue.
 */
int qbman_swp_pull(struct qbman_swp *s, struct qbman_pull_desc *d);
369 /* -------------------------------- */
370 /* Polling DQRR for dequeue results */
371 /* -------------------------------- */
/**
 * qbman_swp_dqrr_next() - Get a valid DQRR entry.
 * @p: the software portal object.
 *
 * Return NULL if there are no unconsumed DQRR entries. Return a DQRR entry
 * only once, so repeated calls can return a sequence of DQRR entries, without
 * requiring they be consumed immediately or in any particular order.
 */
const struct qbman_result *qbman_swp_dqrr_next(struct qbman_swp *p);

/**
 * qbman_swp_dqrr_consume() - Consume DQRR entries previously returned from
 * qbman_swp_dqrr_next().
 * @s: the software portal object.
 * @dq: the DQRR entry to be consumed.
 */
void qbman_swp_dqrr_consume(struct qbman_swp *s, const struct qbman_result *dq);

/**
 * qbman_swp_dqrr_idx_consume() - Given the DQRR index consume the DQRR entry
 * @s: the software portal object.
 * @dqrr_index: the DQRR index entry to be consumed.
 */
void qbman_swp_dqrr_idx_consume(struct qbman_swp *s, uint8_t dqrr_index);

/**
 * qbman_get_dqrr_idx() - Get dqrr index from the given dqrr
 * @dqrr: the given dqrr object.
 *
 * Return the dqrr index.
 */
uint8_t qbman_get_dqrr_idx(const struct qbman_result *dqrr);

/**
 * qbman_get_dqrr_from_idx() - Use index to get the dqrr entry from the
 * DQRR of the given portal.
 * @s: the given portal.
 * @idx: the dqrr index.
 *
 * Return dqrr entry object.
 */
struct qbman_result *qbman_get_dqrr_from_idx(struct qbman_swp *s, uint8_t idx);

/* ------------------------------------------------- */
/* Polling user-provided storage for dequeue results */
/* ------------------------------------------------- */

/**
 * qbman_result_has_new_result() - Check and get the dequeue response from the
 * dq storage memory set in pull dequeue command
 * @s: the software portal object.
 * @dq: the dequeue result read from the memory.
 *
 * Only used for user-provided storage of dequeue results, not DQRR. For
 * efficiency purposes, the driver will perform any required endianness
 * conversion to ensure that the user's dequeue result storage is in host-endian
 * format (whether or not that is the same as the little-endian format that
 * hardware DMA'd to the user's storage). As such, once the user has called
 * qbman_result_has_new_result() and been returned a valid dequeue result,
 * they should not call it again on the same memory location (except of course
 * if another dequeue command has been executed to produce a new result to that
 * location).
 *
 * Return 1 for getting a valid dequeue result, or 0 for not getting a valid
 * dequeue result.
 */
int qbman_result_has_new_result(struct qbman_swp *s,
				struct qbman_result *dq);

/**
 * qbman_check_command_complete() - Check if the previously issued dq command
 * is completed and results are available in memory.
 * @dq: the dequeue result read from the memory.
 *
 * Return 1 for getting a valid dequeue result, or 0 for not getting a valid
 * dequeue result.
 */
int qbman_check_command_complete(struct qbman_result *dq);

/* NOTE(review): semantics of this variant are not documented in this view;
 * confirm against the implementation.
 */
int qbman_check_new_result(struct qbman_result *dq);

/* -------------------------------------------------------- */
/* Parsing dequeue entries (DQRR and user-provided storage) */
/* -------------------------------------------------------- */

/**
 * qbman_result_is_DQ() - check the dequeue result is a dequeue response or not
 * @dq: the dequeue result to be checked.
 *
 * DQRR entries may contain non-dequeue results, ie. notifications
 */
int qbman_result_is_DQ(const struct qbman_result *dq);
/**
 * qbman_result_is_SCN() - Check the dequeue result is notification or not
 * @dq: the dequeue result to be checked.
 *
 * All the non-dequeue results (FQDAN/CDAN/CSCN/...) are "state change
 * notifications" of one type or another. Some APIs apply to all of them, of the
 * form qbman_result_SCN_***().
 *
 * Return 1 if this is a state change notification, 0 if it is a frame dequeue
 * response.
 */
static inline int qbman_result_is_SCN(const struct qbman_result *dq)
{
	return !qbman_result_is_DQ(dq);
}
/* Recognise different notification types, only required if the user allows for
 * these to occur, and cares about them when they do.
 */

/**
 * qbman_result_is_FQDAN() - Check for FQ Data Availability
 * @dq: the qbman_result object.
 *
 * Return 1 if this is FQDAN.
 */
int qbman_result_is_FQDAN(const struct qbman_result *dq);

/**
 * qbman_result_is_CDAN() - Check for Channel Data Availability
 * @dq: the qbman_result object to check.
 *
 * Return 1 if this is CDAN.
 */
int qbman_result_is_CDAN(const struct qbman_result *dq);

/**
 * qbman_result_is_CSCN() - Check for Congestion State Change
 * @dq: the qbman_result object to check.
 *
 * Return 1 if this is CSCN.
 */
int qbman_result_is_CSCN(const struct qbman_result *dq);

/**
 * qbman_result_is_BPSCN() - Check for Buffer Pool State Change.
 * @dq: the qbman_result object to check.
 *
 * Return 1 if this is BPSCN.
 */
int qbman_result_is_BPSCN(const struct qbman_result *dq);

/**
 * qbman_result_is_CGCU() - Check for Congestion Group Count Update.
 * @dq: the qbman_result object to check.
 *
 * Return 1 if this is CGCU.
 */
int qbman_result_is_CGCU(const struct qbman_result *dq);

/* Frame queue state change notifications; (FQDAN in theory counts too as it
 * leaves a FQ parked, but it is primarily a data availability notification)
 */

/**
 * qbman_result_is_FQRN() - Check for FQ Retirement Notification.
 * @dq: the qbman_result object to check.
 *
 * Return 1 if this is FQRN.
 */
int qbman_result_is_FQRN(const struct qbman_result *dq);

/**
 * qbman_result_is_FQRNI() - Check for FQ Retirement Immediate
 * @dq: the qbman_result object to check.
 *
 * Return 1 if this is FQRNI.
 */
int qbman_result_is_FQRNI(const struct qbman_result *dq);

/**
 * qbman_result_is_FQPN() - Check for FQ Park Notification
 * @dq: the qbman_result object to check.
 *
 * Return 1 if this is FQPN.
 */
int qbman_result_is_FQPN(const struct qbman_result *dq);

/* Parsing frame dequeue results (qbman_result_is_DQ() must be TRUE)
 */
/* FQ empty */
#define QBMAN_DQ_STAT_FQEMPTY 0x80
/* FQ held active */
#define QBMAN_DQ_STAT_HELDACTIVE 0x40
/* FQ force eligible */
#define QBMAN_DQ_STAT_FORCEELIGIBLE 0x20
/* valid frame */
#define QBMAN_DQ_STAT_VALIDFRAME 0x10
/* FQ ODP enable */
#define QBMAN_DQ_STAT_ODPVALID 0x04
/* Volatile dequeue */
#define QBMAN_DQ_STAT_VOLATILE 0x02
/* volatile dequeue command is expired */
#define QBMAN_DQ_STAT_EXPIRED 0x01

/* DCA index mask and enqueue-flag bit -- see qbman_eq_desc_set_dca(). */
#define QBMAN_EQCR_DCA_IDXMASK 0x0f
#define QBMAN_ENQUEUE_FLAG_DCA (1ULL << 31)

/**
 * qbman_result_DQ_flags() - Get the STAT field of dequeue response
 * @dq: the dequeue result.
 *
 * Return the state field.
 */
uint8_t qbman_result_DQ_flags(const struct qbman_result *dq);
581 * qbman_result_DQ_is_pull() - Check whether the dq response is from a pull
583 * @dq: the dequeue result.
585 * Return 1 for volatile(pull) dequeue, 0 for static dequeue.
587 static inline int qbman_result_DQ_is_pull(const struct qbman_result *dq)
589 return (int)(qbman_result_DQ_flags(dq) & QBMAN_DQ_STAT_VOLATILE);
593 * qbman_result_DQ_is_pull_complete() - Check whether the pull command is
595 * @dq: the dequeue result.
599 static inline int qbman_result_DQ_is_pull_complete(
600 const struct qbman_result *dq)
602 return (int)(qbman_result_DQ_flags(dq) & QBMAN_DQ_STAT_EXPIRED);
/**
 * qbman_result_DQ_seqnum() - Get the seqnum field in dequeue response
 * seqnum is valid only if VALIDFRAME flag is TRUE
 * @dq: the dequeue result.
 *
 * Return the seqnum.
 */
uint16_t qbman_result_DQ_seqnum(const struct qbman_result *dq);

/**
 * qbman_result_DQ_odpid() - Get the odpid field in dequeue response
 * odpid is valid only if ODPVALID flag is TRUE.
 * @dq: the dequeue result.
 *
 * Return the odpid.
 */
uint16_t qbman_result_DQ_odpid(const struct qbman_result *dq);

/**
 * qbman_result_DQ_fqid() - Get the fqid in dequeue response
 * @dq: the dequeue result.
 *
 * Return the fqid.
 */
uint32_t qbman_result_DQ_fqid(const struct qbman_result *dq);

/**
 * qbman_result_DQ_byte_count() - Get the byte count in dequeue response
 * @dq: the dequeue result.
 *
 * Return the byte count remaining in the FQ.
 */
uint32_t qbman_result_DQ_byte_count(const struct qbman_result *dq);

/**
 * qbman_result_DQ_frame_count() - Get the frame count in dequeue response
 * @dq: the dequeue result.
 *
 * Return the frame count remaining in the FQ.
 */
uint32_t qbman_result_DQ_frame_count(const struct qbman_result *dq);

/**
 * qbman_result_DQ_fqd_ctx() - Get the frame queue context in dequeue response
 * @dq: the dequeue result.
 *
 * Return the frame queue context.
 */
uint64_t qbman_result_DQ_fqd_ctx(const struct qbman_result *dq);

/**
 * qbman_result_DQ_fd() - Get the frame descriptor in dequeue response
 * @dq: the dequeue result.
 *
 * Return the frame descriptor.
 */
const struct qbman_fd *qbman_result_DQ_fd(const struct qbman_result *dq);

/* State-change notifications (FQDAN/CDAN/CSCN/...). */

/**
 * qbman_result_SCN_state() - Get the state field in State-change notification
 * @scn: the state change notification.
 *
 * Return the state in the notification.
 */
uint8_t qbman_result_SCN_state(const struct qbman_result *scn);

/**
 * qbman_result_SCN_rid() - Get the resource id from the notification
 * @scn: the state change notification.
 *
 * Return the resource id.
 */
uint32_t qbman_result_SCN_rid(const struct qbman_result *scn);

/**
 * qbman_result_SCN_ctx() - get the context from the notification
 * @scn: the state change notification.
 *
 * Return the context.
 */
uint64_t qbman_result_SCN_ctx(const struct qbman_result *scn);

/* Type-specific "resource IDs". Mainly for illustration purposes, though it
 * also gives the appropriate type widths.
 */
/* Get the FQID from the FQDAN */
#define qbman_result_FQDAN_fqid(dq) qbman_result_SCN_rid(dq)
/* Get the FQID from the FQRN */
#define qbman_result_FQRN_fqid(dq) qbman_result_SCN_rid(dq)
/* Get the FQID from the FQRNI */
#define qbman_result_FQRNI_fqid(dq) qbman_result_SCN_rid(dq)
/* Get the FQID from the FQPN */
#define qbman_result_FQPN_fqid(dq) qbman_result_SCN_rid(dq)
/* Get the channel ID from the CDAN */
#define qbman_result_CDAN_cid(dq) ((uint16_t)qbman_result_SCN_rid(dq))
/* Get the CGID from the CSCN */
#define qbman_result_CSCN_cgid(dq) ((uint16_t)qbman_result_SCN_rid(dq))

/**
 * qbman_result_bpscn_bpid() - Get the bpid from BPSCN
 * @scn: the state change notification.
 *
 * Return the buffer pool id.
 */
uint16_t qbman_result_bpscn_bpid(const struct qbman_result *scn);

/**
 * qbman_result_bpscn_has_free_bufs() - Check whether there are free
 * buffers in the pool from BPSCN.
 * @scn: the state change notification.
 *
 * Return non-zero if there are free buffers in the pool.
 */
int qbman_result_bpscn_has_free_bufs(const struct qbman_result *scn);

/**
 * qbman_result_bpscn_is_depleted() - Check BPSCN to see whether the
 * buffer pool is depleted.
 * @scn: the state change notification.
 *
 * Return the status of buffer pool depletion.
 */
int qbman_result_bpscn_is_depleted(const struct qbman_result *scn);

/**
 * qbman_result_bpscn_is_surplus() - Check BPSCN to see whether the buffer
 * pool is surplus or not.
 * @scn: the state change notification.
 *
 * Return the status of buffer pool surplus.
 */
int qbman_result_bpscn_is_surplus(const struct qbman_result *scn);

/**
 * qbman_result_bpscn_ctx() - Get the BPSCN CTX from BPSCN message
 * @scn: the state change notification.
 *
 * Return the BPSCN context.
 */
uint64_t qbman_result_bpscn_ctx(const struct qbman_result *scn);

/* Parsing CGCU */

/**
 * qbman_result_cgcu_cgid() - Check CGCU resource id, i.e. cgid
 * @scn: the state change notification.
 *
 * Return the CGCU resource id.
 */
uint16_t qbman_result_cgcu_cgid(const struct qbman_result *scn);

/**
 * qbman_result_cgcu_icnt() - Get the I_CNT from CGCU
 * @scn: the state change notification.
 *
 * Return instantaneous count in the CGCU notification.
 */
uint64_t qbman_result_cgcu_icnt(const struct qbman_result *scn);
/* struct qbman_eq_desc - structure of enqueue descriptor */
struct qbman_eq_desc {
	/* NOTE(review): additional members and the closing brace of this
	 * structure are not visible in this view -- the definition appears
	 * truncated; verify against the full header.
	 */
	uint32_t donot_manipulate_directly[8];
/**
 * struct qbman_eq_response - structure of enqueue response
 * @donot_manipulate_directly: the 16 32bit data to represent the whole
 * possible enqueue response.
 */
struct qbman_eq_response {
	uint32_t donot_manipulate_directly[16];
};
/**
 * qbman_eq_desc_clear() - Clear the contents of a descriptor to
 * default/starting state.
 * @d: the given enqueue descriptor.
 */
void qbman_eq_desc_clear(struct qbman_eq_desc *d);

/* Exactly one of the following descriptor "actions" should be set. (Calling
 * any one of these will replace the effect of any prior call to one of these.)
 * - enqueue without order-restoration
 * - enqueue with order-restoration
 * - fill a hole in the order-restoration sequence, without any enqueue
 * - advance NESN (Next Expected Sequence Number), without any enqueue
 * 'respond_success' indicates whether an enqueue response should be DMA'd
 * after success (otherwise a response is DMA'd only after failure).
 * 'incomplete' indicates that other fragments of the same 'seqnum' are yet to
 * be enqueued.
 */

/**
 * qbman_eq_desc_set_no_orp() - Set enqueue descriptor without orp
 * @d: the enqueue descriptor.
 * @respond_success: 1 = enqueue with response always; 0 = enqueue with
 * rejections returned on a FQ.
 */
void qbman_eq_desc_set_no_orp(struct qbman_eq_desc *d, int respond_success);

/**
 * qbman_eq_desc_set_orp() - Set order-restoration in the enqueue descriptor
 * @d: the enqueue descriptor.
 * @respond_success: 1 = enqueue with response always; 0 = enqueue with
 * rejections returned on a FQ.
 * @opr_id: the order point record id.
 * @seqnum: the order restoration sequence number.
 * @incomplete: indicates whether this is the last fragment using the same
 * sequence number.
 */
void qbman_eq_desc_set_orp(struct qbman_eq_desc *d, int respond_success,
			   uint16_t opr_id, uint16_t seqnum, int incomplete);
/**
 * qbman_eq_desc_set_orp_hole() - fill a hole in the order-restoration sequence
 * without any enqueue
 * @d: the enqueue descriptor.
 * @opr_id: the order point record id.
 * @seqnum: the order restoration sequence number.
 */
/* NOTE(review): the trailing 'seqnum' parameter was missing in this view and
 * is reconstructed from the @seqnum documentation and the matching
 * qbman_eq_desc_set_orp() signature; confirm against the full header.
 */
void qbman_eq_desc_set_orp_hole(struct qbman_eq_desc *d, uint16_t opr_id,
				uint16_t seqnum);
/**
 * qbman_eq_desc_set_orp_nesn() - advance NESN (Next Expected Sequence Number)
 * without any enqueue
 * @d: the enqueue descriptor.
 * @opr_id: the order point record id.
 * @seqnum: the order restoration sequence number.
 */
/* NOTE(review): the trailing 'seqnum' parameter was missing in this view and
 * is reconstructed from the @seqnum documentation and the matching
 * qbman_eq_desc_set_orp() signature; confirm against the full header.
 */
void qbman_eq_desc_set_orp_nesn(struct qbman_eq_desc *d, uint16_t opr_id,
				uint16_t seqnum);
/**
 * qbman_eq_desc_set_response() - Set the enqueue response info.
 * @d: the enqueue descriptor
 * @storage_phys: the physical address of the enqueue response in memory.
 * @stash: indicate that the write allocation enabled or not.
 *
 * In the case where an enqueue response is DMA'd, this determines where that
 * response should go. (The physical/DMA address is given for hardware's
 * benefit, but software should interpret it as a "struct qbman_eq_response"
 * data structure.) 'stash' controls whether or not the write to main-memory
 * expresses a cache-warming attribute.
 */
/* NOTE(review): the trailing 'stash' parameter was missing in this view and is
 * reconstructed from the @stash documentation; confirm against the full header.
 */
void qbman_eq_desc_set_response(struct qbman_eq_desc *d,
				uint64_t storage_phys,
				int stash);
/**
 * qbman_eq_desc_set_token() - Set token for the enqueue command
 * @d: the enqueue descriptor
 * @token: the token to be set.
 *
 * token is the value that shows up in an enqueue response that can be used to
 * detect when the results have been published. The easiest technique is to zero
 * result "storage" before issuing an enqueue, and use any non-zero 'token'
 * value to detect when the response has been written.
 */
void qbman_eq_desc_set_token(struct qbman_eq_desc *d, uint8_t token);

/*
 * Exactly one of the following descriptor "targets" should be set. (Calling any
 * one of these will replace the effect of any prior call to one of these.)
 * - enqueue to a frame queue
 * - enqueue to a queuing destination
 * Note, that none of these will have any affect if the "action" type has been
 * set to "orp_hole" or "orp_nesn".
 */

/**
 * qbman_eq_desc_set_fq() - Set Frame Queue id for the enqueue command
 * @d: the enqueue descriptor
 * @fqid: the id of the frame queue to be enqueued.
 */
void qbman_eq_desc_set_fq(struct qbman_eq_desc *d, uint32_t fqid);

/**
 * qbman_eq_desc_set_qd() - Set Queuing Destination for the enqueue command.
 * @d: the enqueue descriptor
 * @qdid: the id of the queuing destination to be enqueued.
 * @qd_bin: the queuing destination bin
 * @qd_prio: the queuing destination priority.
 */
void qbman_eq_desc_set_qd(struct qbman_eq_desc *d, uint32_t qdid,
			  uint16_t qd_bin, uint8_t qd_prio);

/**
 * qbman_eq_desc_set_eqdi() - enable/disable EQDI interrupt
 * @d: the enqueue descriptor
 * @enable: boolean to enable/disable EQDI
 *
 * Determines whether or not the portal's EQDI interrupt source should be
 * asserted after the enqueue command is completed.
 */
void qbman_eq_desc_set_eqdi(struct qbman_eq_desc *d, int enable);

/**
 * qbman_eq_desc_set_dca() - Set DCA mode in the enqueue command.
 * @d: the enqueue descriptor.
 * @enable: enabled/disable DCA mode.
 * @dqrr_idx: DCAP_CI, the DCAP consumer index.
 * @park: determine whether to park the FQ or not.
 *
 * Determines whether or not a portal DQRR entry should be consumed once the
 * enqueue command is completed. (And if so, and the DQRR entry corresponds to a
 * held-active (order-preserving) FQ, whether the FQ should be parked instead of
 * being rescheduled.)
 */
void qbman_eq_desc_set_dca(struct qbman_eq_desc *d, int enable,
			   uint8_t dqrr_idx, int park);

/**
 * qbman_swp_enqueue() - Issue an enqueue command.
 * @s: the software portal used for enqueue.
 * @d: the enqueue descriptor.
 * @fd: the frame descriptor to be enqueued.
 *
 * Please note that 'fd' should only be NULL if the "action" of the
 * descriptor is "orp_hole" or "orp_nesn".
 *
 * Return 0 for a successful enqueue, -EBUSY if the EQCR is not ready.
 */
int qbman_swp_enqueue(struct qbman_swp *s, const struct qbman_eq_desc *d,
		      const struct qbman_fd *fd);
/**
 * qbman_swp_enqueue_multiple() - Enqueue multiple frames with same
 * eq descriptor.
 * @s: the software portal used for enqueue.
 * @d: the enqueue descriptor.
 * @fd: the frame descriptor to be enqueued.
 * @num_frames: the number of the frames to be enqueued.
 *
 * Return the number of enqueued frames, -EBUSY if the EQCR is not ready.
 */
int qbman_swp_enqueue_multiple(struct qbman_swp *s,
			       const struct qbman_eq_desc *d,
			       const struct qbman_fd *fd,
/* NOTE(review): the remaining parameter(s) (at least 'num_frames' per the doc)
 * and the closing of this prototype are not visible in this view; verify
 * against the full header.
 */
/**
 * qbman_swp_enqueue_multiple_desc() - Enqueue multiple frames with
 * individual eq descriptor.
 * @s: the software portal used for enqueue.
 * @d: the enqueue descriptor.
 * @fd: the frame descriptor to be enqueued.
 * @flags: bit-mask of QBMAN_ENQUEUE_FLAG_*** options
 * @num_frames: the number of the frames to be enqueued.
 *
 * Return the number of enqueued frames, -EBUSY if the EQCR is not ready.
 */
int qbman_swp_enqueue_multiple_desc(struct qbman_swp *s,
				    const struct qbman_eq_desc *d,
				    const struct qbman_fd *fd,
/* NOTE(review): the remaining parameter(s) ('flags' and 'num_frames' per the
 * doc) and the closing of this prototype are not visible in this view; verify
 * against the full header.
 */
/**
 * qbman_swp_enqueue_thresh() - Set threshold for EQRI interrupt.
 * @s: the software portal.
 * @thresh: the threshold to trigger the EQRI interrupt.
 *
 * An EQRI interrupt can be generated when the fill-level of EQCR falls below
 * the 'thresh' value set here. Setting thresh==0 (the default) disables.
 */
int qbman_swp_enqueue_thresh(struct qbman_swp *s, unsigned int thresh);
990 /*******************/
991 /* Buffer releases */
992 /*******************/
/**
 * struct qbman_release_desc - The structure for buffer release descriptor
 * @donot_manipulate_directly: the 32bit data to represent the whole
 * possible settings of qbman release descriptor.
 */
struct qbman_release_desc {
	uint32_t donot_manipulate_directly[16];
	/* NOTE(review): the closing brace (and any further members) of this
	 * structure are not visible in this view; verify against the full
	 * header.
	 */
/**
 * qbman_release_desc_clear() - Clear the contents of a descriptor to
 * default/starting state.
 * @d: the qbman release descriptor.
 */
void qbman_release_desc_clear(struct qbman_release_desc *d);

/**
 * qbman_release_desc_set_bpid() - Set the ID of the buffer pool to release to
 * @d: the qbman release descriptor.
 * @bpid: the id of the buffer pool.
 */
void qbman_release_desc_set_bpid(struct qbman_release_desc *d, uint16_t bpid);

/**
 * qbman_release_desc_set_rcdi() - Determines whether or not the portal's RCDI
 * interrupt source should be asserted after the release command is completed.
 * @d: the qbman release descriptor.
 * @enable: enable (1) or disable (0) the RCDI interrupt.
 */
void qbman_release_desc_set_rcdi(struct qbman_release_desc *d, int enable);

/**
 * qbman_swp_release() - Issue a buffer release command.
 * @s: the software portal object.
 * @d: the release descriptor.
 * @buffers: a pointer pointing to the buffer address to be released.
 * @num_buffers: number of buffers to be released, must be less than 8.
 *
 * Return 0 for success, -EBUSY if the release command ring is not ready.
 */
int qbman_swp_release(struct qbman_swp *s, const struct qbman_release_desc *d,
		      const uint64_t *buffers, unsigned int num_buffers);

/**
 * qbman_swp_release_thresh() - Set threshold for RCRI interrupt
 * @s: the software portal.
 * @thresh: the threshold.
 *
 * An RCRI interrupt can be generated when the fill-level of RCR falls below
 * the 'thresh' value set here. Setting thresh==0 (the default) disables.
 */
int qbman_swp_release_thresh(struct qbman_swp *s, unsigned int thresh);
1052 /*******************/
1053 /* Buffer acquires */
1054 /*******************/
1056 * qbman_swp_acquire() - Issue a buffer acquire command.
1057 * @s: the software portal object.
1058 * @bpid: the buffer pool index.
1059 * @buffers: a pointer pointing to the acquired buffer address(es).
1060 * @num_buffers: number of buffers to be acquired, must be less than 8.
1062 * Return 0 for success, or negative error code if the acquire command
1065 int qbman_swp_acquire(struct qbman_swp *s, uint16_t bpid, uint64_t *buffers,
1066 unsigned int num_buffers);
1072 * qbman_swp_fq_schedule() - Move the fq to the scheduled state.
1073 * @s: the software portal object.
1074 * @fqid: the index of frame queue to be scheduled.
1076 * There are a couple of different ways that a FQ can end up in the parked
1077 * state; this call schedules it.
1079 * Return 0 for success, or negative error code for failure.
1081 int qbman_swp_fq_schedule(struct qbman_swp *s, uint32_t fqid);
1084 * qbman_swp_fq_force() - Force the FQ to fully scheduled state.
1085 * @s: the software portal object.
1086 * @fqid: the index of frame queue to be forced.
1088 * Force eligible will force a tentatively-scheduled FQ to be fully-scheduled
1089 * and thus be available for selection by any channel-dequeuing behaviour (push
1090 * or pull). If the FQ is subsequently "dequeued" from the channel and is still
1091 * empty at the time this happens, the resulting dq_entry will have no FD.
1092 * (qbman_result_DQ_fd() will return NULL.)
1094 * Return 0 for success, or negative error code for failure.
1096 int qbman_swp_fq_force(struct qbman_swp *s, uint32_t fqid);
1099 * These functions change the FQ flow-control stuff between XON/XOFF. (The
1100 * default is XON.) This setting doesn't affect enqueues to the FQ, just
1101 * dequeues. XOFF FQs will remain in the tentatively-scheduled state, even when
1102 * non-empty, meaning they won't be selected for scheduled dequeuing. If a FQ is
1103 * changed to XOFF after it had already become truly-scheduled to a channel, and
1104 * a pull dequeue of that channel occurs that selects that FQ for dequeuing,
1105 * then the resulting dq_entry will have no FD. (qbman_result_DQ_fd() will
1109 * qbman_swp_fq_xon() - XON the frame queue.
1110 * @s: the software portal object.
1111 * @fqid: the index of frame queue.
1113 * Return 0 for success, or negative error code for failure.
1115 int qbman_swp_fq_xon(struct qbman_swp *s, uint32_t fqid);
1117 * qbman_swp_fq_xoff() - XOFF the frame queue.
1118 * @s: the software portal object.
1119 * @fqid: the index of frame queue.
1121 * Return 0 for success, or negative error code for failure.
1123 int qbman_swp_fq_xoff(struct qbman_swp *s, uint32_t fqid);
1125 /**********************/
1126 /* Channel management */
1127 /**********************/
1130 * If the user has been allocated a channel object that is going to generate
1131 * CDANs to another channel, then these functions will be necessary.
1132 * CDAN-enabled channels only generate a single CDAN notification, after which
1133 * they need to be reenabled before they'll generate another. (The idea is
1134 * that pull dequeuing will occur in reaction to the CDAN, followed by a
1135 * reenable step.) Each function generates a distinct command to hardware, so a
1136 * combination function is provided if the user wishes to modify the "context"
1137 * (which shows up in each CDAN message) each time they reenable, as a single
1138 * command to hardware.
1142 * qbman_swp_CDAN_set_context() - Set CDAN context
1143 * @s: the software portal object.
1144 * @channelid: the channel index.
1145 * @ctx: the context to be set in CDAN.
1147 * Return 0 for success, or negative error code for failure.
1149 int qbman_swp_CDAN_set_context(struct qbman_swp *s, uint16_t channelid,
1153 * qbman_swp_CDAN_enable() - Enable CDAN for the channel.
1154 * @s: the software portal object.
1155 * @channelid: the index of the channel to generate CDAN.
1157 * Return 0 for success, or negative error code for failure.
1159 int qbman_swp_CDAN_enable(struct qbman_swp *s, uint16_t channelid);
1162 * qbman_swp_CDAN_disable() - disable CDAN for the channel.
1163 * @s: the software portal object.
1164 * @channelid: the index of the channel to generate CDAN.
1166 * Return 0 for success, or negative error code for failure.
1168 int qbman_swp_CDAN_disable(struct qbman_swp *s, uint16_t channelid);
1171 * qbman_swp_CDAN_set_context_enable() - Set CDAN context and enable CDAN
1172 * @s: the software portal object.
1173 * @channelid: the index of the channel to generate CDAN.
1174 * @ctx: the context set in CDAN.
1176 * Return 0 for success, or negative error code for failure.
1178 int qbman_swp_CDAN_set_context_enable(struct qbman_swp *s, uint16_t channelid,
1180 #endif /* !_FSL_QBMAN_PORTAL_H */