/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0)
*
* Copyright 2008-2012 Freescale Semiconductor, Inc.
+ * Copyright 2019 NXP
*
*/
u32 cmd;
u32 status;
};
-} __attribute__((aligned(8)));
+} __rte_aligned(8);
#define QM_FD_DD_NULL 0x00
#define QM_FD_PID_MASK 0x3f
static inline u64 qm_fd_addr_get64(const struct qm_fd *fd)
} while (0)
/* See 1.5.8.1: "Enqueue Command" */
-struct qm_eqcr_entry {
+struct __rte_aligned(8) qm_eqcr_entry {
u8 __dont_write_directly__verb;
u8 dca;
u16 seqnum;
u32 orp; /* 24-bit */
u32 fqid; /* 24-bit */
u32 tag;
- struct qm_fd fd;
+ struct qm_fd fd; /* this has alignment 8 */
u8 __reserved3[32];
} __packed;
/* "Frame Dequeue Response" */
-struct qm_dqrr_entry {
+struct __rte_aligned(8) qm_dqrr_entry {
u8 verb;
u8 stat;
u16 seqnum; /* 15-bit */
u8 __reserved2[3];
u32 fqid; /* 24-bit */
u32 contextB;
- struct qm_fd fd;
+ struct qm_fd fd; /* this has alignment 8 */
u8 __reserved4[32];
};
/* "ERN Message Response" */
/* "FQ State Change Notification" */
struct qm_mr_entry {
- u8 verb;
union {
struct {
+ u8 verb;
u8 dca;
u16 seqnum;
u8 rc; /* Rejection Code */
u32 orp:24;
u32 fqid; /* 24-bit */
u32 tag;
- struct qm_fd fd;
- } __packed ern;
+ struct qm_fd fd; /* this has alignment 8 */
+ } __packed __rte_aligned(8) ern;
struct {
+ u8 verb;
#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
u8 colour:2; /* See QM_MR_DCERN_COLOUR_* */
u8 __reserved1:4;
u32 __reserved3:24;
u32 fqid; /* 24-bit */
u32 tag;
- struct qm_fd fd;
- } __packed dcern;
+ struct qm_fd fd; /* this has alignment 8 */
+ } __packed __rte_aligned(8) dcern;
struct {
+ u8 verb;
u8 fqs; /* Frame Queue Status */
u8 __reserved1[6];
u32 fqid; /* 24-bit */
u32 contextB;
u8 __reserved2[16];
- } __packed fq; /* FQRN/FQRNI/FQRL/FQPN */
+ } __packed __rte_aligned(8) fq; /* FQRN/FQRNI/FQRL/FQPN */
};
u8 __reserved2[32];
-} __packed;
+} __packed __rte_aligned(8);
#define QM_MR_VERB_VBIT 0x80
/*
* ERNs originating from direct-connect portals ("dcern") use 0x20 as a verb
const struct qm_dqrr_entry *dqrr,
void **bd);
+/* This callback type is used when handling buffers in DPDK pull mode */
+typedef void (*qman_dpdk_pull_cb_dqrr)(struct qman_fq **fq,
+ struct qm_dqrr_entry **dqrr,
+ void **bufs,
+ int num_bufs);
+
+typedef void (*qman_dpdk_cb_prepare)(struct qm_dqrr_entry *dq, void **bufs);
+
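+/*
+ * Illustrative sketch (documentation only, not part of this change): a
+ * pull-mode callback matching qman_dpdk_pull_cb_dqrr. The helper
+ * example_fd_to_buf() is hypothetical; a real handler would translate each
+ * DQRR entry's frame descriptor into a driver buffer.
+ */
+#ifdef QMAN_DOC_EXAMPLES /* never defined; examples are not compiled */
+static void *example_fd_to_buf(const struct qm_fd *fd, struct qman_fq *fq);
+
+static void example_pull_cb(struct qman_fq **fq, struct qm_dqrr_entry **dqrr,
+			    void **bufs, int num_bufs)
+{
+	int i;
+
+	for (i = 0; i < num_bufs; i++)
+		bufs[i] = example_fd_to_buf(&dqrr[i]->fd, fq[i]);
+}
+#endif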
/*
* This callback type is used when handling ERNs, FQRNs and FQRLs via MR. They
* are always consumed after the callback returns.
struct qman_fq_cb {
union { /* for dequeued frames */
qman_dpdk_cb_dqrr dqrr_dpdk_cb;
+ qman_dpdk_pull_cb_dqrr dqrr_dpdk_pull_cb;
qman_cb_dqrr dqrr;
};
+ qman_dpdk_cb_prepare dqrr_prepare;
qman_cb_mr ern; /* for s/w ERNs */
qman_cb_mr fqs; /* frame-queue state changes*/
};
struct qman_fq_cb cb;
u32 fqid_le;
+ u32 fqid;
+
+ int q_fd;
u16 ch_id;
u8 cgr_groupid;
- u8 is_static;
+ u8 is_static:4;
+ u8 qp_initialized:4;
/* DPDK Interface */
void *dpaa_intf;
struct rte_event ev;
/* affined portal in case of static queue */
struct qman_portal *qp;
+ struct dpaa_bp_info *bp_array;
volatile unsigned long flags;
enum qman_fq_state state;
- u32 fqid;
spinlock_t fqlock;
struct rb_node node;
#ifdef CONFIG_FSL_QMAN_FQ_LOOKUP
+ void **qman_fq_lookup_table;
u32 key;
#endif
};
#define QMAN_CGR_FLAG_USE_INIT 0x00000001
#define QMAN_CGR_MODE_FRAME 0x00000001
+#ifdef CONFIG_FSL_QMAN_FQ_LOOKUP
+__rte_internal
+void qman_set_fq_lookup_table(void **table);
+#endif
+
/**
* qman_get_portal_index - get portal configuration index
*/
int qman_get_portal_index(void);
+__rte_internal
u32 qman_portal_dequeue(struct rte_event ev[], unsigned int poll_limit,
void **bufs);
+/**
+ * qman_irqsource_add - add processing sources to be interrupt-driven
+ * @bits: bitmask of QM_PIRQ_**I processing sources
+ *
+ * Adds processing sources that should be interrupt-driven (rather than
+ * processed via qman_poll_***() functions). Returns zero for success, or
+ * -EINVAL if the current CPU is sharing a portal hosted on another CPU.
+ */
+__rte_internal
+int qman_irqsource_add(u32 bits);
+
+/**
+ * qman_fq_portal_irqsource_add - similar to qman_irqsource_add, but it
+ * takes a portal (FQ-specific) as input rather than using the thread-affine
+ * portal.
+ */
+__rte_internal
+int qman_fq_portal_irqsource_add(struct qman_portal *p, u32 bits);
+
+/**
+ * qman_irqsource_remove - remove processing sources from being interrupt-driven
+ * @bits: bitmask of QM_PIRQ_**I processing sources
+ *
+ * Removes processing sources from being interrupt-driven, so that they will
+ * instead be processed via qman_poll_***() functions. Returns zero for success,
+ * or -EINVAL if the current CPU is sharing a portal hosted on another CPU.
+ */
+__rte_internal
+int qman_irqsource_remove(u32 bits);
+
+/**
+ * qman_fq_portal_irqsource_remove - similar to qman_irqsource_remove, but it
+ * takes a portal (FQ-specific) as input rather than using the thread-affine
+ * portal.
+ */
+__rte_internal
+int qman_fq_portal_irqsource_remove(struct qman_portal *p, u32 bits);
+
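+/*
+ * Usage sketch (illustrative only): switch the affine portal's DQRR
+ * processing between interrupt-driven and polled operation using the
+ * QM_PIRQ_DQRI processing source.
+ */
+#ifdef QMAN_DOC_EXAMPLES
+static int example_set_dqrr_irq_mode(int interrupt_driven)
+{
+	if (interrupt_driven)
+		return qman_irqsource_add(QM_PIRQ_DQRI);
+	return qman_irqsource_remove(QM_PIRQ_DQRI);
+}
+#endif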
/**
* qman_affine_channel - return the channel ID of a portal
* @cpu: the cpu whose affine portal is the subject of the query
*/
u16 qman_affine_channel(int cpu);
+__rte_internal
unsigned int qman_portal_poll_rx(unsigned int poll_limit,
void **bufs, struct qman_portal *q);
* qman_set_vdq - Issue a volatile dequeue command
* @fq: Frame Queue on which the volatile dequeue command is issued
* @num: Number of Frames requested for volatile dequeue
+ * @vdqcr_flags: QM_VDQCR_EXACT flag for the VDQCR command
*
* This function will issue a volatile dequeue command to the QMAN.
*/
-int qman_set_vdq(struct qman_fq *fq, u16 num);
+__rte_internal
+int qman_set_vdq(struct qman_fq *fq, u16 num, uint32_t vdqcr_flags);
/**
* qman_dequeue - Get the DQRR entry after volatile dequeue command
* is issued. It returns NULL once there are no more packets available on
* the DQRR.
*/
+__rte_internal
struct qm_dqrr_entry *qman_dequeue(struct qman_fq *fq);
/**
* This will consume the DQRR entry and make it available for the next volatile
* dequeue.
*/
+__rte_internal
void qman_dqrr_consume(struct qman_fq *fq,
struct qm_dqrr_entry *dq);
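+/*
+ * Usage sketch (illustrative only): a volatile-dequeue cycle using the new
+ * vdqcr_flags argument. A production loop would track how many of the
+ * requested frames are still outstanding rather than stopping at the first
+ * empty poll.
+ */
+#ifdef QMAN_DOC_EXAMPLES
+static void example_volatile_dequeue(struct qman_fq *fq)
+{
+	struct qm_dqrr_entry *dq;
+
+	/* Request exactly 4 frames from the FQ */
+	if (qman_set_vdq(fq, 4, QM_VDQCR_EXACT))
+		return; /* a previous volatile dequeue is still in flight */
+	while ((dq = qman_dequeue(fq)) != NULL) {
+		/* ... process dq->fd here ... */
+		qman_dqrr_consume(fq, dq);
+	}
+}
+#endif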
* this function will return -EINVAL, otherwise the return value is >=0 and
* represents the number of DQRR entries processed.
*/
+__rte_internal
int qman_poll_dqrr(unsigned int limit);
/**
* (SDQCR). The requested pools are limited to those the portal has dequeue
* access to.
*/
+__rte_internal
void qman_static_dequeue_add(u32 pools, struct qman_portal *qm);
/**
* function must be called from the same CPU as that which processed the DQRR
* entry in the first place.
*/
+__rte_internal
void qman_dca_index(u8 index, int park_request);
/**
* a frame queue object based on that, rather than assuming/requiring that it be
* Out of Service.
*/
+__rte_internal
int qman_create_fq(u32 fqid, u32 flags, struct qman_fq *fq);
/**
* qman_fq_fqid - Queries the frame queue ID of a FQ object
* @fq: the frame queue object to query
*/
+__rte_internal
u32 qman_fq_fqid(struct qman_fq *fq);
/**
* This captures the state, as seen by the driver, at the time the function
* executes.
*/
+__rte_internal
void qman_fq_state(struct qman_fq *fq, enum qman_fq_state *state, u32 *flags);
/**
* context_a.address fields and will leave the stashing fields provided by the
* user alone, otherwise it will zero out the context_a.stashing fields.
*/
+__rte_internal
int qman_init_fq(struct qman_fq *fq, u32 flags, struct qm_mcc_initfq *opts);
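+/*
+ * Usage sketch (illustrative only): create an FQ object with a dynamically
+ * allocated FQID and schedule it with default FQD contents (NULL opts).
+ */
+#ifdef QMAN_DOC_EXAMPLES
+static int example_setup_fq(struct qman_fq *fq)
+{
+	int ret;
+
+	ret = qman_create_fq(0, QMAN_FQ_FLAG_DYNAMIC_FQID, fq);
+	if (ret)
+		return ret;
+	return qman_init_fq(fq, QMAN_INITFQ_FLAG_SCHED, NULL);
+}
+#endif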
/**
* caller should be prepared to accept the callback as the function is called,
* not only once it has returned.
*/
+__rte_internal
int qman_retire_fq(struct qman_fq *fq, u32 *flags);
/**
* The frame queue must be retired and empty, and if any order restoration list
* was released as ERNs at the time of retirement, they must all be consumed.
*/
+__rte_internal
int qman_oos_fq(struct qman_fq *fq);
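+/*
+ * Usage sketch (illustrative only): retire an FQ and place it out of
+ * service. Assumes the queue has drained and any ERNs were consumed.
+ */
+#ifdef QMAN_DOC_EXAMPLES
+static int example_teardown_fq(struct qman_fq *fq)
+{
+	u32 flags;
+	int ret;
+
+	ret = qman_retire_fq(fq, &flags);
+	if (ret < 0)
+		return ret;
+	return qman_oos_fq(fq);
+}
+#endif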
/**
* @fq: the frame queue object to be queried
* @np: storage for the queried FQD fields
*/
+__rte_internal
int qman_query_fq_np(struct qman_fq *fq, struct qm_mcr_queryfq_np *np);
/**
* @fq: the frame queue object to be queried
* @frm_cnt: number of frames in the queue
*/
+__rte_internal
int qman_query_fq_frm_cnt(struct qman_fq *fq, u32 *frm_cnt);
/**
* callback, or by waiting for the QMAN_FQ_STATE_VDQCR bit to disappear from the
* "flags" retrieved from qman_fq_state().
*/
+__rte_internal
int qman_volatile_dequeue(struct qman_fq *fq, u32 flags, u32 vdqcr);
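+/*
+ * Usage sketch (illustrative only): wait for a volatile dequeue issued via
+ * qman_volatile_dequeue() to complete by polling the FQ state flags.
+ */
+#ifdef QMAN_DOC_EXAMPLES
+static void example_wait_vdqcr(struct qman_fq *fq)
+{
+	enum qman_fq_state state;
+	u32 flags;
+
+	do {
+		qman_poll_dqrr(8);
+		qman_fq_state(fq, &state, &flags);
+	} while (flags & QMAN_FQ_STATE_VDQCR);
+}
+#endif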
/**
* of an already busy hardware resource by throttling many of the to-be-dropped
* enqueues "at the source".
*/
+__rte_internal
int qman_enqueue(struct qman_fq *fq, const struct qm_fd *fd, u32 flags);
+__rte_internal
int qman_enqueue_multi(struct qman_fq *fq, const struct qm_fd *fd, u32 *flags,
int frames_to_send);
* This API is similar to qman_enqueue_multi(), but it takes FDs which need
* to be processed by different frame queues.
*/
+__rte_internal
int
qman_enqueue_multi_fq(struct qman_fq *fq[], const struct qm_fd *fd,
- int frames_to_send);
+ u32 *flags, int frames_to_send);
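+/*
+ * Usage sketch (illustrative only): push a burst of frames to per-frame
+ * queues with the updated signature. fq[], fd[] and flags[] are assumed to
+ * be caller-populated arrays of at least n entries; a real sender would
+ * bound the retry loop.
+ */
+#ifdef QMAN_DOC_EXAMPLES
+static void example_enqueue_burst(struct qman_fq *fq[], const struct qm_fd *fd,
+				  u32 *flags, int n)
+{
+	int sent = 0;
+
+	while (sent < n)
+		sent += qman_enqueue_multi_fq(&fq[sent], &fd[sent],
+					      &flags[sent], n - sent);
+}
+#endif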
typedef int (*qman_cb_precommit) (void *arg);
* @fqid: the base FQID of the range to deallocate
* @count: the number of FQIDs in the range
*/
+__rte_internal
int qman_reserve_fqid_range(u32 fqid, unsigned int count);
static inline int qman_reserve_fqid(u32 fqid)
{
* than requested (though alignment will be as requested). If @partial is zero,
* the return value will either be 'count' or negative.
*/
+__rte_internal
int qman_alloc_pool_range(u32 *result, u32 count, u32 align, int partial);
static inline int qman_alloc_pool(u32 *result)
{
* any unspecified parameters) will be used rather than a modify hw command
* (which only modifies the specified parameters).
*/
+__rte_internal
int qman_create_cgr(struct qman_cgr *cgr, u32 flags,
struct qm_mcc_initcgr *opts);
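+/*
+ * Usage sketch (illustrative only, mirroring typical dpaa driver usage):
+ * create a CGR with an initial congestion-state threshold; the threshold
+ * value and CGR ID are arbitrary here.
+ */
+#ifdef QMAN_DOC_EXAMPLES
+static int example_setup_cgr(struct qman_cgr *cgr, u32 cgrid)
+{
+	struct qm_mcc_initcgr opts = {
+		.we_mask = QM_CGR_WE_CS_THRES,
+	};
+
+	qm_cgr_cs_thres_set64(&opts.cgr.cs_thres, 1024, 0);
+	cgr->cgrid = cgrid;
+	return qman_create_cgr(cgr, QMAN_CGR_FLAG_USE_INIT, &opts);
+}
+#endif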
* is executed. This must be executed on the same affine portal on which it was
* created.
*/
+__rte_internal
int qman_delete_cgr(struct qman_cgr *cgr);
/**
* unspecified parameters) will be used rather than a modify hw command (which
* only modifies the specified parameters).
*/
+__rte_internal
int qman_modify_cgr(struct qman_cgr *cgr, u32 flags,
struct qm_mcc_initcgr *opts);
* than requested (though alignment will be as requested). If @partial is zero,
* the return value will either be 'count' or negative.
*/
+__rte_internal
int qman_alloc_cgrid_range(u32 *result, u32 count, u32 align, int partial);
static inline int qman_alloc_cgrid(u32 *result)
{
* @id: the base CGR ID of the range to deallocate
* @count: the number of CGR IDs in the range
*/
+__rte_internal
void qman_release_cgrid_range(u32 id, unsigned int count);
static inline void qman_release_cgrid(u32 id)
{