X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=drivers%2Fbus%2Fdpaa%2Finclude%2Ffsl_qman.h;h=4411bb0a79d0e65f7d58faef694044c9b4f9a0ae;hb=1e0f9b07755df9855d7c53365d17c56d33d4efbd;hp=99e46e1283e095971383e10dba6670d71be41815;hpb=43797e7b47741d58e5061255171f82a9d423bdf4;p=dpdk.git

diff --git a/drivers/bus/dpaa/include/fsl_qman.h b/drivers/bus/dpaa/include/fsl_qman.h
index 99e46e1283..4411bb0a79 100644
--- a/drivers/bus/dpaa/include/fsl_qman.h
+++ b/drivers/bus/dpaa/include/fsl_qman.h
@@ -1,6 +1,7 @@
 /* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0)
  *
  * Copyright 2008-2012 Freescale Semiconductor, Inc.
+ * Copyright 2019 NXP
  *
  */
@@ -192,7 +193,7 @@ struct qm_fd {
         u32 cmd;
         u32 status;
     };
-} __attribute__((aligned(8)));
+} __rte_aligned(8);
 #define QM_FD_DD_NULL 0x00
 #define QM_FD_PID_MASK 0x3f
 static inline u64 qm_fd_addr_get64(const struct qm_fd *fd)
@@ -284,20 +285,20 @@ static inline dma_addr_t qm_sg_addr(const struct qm_sg_entry *sg)
 } while (0)
 
 /* See 1.5.8.1: "Enqueue Command" */
-struct qm_eqcr_entry {
+struct __rte_aligned(8) qm_eqcr_entry {
     u8 __dont_write_directly__verb;
     u8 dca;
     u16 seqnum;
     u32 orp; /* 24-bit */
     u32 fqid; /* 24-bit */
     u32 tag;
-    struct qm_fd fd;
+    struct qm_fd fd; /* this has alignment 8 */
     u8 __reserved3[32];
 } __packed;
 
 /* "Frame Dequeue Response" */
-struct qm_dqrr_entry {
+struct __rte_aligned(8) qm_dqrr_entry {
     u8 verb;
     u8 stat;
     u16 seqnum; /* 15-bit */
@@ -305,7 +306,7 @@
     u8 __reserved2[3];
     u32 fqid; /* 24-bit */
     u32 contextB;
-    struct qm_fd fd;
+    struct qm_fd fd; /* this has alignment 8 */
     u8 __reserved4[32];
 };
@@ -323,18 +324,19 @@ struct qm_dqrr_entry {
 /* "ERN Message Response" */
 /* "FQ State Change Notification" */
 struct qm_mr_entry {
-    u8 verb;
     union {
         struct {
+            u8 verb;
             u8 dca;
             u16 seqnum;
             u8 rc; /* Rejection Code */
             u32 orp:24;
             u32 fqid; /* 24-bit */
             u32 tag;
-            struct qm_fd fd;
-        } __packed ern;
+            struct qm_fd fd; /* this has alignment 8 */
+        } __packed __rte_aligned(8) ern;
         struct {
+            u8 verb;
 #if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
             u8 colour:2; /* See QM_MR_DCERN_COLOUR_* */
             u8 __reserved1:4;
@@ -349,18 +351,19 @@ struct qm_mr_entry {
             u32 __reserved3:24;
             u32 fqid; /* 24-bit */
             u32 tag;
-            struct qm_fd fd;
-        } __packed dcern;
+            struct qm_fd fd; /* this has alignment 8 */
+        } __packed __rte_aligned(8) dcern;
         struct {
+            u8 verb;
             u8 fqs; /* Frame Queue Status */
             u8 __reserved1[6];
             u32 fqid; /* 24-bit */
             u32 contextB;
             u8 __reserved2[16];
-        } __packed fq; /* FQRN/FQRNI/FQRL/FQPN */
+        } __packed __rte_aligned(8) fq; /* FQRN/FQRNI/FQRL/FQPN */
     };
     u8 __reserved2[32];
-} __packed;
+} __packed __rte_aligned(8);
 #define QM_MR_VERB_VBIT 0x80
 /*
  * ERNs originating from direct-connect portals ("dcern") use 0x20 as a verb
  *
@@ -1131,6 +1134,14 @@ typedef enum qman_cb_dqrr_result (*qman_dpdk_cb_dqrr)(void *event,
                     const struct qm_dqrr_entry *dqrr,
                     void **bd);
 
+/* This callback type is used when handling buffers in dpdk pull mode */
+typedef void (*qman_dpdk_pull_cb_dqrr)(struct qman_fq **fq,
+        struct qm_dqrr_entry **dqrr,
+        void **bufs,
+        int num_bufs);
+
+typedef void (*qman_dpdk_cb_prepare)(struct qm_dqrr_entry *dq, void **bufs);
+
 /*
  * This callback type is used when handling ERNs, FQRNs and FQRLs via MR. They
  * are always consumed after the callback returns.
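/*
 * [Editorial sketch - not part of the patch] To make the new pull-mode
 * callback types above concrete, here is a minimal, hypothetical function
 * matching qman_dpdk_pull_cb_dqrr; such a function would be installed in the
 * dqrr_dpdk_pull_cb member added to struct qman_fq_cb in the next hunk.
 * The name example_pull_cb, the use of qm_fd_addr_get64() to recover a
 * buffer address, and the cast via uintptr_t (assumes <stdint.h>) are
 * illustrative assumptions, not the actual dpaa driver implementation.
 */
static void example_pull_cb(struct qman_fq **fq,
                struct qm_dqrr_entry **dqrr,
                void **bufs,
                int num_bufs)
{
    int i;

    for (i = 0; i < num_bufs; i++) {
        /* Each DQRR entry carries a frame descriptor (fd); hand its
         * buffer address back to the caller's bufs[] array.
         */
        bufs[i] = (void *)(uintptr_t)qm_fd_addr_get64(&dqrr[i]->fd);
    }

    (void)fq; /* per-frame FQ pointers are not needed in this sketch */
}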
@@ -1191,8 +1202,10 @@ enum qman_fq_state {
 struct qman_fq_cb {
     union { /* for dequeued frames */
         qman_dpdk_cb_dqrr dqrr_dpdk_cb;
+        qman_dpdk_pull_cb_dqrr dqrr_dpdk_pull_cb;
         qman_cb_dqrr dqrr;
     };
+    qman_dpdk_cb_prepare dqrr_prepare;
     qman_cb_mr ern; /* for s/w ERNs */
     qman_cb_mr fqs; /* frame-queue state changes*/
 };
@@ -1202,9 +1215,13 @@ struct qman_fq {
     struct qman_fq_cb cb;
 
     u32 fqid_le;
+    u32 fqid;
+
+    int q_fd;
     u16 ch_id;
     u8 cgr_groupid;
-    u8 is_static;
+    u8 is_static:4;
+    u8 qp_initialized:4;
 
     /* DPDK Interface */
     void *dpaa_intf;
@@ -1212,15 +1229,16 @@ struct qman_fq {
     struct rte_event ev;
     /* affined portal in case of static queue */
     struct qman_portal *qp;
+    struct dpaa_bp_info *bp_array;
 
     volatile unsigned long flags;
     enum qman_fq_state state;
-    u32 fqid;
     spinlock_t fqlock;
 
     struct rb_node node;
 
 #ifdef CONFIG_FSL_QMAN_FQ_LOOKUP
+    void **qman_fq_lookup_table;
     u32 key;
 #endif
 };
@@ -1295,14 +1313,58 @@ struct qman_cgr {
 #define QMAN_CGR_FLAG_USE_INIT 0x00000001
 #define QMAN_CGR_MODE_FRAME 0x00000001
 
+#ifdef CONFIG_FSL_QMAN_FQ_LOOKUP
+__rte_internal
+void qman_set_fq_lookup_table(void **table);
+#endif
+
 /**
  * qman_get_portal_index - get portal configuration index
  */
 int qman_get_portal_index(void);
 
+__rte_internal
 u32 qman_portal_dequeue(struct rte_event ev[], unsigned int poll_limit,
             void **bufs);
 
+/**
+ * qman_irqsource_add - add processing sources to be interrupt-driven
+ * @bits: bitmask of QM_PIRQ_**I processing sources
+ *
+ * Adds processing sources that should be interrupt-driven (rather than
+ * processed via qman_poll_***() functions). Returns zero for success, or
+ * -EINVAL if the current CPU is sharing a portal hosted on another CPU.
+ */
+__rte_internal
+int qman_irqsource_add(u32 bits);
+
+/**
+ * qman_fq_portal_irqsource_add - similar to qman_irqsource_add, but it
+ * takes portal (fq specific) as input rather than using the thread affined
+ * portal.
+ */
+__rte_internal
+int qman_fq_portal_irqsource_add(struct qman_portal *p, u32 bits);
+
+/**
+ * qman_irqsource_remove - remove processing sources from being interrupt-driven
+ * @bits: bitmask of QM_PIRQ_**I processing sources
+ *
+ * Removes processing sources from being interrupt-driven, so that they will
+ * instead be processed via qman_poll_***() functions. Returns zero for success,
+ * or -EINVAL if the current CPU is sharing a portal hosted on another CPU.
+ */
+__rte_internal
+int qman_irqsource_remove(u32 bits);
+
+/**
+ * qman_fq_portal_irqsource_remove - similar to qman_irqsource_remove, but it
+ * takes portal (fq specific) as input rather than using the thread affined
+ * portal.
+ */
+__rte_internal
+int qman_fq_portal_irqsource_remove(struct qman_portal *p, u32 bits);
+
 /**
  * qman_affine_channel - return the channel ID of a portal
  * @cpu: the cpu whose affine portal is the subject of the query
@@ -1313,6 +1375,7 @@ u32 qman_portal_dequeue(struct rte_event ev[], unsigned int poll_limit,
  */
 u16 qman_affine_channel(int cpu);
 
+__rte_internal
 unsigned int qman_portal_poll_rx(unsigned int poll_limit,
             void **bufs, struct qman_portal *q);
 
@@ -1320,10 +1383,12 @@ unsigned int qman_portal_poll_rx(unsigned int poll_limit,
  * qman_set_vdq - Issue a volatile dequeue command
  * @fq: Frame Queue on which the volatile dequeue command is issued
  * @num: Number of Frames requested for volatile dequeue
+ * @vdqcr_flags: QM_VDQCR_EXACT flag for the VDQCR command
  *
  * This function will issue a volatile dequeue command to the QMAN.
 */
-int qman_set_vdq(struct qman_fq *fq, u16 num);
+__rte_internal
+int qman_set_vdq(struct qman_fq *fq, u16 num, uint32_t vdqcr_flags);
 
 /**
  * qman_dequeue - Get the DQRR entry after volatile dequeue command
@@ -1333,6 +1398,7 @@ int qman_set_vdq(struct qman_fq *fq, u16 num);
  * is issued. It will keep returning NULL until there is no packet available on
  * the DQRR.
  */
+__rte_internal
 struct qm_dqrr_entry *qman_dequeue(struct qman_fq *fq);
 
 /**
@@ -1344,6 +1410,7 @@ struct qm_dqrr_entry *qman_dequeue(struct qman_fq *fq);
  * This will consume the DQRR entry and make it available for next volatile
  * dequeue.
  */
+__rte_internal
 void qman_dqrr_consume(struct qman_fq *fq,
             struct qm_dqrr_entry *dq);
 
@@ -1357,6 +1424,7 @@ void qman_dqrr_consume(struct qman_fq *fq,
  * this function will return -EINVAL, otherwise the return value is >=0 and
  * represents the number of DQRR entries processed.
  */
+__rte_internal
 int qman_poll_dqrr(unsigned int limit);
 
 /**
@@ -1403,6 +1471,7 @@ void qman_start_dequeues(void);
  * (SDQCR). The requested pools are limited to those the portal has dequeue
  * access to.
  */
+__rte_internal
 void qman_static_dequeue_add(u32 pools, struct qman_portal *qm);
 
 /**
@@ -1450,6 +1519,7 @@ void qman_dca(const struct qm_dqrr_entry *dq, int park_request);
  * function must be called from the same CPU as that which processed the DQRR
  * entry in the first place.
  */
+__rte_internal
 void qman_dca_index(u8 index, int park_request);
 
 /**
@@ -1507,6 +1577,7 @@ void qman_set_dc_ern(qman_cb_dc_ern handler, int affine);
  * a frame queue object based on that, rather than assuming/requiring that it be
  * Out of Service.
  */
+__rte_internal
 int qman_create_fq(u32 fqid, u32 flags, struct qman_fq *fq);
 
 /**
@@ -1525,6 +1596,7 @@ void qman_destroy_fq(struct qman_fq *fq, u32 flags);
  * qman_fq_fqid - Queries the frame queue ID of a FQ object
  * @fq: the frame queue object to query
  */
+__rte_internal
 u32 qman_fq_fqid(struct qman_fq *fq);
 
 /**
@@ -1537,6 +1609,7 @@ u32 qman_fq_fqid(struct qman_fq *fq);
  * This captures the state, as seen by the driver, at the time the function
  * executes.
  */
+__rte_internal
 void qman_fq_state(struct qman_fq *fq, enum qman_fq_state *state, u32 *flags);
 
 /**
@@ -1573,6 +1646,7 @@ void qman_fq_state(struct qman_fq *fq, enum qman_fq_state *state, u32 *flags);
  * context_a.address fields and will leave the stashing fields provided by the
  * user alone, otherwise it will zero out the context_a.stashing fields.
  */
+__rte_internal
 int qman_init_fq(struct qman_fq *fq, u32 flags, struct qm_mcc_initfq *opts);
 
 /**
@@ -1602,6 +1676,7 @@ int qman_schedule_fq(struct qman_fq *fq);
  * caller should be prepared to accept the callback as the function is called,
  * not only once it has returned.
  */
+__rte_internal
 int qman_retire_fq(struct qman_fq *fq, u32 *flags);
 
 /**
@@ -1611,6 +1686,7 @@ int qman_retire_fq(struct qman_fq *fq, u32 *flags);
  * The frame queue must be retired and empty, and if any order restoration list
  * was released as ERNs at the time of retirement, they must all be consumed.
 */
+__rte_internal
 int qman_oos_fq(struct qman_fq *fq);
 
 /**
@@ -1644,6 +1720,7 @@ int qman_query_fq_has_pkts(struct qman_fq *fq);
  * @fq: the frame queue object to be queried
  * @np: storage for the queried FQD fields
  */
+__rte_internal
 int qman_query_fq_np(struct qman_fq *fq, struct qm_mcr_queryfq_np *np);
 
 /**
@@ -1651,6 +1728,7 @@ int qman_query_fq_np(struct qman_fq *fq, struct qm_mcr_queryfq_np *np);
  * @fq: the frame queue object to be queried
  * @frm_cnt: number of frames in the queue
  */
+__rte_internal
 int qman_query_fq_frm_cnt(struct qman_fq *fq, u32 *frm_cnt);
 
 /**
@@ -1681,6 +1759,7 @@ int qman_query_wq(u8 query_dedicated, struct qm_mcr_querywq *wq);
  * callback, or by waiting for the QMAN_FQ_STATE_VDQCR bit to disappear from the
  * "flags" retrieved from qman_fq_state().
  */
+__rte_internal
 int qman_volatile_dequeue(struct qman_fq *fq, u32 flags, u32 vdqcr);
 
 /**
@@ -1716,8 +1795,10 @@ int qman_volatile_dequeue(struct qman_fq *fq, u32 flags, u32 vdqcr);
  * of an already busy hardware resource by throttling many of the to-be-dropped
  * enqueues "at the source".
  */
+__rte_internal
 int qman_enqueue(struct qman_fq *fq, const struct qm_fd *fd, u32 flags);
 
+__rte_internal
 int qman_enqueue_multi(struct qman_fq *fq, const struct qm_fd *fd, u32 *flags,
         int frames_to_send);
 
@@ -1731,9 +1812,10 @@ int qman_enqueue_multi(struct qman_fq *fq, const struct qm_fd *fd, u32 *flags,
  * This API is similar to qman_enqueue_multi(), but it takes fd which needs
  * to be processed by different frame queues.
  */
+__rte_internal
 int qman_enqueue_multi_fq(struct qman_fq *fq[], const struct qm_fd *fd,
-        int frames_to_send);
+        u32 *flags, int frames_to_send);
 
 typedef int (*qman_cb_precommit) (void *arg);
 
@@ -1819,6 +1901,7 @@ int qman_shutdown_fq(u32 fqid);
  * @fqid: the base FQID of the range to deallocate
  * @count: the number of FQIDs in the range
  */
+__rte_internal
 int qman_reserve_fqid_range(u32 fqid, unsigned int count);
 static inline int qman_reserve_fqid(u32 fqid)
 {
@@ -1838,6 +1921,7 @@ static inline int qman_reserve_fqid(u32 fqid)
  * than requested (though alignment will be as requested). If @partial is zero,
  * the return value will either be 'count' or negative.
  */
+__rte_internal
 int qman_alloc_pool_range(u32 *result, u32 count, u32 align, int partial);
 static inline int qman_alloc_pool(u32 *result)
 {
@@ -1885,6 +1969,7 @@ void qman_seed_pool_range(u32 id, unsigned int count);
  * any unspecified parameters) will be used rather than a modify hw hardware
  * (which only modifies the specified parameters).
  */
+__rte_internal
 int qman_create_cgr(struct qman_cgr *cgr, u32 flags,
             struct qm_mcc_initcgr *opts);
 
@@ -1907,6 +1992,7 @@ int qman_create_cgr_to_dcp(struct qman_cgr *cgr, u32 flags, u16 dcp_portal,
  * is executed. This must be executed on the same affine portal on which it was
  * created.
  */
+__rte_internal
 int qman_delete_cgr(struct qman_cgr *cgr);
 
 /**
@@ -1923,6 +2009,7 @@ int qman_delete_cgr(struct qman_cgr *cgr);
  * unspecified parameters) will be used rather than a modify hw hardware (which
  * only modifies the specified parameters).
  */
+__rte_internal
 int qman_modify_cgr(struct qman_cgr *cgr, u32 flags,
             struct qm_mcc_initcgr *opts);
 
@@ -1951,6 +2038,7 @@ int qman_query_congestion(struct qm_mcr_querycongestion *congestion);
  * than requested (though alignment will be as requested). If @partial is zero,
  * the return value will either be 'count' or negative.
 */
+__rte_internal
 int qman_alloc_cgrid_range(u32 *result, u32 count, u32 align, int partial);
 static inline int qman_alloc_cgrid(u32 *result)
 {
@@ -1964,6 +2052,7 @@ static inline int qman_alloc_cgrid(u32 *result)
  * @id: the base CGR ID of the range to deallocate
  * @count: the number of CGR IDs in the range
  */
+__rte_internal
 void qman_release_cgrid_range(u32 id, unsigned int count);
 static inline void qman_release_cgrid(u32 id)
 {
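/*
 * [Editorial sketch - not part of the patch] A hedged usage example of the
 * reworked volatile-dequeue path declared above: qman_set_vdq() now takes a
 * vdqcr_flags argument (e.g. QM_VDQCR_EXACT, per the @vdqcr_flags note), and
 * the resulting frames are then pulled with qman_dequeue() and released with
 * qman_dqrr_consume(). The function name poll_fq_once, the error handling,
 * and the assumption that *fq was already set up with qman_create_fq() and
 * qman_init_fq() are illustrative only.
 */
static int poll_fq_once(struct qman_fq *fq, u16 budget)
{
    struct qm_dqrr_entry *dq;
    int cnt = 0;

    /* Ask QMan for (at most) 'budget' frames on this FQ. */
    if (qman_set_vdq(fq, budget, QM_VDQCR_EXACT))
        return -1;

    /* Drain whatever the volatile dequeue command produced. */
    while ((dq = qman_dequeue(fq)) != NULL) {
        /* ... translate dq->fd into an application buffer here ... */
        qman_dqrr_consume(fq, dq);
        cnt++;
    }

    return cnt;
}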