/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2018-2020 NXP
 */
5 #ifndef __DPAA2_QDMA_H__
6 #define __DPAA2_QDMA_H__
/* Frame list entries used per FD (per the FLE pool layout below). */
#define DPAA2_QDMA_MAX_FLE 3
/* Source/destination descriptors carried per FD. */
#define DPAA2_QDMA_MAX_SDD 2

/* Maximum scatter-gather entries per direction in one SG job. */
#define DPAA2_QDMA_MAX_SG_NB 64

/* Maximum number of Rx/Tx queues per DPDMAI object. */
#define DPAA2_DPDMAI_MAX_QUEUES	8
/** FLE pool element size: job number (uint64_t) +
 * DPAA2_QDMA_MAX_FLE frame list entries +
 * DPAA2_QDMA_MAX_SDD source/destination descriptors +
 * DPAA2_QDMA_MAX_SG_NB * 2 (src + dst) sg entries +
 * DPAA2_QDMA_MAX_SG_NB job pointers.
 */
#define QDMA_FLE_POOL_SIZE (sizeof(uint64_t) + \
		sizeof(struct qbman_fle) * DPAA2_QDMA_MAX_FLE + \
		sizeof(struct qdma_sdd) * DPAA2_QDMA_MAX_SDD + \
		sizeof(struct qdma_sg_entry) * DPAA2_QDMA_MAX_SG_NB * 2 + \
		sizeof(struct rte_qdma_job *) * DPAA2_QDMA_MAX_SG_NB)
/* Byte offsets of the regions inside one FLE pool element; each offset
 * is the previous offset plus the size of the previous region, matching
 * the layout described by QDMA_FLE_POOL_SIZE.
 */

/* Job count (uint64_t) sits at the start of the element. */
#define QDMA_FLE_JOB_NB_OFFSET 0

/* Frame list entries follow the job count. */
#define QDMA_FLE_FLE_OFFSET \
		(QDMA_FLE_JOB_NB_OFFSET + sizeof(uint64_t))

/* Source/destination descriptors follow the frame list. */
#define QDMA_FLE_SDD_OFFSET \
		(QDMA_FLE_FLE_OFFSET + \
		sizeof(struct qbman_fle) * DPAA2_QDMA_MAX_FLE)

/* Scatter-gather entries (src + dst) follow the descriptors. */
#define QDMA_FLE_SG_ENTRY_OFFSET \
		(QDMA_FLE_SDD_OFFSET + \
		sizeof(struct qdma_sdd) * DPAA2_QDMA_MAX_SDD)

/* Job pointer array is last. */
#define QDMA_FLE_JOBS_OFFSET \
		(QDMA_FLE_SG_ENTRY_OFFSET + \
		sizeof(struct qdma_sg_entry) * DPAA2_QDMA_MAX_SG_NB * 2)
/** FLE pool per-lcore cache size for a pool of _num elements.
 * _num is parenthesized so expressions like QDMA_FLE_CACHE_SIZE(a + b)
 * divide the whole sum, not just b.
 */
#define QDMA_FLE_CACHE_SIZE(_num) ((_num) / (RTE_MAX_LCORE * 2))
/** Notification by FQD_CTX[fqid] */
#define QDMA_SER_CTX (1 << 8)

/* RBP disabled: plain memory read/write transaction type. */
#define DPAA2_RBP_MEM_RW	0x0
/*
 * Source descriptor command read transaction type for RBP=0:
 * coherent copy of cacheable memory
 */
#define DPAA2_COHERENT_NO_ALLOCATE_CACHE	0xb
/* Read transaction type used on LX2 platforms (per macro name). */
#define DPAA2_LX2_COHERENT_NO_ALLOCATE_CACHE	0x7
/*
 * Destination descriptor command write transaction type for RBP=0:
 * coherent copy of cacheable memory
 */
#define DPAA2_COHERENT_ALLOCATE_CACHE	0x6
/* Write transaction type used on LX2 platforms (per macro name). */
#define DPAA2_LX2_COHERENT_ALLOCATE_CACHE	0xb
/** Maximum possible H/W Queues on each core */
#define MAX_HW_QUEUE_PER_CORE	64

/* Mask of the upper address bits carried in the FD for route-by-port.
 * NOTE(review): exact bit meaning inferred from the macro name - confirm
 * against the QDMA hardware reference.
 */
#define QDMA_RBP_UPPER_ADDRESS_MASK (0xfff0000000000)
/**
 * Represents a QDMA device.
 * A single QDMA device exists which is combination of multiple DPDMAI rawdev's.
 */
74 /** total number of hw queues. */
75 uint16_t num_hw_queues;
77 * Maximum number of hw queues to be alocated per core.
78 * This is limited by MAX_HW_QUEUE_PER_CORE
80 uint16_t max_hw_queues_per_core;
82 /** VQ's of this device */
83 struct qdma_virt_queue *vqs;
84 /** Maximum number of VQ's */
86 /** Device state - started or stopped */
88 /** FLE pool for the device */
89 struct rte_mempool *fle_pool;
92 /** A lock to QDMA device whenever required */
96 /** Represents a QDMA H/W queue */
97 struct qdma_hw_queue {
98 /** Pointer to Next instance */
99 TAILQ_ENTRY(qdma_hw_queue) next;
100 /** DPDMAI device to communicate with HW */
101 struct dpaa2_dpdmai_dev *dpdmai_dev;
102 /** queue ID to communicate with HW */
104 /** Associated lcore id */
106 /** Number of users of this hw queue */
/* Forward declaration; the full definition appears later in this header. */
struct qdma_virt_queue;
112 typedef uint16_t (qdma_get_job_t)(struct qdma_virt_queue *qdma_vq,
113 const struct qbman_fd *fd,
114 struct rte_qdma_job **job,
116 typedef int (qdma_set_fd_t)(struct qdma_virt_queue *qdma_vq,
118 struct rte_qdma_job **job,
121 typedef int (qdma_dequeue_multijob_t)(
122 struct qdma_virt_queue *qdma_vq,
124 struct rte_qdma_job **job,
127 typedef int (qdma_enqueue_multijob_t)(
128 struct qdma_virt_queue *qdma_vq,
129 struct rte_qdma_job **job,
132 /** Represents a QDMA virtual queue */
133 struct qdma_virt_queue {
134 /** Status ring of the virtual queue */
135 struct rte_ring *status_ring;
136 /** Associated hw queue */
137 struct qdma_hw_queue *hw_queue;
139 struct rte_qdma_rbp rbp;
140 /** Associated lcore id */
142 /** States if this vq is in use or not */
144 /** States if this vq has exclusively associated hw queue */
145 uint8_t exclusive_hw_queue;
146 /* Total number of enqueues on this VQ */
147 uint64_t num_enqueues;
148 /* Total number of dequeues from this VQ */
149 uint64_t num_dequeues;
154 qdma_set_fd_t *set_fd;
155 qdma_get_job_t *get_job;
157 qdma_dequeue_multijob_t *dequeue_job;
158 qdma_enqueue_multijob_t *enqueue_job;
161 /** Represents a QDMA per core hw queues allocation in virtual mode */
162 struct qdma_per_core_info {
163 /** list for allocated hw queues */
164 struct qdma_hw_queue *hw_queues[MAX_HW_QUEUE_PER_CORE];
165 /* Number of hw queues allocated for this core */
166 uint16_t num_hw_queues;
/** Source/Destination Descriptor */
/** Stride configuration */
/** Route-by-port command */
/* Encodings for the scatter-gather entry control fields. */
#define QDMA_SG_FMT_SDB	0x0 /* single data buffer */
#define QDMA_SG_FMT_FDS	0x1 /* frame data section */
#define QDMA_SG_FMT_SGTE	0x2 /* SGT extension */
#define QDMA_SG_SL_SHORT	0x1 /* short length */
#define QDMA_SG_SL_LONG	0x0 /* long length */
#define QDMA_SG_F	0x1 /* last sg entry */
#define QDMA_SG_BMT_ENABLE	0x1
#define QDMA_SG_BMT_DISABLE	0x0
227 struct qdma_sg_entry {
228 uint32_t addr_lo; /* address 0:31 */
229 uint32_t addr_hi:17; /* address 32:48 */
232 uint32_t data_len_sl0; /* SL=0, the long format */
234 uint32_t len:17; /* SL=1, the short format */
238 uint32_t size:10; /* buff size */
240 } data_len; /* AVAIL_LENGTH */
242 uint32_t ctrl_fields;
255 /** Represents a DPDMAI raw device */
256 struct dpaa2_dpdmai_dev {
257 /** Pointer to Next device instance */
258 TAILQ_ENTRY(dpaa2_qdma_device) next;
259 /** handle to DPDMAI object */
260 struct fsl_mc_io dpdmai;
261 /** HW ID for DPDMAI object */
263 /** Tocken of this device */
265 /** Number of queue in this DPDMAI device */
268 struct dpaa2_queue rx_queue[DPAA2_DPDMAI_MAX_QUEUES];
270 struct dpaa2_queue tx_queue[DPAA2_DPDMAI_MAX_QUEUES];
271 struct qdma_device *qdma_dev;
274 static inline struct qdma_device *
275 QDMA_DEV_OF_VQ(struct qdma_virt_queue *vq)
277 return vq->hw_queue->dpdmai_dev->qdma_dev;
280 #endif /* __DPAA2_QDMA_H__ */