1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright 2018-2022 NXP
 */
/** Maximum number of DMA descriptors per virtual DMA channel. */
#define DPAA2_QDMA_MAX_DESC		1024
/** Minimum number of DMA descriptors per virtual DMA channel. */
#define DPAA2_QDMA_MIN_DESC		1
/** Maximum number of virtual DMA channels (vchans). */
#define DPAA2_QDMA_MAX_VHANS		64

/** VQ flag: use the short frame descriptor format. */
#define DPAA2_QDMA_VQ_FD_SHORT_FORMAT		(1ULL << 0)
/** VQ flag: use the scatter-gather frame descriptor format. */
#define DPAA2_QDMA_VQ_FD_SG_FORMAT		(1ULL << 1)
/** VQ flag: no completion response is expected. */
#define DPAA2_QDMA_VQ_NO_RESPONSE		(1ULL << 2)

/** Frame-list entries per job (see the FLE pool size comments below). */
#define DPAA2_QDMA_MAX_FLE 3
/** Source/destination descriptors per job. */
#define DPAA2_QDMA_MAX_SDD 2

/** Maximum scatter-gather entries per SG job. */
#define DPAA2_QDMA_MAX_SG_NB 64

/** Rx/Tx queues per DPDMAI object. */
#define DPAA2_DPDMAI_MAX_QUEUES	1
/** FLE single job pool element size: job pointer (uint64_t) +
 * 3 frame-list entries + 2 source/destination descriptors.
 */
#define QDMA_FLE_SINGLE_POOL_SIZE (sizeof(uint64_t) + \
			sizeof(struct qbman_fle) * DPAA2_QDMA_MAX_FLE + \
			sizeof(struct qdma_sdd) * DPAA2_QDMA_MAX_SDD)
/** FLE sg jobs pool element size: job number (uint64_t) +
 * 3 frame-list entries + 2 source/destination descriptors +
 * 64 source + 64 destination sg entries + 64 job pointers.
 */
#define QDMA_FLE_SG_POOL_SIZE (sizeof(uint64_t) + \
		sizeof(struct qbman_fle) * DPAA2_QDMA_MAX_FLE + \
		sizeof(struct qdma_sdd) * DPAA2_QDMA_MAX_SDD + \
		sizeof(struct qdma_sg_entry) * (DPAA2_QDMA_MAX_SG_NB * 2) + \
		sizeof(struct rte_dpaa2_qdma_job *) * DPAA2_QDMA_MAX_SG_NB)
/** Offset of the job-count / job-pointer word in an FLE pool element. */
#define QDMA_FLE_JOB_NB_OFFSET 0

/** Single-job elements keep their lone job pointer in the same word. */
#define QDMA_FLE_SINGLE_JOB_OFFSET 0

/** Offset of the frame-list entries. */
#define QDMA_FLE_FLE_OFFSET \
		(QDMA_FLE_JOB_NB_OFFSET + sizeof(uint64_t))

/** Offset of the source/destination descriptors. */
#define QDMA_FLE_SDD_OFFSET \
		(QDMA_FLE_FLE_OFFSET + \
		sizeof(struct qbman_fle) * DPAA2_QDMA_MAX_FLE)

/** Offset of the scatter-gather entries (src entries then dst entries). */
#define QDMA_FLE_SG_ENTRY_OFFSET \
		(QDMA_FLE_SDD_OFFSET + \
		sizeof(struct qdma_sdd) * DPAA2_QDMA_MAX_SDD)

/** Offset of the per-SG-job pointer array. */
#define QDMA_FLE_SG_JOBS_OFFSET \
		(QDMA_FLE_SG_ENTRY_OFFSET + \
		sizeof(struct qdma_sg_entry) * DPAA2_QDMA_MAX_SG_NB * 2)

/** FLE pool cache size.
 * The argument is parenthesized so that expressions such as
 * QDMA_FLE_CACHE_SIZE(a + b) expand correctly.
 */
#define QDMA_FLE_CACHE_SIZE(_num) ((_num) / (RTE_MAX_LCORE * 2))
/** Notification by FQD_CTX[fqid] */
#define QDMA_SER_CTX (1 << 8)

/** RBP memory read/write command value. */
#define DPAA2_RBP_MEM_RW            0x0

/*
 * Source descriptor command read transaction type for RBP=0:
 * coherent copy of cacheable memory
 */
#define DPAA2_COHERENT_NO_ALLOCATE_CACHE	0xb
#define DPAA2_LX2_COHERENT_NO_ALLOCATE_CACHE	0x7

/*
 * Destination descriptor command write transaction type for RBP=0:
 * coherent copy of cacheable memory
 */
#define DPAA2_COHERENT_ALLOCATE_CACHE		0x6
#define DPAA2_LX2_COHERENT_ALLOCATE_CACHE	0xb

/** Maximum possible H/W Queues on each core */
#define MAX_HW_QUEUE_PER_CORE		64

/** Mask selecting address bits 40..51 (0xfff << 40). */
#define QDMA_RBP_UPPER_ADDRESS_MASK (0xfff0000000000)
83 /** Source/Destination Descriptor */
86 /** Stride configuration */
88 /** Route-by-port command */
/* Scatter-gather entry format/control field values. */
#define QDMA_SG_FMT_SDB	0x0	/* single data buffer */
#define QDMA_SG_FMT_FDS	0x1	/* frame data section */
#define QDMA_SG_FMT_SGTE	0x2	/* SGT extension */
#define QDMA_SG_SL_SHORT	0x1	/* short length */
#define QDMA_SG_SL_LONG	0x0	/* long length */
#define QDMA_SG_F	0x1	/* last sg entry */
#define QDMA_SG_BMT_ENABLE	0x1
#define QDMA_SG_BMT_DISABLE	0x0
/* Hardware scatter-gather entry layout for the qDMA SG frame format.
 * NOTE(review): several member lines (reserved bitfields, the union/struct
 * wrappers around the SL0/SL1 length views, the control bitfield struct and
 * the closing brace) are missing from this chunk — verify against the full
 * header before editing.
 */
141 struct qdma_sg_entry {
142 uint32_t addr_lo; /* address 0:31 */
143 uint32_t addr_hi:17; /* address 32:48 */
146 uint32_t data_len_sl0; /* SL=0, the long format */
148 uint32_t len:17; /* SL=1, the short format */
152 uint32_t size:10; /* buff size */
154 } data_len; /* AVAIL_LENGTH */
156 uint32_t ctrl_fields;
169 /** Represents a DPDMAI device */
170 struct dpaa2_dpdmai_dev {
171 /** Pointer to Next device instance */
/* NOTE(review): TAILQ_ENTRY names 'dpaa2_qdma_device', not the enclosing
 * 'dpaa2_dpdmai_dev' tag — confirm the intended list element type. */
172 TAILQ_ENTRY(dpaa2_qdma_device) next;
173 /** handle to DPDMAI object */
174 struct fsl_mc_io dpdmai;
175 /** HW ID for DPDMAI object */
177 /** Token of this device */
179 /** Number of queue in this DPDMAI device */
/* NOTE(review): the member declarations for the HW id, token and queue
 * count are missing from this chunk. */
/** Rx queues, one per DPDMAI queue. */
182 struct dpaa2_queue rx_queue[DPAA2_DPDMAI_MAX_QUEUES];
/** Tx queues, one per DPDMAI queue. */
184 struct dpaa2_queue tx_queue[DPAA2_DPDMAI_MAX_QUEUES];
/** Back-pointer to the owning QDMA device. */
185 struct qdma_device *qdma_dev;
/* Forward declaration; full definition follows below. */
188 struct qdma_virt_queue;

/* Callback type: extract job pointer(s) from a dequeued FD.
 * NOTE(review): the trailing parameter lines of all four typedefs are
 * missing from this chunk; confirm the full signatures in the original
 * header before use.
 */
190 typedef uint16_t (qdma_get_job_t)(struct qdma_virt_queue *qdma_vq,
191 const struct qbman_fd *fd,
192 struct rte_dpaa2_qdma_job **job,
/* Callback type: build an FD from job(s) for enqueue. */
194 typedef int (qdma_set_fd_t)(struct qdma_virt_queue *qdma_vq,
196 struct rte_dpaa2_qdma_job **job,
/* Callback type: dequeue multiple jobs from a virtual queue. */
199 typedef int (qdma_dequeue_multijob_t)(
200 struct qdma_virt_queue *qdma_vq,
202 struct rte_dpaa2_qdma_job **job,
/* Callback type: enqueue multiple jobs to a virtual queue. */
205 typedef int (qdma_enqueue_multijob_t)(
206 struct qdma_virt_queue *qdma_vq,
207 struct rte_dpaa2_qdma_job **job,
210 /** Represents a QDMA virtual queue */
/* NOTE(review): several member lines and the closing brace of this struct
 * are missing from this chunk. */
211 struct qdma_virt_queue {
212 /** Status ring of the virtual queue */
213 struct rte_ring *status_ring;
214 /** Associated hw queue */
215 struct dpaa2_dpdmai_dev *dpdmai_dev;
216 /** FLE pool for the queue */
217 struct rte_mempool *fle_pool;
/** Route-by-port (RBP) configuration for this queue. */
219 struct rte_dpaa2_qdma_rbp rbp;
220 /** States if this vq is in use or not */
222 /** States if this vq has exclusively associated hw queue */
223 uint8_t exclusive_hw_queue;
224 /** Number of descriptor for the virtual DMA channel */
226 /* Total number of enqueues on this VQ */
227 uint64_t num_enqueues;
228 /* Total number of dequeues from this VQ */
229 uint64_t num_dequeues;
/** In-flight job pointers, sized by DPAA2_QDMA_MAX_DESC. */
234 struct rte_dpaa2_qdma_job *job_list[DPAA2_QDMA_MAX_DESC];
/** Pool from which job structures are drawn. */
235 struct rte_mempool *job_pool;
/** DMA statistics for this virtual queue. */
238 struct rte_dma_stats stats;
/* FD build/parse and enqueue/dequeue hooks for this queue. */
240 qdma_set_fd_t *set_fd;
241 qdma_get_job_t *get_job;
243 qdma_dequeue_multijob_t *dequeue_job;
244 qdma_enqueue_multijob_t *enqueue_job;
247 /** Represents a QDMA device. */
/* NOTE(review): the 'struct qdma_device {' opening line, some members and
 * the closing brace are missing from this chunk — see the full header. */
249 /** VQ's of this device */
250 struct qdma_virt_queue *vqs;
251 /** Total number of VQ's */
253 /** Device state - started or stopped */
257 #endif /* _DPAA2_QDMA_H_ */