/*-
 *   BSD LICENSE
 *
 * Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved.
 * Copyright (c) 2016 NXP. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * * Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * * Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in
 *   the documentation and/or other materials provided with the
 *   distribution.
 * * Neither the name of Freescale Semiconductor, Inc nor the names of its
 *   contributors may be used to endorse or promote products derived
 *   from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef _DPAA2_HW_PVT_H_
#define _DPAA2_HW_PVT_H_

#include <mc/fsl_mc_sys.h>
#include <fsl_qbman_portal.h>

#define lower_32_bits(x) ((uint32_t)(x))
#define upper_32_bits(x) ((uint32_t)(((x) >> 16) >> 16))
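/*
 * Note (illustrative, not in the original sources): upper_32_bits()
 * shifts in two 16-bit steps so the macro stays well defined even when
 * x is only 32 bits wide, where a single ">> 32" would be undefined
 * behaviour. For example, with x = 0x0000001234abcd00ULL:
 * lower_32_bits(x) == 0x34abcd00 and upper_32_bits(x) == 0x00000012.
 */
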
#define SVR_LS1080A 0x87030000
#define SVR_LS2080A 0x87010000
#define SVR_LS2088A 0x87090000

#define ETH_VLAN_HLEN 4 /**< VLAN header length */

#define MAX_TX_RING_SLOTS 8
/**< Maximum number of slots available in the TX ring */

#define DPAA2_DQRR_RING_SIZE 16
/**< Maximum number of slots available in the RX (DQRR) ring */

#define MC_PORTAL_INDEX 0
#define NUM_DPIO_REGIONS 2
#define NUM_DQS_PER_QUEUE 2

/* Maximum number of buffers that can be acquired/released through QBMAN in one command */
#define DPAA2_MBUF_MAX_ACQ_REL 7
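/*
 * Usage sketch (illustrative only, not part of this header): since the
 * portal moves at most DPAA2_MBUF_MAX_ACQ_REL buffers per command,
 * callers release larger sets in batches, along these lines:
 *
 *	while (n < count) {
 *		unsigned int todo = RTE_MIN(count - n,
 *				(unsigned int)DPAA2_MBUF_MAX_ACQ_REL);
 *		do {
 *			ret = qbman_swp_release(swp, &releasedesc,
 *						&bufs[n], todo);
 *		} while (ret == -EBUSY);
 *		n += todo;
 *	}
 */
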
#define DPAA2_MBUF_HW_ANNOTATION 64
#define DPAA2_FD_PTA_SIZE 0

#if (DPAA2_MBUF_HW_ANNOTATION + DPAA2_FD_PTA_SIZE) > RTE_PKTMBUF_HEADROOM
#error "Annotation requirement is more than RTE_PKTMBUF_HEADROOM"
#endif

/* We re-use the mbuf HEADROOM for the hardware annotation on RX */
#define DPAA2_HW_BUF_RESERVE 0
#define DPAA2_PACKET_LAYOUT_ALIGN 64 /* changed from 256 */
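/*
 * Note (illustrative, derived from the defines above): with
 * DPAA2_FD_PTA_SIZE == 0, the 64-byte hardware annotation is the only
 * per-packet metadata the hardware writes, and it lands in the mbuf
 * headroom; the #if above guarantees the headroom is large enough.
 */
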
#define DPAA2_DPCI_MAX_QUEUES 2

struct dpaa2_dpio_dev {
	TAILQ_ENTRY(dpaa2_dpio_dev) next;
	/**< Pointer to the next device instance */
	uint16_t index; /**< Index of an instance in the list */
	rte_atomic16_t ref_count;
	/**< How many thread contexts are sharing this. */
	struct fsl_mc_io *dpio; /**< Handle to the DPIO portal object */
	struct qbman_swp *sw_portal; /**< SW portal object */
	const struct qbman_result *dqrr[4];
	/**< DQRR entries for this SW portal */
	void *mc_portal; /**< MC portal for configuring this device */
	uintptr_t qbman_portal_ce_paddr;
	/**< Physical address of the cache-enabled area */
	uintptr_t ce_size; /**< Size of the CE region */
	uintptr_t qbman_portal_ci_paddr;
	/**< Physical address of the cache-inhibited area */
	uintptr_t ci_size; /**< Size of the CI region */
	int32_t vfio_fd; /**< File descriptor received via VFIO */
	int32_t hw_id; /**< A unique ID of this DPIO device instance */
};

struct dpaa2_dpbp_dev {
	TAILQ_ENTRY(dpaa2_dpbp_dev) next;
	/**< Pointer to the next device instance */
	struct fsl_mc_io dpbp; /**< Handle to the DPBP portal object */
	rte_atomic16_t in_use;
	uint32_t dpbp_id; /**< HW ID of the DPBP object */
};

struct queue_storage_info_t {
	struct qbman_result *dq_storage[NUM_DQS_PER_QUEUE];
	struct qbman_result *active_dqs;
};

struct dpaa2_queue {
	struct rte_mempool *mb_pool; /**< mbuf pool to populate the RX ring */
	int32_t eventfd; /**< Event fd of this queue */
	uint32_t fqid; /**< Unique ID of this queue */
	uint8_t tc_index; /**< Traffic class identifier */
	uint16_t flow_id; /**< To be used by the DPAA2 framework */
	struct queue_storage_info_t *q_storage;
	struct qbman_result *cscn;
};

struct swp_active_dqs {
	struct qbman_result *global_active_dqs;
	uint64_t reserved[7];
};

#define NUM_MAX_SWP 64

extern struct swp_active_dqs rte_global_active_dqs_list[NUM_MAX_SWP];

struct dpaa2_dpci_dev {
	TAILQ_ENTRY(dpaa2_dpci_dev) next;
	/**< Pointer to the next device instance */
	struct fsl_mc_io dpci; /**< Handle to the DPCI portal object */
	rte_atomic16_t in_use;
	uint32_t dpci_id; /**< HW ID of the DPCI object */
	struct dpaa2_queue queue[DPAA2_DPCI_MAX_QUEUES];
};

/* Global MCP list */
extern void *(*rte_mcp_ptr_list);

/* Refer to Table 7-3 in SEC BG */
struct qbman_fle {
	uint32_t addr_lo;
	uint32_t addr_hi;
	uint32_t length;
	/* FMT must be 00, MSB is the final bit */
	uint32_t fin_bpid_offset;
	uint32_t frc;
	uint32_t reserved[3]; /* Not used currently */
};

struct qbman_sge {
	uint32_t addr_lo;
	uint32_t addr_hi;
	uint32_t length;
	uint32_t fin_bpid_offset;
	uint32_t frc;
};

/* There are three types of frames: Single, Scatter Gather, and Frame Lists */
enum qbman_fd_format {
	qbman_fd_single = 0,
	qbman_fd_list,
	qbman_fd_sg
};

/* Macros to define operations on FDs */
#define DPAA2_SET_FD_ADDR(fd, addr) do { \
	(fd)->simple.addr_lo = lower_32_bits((uint64_t)(addr)); \
	(fd)->simple.addr_hi = upper_32_bits((uint64_t)(addr)); \
} while (0)
#define DPAA2_SET_FD_LEN(fd, length) ((fd)->simple.len = (length))
#define DPAA2_SET_FD_BPID(fd, bpid) ((fd)->simple.bpid_offset |= (bpid))
#define DPAA2_SET_FD_IVP(fd) ((fd)->simple.bpid_offset |= 0x00004000)
#define DPAA2_SET_FD_OFFSET(fd, offset) \
	((fd)->simple.bpid_offset |= (uint32_t)(offset) << 16)
#define DPAA2_SET_FD_INTERNAL_JD(fd, len) \
	((fd)->simple.frc = (0x80000000 | (len)))
#define DPAA2_SET_FD_FRC(fd, frc) ((fd)->simple.frc = (frc))
#define DPAA2_RESET_FD_CTRL(fd) ((fd)->simple.ctrl = 0)

#define DPAA2_SET_FD_ASAL(fd, asal) ((fd)->simple.ctrl |= ((asal) << 16))
#define DPAA2_SET_FD_FLC(fd, addr) do { \
	(fd)->simple.flc_lo = lower_32_bits((uint64_t)(addr)); \
	(fd)->simple.flc_hi = upper_32_bits((uint64_t)(addr)); \
} while (0)
#define DPAA2_SET_FLE_INTERNAL_JD(fle, len) \
	((fle)->frc = (0x80000000 | (len)))
#define DPAA2_GET_FLE_ADDR(fle) \
	(uint64_t)((((uint64_t)((fle)->addr_hi)) << 32) + (fle)->addr_lo)
#define DPAA2_SET_FLE_ADDR(fle, addr) do { \
	(fle)->addr_lo = lower_32_bits((uint64_t)(addr)); \
	(fle)->addr_hi = upper_32_bits((uint64_t)(addr)); \
} while (0)
#define DPAA2_GET_FLE_CTXT(fle) \
	(uint64_t)((((uint64_t)((fle)->reserved[1])) << 32) + \
		(fle)->reserved[0])
#define DPAA2_FLE_SAVE_CTXT(fle, addr) do { \
	(fle)->reserved[0] = lower_32_bits((uint64_t)(addr)); \
	(fle)->reserved[1] = upper_32_bits((uint64_t)(addr)); \
} while (0)
#define DPAA2_SET_FLE_OFFSET(fle, offset) \
	((fle)->fin_bpid_offset |= (uint32_t)(offset) << 16)
#define DPAA2_SET_FLE_BPID(fle, bpid) \
	((fle)->fin_bpid_offset |= (uint64_t)(bpid))
#define DPAA2_GET_FLE_BPID(fle) ((fle)->fin_bpid_offset & 0x000000ff)
#define DPAA2_SET_FLE_FIN(fle) ((fle)->fin_bpid_offset |= (uint64_t)1 << 31)
#define DPAA2_SET_FLE_IVP(fle) ((fle)->fin_bpid_offset |= 0x00004000)
#define DPAA2_SET_FD_COMPOUND_FMT(fd) \
	((fd)->simple.bpid_offset |= (uint32_t)1 << 28)

#define DPAA2_GET_FD_ADDR(fd) \
	((uint64_t)((((uint64_t)((fd)->simple.addr_hi)) << 32) + (fd)->simple.addr_lo))
#define DPAA2_GET_FD_LEN(fd) ((fd)->simple.len)
#define DPAA2_GET_FD_BPID(fd) ((fd)->simple.bpid_offset & 0x00003FFF)
#define DPAA2_GET_FD_IVP(fd) (((fd)->simple.bpid_offset & 0x00004000) >> 14)
#define DPAA2_GET_FD_OFFSET(fd) (((fd)->simple.bpid_offset & 0x0FFF0000) >> 16)
#define DPAA2_GET_FLE_OFFSET(fle) (((fle)->fin_bpid_offset & 0x0FFF0000) >> 16)
#define DPAA2_SET_FLE_SG_EXT(fle) ((fle)->fin_bpid_offset |= (uint64_t)1 << 29)
#define DPAA2_IS_SET_FLE_SG_EXT(fle) \
	(((fle)->fin_bpid_offset & ((uint64_t)1 << 29)) ? 1 : 0)

#define DPAA2_INLINE_MBUF_FROM_BUF(buf, meta_data_size) \
	((struct rte_mbuf *)((uint64_t)(buf) - (meta_data_size)))

#define DPAA2_ASAL_VAL (DPAA2_MBUF_HW_ANNOTATION / 64)

#define DPAA2_FD_SET_FORMAT(fd, format) do { \
	(fd)->simple.bpid_offset &= 0xCFFFFFFF; \
	(fd)->simple.bpid_offset |= (uint32_t)(format) << 28; \
} while (0)
#define DPAA2_FD_GET_FORMAT(fd) (((fd)->simple.bpid_offset >> 28) & 0x3)

#define DPAA2_SG_SET_FINAL(sg, fin) do { \
	(sg)->fin_bpid_offset &= 0x7FFFFFFF; \
	(sg)->fin_bpid_offset |= (uint32_t)(fin) << 31; \
} while (0)
#define DPAA2_SG_IS_FINAL(sg) (!!((sg)->fin_bpid_offset >> 31))
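
/*
 * Usage sketch (illustrative only, not part of this header): building a
 * single-format FD for an mbuf on the TX path. 'fd', 'mbuf' and 'bpid'
 * are assumed to be a zeroed frame descriptor, a valid packet, and a
 * buffer-pool ID respectively.
 *
 *	DPAA2_SET_FD_ADDR(fd, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
 *	DPAA2_SET_FD_LEN(fd, mbuf->data_len);
 *	DPAA2_SET_FD_BPID(fd, bpid);
 *	DPAA2_SET_FD_OFFSET(fd, mbuf->data_off);
 *	DPAA2_FD_SET_FORMAT(fd, qbman_fd_single);
 */
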
/* Only enqueue-error responses will be
 * pushed on the FQID_ERR of the enqueue FQ.
 */
#define DPAA2_EQ_RESP_ERR_FQ 0
/* All enqueue responses will be pushed to the address
 * set with qbman_eq_desc_set_response().
 */
#define DPAA2_EQ_RESP_ALWAYS 1

#ifdef RTE_LIBRTE_DPAA2_USE_PHYS_IOVA
static void *dpaa2_mem_ptov(phys_addr_t paddr) __attribute__((unused));
/* todo - this is costly, need to write a fast conversion routine */
static void *dpaa2_mem_ptov(phys_addr_t paddr)
{
	const struct rte_memseg *memseg = rte_eal_get_physmem_layout();
	int i;

	for (i = 0; i < RTE_MAX_MEMSEG && memseg[i].addr_64 != 0; i++) {
		if (paddr >= memseg[i].phys_addr &&
		    (char *)paddr < (char *)memseg[i].phys_addr + memseg[i].len)
			return (void *)(memseg[i].addr_64
					+ (paddr - memseg[i].phys_addr));
	}
	return NULL;
}

static phys_addr_t dpaa2_mem_vtop(uint64_t vaddr) __attribute__((unused));
static phys_addr_t dpaa2_mem_vtop(uint64_t vaddr)
{
	const struct rte_memseg *memseg = rte_eal_get_physmem_layout();
	int i;

	for (i = 0; i < RTE_MAX_MEMSEG && memseg[i].addr_64 != 0; i++) {
		if (vaddr >= memseg[i].addr_64 &&
		    vaddr < memseg[i].addr_64 + memseg[i].len)
			return memseg[i].phys_addr
				+ (vaddr - memseg[i].addr_64);
	}
	return (phys_addr_t)0;
}

/**
 * When we are using physical addresses as IO virtual addresses, we need
 * to call the conversion routines dpaa2_mem_vtop() and dpaa2_mem_ptov()
 * wherever required. These routines are invoked through the macros below.
 */
#define DPAA2_MBUF_VADDR_TO_IOVA(mbuf) ((mbuf)->buf_physaddr)
#define DPAA2_OP_VADDR_TO_IOVA(op) ((op)->phys_addr)

/**
 * macro to convert a virtual address to an IOVA
 */
#define DPAA2_VADDR_TO_IOVA(_vaddr) dpaa2_mem_vtop((uint64_t)(_vaddr))

/**
 * macro to convert an IOVA to a virtual address
 */
#define DPAA2_IOVA_TO_VADDR(_iova) dpaa2_mem_ptov((phys_addr_t)(_iova))

/**
 * macro to modify memory containing an IOVA so it holds the
 * corresponding virtual address instead
 */
#define DPAA2_MODIFY_IOVA_TO_VADDR(_mem, _type) \
	{_mem = (_type)(dpaa2_mem_ptov((phys_addr_t)(_mem))); }

#else /* RTE_LIBRTE_DPAA2_USE_PHYS_IOVA */

#define DPAA2_MBUF_VADDR_TO_IOVA(mbuf) ((mbuf)->buf_addr)
#define DPAA2_OP_VADDR_TO_IOVA(op) (op)
#define DPAA2_VADDR_TO_IOVA(_vaddr) (_vaddr)
#define DPAA2_IOVA_TO_VADDR(_iova) (_iova)
#define DPAA2_MODIFY_IOVA_TO_VADDR(_mem, _type)

#endif /* RTE_LIBRTE_DPAA2_USE_PHYS_IOVA */
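
/*
 * Usage sketch (illustrative only): whichever branch above is compiled
 * in, drivers hand addresses to hardware and translate them back with
 * the same macros, e.g.:
 *
 *	DPAA2_SET_FD_ADDR(fd, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
 *	...
 *	void *va = DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd));
 */
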
static inline
int check_swp_active_dqs(uint16_t dpio_index)
{
	if (rte_global_active_dqs_list[dpio_index].global_active_dqs != NULL)
		return 1;
	return 0;
}

static inline
void clear_swp_active_dqs(uint16_t dpio_index)
{
	rte_global_active_dqs_list[dpio_index].global_active_dqs = NULL;
}

static inline
struct qbman_result *get_swp_active_dqs(uint16_t dpio_index)
{
	return rte_global_active_dqs_list[dpio_index].global_active_dqs;
}

static inline
void set_swp_active_dqs(uint16_t dpio_index, struct qbman_result *dqs)
{
	rte_global_active_dqs_list[dpio_index].global_active_dqs = dqs;
}
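
/*
 * Usage sketch (illustrative only): an RX poll loop can publish the DQ
 * storage it is currently draining, so other contexts sharing the portal
 * can detect an in-flight pull before issuing their own:
 *
 *	if (!check_swp_active_dqs(dpio_dev->index))
 *		set_swp_active_dqs(dpio_dev->index, dq_storage);
 *	... process dequeued frames ...
 *	clear_swp_active_dqs(dpio_dev->index);
 */
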
struct dpaa2_dpbp_dev *dpaa2_alloc_dpbp_dev(void);
void dpaa2_free_dpbp_dev(struct dpaa2_dpbp_dev *dpbp);
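
/*
 * Usage sketch (illustrative only): a mempool driver grabs a free DPBP
 * instance from the bus layer and returns it on teardown. The error
 * handling shown is an assumption, not mandated by this API:
 *
 *	struct dpaa2_dpbp_dev *dpbp = dpaa2_alloc_dpbp_dev();
 *	if (dpbp == NULL)
 *		return -1;
 *	...
 *	dpaa2_free_dpbp_dev(dpbp);
 */
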
struct dpaa2_dpci_dev *rte_dpaa2_alloc_dpci_dev(void);
void rte_dpaa2_free_dpci_dev(struct dpaa2_dpci_dev *dpci);

#endif /* _DPAA2_HW_PVT_H_ */