/* SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved.
 * Copyright 2016-2018 NXP
 */

#ifndef _DPAA2_HW_PVT_H_
#define _DPAA2_HW_PVT_H_

#include <rte_eventdev.h>
#include <dpaax_iova_table.h>

#include <mc/fsl_mc_sys.h>
#include <fsl_qbman_portal.h>

/* Split a 64-bit value into its 32-bit halves; the double 16-bit shift in
 * upper_32_bits() avoids undefined behaviour when x is a 32-bit type.
 */
#define lower_32_bits(x) ((uint32_t)(x))
#define upper_32_bits(x) ((uint32_t)(((x) >> 16) >> 16))

#define SVR_LS1080A 0x87030000
#define SVR_LS2080A 0x87010000
#define SVR_LS2088A 0x87090000
#define SVR_LX2160A 0x87360000

#define VLAN_TAG_SIZE 4 /**< VLAN header length */

/* Maximum number of slots available in TX ring */
#define MAX_TX_RING_SLOTS 32
#define MAX_EQ_RESP_ENTRIES (MAX_TX_RING_SLOTS + 1)

/* Maximum number of slots available in the enqueue command ring (EQCR) */
#define DPAA2_EQCR_RING_SIZE 8
/* Maximum number of slots available in the EQCR on LX2 */
#define DPAA2_LX2_EQCR_RING_SIZE 32

/* Maximum number of slots available in the dequeue response ring (DQRR) */
#define DPAA2_DQRR_RING_SIZE 16
/* Maximum number of slots available in the DQRR on LX2 */
#define DPAA2_LX2_DQRR_RING_SIZE 32

/* EQCR shift to get EQCR size: (1 << 3) = 8 for LS1/LS2 */
#define DPAA2_EQCR_SHIFT 3
/* EQCR shift to get EQCR size for LX2: (1 << 5) = 32 */
#define DPAA2_LX2_EQCR_SHIFT 5
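
/* Illustrative note (not in the original header): each ring size above
 * equals (1 << shift). A build-time check inside some init function could
 * assert this, e.g.:
 *
 *	RTE_BUILD_BUG_ON((1 << DPAA2_EQCR_SHIFT) != DPAA2_EQCR_RING_SIZE);
 *	RTE_BUILD_BUG_ON((1 << DPAA2_LX2_EQCR_SHIFT) !=
 *			 DPAA2_LX2_EQCR_RING_SIZE);
 */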

/* Flag identifying an ordered-queue mbuf (enqueued via an order
 * restoration point)
 */
#define DPAA2_ENQUEUE_FLAG_ORP (1ULL << 30)
/* ORP ID shift and mask */
#define DPAA2_EQCR_OPRID_SHIFT 16
#define DPAA2_EQCR_OPRID_MASK 0x3FFF0000
/* Sequence number shift and mask */
#define DPAA2_EQCR_SEQNUM_SHIFT 0
#define DPAA2_EQCR_SEQNUM_MASK 0x0000FFFF

#define DPAA2_SWP_CENA_REGION 0
#define DPAA2_SWP_CINH_REGION 1
#define DPAA2_SWP_CENA_MEM_REGION 2

#define MC_PORTAL_INDEX 0
#define NUM_DPIO_REGIONS 2
#define NUM_DQS_PER_QUEUE 2

/* Maximum number of buffers that can be acquired/released through QBMAN */
#define DPAA2_MBUF_MAX_ACQ_REL 7

#define DPAA2_MEMPOOL_OPS_NAME "dpaa2"

#define DPAA2_MBUF_HW_ANNOTATION 64
#define DPAA2_FD_PTA_SIZE 0

#if (DPAA2_MBUF_HW_ANNOTATION + DPAA2_FD_PTA_SIZE) > RTE_PKTMBUF_HEADROOM
#error "Annotation requirement is more than RTE_PKTMBUF_HEADROOM"
#endif

/* We re-use the HEADROOM for annotation on RX */
#define DPAA2_HW_BUF_RESERVE 0
#define DPAA2_PACKET_LAYOUT_ALIGN 64 /* changed from 256 */

#define DPAA2_DPCI_MAX_QUEUES 2

struct dpaa2_queue;

struct eqresp_metadata {
	struct dpaa2_queue *dpaa2_q;
	struct rte_mempool *mp;
};

struct dpaa2_dpio_dev {
	TAILQ_ENTRY(dpaa2_dpio_dev) next;
		/**< Pointer to next device instance */
	uint16_t index; /**< Index of an instance in the list */
	rte_atomic16_t ref_count;
		/**< How many thread contexts are sharing this. */
	struct qbman_result *eqresp;
	struct eqresp_metadata *eqresp_meta;
	struct fsl_mc_io *dpio; /**< Handle to the DPIO portal object */
	struct qbman_swp *sw_portal; /**< SW portal object */
	const struct qbman_result *dqrr[4];
		/**< DQRR entries for this SW portal */
	void *mc_portal; /**< MC portal for configuring this device */
	uintptr_t qbman_portal_ce_paddr;
		/**< Physical address of the cache-enabled area */
	uintptr_t ce_size; /**< Size of the CE region */
	uintptr_t qbman_portal_ci_paddr;
		/**< Physical address of the cache-inhibited area */
	uintptr_t ci_size; /**< Size of the CI region */
	struct rte_intr_handle intr_handle; /* Interrupt related info */
	int32_t epoll_fd; /**< File descriptor created for interrupt polling */
	int32_t hw_id; /**< A unique ID of this DPIO device instance */
};

struct dpaa2_dpbp_dev {
	TAILQ_ENTRY(dpaa2_dpbp_dev) next;
		/**< Pointer to next device instance */
	struct fsl_mc_io dpbp; /**< Handle to the DPBP portal object */
	rte_atomic16_t in_use;
	uint32_t dpbp_id; /**< HW ID for DPBP object */
};

struct queue_storage_info_t {
	struct qbman_result *dq_storage[NUM_DQS_PER_QUEUE];
	struct qbman_result *active_dqs;
	uint8_t active_dpio_id;
	uint8_t last_num_pkts;
};

typedef void (dpaa2_queue_cb_dqrr_t)(struct qbman_swp *swp,
		const struct qbman_fd *fd,
		const struct qbman_result *dq,
		struct dpaa2_queue *rxq,
		struct rte_event *ev);

typedef void (dpaa2_queue_cb_eqresp_free_t)(uint16_t eqresp_ci);

struct dpaa2_queue {
	struct rte_mempool *mb_pool; /**< mbuf pool to populate RX ring. */
	union {
		struct rte_eth_dev_data *eth_data;
		struct rte_cryptodev_data *crypto_data;
	};
	int32_t eventfd; /**< Event fd of this queue */
	uint32_t fqid; /**< Unique ID of this queue */
	uint8_t tc_index; /**< Traffic class identifier */
	uint16_t flow_id; /**< To be used by the DPAA2 framework */
	union {
		struct queue_storage_info_t *q_storage;
		struct qbman_result *cscn;
	};
	dpaa2_queue_cb_dqrr_t *cb;
	dpaa2_queue_cb_eqresp_free_t *cb_eqresp_free;
	struct dpaa2_bp_info *bp_array;
};

struct swp_active_dqs {
	struct qbman_result *global_active_dqs;
	uint64_t reserved[7];
};

#define NUM_MAX_SWP 64

extern struct swp_active_dqs rte_global_active_dqs_list[NUM_MAX_SWP];

struct dpaa2_dpci_dev {
	TAILQ_ENTRY(dpaa2_dpci_dev) next;
		/**< Pointer to next device instance */
	struct fsl_mc_io dpci; /**< Handle to the DPCI portal object */
	rte_atomic16_t in_use;
	uint32_t dpci_id; /**< HW ID for DPCI object */
	struct dpaa2_queue rx_queue[DPAA2_DPCI_MAX_QUEUES];
	struct dpaa2_queue tx_queue[DPAA2_DPCI_MAX_QUEUES];
};

/*! Global MCP list */
extern void *(*rte_mcp_ptr_list);

/* Refer to Table 7-3 in SEC BG */
struct qbman_fle {
	uint32_t addr_lo;
	uint32_t addr_hi;
	uint32_t length;
	/* FMT must be 00, MSB is the final bit */
	uint32_t fin_bpid_offset;
	uint32_t frc;
	uint32_t reserved[3]; /* Not used currently */
};

struct qbman_sge {
	uint32_t addr_lo;
	uint32_t addr_hi;
	uint32_t length;
	uint32_t fin_bpid_offset;
};

/* There are three types of frames: Single, Scatter Gather and Frame Lists */
enum qbman_fd_format {
	qbman_fd_single = 0,
	qbman_fd_list,
	qbman_fd_sg
};

/* Macros to define operations on FD */
#define DPAA2_SET_FD_ADDR(fd, addr) do { \
	(fd)->simple.addr_lo = lower_32_bits((size_t)(addr)); \
	(fd)->simple.addr_hi = upper_32_bits((uint64_t)(addr)); \
} while (0)
#define DPAA2_SET_FD_LEN(fd, length) ((fd)->simple.len = length)
#define DPAA2_SET_FD_BPID(fd, bpid) ((fd)->simple.bpid_offset |= bpid)
#define DPAA2_SET_ONLY_FD_BPID(fd, bpid) \
	((fd)->simple.bpid_offset = bpid)
#define DPAA2_SET_FD_IVP(fd) (((fd)->simple.bpid_offset |= 0x00004000))
#define DPAA2_SET_FD_OFFSET(fd, offset) \
	(((fd)->simple.bpid_offset |= (uint32_t)(offset) << 16))
#define DPAA2_SET_FD_INTERNAL_JD(fd, len) \
	((fd)->simple.frc = (0x80000000 | (len)))
#define DPAA2_GET_FD_FRC_PARSE_SUM(fd) \
	((uint16_t)(((fd)->simple.frc & 0xffff0000) >> 16))
#define DPAA2_SET_FD_FRC(fd, _frc) ((fd)->simple.frc = _frc)
#define DPAA2_RESET_FD_CTRL(fd) ((fd)->simple.ctrl = 0)

#define DPAA2_SET_FD_ASAL(fd, asal) ((fd)->simple.ctrl |= (asal) << 16)

#define DPAA2_RESET_FD_FLC(fd) do { \
	(fd)->simple.flc_lo = 0; \
	(fd)->simple.flc_hi = 0; \
} while (0)

#define DPAA2_SET_FD_FLC(fd, addr) do { \
	(fd)->simple.flc_lo = lower_32_bits((size_t)(addr)); \
	(fd)->simple.flc_hi = upper_32_bits((uint64_t)(addr)); \
} while (0)
#define DPAA2_SET_FLE_INTERNAL_JD(fle, len) ((fle)->frc = (0x80000000 | (len)))
#define DPAA2_GET_FLE_ADDR(fle) \
	(size_t)((((uint64_t)((fle)->addr_hi)) << 32) + (fle)->addr_lo)
#define DPAA2_SET_FLE_ADDR(fle, addr) do { \
	(fle)->addr_lo = lower_32_bits((size_t)(addr)); \
	(fle)->addr_hi = upper_32_bits((uint64_t)(addr)); \
} while (0)
#define DPAA2_GET_FLE_CTXT(fle) \
	((((uint64_t)((fle)->reserved[1])) << 32) + (fle)->reserved[0])
#define DPAA2_FLE_SAVE_CTXT(fle, addr) do { \
	(fle)->reserved[0] = lower_32_bits((size_t)(addr)); \
	(fle)->reserved[1] = upper_32_bits((uint64_t)(addr)); \
} while (0)
#define DPAA2_SET_FLE_OFFSET(fle, offset) \
	((fle)->fin_bpid_offset |= (uint32_t)(offset) << 16)
#define DPAA2_SET_FLE_LEN(fle, len) ((fle)->length = len)
#define DPAA2_SET_FLE_BPID(fle, bpid) ((fle)->fin_bpid_offset |= (size_t)(bpid))
#define DPAA2_GET_FLE_BPID(fle) ((fle)->fin_bpid_offset & 0x000000ff)
#define DPAA2_SET_FLE_FIN(fle) ((fle)->fin_bpid_offset |= (uint32_t)1 << 31)
#define DPAA2_SET_FLE_IVP(fle) (((fle)->fin_bpid_offset |= 0x00004000))
#define DPAA2_SET_FLE_BMT(fle) (((fle)->fin_bpid_offset |= 0x00008000))
#define DPAA2_SET_FD_COMPOUND_FMT(fd) \
	((fd)->simple.bpid_offset |= (uint32_t)1 << 28)
#define DPAA2_GET_FD_ADDR(fd) \
	(((((uint64_t)((fd)->simple.addr_hi)) << 32) + (fd)->simple.addr_lo))

#define DPAA2_GET_FD_LEN(fd) ((fd)->simple.len)
#define DPAA2_GET_FD_BPID(fd) (((fd)->simple.bpid_offset & 0x00003FFF))
#define DPAA2_GET_FD_IVP(fd) (((fd)->simple.bpid_offset & 0x00004000) >> 14)
#define DPAA2_GET_FD_OFFSET(fd) (((fd)->simple.bpid_offset & 0x0FFF0000) >> 16)
#define DPAA2_GET_FD_FRC(fd) ((fd)->simple.frc)
#define DPAA2_GET_FD_FLC(fd) \
	(((uint64_t)((fd)->simple.flc_hi) << 32) + (fd)->simple.flc_lo)
#define DPAA2_GET_FD_ERR(fd) ((fd)->simple.bpid_offset & 0x000000FF)
#define DPAA2_GET_FLE_OFFSET(fle) (((fle)->fin_bpid_offset & 0x0FFF0000) >> 16)
#define DPAA2_SET_FLE_SG_EXT(fle) ((fle)->fin_bpid_offset |= (uint64_t)1 << 29)
#define DPAA2_IS_SET_FLE_SG_EXT(fle) \
	(((fle)->fin_bpid_offset & ((uint64_t)1 << 29)) ? 1 : 0)
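
/* Illustrative sketch (not part of this header's API): composing the macros
 * above to build a frame descriptor for a single-buffer frame. 'mbuf' and
 * 'bpid' are hypothetical locals a TX path would provide. The BPID is
 * written first with the assigning macro; offset and format are then OR-ed
 * in on top of it.
 *
 *	struct qbman_fd fd;
 *
 *	memset(&fd, 0, sizeof(fd));
 *	DPAA2_SET_FD_ADDR(&fd, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
 *	DPAA2_SET_ONLY_FD_BPID(&fd, bpid);
 *	DPAA2_SET_FD_OFFSET(&fd, mbuf->data_off);
 *	DPAA2_SET_FD_LEN(&fd, mbuf->data_len);
 *	DPAA2_FD_SET_FORMAT(&fd, qbman_fd_single);
 */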

#define DPAA2_INLINE_MBUF_FROM_BUF(buf, meta_data_size) \
	((struct rte_mbuf *)((size_t)(buf) - (meta_data_size)))
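
/* Illustrative sketch: on RX, the mbuf that owns a hardware buffer is
 * recovered from the FD address. 'fd', 'bp_info' and its meta_data_size
 * field are hypothetical here.
 *
 *	void *buf = (void *)(size_t)DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd));
 *	struct rte_mbuf *mbuf =
 *		DPAA2_INLINE_MBUF_FROM_BUF(buf, bp_info->meta_data_size);
 */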

#define DPAA2_ASAL_VAL (DPAA2_MBUF_HW_ANNOTATION / 64)

#define DPAA2_FD_SET_FORMAT(fd, format) do { \
	(fd)->simple.bpid_offset &= 0xCFFFFFFF; \
	(fd)->simple.bpid_offset |= (uint32_t)(format) << 28; \
} while (0)
#define DPAA2_FD_GET_FORMAT(fd) (((fd)->simple.bpid_offset >> 28) & 0x3)

#define DPAA2_SG_SET_FINAL(sg, fin) do { \
	(sg)->fin_bpid_offset &= 0x7FFFFFFF; \
	(sg)->fin_bpid_offset |= (uint32_t)(fin) << 31; \
} while (0)
#define DPAA2_SG_IS_FINAL(sg) (!!((sg)->fin_bpid_offset >> 31))

/* Only enqueue error responses are pushed to the FQID_ERR of the
 * enqueue FQ.
 */
#define DPAA2_EQ_RESP_ERR_FQ 0
/* All enqueue responses are pushed to the address set with
 * qbman_eq_desc_set_response().
 */
#define DPAA2_EQ_RESP_ALWAYS 1
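
/* Illustrative sketch (assumed usage; the real sequences live in the portal
 * and PMD code): with DPAA2_EQ_RESP_ALWAYS the enqueue descriptor carries
 * the IOVA of per-command response storage. 'eqdesc', 'resp' and the index
 * 'pi' are hypothetical locals.
 *
 *	struct qbman_eq_desc eqdesc;
 *	struct qbman_result *resp = &dpio_dev->eqresp[pi];
 *
 *	qbman_eq_desc_set_no_orp(&eqdesc, DPAA2_EQ_RESP_ALWAYS);
 *	qbman_eq_desc_set_response(&eqdesc, DPAA2_VADDR_TO_IOVA(resp), 1);
 */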

/* Various structures representing contiguous memory maps */
struct dpaa2_memseg {
	TAILQ_ENTRY(dpaa2_memseg) next;
	char *vaddr;
	rte_iova_t iova;
	size_t len;
};

TAILQ_HEAD(dpaa2_memseg_list, dpaa2_memseg);
extern struct dpaa2_memseg_list rte_dpaa2_memsegs;

#ifdef RTE_LIBRTE_DPAA2_USE_PHYS_IOVA
extern uint8_t dpaa2_virt_mode;
static void *dpaa2_mem_ptov(phys_addr_t paddr) __attribute__((unused));

static void *dpaa2_mem_ptov(phys_addr_t paddr)
{
	void *va;

	/* In virtual addressing mode the IOVA is the VA itself */
	if (dpaa2_virt_mode)
		return (void *)(size_t)paddr;

	/* Fast path: look up the pre-populated IOVA table */
	va = (void *)dpaax_iova_table_get_va(paddr);
	if (likely(va != NULL))
		return va;

	/* If not, fall back to a full memseg list search */
	va = rte_mem_iova2virt(paddr);

	return va;
}

static phys_addr_t dpaa2_mem_vtop(uint64_t vaddr) __attribute__((unused));

static phys_addr_t dpaa2_mem_vtop(uint64_t vaddr)
{
	const struct rte_memseg *memseg;

	if (dpaa2_virt_mode)
		return vaddr;

	memseg = rte_mem_virt2memseg((void *)(uintptr_t)vaddr, NULL);
	if (memseg)
		return memseg->phys_addr + RTE_PTR_DIFF(vaddr, memseg->addr);
	return 0;
}

/**
 * When physical addresses are used as IO virtual addresses, the conversion
 * routines dpaa2_mem_vtop() and dpaa2_mem_ptov() must be called wherever a
 * translation is required. These routines are invoked through the macros
 * below.
 */

#define DPAA2_MBUF_VADDR_TO_IOVA(mbuf) ((mbuf)->buf_iova)

/**
 * Macro to convert a virtual address to an IOVA
 */
#define DPAA2_VADDR_TO_IOVA(_vaddr) dpaa2_mem_vtop((size_t)(_vaddr))

/**
 * Macro to convert an IOVA to a virtual address
 */
#define DPAA2_IOVA_TO_VADDR(_iova) dpaa2_mem_ptov((size_t)(_iova))

/**
 * Macro to modify memory in place, replacing a stored IOVA with the
 * corresponding virtual address
 */
#define DPAA2_MODIFY_IOVA_TO_VADDR(_mem, _type) \
	{_mem = (_type)(dpaa2_mem_ptov((size_t)(_mem))); }
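
/* Illustrative sketch: a driver hands the hardware an IOVA and translates
 * it back when the frame returns; 'mbuf' is a hypothetical local. In this
 * mode DPAA2_MBUF_VADDR_TO_IOVA() simply reads mbuf->buf_iova, and
 * DPAA2_IOVA_TO_VADDR() resolves it back to the buffer's virtual address.
 *
 *	uint64_t iova = DPAA2_MBUF_VADDR_TO_IOVA(mbuf);
 *	void *va = DPAA2_IOVA_TO_VADDR(iova);
 */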

#else /* RTE_LIBRTE_DPAA2_USE_PHYS_IOVA */

#define DPAA2_MBUF_VADDR_TO_IOVA(mbuf) ((mbuf)->buf_addr)
#define DPAA2_VADDR_TO_IOVA(_vaddr) (_vaddr)
#define DPAA2_IOVA_TO_VADDR(_iova) (_iova)
#define DPAA2_MODIFY_IOVA_TO_VADDR(_mem, _type)

#endif /* RTE_LIBRTE_DPAA2_USE_PHYS_IOVA */

static inline
int check_swp_active_dqs(uint16_t dpio_index)
{
	if (rte_global_active_dqs_list[dpio_index].global_active_dqs != NULL)
		return 1;
	return 0;
}

static inline
void clear_swp_active_dqs(uint16_t dpio_index)
{
	rte_global_active_dqs_list[dpio_index].global_active_dqs = NULL;
}

static inline
struct qbman_result *get_swp_active_dqs(uint16_t dpio_index)
{
	return rte_global_active_dqs_list[dpio_index].global_active_dqs;
}

static inline
void set_swp_active_dqs(uint16_t dpio_index, struct qbman_result *dqs)
{
	rte_global_active_dqs_list[dpio_index].global_active_dqs = dqs;
}
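
/* Illustrative sketch: the RX path records the dequeue storage it has
 * scheduled on a portal so a later poll (possibly for another queue) can
 * detect and drain it first. 'dpio_dev' and 'dq_storage' are hypothetical
 * locals.
 *
 *	if (check_swp_active_dqs(dpio_dev->index)) {
 *		... wait for the pending pull to complete, then ...
 *		clear_swp_active_dqs(dpio_dev->index);
 *	}
 *	set_swp_active_dqs(dpio_dev->index, dq_storage);
 */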

struct dpaa2_dpbp_dev *dpaa2_alloc_dpbp_dev(void);
void dpaa2_free_dpbp_dev(struct dpaa2_dpbp_dev *dpbp);
int dpaa2_dpbp_supported(void);
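
/* Illustrative sketch (assumed calling pattern): acquiring a free DPBP
 * (buffer pool) device and releasing it when done.
 *
 *	struct dpaa2_dpbp_dev *dpbp = dpaa2_alloc_dpbp_dev();
 *
 *	if (dpbp != NULL) {
 *		... configure the pool via dpbp->dpbp_id ...
 *		dpaa2_free_dpbp_dev(dpbp);
 *	}
 */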

struct dpaa2_dpci_dev *rte_dpaa2_alloc_dpci_dev(void);
void rte_dpaa2_free_dpci_dev(struct dpaa2_dpci_dev *dpci);

#endif /* _DPAA2_HW_PVT_H_ */