/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2017-2020 NXP
 */
6 #ifndef __RTE_DPAA_BUS_H__
7 #define __RTE_DPAA_BUS_H__
#include <stdint.h>
#include <sys/queue.h>

#include <rte_mbuf_dyn.h>
#include <rte_mempool.h>

#include <dpaax_iova_table.h>
/* This sequence number field is used to store event entry index for
 * driver specific usage. For parallel mode queues, invalid
 * index will be set and for atomic mode queues, valid value
 * ranging from 1 to 16.
 */
/* Sentinel sequence number marking an mbuf with no valid event entry index. */
#define DPAA_INVALID_MBUF_SEQN	0

/* Type of the per-mbuf dynamic sequence-number field. */
typedef uint32_t dpaa_seqn_t;
/* Byte offset of the dynamic field inside struct rte_mbuf; set at init. */
extern int dpaa_seqn_dynfield_offset;
31 * Read dpaa sequence number from mbuf.
33 * @param mbuf Structure to read from.
34 * @return pointer to dpaa sequence number.
37 static inline dpaa_seqn_t *
38 dpaa_seqn(struct rte_mbuf *mbuf)
40 return RTE_MBUF_DYNFIELD(mbuf, dpaa_seqn_dynfield_offset,
/** Name of the DPAA mempool ops registered by the bus driver. */
#define DPAA_MEMPOOL_OPS_NAME	"dpaa"

/** Convert a generic rte_device pointer to its enclosing DPAA device. */
#define DEV_TO_DPAA_DEVICE(ptr)	\
		container_of(ptr, struct rte_dpaa_device, device)

/* DPAA SoC identifier; If this is not available, it can be concluded
 * that board is non-DPAA. Single slot is currently supported.
 */
#define DPAA_SOC_ID_FILE	"/sys/devices/soc0/soc_id"

/* SVR (System Version Register) family codes, matched under SVR_MASK. */
#define SVR_LS1043A_FAMILY	0x87920000
#define SVR_LS1046A_FAMILY	0x87070000
#define SVR_MASK		0xffff0000

/** Device driver supports link state interrupt */
#define RTE_DPAA_DRV_INTR_LSC	0x0008

/** const-qualified variant of DEV_TO_DPAA_DEVICE. */
#define RTE_DEV_TO_DPAA_CONST(ptr) \
	container_of(ptr, const struct rte_dpaa_device, device)

/* SoC family detected at bus scan; one of the SVR_*_FAMILY values. */
extern unsigned int dpaa_svr_family;

struct rte_dpaa_device;
struct rte_dpaa_driver;

/* DPAA Device and Driver lists for DPAA bus */
TAILQ_HEAD(rte_dpaa_device_list, rte_dpaa_device);
TAILQ_HEAD(rte_dpaa_driver_list, rte_dpaa_driver);
80 struct rte_dpaa_device_list device_list;
81 struct rte_dpaa_driver_list driver_list;
/* Hardware identity of a DPAA device (FMAN interface / MAC / DPDK id). */
struct dpaa_device_id {
	uint8_t fman_id; /**< Fman interface ID, for ETH type device */
	uint8_t mac_id; /**< Fman MAC interface ID, for ETH type device */
	uint16_t dev_id; /**< Device Identifier from DPDK */
};
92 struct rte_dpaa_device {
93 TAILQ_ENTRY(rte_dpaa_device) next;
94 struct rte_device device;
96 struct rte_eth_dev *eth_dev;
97 struct rte_cryptodev *crypto_dev;
99 struct rte_dpaa_driver *driver;
100 struct dpaa_device_id id;
101 struct rte_intr_handle intr_handle;
102 enum rte_dpaa_type device_type; /**< Ethernet or crypto type device */
103 char name[RTE_ETH_NAME_MAX_LEN];
/* Driver probe callback: bind dpaa_drv to dpaa_dev; returns 0 on success. */
typedef int (*rte_dpaa_probe_t)(struct rte_dpaa_driver *dpaa_drv,
				struct rte_dpaa_device *dpaa_dev);
/* Driver remove callback: release dpaa_dev; returns 0 on success. */
typedef int (*rte_dpaa_remove_t)(struct rte_dpaa_device *dpaa_dev);
110 struct rte_dpaa_driver {
111 TAILQ_ENTRY(rte_dpaa_driver) next;
112 struct rte_driver driver;
113 struct rte_dpaa_bus *dpaa_bus;
114 enum rte_dpaa_type drv_type;
115 rte_dpaa_probe_t probe;
116 rte_dpaa_remove_t remove;
117 uint32_t drv_flags; /**< Flags for controlling device.*/
/* Create storage for dqrr entries per lcore */
#define DPAA_PORTAL_DEQUEUE_DEPTH	16
struct dpaa_portal_dqrr {
	void *mbuf[DPAA_PORTAL_DEQUEUE_DEPTH]; /**< Held mbufs, one per DQRR slot */
	/* dqrr_held/dqrr_size members are referenced by the
	 * DPAA_PER_LCORE_DQRR_* accessor macros below.
	 */
	uint64_t dqrr_held;	/**< Bitmask of DQRR entries currently held */
	uint8_t dqrr_size;	/**< Number of held DQRR entries */
};

/* Per-lcore portal state.
 * NOTE(review): the opening of this struct was truncated in the extracted
 * source; reconstructed from the visible members and the per-lcore accessor
 * macros — confirm against upstream rte_dpaa_bus.h.
 */
struct dpaa_portal {
	uint32_t bman_idx; /**< BMAN Portal ID*/
	uint32_t qman_idx; /**< QMAN Portal ID*/
	struct dpaa_portal_dqrr dpaa_held_bufs; /**< Held DQRR entries */
	struct rte_crypto_op **dpaa_sec_ops; /**< In-flight dpaa_sec ops */
	int dpaa_sec_op_nb; /**< Count of dpaa_sec ops; see accessor macro */
	uint64_t tid;/**< Parent Thread id for this portal */
};
137 RTE_DECLARE_PER_LCORE(struct dpaa_portal *, dpaa_io);
139 #define DPAA_PER_LCORE_PORTAL \
140 RTE_PER_LCORE(dpaa_io)
141 #define DPAA_PER_LCORE_DQRR_SIZE \
142 RTE_PER_LCORE(dpaa_io)->dpaa_held_bufs.dqrr_size
143 #define DPAA_PER_LCORE_DQRR_HELD \
144 RTE_PER_LCORE(dpaa_io)->dpaa_held_bufs.dqrr_held
145 #define DPAA_PER_LCORE_DQRR_MBUF(i) \
146 RTE_PER_LCORE(dpaa_io)->dpaa_held_bufs.mbuf[i]
147 #define DPAA_PER_LCORE_RTE_CRYPTO_OP \
148 RTE_PER_LCORE(dpaa_io)->dpaa_sec_ops
149 #define DPAA_PER_LCORE_DPAA_SEC_OP_NB \
150 RTE_PER_LCORE(dpaa_io)->dpaa_sec_op_nb
152 /* Various structures representing contiguous memory maps */
154 TAILQ_ENTRY(dpaa_memseg) next;
160 TAILQ_HEAD(dpaa_memseg_list, dpaa_memseg);
161 extern struct dpaa_memseg_list rte_dpaa_memsegs;
163 /* Either iterate over the list of internal memseg references or fallback to
164 * EAL memseg based iova2virt.
166 static inline void *rte_dpaa_mem_ptov(phys_addr_t paddr)
168 struct dpaa_memseg *ms;
171 va = dpaax_iova_table_get_va(paddr);
172 if (likely(va != NULL))
175 /* Check if the address is already part of the memseg list internally
176 * maintained by the dpaa driver.
178 TAILQ_FOREACH(ms, &rte_dpaa_memsegs, next) {
179 if (paddr >= ms->iova && paddr <
181 return RTE_PTR_ADD(ms->vaddr, (uintptr_t)(paddr - ms->iova));
184 /* If not, Fallback to full memseg list searching */
185 va = rte_mem_iova2virt(paddr);
187 dpaax_iova_table_update(paddr, va, RTE_CACHE_LINE_SIZE);
192 static inline rte_iova_t
193 rte_dpaa_mem_vtop(void *vaddr)
195 const struct rte_memseg *ms;
197 ms = rte_mem_virt2memseg(vaddr, NULL);
199 return ms->iova + RTE_PTR_DIFF(vaddr, ms->addr);
/**
 * Register a DPAA driver.
 *
 * @param driver
 *   A pointer to a rte_dpaa_driver structure describing the driver
 *   to be registered.
 */
void rte_dpaa_driver_register(struct rte_dpaa_driver *driver);

/**
 * Unregister a DPAA driver.
 *
 * @param driver
 *   A pointer to a rte_dpaa_driver structure describing the driver
 *   to be unregistered.
 */
void rte_dpaa_driver_unregister(struct rte_dpaa_driver *driver);
/**
 * Initialize a DPAA portal for the calling thread.
 *
 * @param arg
 *   Opaque per-thread argument.
 *
 * @return
 *   0 in case of success, error otherwise
 */
int rte_dpaa_portal_init(void *arg);

/** Initialize a portal and associate it with the given frame queue. */
int rte_dpaa_portal_fq_init(void *arg, struct qman_fq *fq);

/** Release the portal association of the given frame queue. */
int rte_dpaa_portal_fq_close(struct qman_fq *fq);

/**
 * Cleanup a DPAA Portal
 */
void dpaa_portal_finish(void *arg);
/** Helper for DPAA device registration from driver (eth, crypto) instance */
#define RTE_PMD_REGISTER_DPAA(nm, dpaa_drv) \
RTE_INIT(dpaainitfn_ ##nm) \
{\
	(dpaa_drv).driver.name = RTE_STR(nm);\
	rte_dpaa_driver_register(&dpaa_drv); \
} \
RTE_PMD_EXPORT_NAME(nm, __COUNTER__)
257 struct fm_eth_port_cfg *dpaa_get_eth_port_cfg(int dev_id);
263 #endif /* __RTE_DPAA_BUS_H__ */