/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(C) 2019 Marvell International Ltd.
 */
5 #ifndef __OTX2_MEMPOOL_H__
6 #define __OTX2_MEMPOOL_H__
8 #include <rte_bitmap.h>
9 #include <rte_bus_pci.h>
10 #include <rte_devargs.h>
11 #include <rte_mempool.h>
13 #include "otx2_common.h"
14 #include "otx2_mbox.h"
/* NPA LF error status codes. The enum opener and closing brace are not
 * visible in this chunk of the file — TODO confirm the enum tag against
 * the full source.
 */
17 NPA_LF_ERR_PARAM = -512,
18 NPA_LF_ERR_ALLOC = -513,
19 NPA_LF_ERR_INVALID_BLOCK_SZ = -514,
20 NPA_LF_ERR_AURA_ID_ALLOC = -515,
21 NPA_LF_ERR_AURA_POOL_INIT = -516,
22 NPA_LF_ERR_AURA_POOL_FINI = -517,
23 NPA_LF_ERR_BASE_INVALID = -518,
/* NPA queue-interrupt context: holds a back-pointer to the owning NPA LF.
 * NOTE(review): the remaining members and the closing brace are not
 * visible in this chunk — confirm against the full file.
 */
27 struct otx2_npa_qint {
28 struct otx2_npa_lf *lf;
/* Visible tail of the NPA LF context; the struct header and earlier
 * members fall outside this chunk — TODO confirm against the full file.
 */
46 struct otx2_mbox *mbox;
/* Pool-stack page geometry: pointers per stack page and page size in
 * bytes — presumably; inferred from the names, verify against the HRM. */
47 uint32_t stack_pg_ptrs;
48 uint32_t stack_pg_bytes;
/* NOTE(review): looks like a bitmap of free aura ids — name-based, verify. */
49 struct rte_bitmap *npa_bmp;
/* Per-aura IOVA range limits; indexed by aura id in
 * npa_lf_aura_op_range_set() later in this file. */
50 struct npa_aura_lim *aura_lim;
/* Underlying PCI device and its interrupt handle (consumed by the
 * init/IRQ prototypes at the bottom of this header). */
51 struct rte_pci_device *pci_dev;
52 struct rte_intr_handle *intr_handle;
/* Low 16 bits of a 64bit aura handle carry the aura id; the rest carry
 * the LF base address (see the handle helpers below). */
55 #define AURA_ID_MASK (BIT_ULL(16) - 1)
/*
 * Generate 64bit handle to have optimized alloc and free aura operation.
 * 0 - AURA_ID_MASK for storing the aura_id.
 * AURA_ID_MASK+1 - (2^64 - 1) for storing the lf base address.
 * This scheme is valid when OS can give AURA_ID_MASK
 * aligned address for lf base address.
 */
64 static inline uint64_t
65 npa_lf_aura_handle_gen(uint32_t aura_id, uintptr_t addr)
69 val = aura_id & AURA_ID_MASK;
70 return (uint64_t)addr | val;
73 static inline uint64_t
74 npa_lf_aura_handle_to_aura(uint64_t aura_handle)
76 return aura_handle & AURA_ID_MASK;
79 static inline uintptr_t
80 npa_lf_aura_handle_to_base(uint64_t aura_handle)
82 return (uintptr_t)(aura_handle & ~AURA_ID_MASK);
/* Allocate (pop) one pointer from the aura by issuing an atomic add to
 * the NPA_LF_AURA_OP_ALLOCX(0) register of the aura's LF. When @drop is
 * non-zero, bit 63 (DROP) is set in the write data so the HW drops the
 * request instead of stalling when the aura is empty.
 * Returns the value read back from the ALLOC register.
 * NOTE(review): the `if (drop)` guard and braces were missing from the
 * mangled paste; restored per upstream.
 */
static inline uint64_t
npa_lf_aura_op_alloc(uint64_t aura_handle, const int drop)
{
	uint64_t wdata = npa_lf_aura_handle_to_aura(aura_handle);

	if (drop)
		wdata |= BIT_ULL(63); /* DROP */

	return otx2_atomic64_add_nosync(wdata,
		(int64_t *)(npa_lf_aura_handle_to_base(aura_handle) +
		NPA_LF_AURA_OP_ALLOCX(0)));
}
99 npa_lf_aura_op_free(uint64_t aura_handle, const int fabs, uint64_t iova)
101 uint64_t reg = npa_lf_aura_handle_to_aura(aura_handle);
104 reg |= BIT_ULL(63); /* FABS */
106 otx2_store_pair(iova, reg,
107 npa_lf_aura_handle_to_base(aura_handle) + NPA_LF_AURA_OP_FREE0);
110 static inline uint64_t
111 npa_lf_aura_op_cnt_get(uint64_t aura_handle)
116 wdata = npa_lf_aura_handle_to_aura(aura_handle) << 44;
118 reg = otx2_atomic64_add_nosync(wdata,
119 (int64_t *)(npa_lf_aura_handle_to_base(aura_handle) +
120 NPA_LF_AURA_OP_CNT));
122 if (reg & BIT_ULL(42) /* OP_ERR */)
125 return reg & 0xFFFFFFFFF;
129 npa_lf_aura_op_cnt_set(uint64_t aura_handle, const int sign, uint64_t count)
131 uint64_t reg = count & (BIT_ULL(36) - 1);
134 reg |= BIT_ULL(43); /* CNT_ADD */
136 reg |= (npa_lf_aura_handle_to_aura(aura_handle) << 44);
139 npa_lf_aura_handle_to_base(aura_handle) + NPA_LF_AURA_OP_CNT);
142 static inline uint64_t
143 npa_lf_aura_op_limit_get(uint64_t aura_handle)
148 wdata = npa_lf_aura_handle_to_aura(aura_handle) << 44;
150 reg = otx2_atomic64_add_nosync(wdata,
151 (int64_t *)(npa_lf_aura_handle_to_base(aura_handle) +
152 NPA_LF_AURA_OP_LIMIT));
154 if (reg & BIT_ULL(42) /* OP_ERR */)
157 return reg & 0xFFFFFFFFF;
161 npa_lf_aura_op_limit_set(uint64_t aura_handle, uint64_t limit)
163 uint64_t reg = limit & (BIT_ULL(36) - 1);
165 reg |= (npa_lf_aura_handle_to_aura(aura_handle) << 44);
168 npa_lf_aura_handle_to_base(aura_handle) + NPA_LF_AURA_OP_LIMIT);
171 static inline uint64_t
172 npa_lf_aura_op_available(uint64_t aura_handle)
177 wdata = npa_lf_aura_handle_to_aura(aura_handle) << 44;
179 reg = otx2_atomic64_add_nosync(wdata,
180 (int64_t *)(npa_lf_aura_handle_to_base(
181 aura_handle) + NPA_LF_POOL_OP_AVAILABLE));
183 if (reg & BIT_ULL(42) /* OP_ERR */)
186 return reg & 0xFFFFFFFFF;
190 npa_lf_aura_op_range_set(uint64_t aura_handle, uint64_t start_iova,
193 uint64_t reg = npa_lf_aura_handle_to_aura(aura_handle);
194 struct otx2_npa_lf *lf = otx2_npa_lf_obj_get();
195 struct npa_aura_lim *lim = lf->aura_lim;
197 lim[reg].ptr_start = RTE_MIN(lim[reg].ptr_start, start_iova);
198 lim[reg].ptr_end = RTE_MAX(lim[reg].ptr_end, end_iova);
200 otx2_store_pair(lim[reg].ptr_start, reg,
201 npa_lf_aura_handle_to_base(aura_handle) +
202 NPA_LF_POOL_OP_PTR_START0);
203 otx2_store_pair(lim[reg].ptr_end, reg,
204 npa_lf_aura_handle_to_base(aura_handle) +
205 NPA_LF_POOL_OP_PTR_END0);
/* NPA LF lifecycle — init from a PCI device / global teardown.
 * Presumably defined in otx2_mempool.c; negative NPA_LF_ERR_* codes on
 * failure — TODO confirm against the implementation. */
210 int otx2_npa_lf_init(struct rte_pci_device *pci_dev, void *otx2_dev);
212 int otx2_npa_lf_fini(void);
/* IRQ registration/teardown for an NPA LF. */
215 int otx2_npa_register_irqs(struct otx2_npa_lf *lf);
216 void otx2_npa_unregister_irqs(struct otx2_npa_lf *lf);
/* Debug helper — NOTE(review): appears to dump aura/pool contexts,
 * inferred from the name; verify. */
219 int otx2_mempool_ctx_dump(struct otx2_npa_lf *lf);
221 #endif /* __OTX2_MEMPOOL_H__ */