/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(C) 2019 Marvell International Ltd.
 */
5 #ifndef __OTX2_MEMPOOL_H__
6 #define __OTX2_MEMPOOL_H__
8 #include <rte_bitmap.h>
9 #include <rte_bus_pci.h>
10 #include <rte_devargs.h>
11 #include <rte_mempool.h>
13 #include "otx2_common.h"
14 #include "otx2_mbox.h"
/*
 * NPA LF error codes, returned as negative values by the NPA LF
 * entry points declared at the bottom of this header.
 */
enum npa_lf_status {
	NPA_LF_ERR_PARAM	    = -512,
	NPA_LF_ERR_ALLOC	    = -513,
	NPA_LF_ERR_INVALID_BLOCK_SZ = -514,
	NPA_LF_ERR_AURA_ID_ALLOC    = -515,
	NPA_LF_ERR_AURA_POOL_INIT   = -516,
	NPA_LF_ERR_AURA_POOL_FINI   = -517,
	NPA_LF_ERR_BASE_INVALID     = -518,
};
/* Per-queue-interrupt context passed to the NPA IRQ handlers. */
struct otx2_npa_qint {
	struct otx2_npa_lf *lf;	/* owning NPA LF */
	uint8_t qintx;		/* queue interrupt index */
};

/*
 * NPA LF (local function) device state.
 *
 * NOTE(review): the members between the two structs were lost in a
 * damaged region of this header (original lines 29-40); any missing
 * fields must be restored from the upstream source before this layout
 * can be trusted.
 */
struct otx2_npa_lf {
	struct otx2_mbox *mbox;			/* AF mailbox channel */
	uint32_t stack_pg_ptrs;			/* pool ptrs per stack page */
	uint32_t stack_pg_bytes;		/* stack page size in bytes */
	struct rte_bitmap *npa_bmp;		/* free aura-id bitmap */
	struct rte_pci_device *pci_dev;
	struct rte_intr_handle *intr_handle;
};
/* Low 16 bits of a 64-bit aura handle hold the aura id. */
#define AURA_ID_MASK (BIT_ULL(16) - 1)
/*
 * Generate 64bit handle to have optimized alloc and free aura operation.
 * 0 - AURA_ID_MASK for storing the aura_id.
 * AURA_ID_MASK+1 - (2^64 - 1) for storing the lf base address.
 * This scheme is valid when OS can give AURA_ID_MASK
 * aligned address for lf base address.
 */
58 static inline uint64_t
59 npa_lf_aura_handle_gen(uint32_t aura_id, uintptr_t addr)
63 val = aura_id & AURA_ID_MASK;
64 return (uint64_t)addr | val;
67 static inline uint64_t
68 npa_lf_aura_handle_to_aura(uint64_t aura_handle)
70 return aura_handle & AURA_ID_MASK;
73 static inline uintptr_t
74 npa_lf_aura_handle_to_base(uint64_t aura_handle)
76 return (uintptr_t)(aura_handle & ~AURA_ID_MASK);
/*
 * Pop one buffer pointer from the aura via an atomic add to the
 * NPA_LF_AURA_OP_ALLOCX(0) register. When @drop is non-zero, bit 63
 * (DROP) is set in the write data. Returns the 64-bit value the
 * hardware places in the result register.
 */
static inline uint64_t
npa_lf_aura_op_alloc(uint64_t aura_handle, const int drop)
{
	uint64_t wdata = npa_lf_aura_handle_to_aura(aura_handle);

	if (drop)
		wdata |= BIT_ULL(63); /* DROP */

	return otx2_atomic64_add_nosync(wdata,
		(int64_t *)(npa_lf_aura_handle_to_base(aura_handle) +
		NPA_LF_AURA_OP_ALLOCX(0)));
}
93 npa_lf_aura_op_free(uint64_t aura_handle, const int fabs, uint64_t iova)
95 uint64_t reg = npa_lf_aura_handle_to_aura(aura_handle);
98 reg |= BIT_ULL(63); /* FABS */
100 otx2_store_pair(iova, reg,
101 npa_lf_aura_handle_to_base(aura_handle) + NPA_LF_AURA_OP_FREE0);
104 static inline uint64_t
105 npa_lf_aura_op_cnt_get(uint64_t aura_handle)
110 wdata = npa_lf_aura_handle_to_aura(aura_handle) << 44;
112 reg = otx2_atomic64_add_nosync(wdata,
113 (int64_t *)(npa_lf_aura_handle_to_base(aura_handle) +
114 NPA_LF_AURA_OP_CNT));
116 if (reg & BIT_ULL(42) /* OP_ERR */)
119 return reg & 0xFFFFFFFFF;
123 npa_lf_aura_op_cnt_set(uint64_t aura_handle, const int sign, uint64_t count)
125 uint64_t reg = count & (BIT_ULL(36) - 1);
128 reg |= BIT_ULL(43); /* CNT_ADD */
130 reg |= (npa_lf_aura_handle_to_aura(aura_handle) << 44);
133 npa_lf_aura_handle_to_base(aura_handle) + NPA_LF_AURA_OP_CNT);
136 static inline uint64_t
137 npa_lf_aura_op_limit_get(uint64_t aura_handle)
142 wdata = npa_lf_aura_handle_to_aura(aura_handle) << 44;
144 reg = otx2_atomic64_add_nosync(wdata,
145 (int64_t *)(npa_lf_aura_handle_to_base(aura_handle) +
146 NPA_LF_AURA_OP_LIMIT));
148 if (reg & BIT_ULL(42) /* OP_ERR */)
151 return reg & 0xFFFFFFFFF;
155 npa_lf_aura_op_limit_set(uint64_t aura_handle, uint64_t limit)
157 uint64_t reg = limit & (BIT_ULL(36) - 1);
159 reg |= (npa_lf_aura_handle_to_aura(aura_handle) << 44);
162 npa_lf_aura_handle_to_base(aura_handle) + NPA_LF_AURA_OP_LIMIT);
165 static inline uint64_t
166 npa_lf_aura_op_available(uint64_t aura_handle)
171 wdata = npa_lf_aura_handle_to_aura(aura_handle) << 44;
173 reg = otx2_atomic64_add_nosync(wdata,
174 (int64_t *)(npa_lf_aura_handle_to_base(
175 aura_handle) + NPA_LF_POOL_OP_AVAILABLE));
177 if (reg & BIT_ULL(42) /* OP_ERR */)
180 return reg & 0xFFFFFFFFF;
184 npa_lf_aura_op_range_set(uint64_t aura_handle, uint64_t start_iova,
187 uint64_t reg = npa_lf_aura_handle_to_aura(aura_handle);
189 otx2_store_pair(start_iova, reg,
190 npa_lf_aura_handle_to_base(aura_handle) +
191 NPA_LF_POOL_OP_PTR_START0);
192 otx2_store_pair(end_iova, reg,
193 npa_lf_aura_handle_to_base(aura_handle) +
194 NPA_LF_POOL_OP_PTR_END0);
/* Attach and initialize the NPA LF for the given PCI device. */
int otx2_npa_lf_init(struct rte_pci_device *pci_dev, void *otx2_dev);
/* Tear down the NPA LF set up by otx2_npa_lf_init(). */
int otx2_npa_lf_fini(void);

/* Register / unregister the NPA interrupt handlers for this LF. */
int otx2_npa_register_irqs(struct otx2_npa_lf *lf);
void otx2_npa_unregister_irqs(struct otx2_npa_lf *lf);
205 #endif /* __OTX2_MEMPOOL_H__ */