/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(C) 2019 Marvell International Ltd.
 */

#include <rte_malloc.h>
#include <rte_mbuf_pool_ops.h>

#include "otx2_evdev.h"
#include "otx2_tim_evdev.h"

static struct rte_event_timer_adapter_ops otx2_tim_ops;
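
/*
 * Pick a power-of-two bucket count close to the configured one: try the
 * next power of two above and below the requested count, drop any
 * candidate whose tick interval would fall under the minimum timeout the
 * TIM hardware supports or that exceeds OTX2_TIM_MAX_BUCKETS, and keep
 * whichever survivor is nearer. The tick period is then recomputed for
 * the chosen bucket count.
 */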
tim_optimize_bkt_param(struct otx2_tim_ring *tim_ring)

        hbkts = rte_align32pow2(tim_ring->nb_bkts);
        tck_nsec = RTE_ALIGN_MUL_CEIL(tim_ring->max_tout / (hbkts - 1), 10);

        if ((tck_nsec < TICK2NSEC(OTX2_TIM_MIN_TMO_TKS,
                                  tim_ring->tenns_clk_freq) ||
             hbkts > OTX2_TIM_MAX_BUCKETS))
                hbkts = 0;

        lbkts = rte_align32prevpow2(tim_ring->nb_bkts);
        tck_nsec = RTE_ALIGN_MUL_CEIL((tim_ring->max_tout / (lbkts - 1)), 10);

        if ((tck_nsec < TICK2NSEC(OTX2_TIM_MIN_TMO_TKS,
                                  tim_ring->tenns_clk_freq) ||
             lbkts > OTX2_TIM_MAX_BUCKETS))
                lbkts = 0;

                tim_ring->nb_bkts = lbkts;

                tim_ring->nb_bkts = hbkts;

        tim_ring->nb_bkts = (hbkts - tim_ring->nb_bkts) <
                            (tim_ring->nb_bkts - lbkts) ? hbkts : lbkts;

        tim_ring->optimized = true;
        tim_ring->tck_nsec = RTE_ALIGN_MUL_CEIL((tim_ring->max_tout /
                                                 (tim_ring->nb_bkts - 1)), 10);
        otx2_tim_dbg("Optimized configured values");
        otx2_tim_dbg("Nb_bkts : %" PRIu32 "", tim_ring->nb_bkts);
        otx2_tim_dbg("Tck_nsec : %" PRIu64 "", tim_ring->tck_nsec);

tim_chnk_pool_create(struct otx2_tim_ring *tim_ring,
                     struct rte_event_timer_adapter_conf *rcfg)

        unsigned int cache_sz = (tim_ring->nb_chunks / 1.5);
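        /* The mempool cache is scaled with the chunk count and clamped to
         * RTE_MEMPOOL_CACHE_MAX_SIZE before the pool is created.
         */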
        unsigned int mp_flags = 0;

        /* Create chunk pool. */
        if (rcfg->flags & RTE_EVENT_TIMER_ADAPTER_F_SP_PUT) {
                mp_flags = MEMPOOL_F_SP_PUT | MEMPOOL_F_SC_GET;
                otx2_tim_dbg("Using single producer mode");
                tim_ring->prod_type_sp = true;
        }

        snprintf(pool_name, sizeof(pool_name), "otx2_tim_chunk_pool%d",
                 tim_ring->ring_id);

        if (cache_sz > RTE_MEMPOOL_CACHE_MAX_SIZE)
                cache_sz = RTE_MEMPOOL_CACHE_MAX_SIZE;

        /* NPA need not have cache as free is not visible to SW */
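        /* The pool is created empty so that the platform (NPA) mempool ops
         * can be attached before it is populated.
         */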
        tim_ring->chunk_pool = rte_mempool_create_empty(pool_name,
                        tim_ring->nb_chunks, tim_ring->chunk_sz,
                        0, 0, rte_socket_id(), mp_flags);
        if (tim_ring->chunk_pool == NULL) {
                otx2_err("Unable to create chunkpool.");

        rc = rte_mempool_set_ops_byname(tim_ring->chunk_pool,
                                        rte_mbuf_platform_mempool_ops(), NULL);
        if (rc < 0) {
                otx2_err("Unable to set chunkpool ops");
        rc = rte_mempool_populate_default(tim_ring->chunk_pool);
        if (rc < 0) {
                otx2_err("Unable to populate chunkpool.");

        tim_ring->aura = npa_lf_aura_handle_to_aura(
                        tim_ring->chunk_pool->pool_id);
        tim_ring->ena_dfb = 0;
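
        /* With the don't-free bit left clear (ena_dfb = 0) the TIM hardware
         * returns expired chunks to the NPA aura backing this pool on its
         * own, so software never has to free them.
         */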

        rte_mempool_free(tim_ring->chunk_pool);
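
/*
 * Translate the TIM mailbox (AF) completion codes into log messages.
 */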
        case TIM_AF_NO_RINGS_LEFT:
                otx2_err("Unable to allocate new TIM ring.");
                break;
        case TIM_AF_INVALID_NPA_PF_FUNC:
                otx2_err("Invalid NPA pf func.");
                break;
        case TIM_AF_INVALID_SSO_PF_FUNC:
                otx2_err("Invalid SSO pf func.");
                break;
        case TIM_AF_RING_STILL_RUNNING:
                otx2_tim_dbg("Ring busy.");
                break;
        case TIM_AF_LF_INVALID:
                otx2_err("Invalid Ring id.");
                break;
        case TIM_AF_CSIZE_NOT_ALIGNED:
                otx2_err("Chunk size specified needs to be multiple of 16.");
                break;
        case TIM_AF_CSIZE_TOO_SMALL:
                otx2_err("Chunk size too small.");
                break;
        case TIM_AF_CSIZE_TOO_BIG:
                otx2_err("Chunk size too big.");
                break;
        case TIM_AF_INTERVAL_TOO_SMALL:
                otx2_err("Bucket traversal interval too small.");
                break;
        case TIM_AF_INVALID_BIG_ENDIAN_VALUE:
                otx2_err("Invalid Big endian value.");
                break;
        case TIM_AF_INVALID_CLOCK_SOURCE:
                otx2_err("Invalid Clock source specified.");
                break;
        case TIM_AF_GPIO_CLK_SRC_NOT_ENABLED:
                otx2_err("GPIO clock source not enabled.");
                break;
        case TIM_AF_INVALID_BSIZE:
                otx2_err("Invalid bucket size.");
                break;
        case TIM_AF_INVALID_ENABLE_PERIODIC:
                otx2_err("Invalid enable periodic value.");
                break;
        case TIM_AF_INVALID_ENABLE_DONTFREE:
                otx2_err("Invalid Don't free value.");
                break;
        case TIM_AF_ENA_DONTFRE_NSET_PERIODIC:
                otx2_err("Don't free bit not set when periodic is enabled.");
                break;
        case TIM_AF_RING_ALREADY_DISABLED:
                otx2_err("Ring already stopped.");
                break;
        default:
                otx2_err("Unknown Error.");
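
/*
 * Adapter init op: allocate a TIM LF, derive the ring geometry from the
 * adapter configuration and program the ring through the AF mailbox.
 */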
otx2_tim_ring_create(struct rte_event_timer_adapter *adptr)

        struct rte_event_timer_adapter_conf *rcfg = &adptr->data->conf;
        struct otx2_tim_evdev *dev = tim_priv_get();
        struct otx2_tim_ring *tim_ring;
        struct tim_config_req *cfg_req;
        struct tim_ring_req *free_req;
        struct tim_lf_alloc_req *req;
        struct tim_lf_alloc_rsp *rsp;

        if (adptr->data->id >= dev->nb_rings)

        req = otx2_mbox_alloc_msg_tim_lf_alloc(dev->mbox);
        req->npa_pf_func = otx2_npa_pf_func_get();
        req->sso_pf_func = otx2_sso_pf_func_get();
        req->ring = adptr->data->id;

        rc = otx2_mbox_process_msg(dev->mbox, (void **)&rsp);
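
        /*
         * If the requested tick resolution is finer than the minimum the TIM
         * LF supports at this clock, round it up when the application set
         * RTE_EVENT_TIMER_ADAPTER_F_ADJUST_RES; otherwise the requested
         * resolution cannot be honoured.
         */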
        if (NSEC2TICK(RTE_ALIGN_MUL_CEIL(rcfg->timer_tick_ns, 10),
                      rsp->tenns_clk) < OTX2_TIM_MIN_TMO_TKS) {
                if (rcfg->flags & RTE_EVENT_TIMER_ADAPTER_F_ADJUST_RES)
                        rcfg->timer_tick_ns = TICK2NSEC(OTX2_TIM_MIN_TMO_TKS,
                                                        rsp->tenns_clk);

        tim_ring = rte_zmalloc("otx2_tim_prv", sizeof(struct otx2_tim_ring), 0);
        if (tim_ring == NULL) {

        adptr->data->adapter_priv = tim_ring;
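
        /*
         * Derive the ring geometry from the adapter configuration: the tick
         * period is rounded up to a multiple of 10 ns to match the TIM
         * ten-nanosecond clock, the bucket count has to cover max_tmo_ns,
         * and enough chunks are reserved to hold nb_timers entries.
         */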
        tim_ring->tenns_clk_freq = rsp->tenns_clk;
        tim_ring->clk_src = (int)rcfg->clk_src;
        tim_ring->ring_id = adptr->data->id;
        tim_ring->tck_nsec = RTE_ALIGN_MUL_CEIL(rcfg->timer_tick_ns, 10);
        tim_ring->max_tout = rcfg->max_tmo_ns;
        tim_ring->nb_bkts = (tim_ring->max_tout / tim_ring->tck_nsec);
        tim_ring->chunk_sz = OTX2_TIM_RING_DEF_CHUNK_SZ;
        nb_timers = rcfg->nb_timers;
        tim_ring->nb_chunks = nb_timers / OTX2_TIM_NB_CHUNK_SLOTS(
                                                        tim_ring->chunk_sz);
        tim_ring->nb_chunk_slots = OTX2_TIM_NB_CHUNK_SLOTS(tim_ring->chunk_sz);

        /* Try to optimize the bucket parameters. */
        if ((rcfg->flags & RTE_EVENT_TIMER_ADAPTER_F_ADJUST_RES)) {
                if (rte_is_power_of_2(tim_ring->nb_bkts))
                        tim_ring->optimized = true;
                else
                        tim_optimize_bkt_param(tim_ring);
        }
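
        /* Scale the chunk budget by the bucket count so each bucket can be
         * provisioned with up to nb_chunks chunks in the worst case.
         */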
        tim_ring->nb_chunks = tim_ring->nb_chunks * tim_ring->nb_bkts;
        /* Create buckets. */
        tim_ring->bkt = rte_zmalloc("otx2_tim_bucket", (tim_ring->nb_bkts) *
                                    sizeof(struct otx2_tim_bkt),
                                    RTE_CACHE_LINE_SIZE);
        if (tim_ring->bkt == NULL)

        rc = tim_chnk_pool_create(tim_ring, rcfg);
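
        /* Program the ring geometry into the TIM LF via the AF mailbox. */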
        cfg_req = otx2_mbox_alloc_msg_tim_config_ring(dev->mbox);

        cfg_req->ring = tim_ring->ring_id;
        cfg_req->bigendian = false;
        cfg_req->clocksource = tim_ring->clk_src;
        cfg_req->enableperiodic = false;
        cfg_req->enabledontfreebuffer = tim_ring->ena_dfb;
        cfg_req->bucketsize = tim_ring->nb_bkts;
        cfg_req->chunksize = tim_ring->chunk_sz;
        cfg_req->interval = NSEC2TICK(tim_ring->tck_nsec,
                                      tim_ring->tenns_clk_freq);

        rc = otx2_mbox_process(dev->mbox);
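
        /*
         * The TIM LF MMIO window lives in BAR2: the RVU block address is
         * shifted to bit 20 and the ring (LF slot) to bit 12. The bucket
         * array and the backing NPA aura are then programmed into the LF
         * registers.
         */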
        tim_ring->base = dev->bar2 +
                (RVU_BLOCK_ADDR_TIM << 20 | tim_ring->ring_id << 12);

        otx2_write64((uint64_t)tim_ring->bkt,
                     tim_ring->base + TIM_LF_RING_BASE);
        otx2_write64(tim_ring->aura, tim_ring->base + TIM_LF_RING_AURA);

        rte_free(tim_ring->bkt);

        free_req = otx2_mbox_alloc_msg_tim_lf_free(dev->mbox);
        free_req->ring = adptr->data->id;
        otx2_mbox_process(dev->mbox);
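
/*
 * Adapter uninit op: free the TIM LF back to the AF and release the bucket
 * memory and the chunk pool.
 */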
otx2_tim_ring_free(struct rte_event_timer_adapter *adptr)

        struct otx2_tim_ring *tim_ring = adptr->data->adapter_priv;
        struct otx2_tim_evdev *dev = tim_priv_get();
        struct tim_ring_req *req;

        req = otx2_mbox_alloc_msg_tim_lf_free(dev->mbox);
        req->ring = tim_ring->ring_id;

        rc = otx2_mbox_process(dev->mbox);

        rte_free(tim_ring->bkt);
        rte_mempool_free(tim_ring->chunk_pool);
        rte_free(adptr->data->adapter_priv);
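
/*
 * Event timer adapter capability query: advertise the internal port
 * capability and hand back the TIM adapter ops.
 */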
otx2_tim_caps_get(const struct rte_eventdev *evdev, uint64_t flags,
                  uint32_t *caps,
                  const struct rte_event_timer_adapter_ops **ops)

        struct otx2_tim_evdev *dev = tim_priv_get();

        otx2_tim_ops.init = otx2_tim_ring_create;
        otx2_tim_ops.uninit = otx2_tim_ring_free;

        /* Store evdev pointer for later use. */
        dev->event_dev = (struct rte_eventdev *)(uintptr_t)evdev;
        *caps = RTE_EVENT_TIMER_ADAPTER_CAP_INTERNAL_PORT;
        *ops = &otx2_tim_ops;
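
/*
 * Usage sketch (illustrative values, not part of this driver): an
 * application creates the adapter through the generic API; the library
 * then calls the caps_get op above and uses the returned otx2_tim_ops.
 *
 *      struct rte_event_timer_adapter_conf conf = {
 *              .event_dev_id = dev_id,
 *              .timer_adapter_id = 0,
 *              .socket_id = rte_socket_id(),
 *              .clk_src = RTE_EVENT_TIMER_ADAPTER_CPU_CLK,
 *              .timer_tick_ns = 100 * 1000,            // 100 us tick
 *              .max_tmo_ns = 10 * 1000 * 1000,         // 10 ms max timeout
 *              .nb_timers = 8192,
 *              .flags = RTE_EVENT_TIMER_ADAPTER_F_ADJUST_RES,
 *      };
 *      struct rte_event_timer_adapter *adptr =
 *              rte_event_timer_adapter_create(&conf);
 */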

otx2_tim_init(struct rte_pci_device *pci_dev, struct otx2_dev *cmn_dev)

        struct rsrc_attach_req *atch_req;
        struct free_rsrcs_rsp *rsrc_cnt;
        const struct rte_memzone *mz;
        struct otx2_tim_evdev *dev;

        if (rte_eal_process_type() != RTE_PROC_PRIMARY)

        mz = rte_memzone_reserve(RTE_STR(OTX2_TIM_EVDEV_NAME),
                                 sizeof(struct otx2_tim_evdev),

                otx2_tim_dbg("Unable to allocate memory for TIM Event device");

        dev = mz->addr;
        dev->pci_dev = pci_dev;
        dev->mbox = cmn_dev->mbox;
        dev->bar2 = cmn_dev->bar2;
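
        /* Ask the AF how many TIM LFs are available to this device. */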
        otx2_mbox_alloc_msg_free_rsrc_cnt(dev->mbox);
        rc = otx2_mbox_process_msg(dev->mbox, (void *)&rsrc_cnt);
        if (rc < 0) {
                otx2_err("Unable to get free rsrc count.");

        dev->nb_rings = rsrc_cnt->tim;

        if (!dev->nb_rings) {
                otx2_tim_dbg("No TIM Logical functions provisioned.");
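
        /* Attach all available TIM LFs to this device. */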
        atch_req = otx2_mbox_alloc_msg_attach_resources(dev->mbox);
        atch_req->modify = true;
        atch_req->timlfs = dev->nb_rings;

        rc = otx2_mbox_process(dev->mbox);
        if (rc < 0) {
                otx2_err("Unable to attach TIM rings.");

        rte_memzone_free(mz);
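
/*
 * Teardown counterpart of otx2_tim_init(): detach the TIM LFs from the AF
 * and release the device memzone.
 */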
        struct otx2_tim_evdev *dev = tim_priv_get();
        struct rsrc_detach_req *dtch_req;

        if (rte_eal_process_type() != RTE_PROC_PRIMARY)

        dtch_req = otx2_mbox_alloc_msg_detach_resources(dev->mbox);
        dtch_req->partial = true;
        dtch_req->timlfs = true;

        otx2_mbox_process(dev->mbox);
        rte_memzone_free(rte_memzone_lookup(RTE_STR(OTX2_TIM_EVDEV_NAME)));