/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(C) 2019 Marvell International Ltd.
 */

#include <rte_kvargs.h>
#include <rte_malloc.h>
#include <rte_mbuf_pool_ops.h>

#include "otx2_evdev.h"
#include "otx2_tim_evdev.h"

static struct rte_event_timer_adapter_ops otx2_tim_ops;
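
/* Query the AF mailbox for the MSIX vector offset of each attached TIM LF
 * so that ring interrupts can be registered later.
 */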
static int
tim_get_msix_offsets(void)
{
	struct otx2_tim_evdev *dev = tim_priv_get();
	struct otx2_mbox *mbox = dev->mbox;
	struct msix_offset_rsp *msix_rsp;
	int i, rc;

	/* Get TIM MSIX vector offsets */
	otx2_mbox_alloc_msg_msix_offset(mbox);
	rc = otx2_mbox_process_msg(mbox, (void *)&msix_rsp);

	for (i = 0; i < dev->nb_rings; i++)
		dev->tim_msixoff[i] = msix_rsp->timlf_msixoff[i];

	return rc;
}
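
/* Round the bucket count to a nearby power of two (presumably so the
 * fast path can index buckets with a mask instead of a modulo) and
 * stretch the tick interval so the configured max timeout still fits.
 */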
static void
tim_optimize_bkt_param(struct otx2_tim_ring *tim_ring)
{
	uint64_t tck_nsec;
	uint32_t hbkts;
	uint32_t lbkts;

	hbkts = rte_align32pow2(tim_ring->nb_bkts);
	tck_nsec = RTE_ALIGN_MUL_CEIL(tim_ring->max_tout / (hbkts - 1), 10);

	if ((tck_nsec < TICK2NSEC(OTX2_TIM_MIN_TMO_TKS,
				  tim_ring->tenns_clk_freq) ||
	     hbkts > OTX2_TIM_MAX_BUCKETS))
		hbkts = 0;

	lbkts = rte_align32prevpow2(tim_ring->nb_bkts);
	tck_nsec = RTE_ALIGN_MUL_CEIL((tim_ring->max_tout / (lbkts - 1)), 10);

	if ((tck_nsec < TICK2NSEC(OTX2_TIM_MIN_TMO_TKS,
				  tim_ring->tenns_clk_freq) ||
	     lbkts > OTX2_TIM_MAX_BUCKETS))
		lbkts = 0;

	if (!hbkts && !lbkts)
		return;

	if (!hbkts) {
		tim_ring->nb_bkts = lbkts;
		goto end;
	} else if (!lbkts) {
		tim_ring->nb_bkts = hbkts;
		goto end;
	}

	tim_ring->nb_bkts = (hbkts - tim_ring->nb_bkts) <
		(tim_ring->nb_bkts - lbkts) ? hbkts : lbkts;
end:
	tim_ring->optimized = true;
	tim_ring->tck_nsec = RTE_ALIGN_MUL_CEIL((tim_ring->max_tout /
						 (tim_ring->nb_bkts - 1)), 10);
	otx2_tim_dbg("Optimized configured values");
	otx2_tim_dbg("Nb_bkts : %" PRIu32 "", tim_ring->nb_bkts);
	otx2_tim_dbg("Tck_nsec : %" PRIu64 "", tim_ring->tck_nsec);
}
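
/* Create the mempool that backs timer chunk allocation: an NPA-managed
 * aura/pool pair by default, or a plain software mempool when NPA is
 * disabled through devargs.
 */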
static int
tim_chnk_pool_create(struct otx2_tim_ring *tim_ring,
		     struct rte_event_timer_adapter_conf *rcfg)
{
	unsigned int cache_sz = (tim_ring->nb_chunks / 1.5);
	unsigned int mp_flags = 0;
	char pool_name[25];
	int rc;

	/* Create chunk pool. */
	if (rcfg->flags & RTE_EVENT_TIMER_ADAPTER_F_SP_PUT) {
		mp_flags = MEMPOOL_F_SP_PUT | MEMPOOL_F_SC_GET;
		otx2_tim_dbg("Using single producer mode");
		tim_ring->prod_type_sp = true;
	}

	snprintf(pool_name, sizeof(pool_name), "otx2_tim_chunk_pool%d",
		 tim_ring->ring_id);

	if (cache_sz > RTE_MEMPOOL_CACHE_MAX_SIZE)
		cache_sz = RTE_MEMPOOL_CACHE_MAX_SIZE;

	if (!tim_ring->disable_npa) {
		/* NPA need not have cache as free is not visible to SW */
		tim_ring->chunk_pool = rte_mempool_create_empty(pool_name,
				tim_ring->nb_chunks, tim_ring->chunk_sz,
				0, 0, rte_socket_id(), mp_flags);

		if (tim_ring->chunk_pool == NULL) {
			otx2_err("Unable to create chunkpool.");
			return -ENOMEM;
		}

		rc = rte_mempool_set_ops_byname(tim_ring->chunk_pool,
						rte_mbuf_platform_mempool_ops(),
						NULL);
		if (rc < 0) {
			otx2_err("Unable to set chunkpool ops");
			goto free;
		}

		rc = rte_mempool_populate_default(tim_ring->chunk_pool);
		if (rc < 0) {
			otx2_err("Unable to populate chunkpool.");
			goto free;
		}

		tim_ring->aura = npa_lf_aura_handle_to_aura(
				tim_ring->chunk_pool->pool_id);
		tim_ring->ena_dfb = 0;
	} else {
		tim_ring->chunk_pool = rte_mempool_create(pool_name,
				tim_ring->nb_chunks, tim_ring->chunk_sz,
				cache_sz, 0, NULL, NULL, NULL, NULL,
				rte_socket_id(), mp_flags);
		if (tim_ring->chunk_pool == NULL) {
			otx2_err("Unable to create chunkpool.");
			return -ENOMEM;
		}
		tim_ring->ena_dfb = 1;
	}

	return 0;

free:
	rte_mempool_free(tim_ring->chunk_pool);
	return rc;
}
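
/* Map TIM AF mailbox error codes to human-readable log messages. */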
static void
tim_err_desc(int rc)
{
	switch (rc) {
	case TIM_AF_NO_RINGS_LEFT:
		otx2_err("Unable to allocate new TIM ring.");
		break;
	case TIM_AF_INVALID_NPA_PF_FUNC:
		otx2_err("Invalid NPA pf func.");
		break;
	case TIM_AF_INVALID_SSO_PF_FUNC:
		otx2_err("Invalid SSO pf func.");
		break;
	case TIM_AF_RING_STILL_RUNNING:
		otx2_tim_dbg("Ring busy.");
		break;
	case TIM_AF_LF_INVALID:
		otx2_err("Invalid ring id.");
		break;
	case TIM_AF_CSIZE_NOT_ALIGNED:
		otx2_err("Chunk size specified needs to be multiple of 16.");
		break;
	case TIM_AF_CSIZE_TOO_SMALL:
		otx2_err("Chunk size too small.");
		break;
	case TIM_AF_CSIZE_TOO_BIG:
		otx2_err("Chunk size too big.");
		break;
	case TIM_AF_INTERVAL_TOO_SMALL:
		otx2_err("Bucket traversal interval too small.");
		break;
	case TIM_AF_INVALID_BIG_ENDIAN_VALUE:
		otx2_err("Invalid big endian value.");
		break;
	case TIM_AF_INVALID_CLOCK_SOURCE:
		otx2_err("Invalid clock source specified.");
		break;
	case TIM_AF_GPIO_CLK_SRC_NOT_ENABLED:
		otx2_err("GPIO clock source not enabled.");
		break;
	case TIM_AF_INVALID_BSIZE:
		otx2_err("Invalid bucket size.");
		break;
	case TIM_AF_INVALID_ENABLE_PERIODIC:
		otx2_err("Invalid enable periodic value.");
		break;
	case TIM_AF_INVALID_ENABLE_DONTFREE:
		otx2_err("Invalid don't free value.");
		break;
	case TIM_AF_ENA_DONTFRE_NSET_PERIODIC:
		otx2_err("Don't free bit not set when periodic is enabled.");
		break;
	case TIM_AF_RING_ALREADY_DISABLED:
		otx2_err("Ring already stopped.");
		break;
	default:
		otx2_err("Unknown error.");
	}
}
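
/* Timer adapter init callback: allocate a TIM LF, size the buckets and
 * chunk pool from the adapter config, configure the ring via the AF
 * mailbox and program the ring base and aura registers.
 */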
static int
otx2_tim_ring_create(struct rte_event_timer_adapter *adptr)
{
	struct rte_event_timer_adapter_conf *rcfg = &adptr->data->conf;
	struct otx2_tim_evdev *dev = tim_priv_get();
	struct otx2_tim_ring *tim_ring;
	struct tim_config_req *cfg_req;
	struct tim_ring_req *free_req;
	struct tim_lf_alloc_req *req;
	struct tim_lf_alloc_rsp *rsp;
	uint64_t nb_timers;
	int rc;

	if (dev == NULL)
		return -ENODEV;

	if (adptr->data->id >= dev->nb_rings)
		return -ENODEV;

	req = otx2_mbox_alloc_msg_tim_lf_alloc(dev->mbox);
	req->npa_pf_func = otx2_npa_pf_func_get();
	req->sso_pf_func = otx2_sso_pf_func_get();
	req->ring = adptr->data->id;

	rc = otx2_mbox_process_msg(dev->mbox, (void **)&rsp);
	if (rc < 0) {
		tim_err_desc(rc);
		return -ENODEV;
	}

	if (NSEC2TICK(RTE_ALIGN_MUL_CEIL(rcfg->timer_tick_ns, 10),
		      rsp->tenns_clk) < OTX2_TIM_MIN_TMO_TKS) {
		if (rcfg->flags & RTE_EVENT_TIMER_ADAPTER_F_ADJUST_RES)
			rcfg->timer_tick_ns = TICK2NSEC(OTX2_TIM_MIN_TMO_TKS,
							rsp->tenns_clk);
		else {
			rc = -ERANGE;
			goto rng_mem_err;
		}
	}

	tim_ring = rte_zmalloc("otx2_tim_prv", sizeof(struct otx2_tim_ring), 0);
	if (tim_ring == NULL) {
		rc = -ENOMEM;
		goto rng_mem_err;
	}

	adptr->data->adapter_priv = tim_ring;

	tim_ring->tenns_clk_freq = rsp->tenns_clk;
	tim_ring->clk_src = (int)rcfg->clk_src;
	tim_ring->ring_id = adptr->data->id;
	tim_ring->tck_nsec = RTE_ALIGN_MUL_CEIL(rcfg->timer_tick_ns, 10);
	tim_ring->max_tout = rcfg->max_tmo_ns;
	tim_ring->nb_bkts = (tim_ring->max_tout / tim_ring->tck_nsec);
	tim_ring->chunk_sz = dev->chunk_sz;
	nb_timers = rcfg->nb_timers;
	tim_ring->disable_npa = dev->disable_npa;

	tim_ring->nb_chunks = nb_timers / OTX2_TIM_NB_CHUNK_SLOTS(
							tim_ring->chunk_sz);
	tim_ring->nb_chunk_slots = OTX2_TIM_NB_CHUNK_SLOTS(tim_ring->chunk_sz);

	/* Try to optimize the bucket parameters. */
	if ((rcfg->flags & RTE_EVENT_TIMER_ADAPTER_F_ADJUST_RES)) {
		if (rte_is_power_of_2(tim_ring->nb_bkts))
			tim_ring->optimized = true;
		else
			tim_optimize_bkt_param(tim_ring);
	}

	tim_ring->nb_chunks = tim_ring->nb_chunks * tim_ring->nb_bkts;

	/* Create buckets. */
	tim_ring->bkt = rte_zmalloc("otx2_tim_bucket", (tim_ring->nb_bkts) *
				    sizeof(struct otx2_tim_bkt),
				    RTE_CACHE_LINE_SIZE);
	if (tim_ring->bkt == NULL) {
		rc = -ENOMEM;
		goto bkt_mem_err;
	}

	rc = tim_chnk_pool_create(tim_ring, rcfg);
	if (rc < 0)
		goto chnk_mem_err;

	cfg_req = otx2_mbox_alloc_msg_tim_config_ring(dev->mbox);

	cfg_req->ring = tim_ring->ring_id;
	cfg_req->bigendian = false;
	cfg_req->clocksource = tim_ring->clk_src;
	cfg_req->enableperiodic = false;
	cfg_req->enabledontfreebuffer = tim_ring->ena_dfb;
	cfg_req->bucketsize = tim_ring->nb_bkts;
	cfg_req->chunksize = tim_ring->chunk_sz;
	cfg_req->interval = NSEC2TICK(tim_ring->tck_nsec,
				      tim_ring->tenns_clk_freq);

	rc = otx2_mbox_process(dev->mbox);
	if (rc < 0) {
		tim_err_desc(rc);
		goto chnk_mem_err;
	}

	tim_ring->base = dev->bar2 +
		(RVU_BLOCK_ADDR_TIM << 20 | tim_ring->ring_id << 12);

	rc = tim_register_irq(tim_ring->ring_id);
	if (rc < 0)
		goto chnk_mem_err;

	otx2_write64((uint64_t)tim_ring->bkt,
		     tim_ring->base + TIM_LF_RING_BASE);
	otx2_write64(tim_ring->aura, tim_ring->base + TIM_LF_RING_AURA);

	return rc;

chnk_mem_err:
	rte_free(tim_ring->bkt);
bkt_mem_err:
	rte_free(tim_ring);
rng_mem_err:
	free_req = otx2_mbox_alloc_msg_tim_lf_free(dev->mbox);
	free_req->ring = adptr->data->id;
	otx2_mbox_process(dev->mbox);

	return rc;
}
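
/* Timer adapter uninit callback: unregister the ring IRQ, hand the LF
 * back to the AF and release bucket memory and the chunk pool.
 */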
static int
otx2_tim_ring_free(struct rte_event_timer_adapter *adptr)
{
	struct otx2_tim_ring *tim_ring = adptr->data->adapter_priv;
	struct otx2_tim_evdev *dev = tim_priv_get();
	struct tim_ring_req *req;
	int rc;

	if (dev == NULL)
		return -ENODEV;

	tim_unregister_irq(tim_ring->ring_id);

	req = otx2_mbox_alloc_msg_tim_lf_free(dev->mbox);
	req->ring = tim_ring->ring_id;

	rc = otx2_mbox_process(dev->mbox);
	if (rc < 0) {
		tim_err_desc(rc);
		return -EBUSY;
	}

	rte_free(tim_ring->bkt);
	rte_mempool_free(tim_ring->chunk_pool);
	rte_free(adptr->data->adapter_priv);

	return 0;
}
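
/* Advertise adapter capabilities and the ops table to the eventdev layer.
 * TIM has an internal port: expired timers are injected into SSO without
 * needing a service core.
 */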
int
otx2_tim_caps_get(const struct rte_eventdev *evdev, uint64_t flags,
		  uint32_t *caps,
		  const struct rte_event_timer_adapter_ops **ops)
{
	struct otx2_tim_evdev *dev = tim_priv_get();

	RTE_SET_USED(flags);
	if (dev == NULL)
		return -ENODEV;

	otx2_tim_ops.init = otx2_tim_ring_create;
	otx2_tim_ops.uninit = otx2_tim_ring_free;

	/* Store evdev pointer for later use. */
	dev->event_dev = (struct rte_eventdev *)(uintptr_t)evdev;
	*caps = RTE_EVENT_TIMER_ADAPTER_CAP_INTERNAL_PORT;
	*ops = &otx2_tim_ops;

	return 0;
}

#define OTX2_TIM_DISABLE_NPA	"tim_disable_npa"
#define OTX2_TIM_CHNK_SLOTS	"tim_chnk_slots"
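
/* Parse the optional "tim_disable_npa" and "tim_chnk_slots" devargs into
 * the TIM device state.
 */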
static void
tim_parse_devargs(struct rte_devargs *devargs, struct otx2_tim_evdev *dev)
{
	struct rte_kvargs *kvlist;

	if (devargs == NULL)
		return;

	kvlist = rte_kvargs_parse(devargs->args, NULL);
	if (kvlist == NULL)
		return;

	rte_kvargs_process(kvlist, OTX2_TIM_DISABLE_NPA,
			   &parse_kvargs_flag, &dev->disable_npa);
	rte_kvargs_process(kvlist, OTX2_TIM_CHNK_SLOTS,
			   &parse_kvargs_value, &dev->chunk_slots);

	rte_kvargs_free(kvlist);
}
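
/* Probe-time init (primary process only): reserve the shared TIM memzone,
 * query and attach the available TIM LFs, fetch their MSIX offsets and
 * settle on a chunk size.
 */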
void
otx2_tim_init(struct rte_pci_device *pci_dev, struct otx2_dev *cmn_dev)
{
	struct rsrc_attach_req *atch_req;
	struct rsrc_detach_req *dtch_req;
	struct free_rsrcs_rsp *rsrc_cnt;
	const struct rte_memzone *mz;
	struct otx2_tim_evdev *dev;
	int rc;

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return;

	mz = rte_memzone_reserve(RTE_STR(OTX2_TIM_EVDEV_NAME),
				 sizeof(struct otx2_tim_evdev),
				 rte_socket_id(), 0);
	if (mz == NULL) {
		otx2_tim_dbg("Unable to allocate memory for TIM Event device");
		return;
	}

	dev = mz->addr;
	dev->pci_dev = pci_dev;
	dev->mbox = cmn_dev->mbox;
	dev->bar2 = cmn_dev->bar2;

	tim_parse_devargs(pci_dev->device.devargs, dev);

	otx2_mbox_alloc_msg_free_rsrc_cnt(dev->mbox);
	rc = otx2_mbox_process_msg(dev->mbox, (void *)&rsrc_cnt);
	if (rc < 0) {
		otx2_err("Unable to get free rsrc count.");
		goto mz_free;
	}

	dev->nb_rings = rsrc_cnt->tim;

	if (!dev->nb_rings) {
		otx2_tim_dbg("No TIM Logical functions provisioned.");
		goto mz_free;
	}

	atch_req = otx2_mbox_alloc_msg_attach_resources(dev->mbox);
	atch_req->modify = true;
	atch_req->timlfs = dev->nb_rings;

	rc = otx2_mbox_process(dev->mbox);
	if (rc < 0) {
		otx2_err("Unable to attach TIM rings.");
		goto mz_free;
	}

	rc = tim_get_msix_offsets();
	if (rc < 0) {
		otx2_err("Unable to get MSIX offsets for TIM.");
		goto detach;
	}

	if (dev->chunk_slots &&
	    dev->chunk_slots <= OTX2_TIM_MAX_CHUNK_SLOTS &&
	    dev->chunk_slots >= OTX2_TIM_MIN_CHUNK_SLOTS) {
		dev->chunk_sz = (dev->chunk_slots + 1) *
			OTX2_TIM_CHUNK_ALIGNMENT;
	} else {
		dev->chunk_sz = OTX2_TIM_RING_DEF_CHUNK_SZ;
	}

	return;

detach:
	dtch_req = otx2_mbox_alloc_msg_detach_resources(dev->mbox);
	dtch_req->partial = true;
	dtch_req->timlfs = true;

	otx2_mbox_process(dev->mbox);
mz_free:
	rte_memzone_free(mz);
}
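
/* Teardown counterpart of otx2_tim_init(): detach the TIM LFs and free
 * the shared memzone.
 */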
void
otx2_tim_fini(void)
{
	struct otx2_tim_evdev *dev = tim_priv_get();
	struct rsrc_detach_req *dtch_req;

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return;

	dtch_req = otx2_mbox_alloc_msg_detach_resources(dev->mbox);
	dtch_req->partial = true;
	dtch_req->timlfs = true;

	otx2_mbox_process(dev->mbox);
	rte_memzone_free(rte_memzone_lookup(RTE_STR(OTX2_TIM_EVDEV_NAME)));
}