/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(C) 2019 Marvell International Ltd.
 */

#include <rte_kvargs.h>
#include <rte_malloc.h>
#include <rte_mbuf_pool_ops.h>

#include "otx2_evdev.h"
#include "otx2_tim_evdev.h"

static struct rte_event_timer_adapter_ops otx2_tim_ops;
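
/*
 * Query the admin function over the mailbox for the MSIX vector offset
 * assigned to each TIM LF (ring) and cache the offsets for later interrupt
 * registration.
 */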
tim_get_msix_offsets(void)
        struct otx2_tim_evdev *dev = tim_priv_get();
        struct otx2_mbox *mbox = dev->mbox;
        struct msix_offset_rsp *msix_rsp;

        /* Get TIM MSIX vector offsets */
        otx2_mbox_alloc_msg_msix_offset(mbox);
        rc = otx2_mbox_process_msg(mbox, (void *)&msix_rsp);

        for (i = 0; i < dev->nb_rings; i++)
                dev->tim_msixoff[i] = msix_rsp->timlf_msixoff[i];
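
/*
 * Select the arm_burst/arm_tmo_tick_burst fastpath handlers from the lookup
 * tables generated by TIM_ARM_FASTPATH_MODES/TIM_ARM_TMO_FASTPATH_MODES.
 * The tables are indexed by whether the bucket count is a power of two
 * (optimized, AND instead of MOD for bucket indexing), whether the
 * don't-free-buffer mode is enabled (DFB/FB) and whether the producer is
 * single- or multi-threaded (SP/MP).
 */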
tim_set_fp_ops(struct otx2_tim_ring *tim_ring)
        uint8_t prod_flag = !tim_ring->prod_type_sp;

        /* [MOD/AND] [DFB/FB] [SP/MP] */
        const rte_event_timer_arm_burst_t arm_burst[2][2][2] = {
#define FP(_name, _f3, _f2, _f1, flags) \
        [_f3][_f2][_f1] = otx2_tim_arm_burst_ ## _name,
TIM_ARM_FASTPATH_MODES
#undef FP
        };

        const rte_event_timer_arm_tmo_tick_burst_t arm_tmo_burst[2][2] = {
#define FP(_name, _f2, _f1, flags) \
        [_f2][_f1] = otx2_tim_arm_tmo_tick_burst_ ## _name,
TIM_ARM_TMO_FASTPATH_MODES
#undef FP
        };

        otx2_tim_ops.arm_burst = arm_burst[tim_ring->optimized]
                                          [tim_ring->ena_dfb][prod_flag];
        otx2_tim_ops.arm_tmo_tick_burst = arm_tmo_burst[tim_ring->optimized]
                                                       [tim_ring->ena_dfb];

otx2_tim_ring_info_get(const struct rte_event_timer_adapter *adptr,
                       struct rte_event_timer_adapter_info *adptr_info)
        struct otx2_tim_ring *tim_ring = adptr->data->adapter_priv;

        adptr_info->max_tmo_ns = tim_ring->max_tout;
        adptr_info->min_resolution_ns = tim_ring->tck_nsec;
        rte_memcpy(&adptr_info->conf, &adptr->data->conf,
                   sizeof(struct rte_event_timer_adapter_conf));
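
/*
 * Try to round the configured bucket count to a power of two so the
 * fastpath can index buckets with a mask instead of a modulo. Both the next
 * higher and the next lower power of two are considered; a candidate is
 * discarded when its recomputed tick would fall below the minimum supported
 * timeout or its bucket count would exceed OTX2_TIM_MAX_BUCKETS, and the
 * surviving candidate closest to the original count wins. The tick period
 * is then recomputed from max_tout and the chosen bucket count.
 */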
tim_optimize_bkt_param(struct otx2_tim_ring *tim_ring)
        hbkts = rte_align32pow2(tim_ring->nb_bkts);
        tck_nsec = RTE_ALIGN_MUL_CEIL(tim_ring->max_tout / (hbkts - 1), 10);

        if ((tck_nsec < TICK2NSEC(OTX2_TIM_MIN_TMO_TKS,
                                  tim_ring->tenns_clk_freq) ||
            hbkts > OTX2_TIM_MAX_BUCKETS))
                hbkts = 0;

        lbkts = rte_align32prevpow2(tim_ring->nb_bkts);
        tck_nsec = RTE_ALIGN_MUL_CEIL((tim_ring->max_tout / (lbkts - 1)), 10);

        if ((tck_nsec < TICK2NSEC(OTX2_TIM_MIN_TMO_TKS,
                                  tim_ring->tenns_clk_freq) ||
            lbkts > OTX2_TIM_MAX_BUCKETS))
                lbkts = 0;

                tim_ring->nb_bkts = lbkts;

                tim_ring->nb_bkts = hbkts;

        tim_ring->nb_bkts = (hbkts - tim_ring->nb_bkts) <
                (tim_ring->nb_bkts - lbkts) ? hbkts : lbkts;

        tim_ring->optimized = true;
        tim_ring->tck_nsec = RTE_ALIGN_MUL_CEIL((tim_ring->max_tout /
                                                (tim_ring->nb_bkts - 1)), 10);
        otx2_tim_dbg("Optimized configured values");
        otx2_tim_dbg("Nb_bkts  : %" PRIu32 "", tim_ring->nb_bkts);
        otx2_tim_dbg("Tck_nsec : %" PRIu64 "", tim_ring->tck_nsec);
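
/*
 * Create the mempool that supplies timer chunk buffers. When NPA is in use
 * the pool is backed by the platform (NPA) mempool ops so the hardware can
 * allocate and free chunks through the pool's aura; no software cache is
 * configured and the don't-free-buffer mode is left disabled. With NPA
 * disabled a plain software mempool is used instead and don't-free-buffer
 * is enabled.
 */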
tim_chnk_pool_create(struct otx2_tim_ring *tim_ring,
                     struct rte_event_timer_adapter_conf *rcfg)
        unsigned int cache_sz = (tim_ring->nb_chunks / 1.5);
        unsigned int mp_flags = 0;

        /* Create chunk pool. */
        if (rcfg->flags & RTE_EVENT_TIMER_ADAPTER_F_SP_PUT) {
                mp_flags = MEMPOOL_F_SP_PUT | MEMPOOL_F_SC_GET;
                otx2_tim_dbg("Using single producer mode");
                tim_ring->prod_type_sp = true;
        }

        snprintf(pool_name, sizeof(pool_name), "otx2_tim_chunk_pool%d",
                 tim_ring->ring_id);

        if (cache_sz > RTE_MEMPOOL_CACHE_MAX_SIZE)
                cache_sz = RTE_MEMPOOL_CACHE_MAX_SIZE;

        if (!tim_ring->disable_npa) {
                /* The NPA pool needs no cache as frees are not visible to SW. */
                tim_ring->chunk_pool = rte_mempool_create_empty(pool_name,
                                tim_ring->nb_chunks, tim_ring->chunk_sz,
                                0, 0, rte_socket_id(), mp_flags);

                if (tim_ring->chunk_pool == NULL) {
                        otx2_err("Unable to create chunkpool.");

                rc = rte_mempool_set_ops_byname(tim_ring->chunk_pool,
                                                rte_mbuf_platform_mempool_ops(),
                                                NULL);
                        otx2_err("Unable to set chunkpool ops");

                rc = rte_mempool_populate_default(tim_ring->chunk_pool);
                        otx2_err("Unable to populate chunkpool.");

                tim_ring->aura = npa_lf_aura_handle_to_aura(
                                tim_ring->chunk_pool->pool_id);
                tim_ring->ena_dfb = 0;
        } else {
                tim_ring->chunk_pool = rte_mempool_create(pool_name,
                                tim_ring->nb_chunks, tim_ring->chunk_sz,
                                cache_sz, 0, NULL, NULL, NULL, NULL,
                                rte_socket_id(), mp_flags);

                if (tim_ring->chunk_pool == NULL) {
                        otx2_err("Unable to create chunkpool.");

                tim_ring->ena_dfb = 1;

        rte_mempool_free(tim_ring->chunk_pool);
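
/*
 * Translate TIM AF mailbox error codes into human readable log messages.
 */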
        case TIM_AF_NO_RINGS_LEFT:
                otx2_err("Unable to allocate new TIM ring.");
        case TIM_AF_INVALID_NPA_PF_FUNC:
                otx2_err("Invalid NPA pf func.");
        case TIM_AF_INVALID_SSO_PF_FUNC:
                otx2_err("Invalid SSO pf func.");
        case TIM_AF_RING_STILL_RUNNING:
                otx2_tim_dbg("Ring busy.");
        case TIM_AF_LF_INVALID:
                otx2_err("Invalid ring id.");
        case TIM_AF_CSIZE_NOT_ALIGNED:
                otx2_err("Chunk size specified needs to be a multiple of 16.");
        case TIM_AF_CSIZE_TOO_SMALL:
                otx2_err("Chunk size too small.");
        case TIM_AF_CSIZE_TOO_BIG:
                otx2_err("Chunk size too big.");
        case TIM_AF_INTERVAL_TOO_SMALL:
                otx2_err("Bucket traversal interval too small.");
        case TIM_AF_INVALID_BIG_ENDIAN_VALUE:
                otx2_err("Invalid big endian value.");
        case TIM_AF_INVALID_CLOCK_SOURCE:
                otx2_err("Invalid clock source specified.");
        case TIM_AF_GPIO_CLK_SRC_NOT_ENABLED:
                otx2_err("GPIO clock source not enabled.");
        case TIM_AF_INVALID_BSIZE:
                otx2_err("Invalid bucket size.");
        case TIM_AF_INVALID_ENABLE_PERIODIC:
                otx2_err("Invalid enable periodic value.");
        case TIM_AF_INVALID_ENABLE_DONTFREE:
                otx2_err("Invalid don't free value.");
        case TIM_AF_ENA_DONTFRE_NSET_PERIODIC:
                otx2_err("Don't free bit not set when periodic is enabled.");
        case TIM_AF_RING_ALREADY_DISABLED:
                otx2_err("Ring already stopped.");
        default:
                otx2_err("Unknown error.");
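
/*
 * Timer adapter creation: allocate a TIM LF over the mailbox, validate (and
 * optionally adjust) the requested tick period against the minimum the
 * hardware supports, size the bucket array and chunk pool from the adapter
 * configuration, configure the ring through the mailbox, program the ring's
 * bucket base and aura registers in BAR2, register the ring interrupt,
 * select the fastpath ops and grow the SSO XAE count to cover the timer
 * events.
 */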
otx2_tim_ring_create(struct rte_event_timer_adapter *adptr)
        struct rte_event_timer_adapter_conf *rcfg = &adptr->data->conf;
        struct otx2_tim_evdev *dev = tim_priv_get();
        struct otx2_tim_ring *tim_ring;
        struct tim_config_req *cfg_req;
        struct tim_ring_req *free_req;
        struct tim_lf_alloc_req *req;
        struct tim_lf_alloc_rsp *rsp;

        if (adptr->data->id >= dev->nb_rings)

        req = otx2_mbox_alloc_msg_tim_lf_alloc(dev->mbox);
        req->npa_pf_func = otx2_npa_pf_func_get();
        req->sso_pf_func = otx2_sso_pf_func_get();
        req->ring = adptr->data->id;

        rc = otx2_mbox_process_msg(dev->mbox, (void **)&rsp);

        if (NSEC2TICK(RTE_ALIGN_MUL_CEIL(rcfg->timer_tick_ns, 10),
                      rsp->tenns_clk) < OTX2_TIM_MIN_TMO_TKS) {
                if (rcfg->flags & RTE_EVENT_TIMER_ADAPTER_F_ADJUST_RES)
                        rcfg->timer_tick_ns = TICK2NSEC(OTX2_TIM_MIN_TMO_TKS,
                                                        rsp->tenns_clk);

        tim_ring = rte_zmalloc("otx2_tim_prv", sizeof(struct otx2_tim_ring), 0);
        if (tim_ring == NULL) {

        adptr->data->adapter_priv = tim_ring;

        tim_ring->tenns_clk_freq = rsp->tenns_clk;
        tim_ring->clk_src = (int)rcfg->clk_src;
        tim_ring->ring_id = adptr->data->id;
        tim_ring->tck_nsec = RTE_ALIGN_MUL_CEIL(rcfg->timer_tick_ns, 10);
        tim_ring->max_tout = rcfg->max_tmo_ns;
        tim_ring->nb_bkts = (tim_ring->max_tout / tim_ring->tck_nsec);
        tim_ring->chunk_sz = dev->chunk_sz;
        nb_timers = rcfg->nb_timers;
        tim_ring->disable_npa = dev->disable_npa;

        tim_ring->nb_chunks = nb_timers / OTX2_TIM_NB_CHUNK_SLOTS(
                                                        tim_ring->chunk_sz);
        tim_ring->nb_chunk_slots = OTX2_TIM_NB_CHUNK_SLOTS(tim_ring->chunk_sz);

        /* Try to optimize the bucket parameters. */
        if ((rcfg->flags & RTE_EVENT_TIMER_ADAPTER_F_ADJUST_RES)) {
                if (rte_is_power_of_2(tim_ring->nb_bkts))
                        tim_ring->optimized = true;
                else
                        tim_optimize_bkt_param(tim_ring);
        }

        tim_ring->nb_chunks = tim_ring->nb_chunks * tim_ring->nb_bkts;

        /* Create buckets. */
        tim_ring->bkt = rte_zmalloc("otx2_tim_bucket", (tim_ring->nb_bkts) *
                                    sizeof(struct otx2_tim_bkt),
                                    RTE_CACHE_LINE_SIZE);
        if (tim_ring->bkt == NULL)

        rc = tim_chnk_pool_create(tim_ring, rcfg);

        cfg_req = otx2_mbox_alloc_msg_tim_config_ring(dev->mbox);

        cfg_req->ring = tim_ring->ring_id;
        cfg_req->bigendian = false;
        cfg_req->clocksource = tim_ring->clk_src;
        cfg_req->enableperiodic = false;
        cfg_req->enabledontfreebuffer = tim_ring->ena_dfb;
        cfg_req->bucketsize = tim_ring->nb_bkts;
        cfg_req->chunksize = tim_ring->chunk_sz;
        cfg_req->interval = NSEC2TICK(tim_ring->tck_nsec,
                                      tim_ring->tenns_clk_freq);

        rc = otx2_mbox_process(dev->mbox);

        tim_ring->base = dev->bar2 +
                (RVU_BLOCK_ADDR_TIM << 20 | tim_ring->ring_id << 12);

        rc = tim_register_irq(tim_ring->ring_id);

        otx2_write64((uint64_t)tim_ring->bkt,
                     tim_ring->base + TIM_LF_RING_BASE);
        otx2_write64(tim_ring->aura, tim_ring->base + TIM_LF_RING_AURA);

        /* Set fastpath ops. */
        tim_set_fp_ops(tim_ring);

        /* Update SSO xae count. */
        sso_updt_xae_cnt(sso_pmd_priv(dev->event_dev), (void *)&nb_timers,
                         RTE_EVENT_TYPE_TIMER);
        sso_xae_reconfigure(dev->event_dev);

        rte_free(tim_ring->bkt);

        free_req = otx2_mbox_alloc_msg_tim_lf_free(dev->mbox);
        free_req->ring = adptr->data->id;
        otx2_mbox_process(dev->mbox);
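
/*
 * Timer adapter teardown: unregister the ring interrupt, release the TIM LF
 * back to the admin function over the mailbox and free the bucket array,
 * chunk pool and private ring structure.
 */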
otx2_tim_ring_free(struct rte_event_timer_adapter *adptr)
        struct otx2_tim_ring *tim_ring = adptr->data->adapter_priv;
        struct otx2_tim_evdev *dev = tim_priv_get();
        struct tim_ring_req *req;

        tim_unregister_irq(tim_ring->ring_id);

        req = otx2_mbox_alloc_msg_tim_lf_free(dev->mbox);
        req->ring = tim_ring->ring_id;

        rc = otx2_mbox_process(dev->mbox);

        rte_free(tim_ring->bkt);
        rte_mempool_free(tim_ring->chunk_pool);
        rte_free(adptr->data->adapter_priv);
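
/*
 * Event timer adapter capability callback: advertise that timer expiries
 * are delivered through an internal port (no service core needed), wire up
 * the adapter ops and remember the event device for later SSO
 * reconfiguration.
 */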
otx2_tim_caps_get(const struct rte_eventdev *evdev, uint64_t flags,
                  uint32_t *caps,
                  const struct rte_event_timer_adapter_ops **ops)
        struct otx2_tim_evdev *dev = tim_priv_get();

        otx2_tim_ops.init = otx2_tim_ring_create;
        otx2_tim_ops.uninit = otx2_tim_ring_free;
        otx2_tim_ops.get_info = otx2_tim_ring_info_get;

        /* Store evdev pointer for later use. */
        dev->event_dev = (struct rte_eventdev *)(uintptr_t)evdev;
        *caps = RTE_EVENT_TIMER_ADAPTER_CAP_INTERNAL_PORT;
        *ops = &otx2_tim_ops;

#define OTX2_TIM_DISABLE_NPA "tim_disable_npa"
#define OTX2_TIM_CHNK_SLOTS  "tim_chnk_slots"
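
/*
 * Device arguments understood by the TIM PMD:
 *   tim_disable_npa - back the chunk pool with a software mempool instead
 *                     of NPA.
 *   tim_chnk_slots  - number of timer slots per chunk, used to derive the
 *                     chunk size.
 * Example (illustrative only, the PCI BDF and values are placeholders):
 *   -w 0002:0e:00.0,tim_disable_npa=1,tim_chnk_slots=1023
 */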
tim_parse_devargs(struct rte_devargs *devargs, struct otx2_tim_evdev *dev)
        struct rte_kvargs *kvlist;

        kvlist = rte_kvargs_parse(devargs->args, NULL);

        rte_kvargs_process(kvlist, OTX2_TIM_DISABLE_NPA,
                           &parse_kvargs_flag, &dev->disable_npa);
        rte_kvargs_process(kvlist, OTX2_TIM_CHNK_SLOTS,
                           &parse_kvargs_value, &dev->chunk_slots);
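
/*
 * Probe-time initialisation (primary process only): reserve a memzone for
 * the TIM device state, parse device arguments, query how many TIM LFs the
 * admin function can provide, attach them, fetch their MSIX offsets and
 * derive the chunk size from the tim_chnk_slots devarg or fall back to the
 * default.
 */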
otx2_tim_init(struct rte_pci_device *pci_dev, struct otx2_dev *cmn_dev)
        struct rsrc_attach_req *atch_req;
        struct rsrc_detach_req *dtch_req;
        struct free_rsrcs_rsp *rsrc_cnt;
        const struct rte_memzone *mz;
        struct otx2_tim_evdev *dev;

        if (rte_eal_process_type() != RTE_PROC_PRIMARY)

        mz = rte_memzone_reserve(RTE_STR(OTX2_TIM_EVDEV_NAME),
                                 sizeof(struct otx2_tim_evdev),
                                 rte_socket_id(), 0);
                otx2_tim_dbg("Unable to allocate memory for TIM Event device");

        dev->pci_dev = pci_dev;
        dev->mbox = cmn_dev->mbox;
        dev->bar2 = cmn_dev->bar2;

        tim_parse_devargs(pci_dev->device.devargs, dev);

        otx2_mbox_alloc_msg_free_rsrc_cnt(dev->mbox);
        rc = otx2_mbox_process_msg(dev->mbox, (void *)&rsrc_cnt);
                otx2_err("Unable to get free rsrc count.");

        dev->nb_rings = rsrc_cnt->tim;

        if (!dev->nb_rings) {
                otx2_tim_dbg("No TIM Logical functions provisioned.");

        atch_req = otx2_mbox_alloc_msg_attach_resources(dev->mbox);
        atch_req->modify = true;
        atch_req->timlfs = dev->nb_rings;

        rc = otx2_mbox_process(dev->mbox);
                otx2_err("Unable to attach TIM rings.");

        rc = tim_get_msix_offsets();
                otx2_err("Unable to get MSIX offsets for TIM.");

        if (dev->chunk_slots &&
            dev->chunk_slots <= OTX2_TIM_MAX_CHUNK_SLOTS &&
            dev->chunk_slots >= OTX2_TIM_MIN_CHUNK_SLOTS) {
                dev->chunk_sz = (dev->chunk_slots + 1) *
                                OTX2_TIM_CHUNK_ALIGNMENT;
        } else {
                dev->chunk_sz = OTX2_TIM_RING_DEF_CHUNK_SZ;
        }

        dtch_req = otx2_mbox_alloc_msg_detach_resources(dev->mbox);
        dtch_req->partial = true;
        dtch_req->timlfs = true;

        otx2_mbox_process(dev->mbox);

        rte_memzone_free(mz);
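
/*
 * Tear down the TIM device (primary process only): detach the TIM LFs from
 * this device and release the TIM memzone.
 */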
        struct otx2_tim_evdev *dev = tim_priv_get();
        struct rsrc_detach_req *dtch_req;

        if (rte_eal_process_type() != RTE_PROC_PRIMARY)

        dtch_req = otx2_mbox_alloc_msg_detach_resources(dev->mbox);
        dtch_req->partial = true;
        dtch_req->timlfs = true;

        otx2_mbox_process(dev->mbox);
        rte_memzone_free(rte_memzone_lookup(RTE_STR(OTX2_TIM_EVDEV_NAME)));