/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2017 Cavium, Inc
 */

#include "timvf_evdev.h"

int otx_logtype_timvf;

RTE_INIT(otx_timvf_init_log);
static void
otx_timvf_init_log(void)
{
	otx_logtype_timvf = rte_log_register("pmd.event.octeontx.timer");
	if (otx_logtype_timvf >= 0)
		rte_log_set_level(otx_logtype_timvf, RTE_LOG_NOTICE);
}

struct __rte_packed timvf_mbox_dev_info {
	uint64_t ring_active[4];
	uint64_t clk_freq;
};

/* Response messages */
enum {
	MBOX_RET_SUCCESS,
	MBOX_RET_INVALID,
	MBOX_RET_INTERNAL_ERR,
};

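/* Query TIM device state and the coprocessor clock frequency over the mailbox. */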
static int
timvf_mbox_dev_info_get(struct timvf_mbox_dev_info *info)
{
	struct octeontx_mbox_hdr hdr = {0};
	uint16_t len = sizeof(struct timvf_mbox_dev_info);

	hdr.coproc = TIM_COPROC;
	hdr.msg = TIM_GET_DEV_INFO;
	hdr.vfid = 0; /* TIM DEV is always 0. TIM RING ID changes. */

	memset(info, 0, len);
	return octeontx_mbox_send(&hdr, NULL, 0, info, len);
}

static void
timvf_ring_info_get(const struct rte_event_timer_adapter *adptr,
		struct rte_event_timer_adapter_info *adptr_info)
{
	struct timvf_ring *timr = adptr->data->adapter_priv;

	adptr_info->max_tmo_ns = timr->max_tout;
	adptr_info->min_resolution_ns = timr->tck_nsec;
	rte_memcpy(&adptr_info->conf, &adptr->data->conf,
			sizeof(struct rte_event_timer_adapter_conf));
}

static int
timvf_ring_conf_set(struct timvf_ctrl_reg *rctl, uint8_t ring_id)
{
	struct octeontx_mbox_hdr hdr = {0};
	uint16_t len = sizeof(struct timvf_ctrl_reg);
	int ret;

	hdr.coproc = TIM_COPROC;
	hdr.msg = TIM_SET_RING_INFO;
	hdr.vfid = ring_id;

	ret = octeontx_mbox_send(&hdr, rctl, len, NULL, 0);
	if (ret < 0 || hdr.res_code != MBOX_RET_SUCCESS)
		return -EACCES;
	return 0;
}

static int
timvf_get_start_cyc(uint64_t *now, uint8_t ring_id)
{
	struct octeontx_mbox_hdr hdr = {0};

	hdr.coproc = TIM_COPROC;
	hdr.msg = TIM_RING_START_CYC_GET;
	hdr.vfid = ring_id;

	*now = 0;
	return octeontx_mbox_send(&hdr, NULL, 0, now, sizeof(uint64_t));
}

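/*
 * Round nb_bkts to a nearby power of two (whichever of the next/previous
 * power of two deviates least) so the target bucket can be found with a
 * mask (bkt_and) rather than a modulo (bkt_mod), stretching tck_nsec so
 * max_tout stays covered.
 */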
static int
optimize_bucket_parameters(struct timvf_ring *timr)
{
	uint32_t hbkts;
	uint32_t lbkts;
	uint64_t tck_nsec;

	hbkts = rte_align32pow2(timr->nb_bkts);
	tck_nsec = RTE_ALIGN_MUL_CEIL(timr->max_tout / (hbkts - 1), 10);

	if ((tck_nsec < 1000 || hbkts > TIM_MAX_BUCKETS))
		hbkts = 0;

	lbkts = rte_align32prevpow2(timr->nb_bkts);
	tck_nsec = RTE_ALIGN_MUL_CEIL((timr->max_tout / (lbkts - 1)), 10);

	if ((tck_nsec < 1000 || lbkts > TIM_MAX_BUCKETS))
		lbkts = 0;

	if (!hbkts && !lbkts)
		return 0;

	if (!hbkts) {
		timr->nb_bkts = lbkts;
		goto end;
	} else if (!lbkts) {
		timr->nb_bkts = hbkts;
		goto end;
	}

	timr->nb_bkts = (hbkts - timr->nb_bkts) <
		(timr->nb_bkts - lbkts) ? hbkts : lbkts;
end:
	timr->get_target_bkt = bkt_and;
	timr->tck_nsec = RTE_ALIGN_MUL_CEIL((timr->max_tout /
				(timr->nb_bkts - 1)), 10);
	return 1;
}

static int
timvf_ring_start(const struct rte_event_timer_adapter *adptr)
{
	int ret;
	uint8_t use_fpa = 0;
	uint64_t interval;
	uintptr_t pool;
	struct timvf_ctrl_reg rctrl;
	struct timvf_mbox_dev_info dinfo;
	struct timvf_ring *timr = adptr->data->adapter_priv;

	ret = timvf_mbox_dev_info_get(&dinfo);
	if (ret < 0 || ret != sizeof(struct timvf_mbox_dev_info))
		return -EINVAL;

	/* Calculate the interval cycles according to clock source. */
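	/* NSEC2CLK scales the tick period to cycles of the selected source clock. */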
	switch (timr->clk_src) {
	case TIM_CLK_SRC_SCLK:
		interval = NSEC2CLK(timr->tck_nsec, dinfo.clk_freq);
		break;
	case TIM_CLK_SRC_GPIO:
		/* GPIO doesn't work on tck_nsec. */
		interval = 0;
		break;
	case TIM_CLK_SRC_GTI:
		interval = NSEC2CLK(timr->tck_nsec, dinfo.clk_freq);
		break;
	case TIM_CLK_SRC_PTP:
		interval = NSEC2CLK(timr->tck_nsec, dinfo.clk_freq);
		break;
	default:
		timvf_log_err("Unsupported clock source configured %d",
				timr->clk_src);
		return -EINVAL;
	}

	if (!strcmp(rte_mbuf_best_mempool_ops(), "octeontx_fpavf"))
		use_fpa = 1;

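	/* CTRL0: bucket traversal interval in cycles of the selected clock. */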
	rctrl.rctrl0 = interval;

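	/* CTRL1: clock source, bucket count and ring enable bits. */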
	rctrl.rctrl1 = (uint64_t)(timr->clk_src) << 51 |
		1ull << 48 /* LOCK_EN (Enable hw bucket lock mechanism) */ |
		1ull << 47 /* ENA */ |
		1ull << 44 /* ENA_LDWB */ |
		(timr->nb_bkts - 1);

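	/* CTRL2: chunk size in units of 16 bytes. */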
	rctrl.rctrl2 = (uint64_t)(TIM_CHUNK_SIZE / 16) << 40;

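	/*
	 * With an octeontx FPA-backed chunk pool the hardware can allocate
	 * and free chunks on its own via the gaura; otherwise set ENA_DFB so
	 * it never frees the software-managed chunks.
	 */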
	if (use_fpa) {
		pool = (uintptr_t)((struct rte_mempool *)
				timr->chunk_pool)->pool_id;
		ret = octeontx_fpa_bufpool_gpool(pool);
		if (ret < 0) {
			timvf_log_dbg("Unable to get gaura id");
			ret = -ENOMEM;
			goto error;
		}
		timvf_write64((uint64_t)ret,
				(uint8_t *)timr->vbar0 + TIM_VRING_AURA);
	} else {
		rctrl.rctrl1 |= 1ull << 43 /* ENA_DFB (Enable don't free) */;
	}

	timvf_write64((uintptr_t)timr->bkt,
			(uint8_t *)timr->vbar0 + TIM_VRING_BASE);
	timvf_set_chunk_refill(timr, use_fpa);
	if (timvf_ring_conf_set(&rctrl, timr->tim_ring_id)) {
		ret = -EACCES;
		goto error;
	}

	if (timvf_get_start_cyc(&timr->ring_start_cyc,
				timr->tim_ring_id) < 0) {
		ret = -EACCES;
		goto error;
	}
	timr->tck_int = NSEC2CLK(timr->tck_nsec, rte_get_timer_hz());
	timr->fast_div = rte_reciprocal_value_u64(timr->tck_int);
	timvf_log_info("nb_bkts %d min_ns %"PRIu64" min_cyc %"PRIu64""
			" maxtmo %"PRIu64"\n",
			timr->nb_bkts, timr->tck_nsec, interval,
			timr->max_tout);

	return 0;
error:
	rte_free(timr->bkt);
	rte_mempool_free(timr->chunk_pool);
	return ret;
}

static int
timvf_ring_stop(const struct rte_event_timer_adapter *adptr)
{
	struct timvf_ring *timr = adptr->data->adapter_priv;
	struct timvf_ctrl_reg rctrl = {0};

	rctrl.rctrl0 = timvf_read64((uint8_t *)timr->vbar0 + TIM_VRING_CTL0);
	rctrl.rctrl1 = timvf_read64((uint8_t *)timr->vbar0 + TIM_VRING_CTL1);
	rctrl.rctrl1 &= ~(1ull << 47); /* Disable */
	rctrl.rctrl2 = timvf_read64((uint8_t *)timr->vbar0 + TIM_VRING_CTL2);

	if (timvf_ring_conf_set(&rctrl, timr->tim_ring_id))
		return -EACCES;
	return 0;
}

static int
timvf_ring_create(struct rte_event_timer_adapter *adptr)
{
	char pool_name[25];
	int ret;
	uint64_t nb_timers;
	struct rte_event_timer_adapter_conf *rcfg = &adptr->data->conf;
	struct timvf_ring *timr;
	struct timvf_info tinfo;
	const char *mempool_ops;
	unsigned int mp_flags = 0;

	if (timvf_info(&tinfo) < 0)
		return -ENODEV;

	if (adptr->data->id >= tinfo.total_timvfs)
		return -ENODEV;

	timr = rte_zmalloc("octeontx_timvf_priv",
			sizeof(struct timvf_ring), 0);
	if (timr == NULL)
		return -ENOMEM;

	adptr->data->adapter_priv = timr;
	/* Check config parameters. */
	if ((rcfg->clk_src != RTE_EVENT_TIMER_ADAPTER_CPU_CLK) &&
			(!rcfg->timer_tick_ns ||
			 rcfg->timer_tick_ns < TIM_MIN_INTERVAL)) {
		timvf_log_err("Too low timer ticks");
		goto freemem;
	}

	timr->clk_src = (int) rcfg->clk_src;
	timr->tim_ring_id = adptr->data->id;
	timr->tck_nsec = RTE_ALIGN_MUL_CEIL(rcfg->timer_tick_ns, 10);
	timr->max_tout = rcfg->max_tmo_ns;
	timr->nb_bkts = (timr->max_tout / timr->tck_nsec);
	timr->vbar0 = timvf_bar(timr->tim_ring_id, 0);
	timr->bkt_pos = (uint8_t *)timr->vbar0 + TIM_VRING_REL;
	nb_timers = rcfg->nb_timers;
	timr->get_target_bkt = bkt_mod;

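	/* Each chunk holds nb_chunk_slots timer entries; size the pool for nb_timers. */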
	timr->nb_chunks = nb_timers / nb_chunk_slots;

	/* Try to optimize the bucket parameters. */
	if ((rcfg->flags & RTE_EVENT_TIMER_ADAPTER_F_ADJUST_RES)
			&& !rte_is_power_of_2(timr->nb_bkts)) {
		if (optimize_bucket_parameters(timr)) {
			timvf_log_info("Optimized configured values");
			timvf_log_dbg("nb_bkts : %"PRIu32"", timr->nb_bkts);
			timvf_log_dbg("tck_nsec : %"PRIu64"", timr->tck_nsec);
		} else
			timvf_log_info("Failed to optimize configured values");
	}

	if (rcfg->flags & RTE_EVENT_TIMER_ADAPTER_F_SP_PUT) {
		mp_flags = MEMPOOL_F_SP_PUT | MEMPOOL_F_SC_GET;
		timvf_log_info("Using single producer mode");
	}

	timr->bkt = rte_zmalloc("octeontx_timvf_bucket",
			(timr->nb_bkts) * sizeof(struct tim_mem_bucket),
			0);
	if (timr->bkt == NULL)
		goto freemem;

	snprintf(pool_name, sizeof(pool_name), "timvf_chunk_pool%d",
			timr->tim_ring_id);
	timr->chunk_pool = (void *)rte_mempool_create_empty(pool_name,
			timr->nb_chunks, TIM_CHUNK_SIZE, 0, 0, rte_socket_id(),
			mp_flags);
	if (!timr->chunk_pool) {
		rte_free(timr->bkt);
		timvf_log_err("Unable to create chunkpool.");
		goto freemem;
	}

	mempool_ops = rte_mbuf_best_mempool_ops();
	ret = rte_mempool_set_ops_byname(timr->chunk_pool,
			mempool_ops, NULL);
	if (ret != 0) {
		timvf_log_err("Unable to set chunkpool ops.");
		goto error;
	}

	ret = rte_mempool_populate_default(timr->chunk_pool);
	if (ret < 0) {
		timvf_log_err("Unable to populate chunkpool.");
		goto error;
	}

	timvf_write64(0, (uint8_t *)timr->vbar0 + TIM_VRING_BASE);
	timvf_write64(0, (uint8_t *)timr->vbar0 + TIM_VF_NRSPERR_INT);
	timvf_write64(0, (uint8_t *)timr->vbar0 + TIM_VF_NRSPERR_INT_W1S);
	timvf_write64(0x7, (uint8_t *)timr->vbar0 + TIM_VF_NRSPERR_ENA_W1C);
	timvf_write64(0x7, (uint8_t *)timr->vbar0 + TIM_VF_NRSPERR_ENA_W1S);

	return 0;
error:
	rte_free(timr->bkt);
	rte_mempool_free(timr->chunk_pool);
freemem:
	rte_free(timr);
	return -ENOMEM;
}

static int
timvf_ring_free(struct rte_event_timer_adapter *adptr)
{
	struct timvf_ring *timr = adptr->data->adapter_priv;

	rte_mempool_free(timr->chunk_pool);
	rte_free(timr->bkt);
	rte_free(adptr->data->adapter_priv);
	return 0;
}

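/*
 * Ticks elapsed = (rte_rdtsc() - ring_start_cyc) / tck_int, computed with
 * the precomputed reciprocal to avoid a divide on the stats path.
 */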
static int
timvf_stats_get(const struct rte_event_timer_adapter *adapter,
		struct rte_event_timer_adapter_stats *stats)
{
	struct timvf_ring *timr = adapter->data->adapter_priv;
	uint64_t bkt_cyc = rte_rdtsc() - timr->ring_start_cyc;

	stats->evtim_exp_count = timr->tim_arm_cnt;
	stats->ev_enq_count = timr->tim_arm_cnt;
	stats->adapter_tick_count = rte_reciprocal_divide_u64(bkt_cyc,
				&timr->fast_div);
	return 0;
}

static int
timvf_stats_reset(const struct rte_event_timer_adapter *adapter)
{
	struct timvf_ring *timr = adapter->data->adapter_priv;

	timr->tim_arm_cnt = 0;
	return 0;
}

static struct rte_event_timer_adapter_ops timvf_ops = {
	.init = timvf_ring_create,
	.uninit = timvf_ring_free,
	.start = timvf_ring_start,
	.stop = timvf_ring_stop,
	.get_info = timvf_ring_info_get,
};

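/*
 * The fast-path arm/cancel burst ops are bound in caps_get below, once the
 * adapter's SP_PUT flag and stats preference are known.
 */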
int
timvf_timer_adapter_caps_get(const struct rte_eventdev *dev, uint64_t flags,
		uint32_t *caps, const struct rte_event_timer_adapter_ops **ops,
		uint8_t enable_stats)
{
	RTE_SET_USED(dev);

	if (enable_stats) {
		timvf_ops.stats_get = timvf_stats_get;
		timvf_ops.stats_reset = timvf_stats_reset;
	}

	if (flags & RTE_EVENT_TIMER_ADAPTER_F_SP_PUT)
		timvf_ops.arm_burst = enable_stats ?
			timvf_timer_arm_burst_sp_stats :
			timvf_timer_arm_burst_sp;
	else
		timvf_ops.arm_burst = enable_stats ?
			timvf_timer_arm_burst_mp_stats :
			timvf_timer_arm_burst_mp;

	timvf_ops.arm_tmo_tick_burst = enable_stats ?
		timvf_timer_arm_tmo_brst_stats :
		timvf_timer_arm_tmo_brst;
	timvf_ops.cancel_burst = timvf_timer_cancel_burst;
	*caps = RTE_EVENT_TIMER_ADAPTER_CAP_INTERNAL_PORT;
	*ops = &timvf_ops;
	return 0;
}
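
/*
 * Illustrative application-side usage of this adapter, a sketch only:
 * dev_id, tick and timeout values are hypothetical, and an octeontx
 * event device is assumed to be configured already.
 *
 *	struct rte_event_timer_adapter_conf conf = {
 *		.event_dev_id = dev_id,
 *		.timer_adapter_id = 0,
 *		.clk_src = RTE_EVENT_TIMER_ADAPTER_CPU_CLK,
 *		.timer_tick_ns = 100 * 1000,
 *		.max_tmo_ns = 2 * 1000000000ULL,
 *		.nb_timers = 1 << 16,
 *		.flags = RTE_EVENT_TIMER_ADAPTER_F_ADJUST_RES,
 *	};
 *	struct rte_event_timer_adapter *adptr =
 *		rte_event_timer_adapter_create(&conf);
 *
 *	rte_event_timer_adapter_start(adptr);
 */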