/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2017 Cavium, Inc
 */

#include "timvf_evdev.h"

RTE_LOG_REGISTER(otx_logtype_timvf, pmd.event.octeontx.timer, NOTICE);
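/*
 * Device-level information returned by the TIM PF over the OCTEON TX
 * mailbox. The structure is __rte_packed so that it matches the layout of
 * the firmware response; clk_freq is used later to convert the tick period
 * from nanoseconds into TIM clock cycles.
 */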
struct __rte_packed timvf_mbox_dev_info {
	uint64_t ring_active[4];
	uint64_t clk_freq;
};

/* Response messages */
enum {
	MBOX_RET_SUCCESS,
	MBOX_RET_INTERNAL_ERR,
};
static int
timvf_mbox_dev_info_get(struct timvf_mbox_dev_info *info)
{
	struct octeontx_mbox_hdr hdr = {0};
	uint16_t len = sizeof(struct timvf_mbox_dev_info);

	hdr.coproc = TIM_COPROC;
	hdr.msg = TIM_GET_DEV_INFO;
	hdr.vfid = 0; /* TIM DEV is always 0. TIM RING ID changes. */

	return octeontx_mbox_send(&hdr, NULL, 0, info, len);
}
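/*
 * rte_event_timer_adapter get_info callback: report the ring's maximum
 * timeout and tick resolution and echo back the adapter configuration.
 */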
static void
timvf_ring_info_get(const struct rte_event_timer_adapter *adptr,
		struct rte_event_timer_adapter_info *adptr_info)
{
	struct timvf_ring *timr = adptr->data->adapter_priv;

	adptr_info->max_tmo_ns = timr->max_tout;
	adptr_info->min_resolution_ns = timr->tck_nsec;
	rte_memcpy(&adptr_info->conf, &adptr->data->conf,
			sizeof(struct rte_event_timer_adapter_conf));
}
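/*
 * Program the ring control registers (CTL0/1/2) through the PF mailbox.
 * A negative send status or a non-success response code from the PF is
 * treated as failure.
 */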
static int
timvf_ring_conf_set(struct timvf_ctrl_reg *rctl, uint8_t ring_id)
{
	struct octeontx_mbox_hdr hdr = {0};
	uint16_t len = sizeof(struct timvf_ctrl_reg);
	int ret;

	hdr.coproc = TIM_COPROC;
	hdr.msg = TIM_SET_RING_INFO;
	hdr.vfid = ring_id;

	ret = octeontx_mbox_send(&hdr, rctl, len, NULL, 0);
	if (ret < 0 || hdr.res_code != MBOX_RET_SUCCESS)
		return -EACCES;

	return 0;
}
static int
timvf_get_start_cyc(uint64_t *now, uint8_t ring_id)
{
	struct octeontx_mbox_hdr hdr = {0};

	hdr.coproc = TIM_COPROC;
	hdr.msg = TIM_RING_START_CYC_GET;
	hdr.vfid = ring_id;

	return octeontx_mbox_send(&hdr, NULL, 0, now, sizeof(uint64_t));
}
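/*
 * Round the bucket count to a power of two so that the target bucket can be
 * selected with a mask (bkt_and) instead of a modulo (bkt_mod). Both the
 * next higher and the next lower power of two are evaluated; a candidate is
 * rejected if it would push the tick below 1000 ns or exceed
 * TIM_MAX_BUCKETS, otherwise the one closer to the requested count wins and
 * tck_nsec is recomputed from it.
 * Illustrative numbers: max_tout = 10 ms and nb_bkts = 100 give hbkts = 128
 * and lbkts = 64; 128 is closer to 100, so nb_bkts becomes 128 and
 * tck_nsec = 10000000 / 127 rounded up to a multiple of 10, i.e. 78750 ns.
 */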
static int
optimize_bucket_parameters(struct timvf_ring *timr)
{
	uint32_t hbkts, lbkts;
	uint64_t tck_nsec;

	hbkts = rte_align32pow2(timr->nb_bkts);
	tck_nsec = RTE_ALIGN_MUL_CEIL(timr->max_tout / (hbkts - 1), 10);
	if (tck_nsec < 1000 || hbkts > TIM_MAX_BUCKETS)
		hbkts = 0;

	lbkts = rte_align32prevpow2(timr->nb_bkts);
	tck_nsec = RTE_ALIGN_MUL_CEIL((timr->max_tout / (lbkts - 1)), 10);
	if (tck_nsec < 1000 || lbkts > TIM_MAX_BUCKETS)
		lbkts = 0;

	/* Neither candidate fits the constraints; keep the original count. */
	if (!hbkts && !lbkts)
		return 0;

	if (!hbkts)
		timr->nb_bkts = lbkts;
	else if (!lbkts)
		timr->nb_bkts = hbkts;
	else
		timr->nb_bkts = (hbkts - timr->nb_bkts) <
				(timr->nb_bkts - lbkts) ? hbkts : lbkts;

	timr->get_target_bkt = bkt_and;
	timr->tck_nsec = RTE_ALIGN_MUL_CEIL((timr->max_tout /
			(timr->nb_bkts - 1)), 10);
	return 1;
}
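/*
 * Start callback: fetch device info from the PF, convert the tick period
 * into TIM clock cycles for the configured clock source, program the ring
 * control registers and the chunk pool (FPA-backed or DPDK mempool-backed),
 * then record the ring start cycle and a reciprocal divisor so that stats
 * can translate TSC deltas into adapter ticks.
 */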
static int
timvf_ring_start(const struct rte_event_timer_adapter *adptr)
{
	int ret;
	uint8_t use_fpa = 0;
	uint64_t interval;
	uintptr_t pool;
	struct timvf_ctrl_reg rctrl;
	struct timvf_mbox_dev_info dinfo;
	struct timvf_ring *timr = adptr->data->adapter_priv;

	ret = timvf_mbox_dev_info_get(&dinfo);
	if (ret < 0 || ret != sizeof(struct timvf_mbox_dev_info))
		return -EINVAL;

	/* Calculate the interval cycles according to clock source. */
	switch (timr->clk_src) {
	case TIM_CLK_SRC_SCLK:
		interval = NSEC2CLK(timr->tck_nsec, dinfo.clk_freq);
		break;
	case TIM_CLK_SRC_GPIO:
		/* GPIO doesn't work on tck_nsec. */
		interval = 0;
		break;
	case TIM_CLK_SRC_GTI:
		interval = NSEC2CLK(timr->tck_nsec, dinfo.clk_freq);
		break;
	case TIM_CLK_SRC_PTP:
		interval = NSEC2CLK(timr->tck_nsec, dinfo.clk_freq);
		break;
	default:
		timvf_log_err("Unsupported clock source configured %d",
				timr->clk_src);
		return -EINVAL;
	}

	if (!strcmp(rte_mbuf_best_mempool_ops(), "octeontx_fpavf"))
		use_fpa = 1;

	memset(&rctrl, 0, sizeof(struct timvf_ctrl_reg));
	rctrl.rctrl0 = interval;
	rctrl.rctrl1 = (uint64_t)(timr->clk_src) << 51 |
		1ull << 48 /* LOCK_EN (Enable hw bucket lock mechanism) */ |
		1ull << 47 /* ENA */ |
		1ull << 44 /* ENA_LDWB */ |
		(timr->nb_bkts - 1);
	rctrl.rctrl2 = (uint64_t)(TIM_CHUNK_SIZE / 16) << 40;

	if (use_fpa) {
		pool = (uintptr_t)((struct rte_mempool *)
				timr->chunk_pool)->pool_id;
		ret = octeontx_fpa_bufpool_gaura(pool);
		if (ret < 0) {
			timvf_log_dbg("Unable to get gaura id");
			ret = -ENOMEM;
			goto error;
		}
		timvf_write64((uint64_t)ret,
				(uint8_t *)timr->vbar0 + TIM_VRING_AURA);
	} else {
		rctrl.rctrl1 |= 1ull << 43 /* ENA_DFB (Enable don't free) */;
	}

	timvf_write64((uintptr_t)timr->bkt,
			(uint8_t *)timr->vbar0 + TIM_VRING_BASE);
	timvf_set_chunk_refill(timr, use_fpa);
	if (timvf_ring_conf_set(&rctrl, timr->tim_ring_id)) {
		ret = -EACCES;
		goto error;
	}

	if (timvf_get_start_cyc(&timr->ring_start_cyc,
				timr->tim_ring_id) < 0) {
		ret = -EACCES;
		goto error;
	}
	timr->tck_int = NSEC2CLK(timr->tck_nsec, rte_get_timer_hz());
	timr->fast_div = rte_reciprocal_value_u64(timr->tck_int);
	timvf_log_info("nb_bkts %d min_ns %"PRIu64" min_cyc %"PRIu64""
			" maxtmo %"PRIu64"\n",
			timr->nb_bkts, timr->tck_nsec, interval,
			timr->max_tout);

	return 0;
error:
	rte_mempool_free(timr->chunk_pool);
	return ret;
}
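/*
 * Stop callback: re-read the current CTL register values, clear the ENA bit
 * (bit 47) in CTL1 and write the configuration back through the PF mailbox,
 * leaving the rest of the ring setup untouched.
 */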
static int
timvf_ring_stop(const struct rte_event_timer_adapter *adptr)
{
	struct timvf_ring *timr = adptr->data->adapter_priv;
	struct timvf_ctrl_reg rctrl = {0};

	rctrl.rctrl0 = timvf_read64((uint8_t *)timr->vbar0 + TIM_VRING_CTL0);
	rctrl.rctrl1 = timvf_read64((uint8_t *)timr->vbar0 + TIM_VRING_CTL1);
	rctrl.rctrl1 &= ~(1ull << 47); /* Disable */
	rctrl.rctrl2 = timvf_read64((uint8_t *)timr->vbar0 + TIM_VRING_CTL2);

	if (timvf_ring_conf_set(&rctrl, timr->tim_ring_id))
		return -EACCES;

	return 0;
}
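/*
 * Adapter init callback: reserve a TIM ring, validate the requested tick and
 * timeout, derive the bucket count (optionally rounded to a power of two
 * when RTE_EVENT_TIMER_ADAPTER_F_ADJUST_RES is set), and allocate the bucket
 * array plus a mempool of chunk memory sized from nb_timers.
 */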
static int
timvf_ring_create(struct rte_event_timer_adapter *adptr)
{
	char pool_name[25];
	int ret;
	uint8_t tim_ring_id;
	uint64_t nb_timers;
	struct rte_event_timer_adapter_conf *rcfg = &adptr->data->conf;
	struct timvf_ring *timr;
	const char *mempool_ops;
	unsigned int mp_flags = 0;

	tim_ring_id = timvf_get_ring();
	if (tim_ring_id == UINT8_MAX)
		return -ENODEV;

	timr = rte_zmalloc("octeontx_timvf_priv",
			sizeof(struct timvf_ring), 0);
	if (timr == NULL)
		return -ENOMEM;

	adptr->data->adapter_priv = timr;
	/* Check config parameters. */
	if ((rcfg->clk_src != RTE_EVENT_TIMER_ADAPTER_CPU_CLK) &&
			(!rcfg->timer_tick_ns ||
			rcfg->timer_tick_ns < TIM_MIN_INTERVAL)) {
		timvf_log_err("Too low timer ticks");
		goto cfg_err;
	}

	timr->clk_src = (int) rcfg->clk_src;
	timr->tim_ring_id = tim_ring_id;
	timr->tck_nsec = RTE_ALIGN_MUL_CEIL(rcfg->timer_tick_ns, 10);
	timr->max_tout = rcfg->max_tmo_ns;
	timr->nb_bkts = (timr->max_tout / timr->tck_nsec);
	timr->vbar0 = timvf_bar(timr->tim_ring_id, 0);
	timr->bkt_pos = (uint8_t *)timr->vbar0 + TIM_VRING_REL;
	nb_timers = rcfg->nb_timers;
	timr->get_target_bkt = bkt_mod;

	timr->nb_chunks = nb_timers / nb_chunk_slots;

	/* Try to optimize the bucket parameters. */
	if ((rcfg->flags & RTE_EVENT_TIMER_ADAPTER_F_ADJUST_RES)
			&& !rte_is_power_of_2(timr->nb_bkts)) {
		if (optimize_bucket_parameters(timr)) {
			timvf_log_info("Optimized configured values");
			timvf_log_dbg("nb_bkts : %"PRIu32"", timr->nb_bkts);
			timvf_log_dbg("tck_nsec : %"PRIu64"", timr->tck_nsec);
		} else
			timvf_log_info("Failed to optimize configured values");
	}

	if (rcfg->flags & RTE_EVENT_TIMER_ADAPTER_F_SP_PUT) {
		mp_flags = MEMPOOL_F_SP_PUT | MEMPOOL_F_SC_GET;
		timvf_log_info("Using single producer mode");
	}

	timr->bkt = rte_zmalloc("octeontx_timvf_bucket",
			(timr->nb_bkts) * sizeof(struct tim_mem_bucket),
			0);
	if (timr->bkt == NULL)
		goto mem_err;

	snprintf(pool_name, sizeof(pool_name), "timvf_chunk_pool%d",
			timr->tim_ring_id);
	timr->chunk_pool = (void *)rte_mempool_create_empty(pool_name,
			timr->nb_chunks, TIM_CHUNK_SIZE, 0, 0, rte_socket_id(),
			mp_flags);
	if (!timr->chunk_pool) {
		timvf_log_err("Unable to create chunkpool.");
		goto mem_err;
	}

	mempool_ops = rte_mbuf_best_mempool_ops();
	ret = rte_mempool_set_ops_byname(timr->chunk_pool,
			mempool_ops, NULL);
	if (ret != 0) {
		timvf_log_err("Unable to set chunkpool ops.");
		goto mem_err;
	}

	ret = rte_mempool_populate_default(timr->chunk_pool);
	if (ret < 0) {
		timvf_log_err("Unable to populate chunkpool.");
		goto mem_err;
	}

	timvf_write64(0, (uint8_t *)timr->vbar0 + TIM_VRING_BASE);
	timvf_write64(0, (uint8_t *)timr->vbar0 + TIM_VF_NRSPERR_INT);
	timvf_write64(0, (uint8_t *)timr->vbar0 + TIM_VF_NRSPERR_INT_W1S);
	timvf_write64(0x7, (uint8_t *)timr->vbar0 + TIM_VF_NRSPERR_ENA_W1C);
	timvf_write64(0x7, (uint8_t *)timr->vbar0 + TIM_VF_NRSPERR_ENA_W1S);

	return 0;
mem_err:
	rte_mempool_free(timr->chunk_pool);
	rte_free(timr->bkt);
	rte_free(timr);
	return -ENOMEM;
cfg_err:
	rte_free(timr);
	return -EINVAL;
}
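/*
 * Adapter uninit callback: release the chunk pool, the bucket array and the
 * ring private data, and return the TIM ring to the free pool.
 */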
static int
timvf_ring_free(struct rte_event_timer_adapter *adptr)
{
	struct timvf_ring *timr = adptr->data->adapter_priv;

	rte_mempool_free(timr->chunk_pool);
	rte_free(timr->bkt);
	timvf_release_ring(timr->tim_ring_id);
	rte_free(adptr->data->adapter_priv);

	return 0;
}
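/*
 * Statistics callback: the armed-timer counters come straight from the ring,
 * while the adapter tick count is derived from the TSC cycles elapsed since
 * ring start divided by the tick length in cycles, using the reciprocal
 * divisor precomputed in timvf_ring_start().
 */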
static int
timvf_stats_get(const struct rte_event_timer_adapter *adapter,
		struct rte_event_timer_adapter_stats *stats)
{
	struct timvf_ring *timr = adapter->data->adapter_priv;
	uint64_t bkt_cyc = rte_rdtsc() - timr->ring_start_cyc;

	stats->evtim_exp_count = timr->tim_arm_cnt;
	stats->ev_enq_count = timr->tim_arm_cnt;
	stats->adapter_tick_count = rte_reciprocal_divide_u64(bkt_cyc,
			&timr->fast_div);
	return 0;
}
static int
timvf_stats_reset(const struct rte_event_timer_adapter *adapter)
{
	struct timvf_ring *timr = adapter->data->adapter_priv;

	timr->tim_arm_cnt = 0;
	return 0;
}
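/*
 * Control-path callbacks are fixed in this table; the fast-path arm and
 * cancel handlers (single- vs multi-producer, with or without statistics)
 * and the optional stats callbacks are filled in by
 * timvf_timer_adapter_caps_get() before the table is handed to the adapter
 * layer.
 */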
static struct rte_event_timer_adapter_ops timvf_ops = {
	.init = timvf_ring_create,
	.uninit = timvf_ring_free,
	.start = timvf_ring_start,
	.stop = timvf_ring_stop,
	.get_info = timvf_ring_info_get,
};
int
timvf_timer_adapter_caps_get(const struct rte_eventdev *dev, uint64_t flags,
		uint32_t *caps, const struct rte_event_timer_adapter_ops **ops,
		uint8_t enable_stats)
{
	RTE_SET_USED(dev);

	if (enable_stats) {
		timvf_ops.stats_get = timvf_stats_get;
		timvf_ops.stats_reset = timvf_stats_reset;
	}

	if (flags & RTE_EVENT_TIMER_ADAPTER_F_SP_PUT)
		timvf_ops.arm_burst = enable_stats ?
			timvf_timer_arm_burst_sp_stats :
			timvf_timer_arm_burst_sp;
	else
		timvf_ops.arm_burst = enable_stats ?
			timvf_timer_arm_burst_mp_stats :
			timvf_timer_arm_burst_mp;

	timvf_ops.arm_tmo_tick_burst = enable_stats ?
		timvf_timer_arm_tmo_brst_stats :
		timvf_timer_arm_tmo_brst;
	timvf_ops.cancel_burst = timvf_timer_cancel_burst;
	*caps = RTE_EVENT_TIMER_ADAPTER_CAP_INTERNAL_PORT;
	*ops = &timvf_ops;
	return 0;
}
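/*
 * RTE_EVENT_TIMER_ADAPTER_CAP_INTERNAL_PORT tells the adapter layer that the
 * TIM hardware injects expired timers into the event device itself, so no
 * service core is needed.
 *
 * Illustrative application-side usage (a sketch only, not part of this
 * driver; the ids and timing values below are arbitrary examples with a
 * 1 ms tick and a 1 s maximum timeout):
 *
 *	const struct rte_event_timer_adapter_conf conf = {
 *		.event_dev_id = 0,
 *		.timer_adapter_id = 0,
 *		.socket_id = rte_socket_id(),
 *		.clk_src = RTE_EVENT_TIMER_ADAPTER_CPU_CLK,
 *		.timer_tick_ns = 1000000,
 *		.max_tmo_ns = 1000000000,
 *		.nb_timers = 2048,
 *		.flags = RTE_EVENT_TIMER_ADAPTER_F_ADJUST_RES,
 *	};
 *	struct rte_event_timer_adapter *adptr =
 *		rte_event_timer_adapter_create(&conf);
 *
 * rte_event_timer_adapter_create() lands in timvf_ring_create() above and
 * rte_event_timer_adapter_start() in timvf_ring_start().
 */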