2 * SPDX-License-Identifier: BSD-3-Clause
3 * Copyright(c) 2017 Cavium, Inc
6 #include "timvf_evdev.h"
/*
 * Constructor (runs before main via RTE_INIT): register this PMD's
 * dynamic log type and default its level to NOTICE.
 * NOTE(review): excerpt is truncated — the function's storage class and
 * braces are not visible here.
 */
10 RTE_INIT(otx_timvf_init_log);
12 otx_timvf_init_log(void)
14 otx_logtype_timvf = rte_log_register("pmd.event.octeontx.timer");
/* rte_log_register() returns a negative value on failure; only set the
 * level when registration succeeded. */
15 if (otx_logtype_timvf >= 0)
16 rte_log_set_level(otx_logtype_timvf, RTE_LOG_NOTICE);
/*
 * Wire format of the PF's TIM_GET_DEV_INFO mailbox response (packed, no
 * padding). NOTE(review): excerpt is truncated — only the ring_active
 * bitmap is visible; the remaining fields (e.g. clk_freq, read in
 * timvf_ring_start below) and the closing brace lie outside this view.
 */
19 struct __rte_packed timvf_mbox_dev_info {
/* Bitmap of rings currently in use, one bit per ring (4 * 64 rings). */
20 uint64_t ring_active[4];
24 /* Response messages */
/* Mailbox response codes returned in hdr.res_code. Only the
 * internal-error member is visible in this excerpt; MBOX_RET_SUCCESS
 * (tested in timvf_ring_conf_set below) is defined outside this view. */
28 MBOX_RET_INTERNAL_ERR,
/*
 * Query TIM device-wide info (ring_active bitmap, clock frequency) from
 * the PF over the octeontx mailbox.
 *
 * @param info  Output buffer filled with the mailbox response.
 * @return Result of octeontx_mbox_send(): negative on failure; on
 *         success the caller (timvf_ring_start) compares it against
 *         sizeof(struct timvf_mbox_dev_info) to validate the response.
 */
32 timvf_mbox_dev_info_get(struct timvf_mbox_dev_info *info)
34 struct octeontx_mbox_hdr hdr = {0};
35 uint16_t len = sizeof(struct timvf_mbox_dev_info);
37 hdr.coproc = TIM_COPROC;
38 hdr.msg = TIM_GET_DEV_INFO;
39 hdr.vfid = 0; /* TIM DEV is always 0. TIM RING ID changes. */
/* No request payload; the response lands directly in *info. */
42 return octeontx_mbox_send(&hdr, NULL, 0, info, len);
/*
 * Adapter .get_info callback: report the ring's maximum timeout and
 * minimum resolution (both fixed at create time) plus a copy of the
 * adapter configuration.
 */
46 timvf_ring_info_get(const struct rte_event_timer_adapter *adptr,
47 struct rte_event_timer_adapter_info *adptr_info)
49 struct timvf_ring *timr = adptr->data->adapter_priv;
/* max_tout/tck_nsec were derived from rcfg in timvf_ring_create(). */
50 adptr_info->max_tmo_ns = timr->max_tout;
51 adptr_info->min_resolution_ns = timr->tck_nsec;
52 rte_memcpy(&adptr_info->conf, &adptr->data->conf,
53 sizeof(struct rte_event_timer_adapter_conf));
/*
 * Program the three ring control registers on the PF side via the
 * TIM_SET_RING_INFO mailbox message for the given ring.
 * Fails when the mailbox send errors out or the PF responds with a code
 * other than MBOX_RET_SUCCESS.
 * NOTE(review): excerpt is truncated — the hdr.vfid assignment
 * (presumably ring_id) and the return statements are not visible here.
 */
57 timvf_ring_conf_set(struct timvf_ctrl_reg *rctl, uint8_t ring_id)
59 struct octeontx_mbox_hdr hdr = {0};
60 uint16_t len = sizeof(struct timvf_ctrl_reg);
63 hdr.coproc = TIM_COPROC;
64 hdr.msg = TIM_SET_RING_INFO;
/* Request carries the register block; no response payload expected. */
67 ret = octeontx_mbox_send(&hdr, rctl, len, NULL, 0);
68 if (ret < 0 || hdr.res_code != MBOX_RET_SUCCESS)
/*
 * Fetch the cycle counter value at which the given ring was started
 * (TIM_RING_START_CYC_GET) from the PF into *now.
 * NOTE(review): excerpt is truncated — the hdr.vfid assignment is not
 * visible; presumably set to ring_id before sending.
 *
 * @return octeontx_mbox_send() result (negative on failure).
 */
74 timvf_get_start_cyc(uint64_t *now, uint8_t ring_id)
76 struct octeontx_mbox_hdr hdr = {0};
78 hdr.coproc = TIM_COPROC;
79 hdr.msg = TIM_RING_START_CYC_GET;
/* No request payload; the 8-byte response is written into *now. */
82 return octeontx_mbox_send(&hdr, NULL, 0, now, sizeof(uint64_t));
/*
 * Adapter .start callback: fetch device info from the PF, convert the
 * tick period (tck_nsec) into clock-source cycles, program the ring
 * control registers and bucket base address, then latch the ring start
 * cycle and precompute the reciprocal divisor used on the arm fast
 * path.
 * NOTE(review): this excerpt is missing lines — error returns, the
 * switch-case `break`s, part of the rctrl1 bit layout and the final
 * return/cleanup paths are not visible; do not assume adjacent visible
 * statements are contiguous in the real file.
 */
86 timvf_ring_start(const struct rte_event_timer_adapter *adptr)
90 struct timvf_ctrl_reg rctrl;
91 struct timvf_mbox_dev_info dinfo;
92 struct timvf_ring *timr = adptr->data->adapter_priv;
/* A failed or short mailbox response means the PF-side info is
 * unusable — bail out before touching hardware. */
94 ret = timvf_mbox_dev_info_get(&dinfo);
95 if (ret < 0 || ret != sizeof(struct timvf_mbox_dev_info))
98 /* Calculate the interval cycles according to clock source. */
99 switch (timr->clk_src) {
100 case TIM_CLK_SRC_SCLK:
101 interval = NSEC2CLK(timr->tck_nsec, dinfo.clk_freq);
103 case TIM_CLK_SRC_GPIO:
104 /* GPIO doesn't work on tck_nsec. */
107 case TIM_CLK_SRC_GTI:
108 interval = NSEC2CLK(timr->tck_nsec, dinfo.clk_freq);
110 case TIM_CLK_SRC_PTP:
111 interval = NSEC2CLK(timr->tck_nsec, dinfo.clk_freq);
/* default: reject anything the hardware cannot clock from. */
114 timvf_log_err("Unsupported clock source configured %d",
/* rctrl0 carries the per-bucket interval in clock-source cycles. */
120 rctrl.rctrl0 = interval;
/* rctrl1: clock-source select at bit 51 plus enable bits, documented
 * inline below. NOTE(review): trailing OR terms are not visible. */
123 rctrl.rctrl1 = (uint64_t)(timr->clk_src) << 51 |
124 1ull << 48 /* LOCK_EN (Enable hw bucket lock mechanism) */ |
125 1ull << 47 /* ENA */ |
126 1ull << 44 /* ENA_LDWB */ |
/* rctrl2: chunk size expressed in 16-byte units, at bit 40. */
129 rctrl.rctrl2 = (uint64_t)(TIM_CHUNK_SIZE / 16) << 40;
/* Point the VF ring base at the bucket array before enabling. */
131 timvf_write64((uintptr_t)timr->bkt,
132 (uint8_t *)timr->vbar0 + TIM_VRING_BASE);
133 timvf_set_chunk_refill(timr);
134 if (timvf_ring_conf_set(&rctrl, timr->tim_ring_id)) {
/* Latch the hardware's view of when this ring started; needed to
 * translate rte_rdtsc() deltas into bucket positions later. */
139 if (timvf_get_start_cyc(&timr->ring_start_cyc,
140 timr->tim_ring_id) < 0) {
/* Precompute tick length in TSC cycles and its reciprocal so the
 * fast path can divide without a hardware division. */
144 timr->tck_int = NSEC2CLK(timr->tck_nsec, rte_get_timer_hz());
145 timr->fast_div = rte_reciprocal_value_u64(timr->tck_int);
146 timvf_log_info("nb_bkts %d min_ns %"PRIu64" min_cyc %"PRIu64""
147 " maxtmo %"PRIu64"\n",
148 timr->nb_bkts, timr->tck_nsec, interval,
/* Error path: release the chunk pool allocated at create time. */
154 rte_mempool_free(timr->chunk_pool);
/*
 * Adapter .stop callback: read back the three ring control registers
 * from the VF BAR, clear the ENA bit (bit 47) in CTL1, and push the
 * modified registers to the PF so the ring stops being traversed.
 * NOTE(review): excerpt is truncated — the return statements are not
 * visible here.
 */
159 timvf_ring_stop(const struct rte_event_timer_adapter *adptr)
161 struct timvf_ring *timr = adptr->data->adapter_priv;
162 struct timvf_ctrl_reg rctrl = {0};
/* Preserve the current configuration; only the enable bit changes. */
163 rctrl.rctrl0 = timvf_read64((uint8_t *)timr->vbar0 + TIM_VRING_CTL0);
164 rctrl.rctrl1 = timvf_read64((uint8_t *)timr->vbar0 + TIM_VRING_CTL1);
165 rctrl.rctrl1 &= ~(1ull << 47); /* Disable */
166 rctrl.rctrl2 = timvf_read64((uint8_t *)timr->vbar0 + TIM_VRING_CTL2);
168 if (timvf_ring_conf_set(&rctrl, timr->tim_ring_id))
/*
 * Adapter .init callback: validate the requested configuration,
 * allocate the per-ring private data, the bucket array and the chunk
 * mempool, and reset/arm the VF's error-interrupt registers.
 * NOTE(review): this excerpt is missing lines — error returns, goto
 * cleanup labels, several rte_mempool_create_empty()/set_ops_byname()
 * arguments and the final return are not visible; visible statements
 * are not necessarily contiguous.
 */
174 timvf_ring_create(struct rte_event_timer_adapter *adptr)
179 struct rte_event_timer_adapter_conf *rcfg = &adptr->data->conf;
180 struct timvf_ring *timr;
181 struct timvf_info tinfo;
182 const char *mempool_ops;
/* Probe the TIM VFs present on this system before committing. */
184 if (timvf_info(&tinfo) < 0)
/* The adapter id doubles as the TIM ring id; reject ids beyond the
 * number of VFs discovered. */
187 if (adptr->data->id >= tinfo.total_timvfs)
190 timr = rte_zmalloc("octeontx_timvf_priv",
191 sizeof(struct timvf_ring), 0);
195 adptr->data->adapter_priv = timr;
196 /* Check config parameters. */
/* For non-CPU clock sources the tick must be set and no finer than
 * the hardware minimum interval. */
197 if ((rcfg->clk_src != RTE_EVENT_TIMER_ADAPTER_CPU_CLK) &&
198 (!rcfg->timer_tick_ns ||
199 rcfg->timer_tick_ns < TIM_MIN_INTERVAL)) {
200 timvf_log_err("Too low timer ticks");
204 timr->clk_src = (int) rcfg->clk_src;
205 timr->tim_ring_id = adptr->data->id;
206 timr->tck_nsec = rcfg->timer_tick_ns;
207 timr->max_tout = rcfg->max_tmo_ns;
/* One bucket per tick across the full timeout span. */
208 timr->nb_bkts = (timr->max_tout / timr->tck_nsec);
209 timr->vbar0 = timvf_bar(timr->tim_ring_id, 0);
210 timr->bkt_pos = (uint8_t *)timr->vbar0 + TIM_VRING_REL;
211 nb_timers = rcfg->nb_timers;
212 timr->get_target_bkt = bkt_mod;
/* Enough chunks to hold all requested timers at nb_chunk_slots
 * entries per chunk. */
214 timr->nb_chunks = nb_timers / nb_chunk_slots;
216 timr->bkt = rte_zmalloc("octeontx_timvf_bucket",
217 (timr->nb_bkts) * sizeof(struct tim_mem_bucket),
219 if (timr->bkt == NULL)
/* Per-ring chunk pool; name made unique by the ring id. */
222 snprintf(pool_name, 30, "timvf_chunk_pool%d", timr->tim_ring_id);
223 timr->chunk_pool = (void *)rte_mempool_create_empty(pool_name,
224 timr->nb_chunks, TIM_CHUNK_SIZE, 0, 0, rte_socket_id(),
227 if (!timr->chunk_pool) {
229 timvf_log_err("Unable to create chunkpool.");
/* Use the platform's preferred mempool handler for the chunks. */
233 mempool_ops = rte_mbuf_best_mempool_ops();
234 ret = rte_mempool_set_ops_byname(timr->chunk_pool,
238 timvf_log_err("Unable to set chunkpool ops.");
242 ret = rte_mempool_populate_default(timr->chunk_pool);
244 timvf_log_err("Unable to set populate chunkpool.");
/* Clear the ring base and acknowledge/unmask the no-response error
 * interrupts (W1C then W1S of the 3-bit enable mask). */
247 timvf_write64(0, (uint8_t *)timr->vbar0 + TIM_VRING_BASE);
248 timvf_write64(0, (uint8_t *)timr->vbar0 + TIM_VF_NRSPERR_INT);
249 timvf_write64(0, (uint8_t *)timr->vbar0 + TIM_VF_NRSPERR_INT_W1S);
250 timvf_write64(0x7, (uint8_t *)timr->vbar0 + TIM_VF_NRSPERR_ENA_W1C);
251 timvf_write64(0x7, (uint8_t *)timr->vbar0 + TIM_VF_NRSPERR_ENA_W1S);
/*
 * Adapter .uninit callback: release the chunk mempool and the private
 * ring structure allocated in timvf_ring_create().
 * NOTE(review): excerpt is truncated — the bucket-array free and the
 * return statement are not visible here.
 */
263 timvf_ring_free(struct rte_event_timer_adapter *adptr)
265 struct timvf_ring *timr = adptr->data->adapter_priv;
266 rte_mempool_free(timr->chunk_pool);
268 rte_free(adptr->data->adapter_priv);
/*
 * Adapter .stats_get callback. The driver keeps a single armed-timer
 * counter, which it reports as both the expiry and the enqueue count.
 * adapter_tick_count is the elapsed TSC cycles since ring start divided
 * by the per-tick cycle count via the precomputed reciprocal (the
 * fast_div argument is on a line not visible in this excerpt).
 */
273 timvf_stats_get(const struct rte_event_timer_adapter *adapter,
274 struct rte_event_timer_adapter_stats *stats)
276 struct timvf_ring *timr = adapter->data->adapter_priv;
/* Cycles elapsed since the ring was started (ring_start_cyc was
 * latched from the PF in timvf_ring_start). */
277 uint64_t bkt_cyc = rte_rdtsc() - timr->ring_start_cyc;
279 stats->evtim_exp_count = timr->tim_arm_cnt;
280 stats->ev_enq_count = timr->tim_arm_cnt;
281 stats->adapter_tick_count = rte_reciprocal_divide_u64(bkt_cyc,
/*
 * Adapter .stats_reset callback: clear the armed-timer counter that
 * backs all the statistics reported by timvf_stats_get().
 */
287 timvf_stats_reset(const struct rte_event_timer_adapter *adapter)
289 struct timvf_ring *timr = adapter->data->adapter_priv;
291 timr->tim_arm_cnt = 0;
/*
 * Timer-adapter ops table handed back to the eventdev layer. The
 * stats, arm-burst and cancel-burst members are filled in at runtime
 * by timvf_timer_adapter_caps_get() depending on the requested flags.
 */
295 static struct rte_event_timer_adapter_ops timvf_ops = {
296 .init = timvf_ring_create,
297 .uninit = timvf_ring_free,
298 .start = timvf_ring_start,
299 .stop = timvf_ring_stop,
300 .get_info = timvf_ring_info_get,
/*
 * Eventdev hook: report timer-adapter capabilities and return the
 * driver's ops table, patching in the stats handlers and the
 * appropriate arm/cancel burst implementations at runtime.
 * NOTE(review): excerpt is truncated — the branch structure selecting
 * between the variants (and presumably SP vs. MP producer modes, plus
 * the *ops assignment and return) is not fully visible here.
 */
304 timvf_timer_adapter_caps_get(const struct rte_eventdev *dev, uint64_t flags,
305 uint32_t *caps, const struct rte_event_timer_adapter_ops **ops,
306 uint8_t enable_stats)
/* Stats collection is optional; install the counting handlers only
 * when requested. */
312 timvf_ops.stats_get = timvf_stats_get;
313 timvf_ops.stats_reset = timvf_stats_reset;
/* Stats-enabled vs. plain multi-producer arm-burst variants. */
317 timvf_ops.arm_burst = timvf_timer_arm_burst_mp_stats;
319 timvf_ops.arm_burst = timvf_timer_arm_burst_mp;
321 timvf_ops.cancel_burst = timvf_timer_cancel_burst;
/* Timer events are injected directly by the TIM block, no service
 * core needed. */
322 *caps = RTE_EVENT_TIMER_ADAPTER_CAP_INTERNAL_PORT;