5ffb460a8bd2d6d5417b7a5c414f147a152e69cb
[dpdk.git] / drivers / event / octeontx / timvf_evdev.c
1 /*
2  * SPDX-License-Identifier: BSD-3-Clause
3  * Copyright(c) 2017 Cavium, Inc
4  */
5
6 #include "timvf_evdev.h"
7
8 int otx_logtype_timvf;
9
/*
 * Constructor: register the "pmd.event.octeontx.timer" log type and
 * default its level to NOTICE. Runs before main() via RTE_INIT.
 */
RTE_INIT(otx_timvf_init_log);
static void
otx_timvf_init_log(void)
{
	otx_logtype_timvf = rte_log_register("pmd.event.octeontx.timer");
	/* A negative id means registration failed; leave the level alone. */
	if (otx_logtype_timvf >= 0)
		rte_log_set_level(otx_logtype_timvf, RTE_LOG_NOTICE);
}
18
/*
 * Response payload of the TIM_GET_DEV_INFO mailbox request
 * (see timvf_mbox_dev_info_get()). Packed: layout is shared with the PF.
 */
struct __rte_packed timvf_mbox_dev_info {
	uint64_t ring_active[4];	/* presumably a bitmap of active rings - TODO confirm against PF driver */
	uint64_t clk_freq;		/* clock frequency used for NSEC2CLK conversions in timvf_ring_start() */
};
23
/* Response messages */
/*
 * Status codes the PF places in octeontx_mbox_hdr.res_code; compared
 * against MBOX_RET_SUCCESS in timvf_ring_conf_set().
 */
enum {
	MBOX_RET_SUCCESS,	/* request accepted */
	MBOX_RET_INVALID,	/* request malformed or unknown */
	MBOX_RET_INTERNAL_ERR,	/* PF-side failure */
};
30
31 static int
32 timvf_mbox_dev_info_get(struct timvf_mbox_dev_info *info)
33 {
34         struct octeontx_mbox_hdr hdr = {0};
35         uint16_t len = sizeof(struct timvf_mbox_dev_info);
36
37         hdr.coproc = TIM_COPROC;
38         hdr.msg = TIM_GET_DEV_INFO;
39         hdr.vfid = 0; /* TIM DEV is always 0. TIM RING ID changes. */
40
41         memset(info, 0, len);
42         return octeontx_mbox_send(&hdr, NULL, 0, info, len);
43 }
44
45 static void
46 timvf_ring_info_get(const struct rte_event_timer_adapter *adptr,
47                 struct rte_event_timer_adapter_info *adptr_info)
48 {
49         struct timvf_ring *timr = adptr->data->adapter_priv;
50         adptr_info->max_tmo_ns = timr->max_tout;
51         adptr_info->min_resolution_ns = timr->tck_nsec;
52         rte_memcpy(&adptr_info->conf, &adptr->data->conf,
53                         sizeof(struct rte_event_timer_adapter_conf));
54 }
55
56 static int
57 timvf_ring_conf_set(struct timvf_ctrl_reg *rctl, uint8_t ring_id)
58 {
59         struct octeontx_mbox_hdr hdr = {0};
60         uint16_t len = sizeof(struct timvf_ctrl_reg);
61         int ret;
62
63         hdr.coproc = TIM_COPROC;
64         hdr.msg = TIM_SET_RING_INFO;
65         hdr.vfid = ring_id;
66
67         ret = octeontx_mbox_send(&hdr, rctl, len, NULL, 0);
68         if (ret < 0 || hdr.res_code != MBOX_RET_SUCCESS)
69                 return -EACCES;
70         return 0;
71 }
72
73 static int
74 timvf_get_start_cyc(uint64_t *now, uint8_t ring_id)
75 {
76         struct octeontx_mbox_hdr hdr = {0};
77
78         hdr.coproc = TIM_COPROC;
79         hdr.msg = TIM_RING_START_CYC_GET;
80         hdr.vfid = ring_id;
81         *now = 0;
82         return octeontx_mbox_send(&hdr, NULL, 0, now, sizeof(uint64_t));
83 }
84
85 static int
86 timvf_ring_start(const struct rte_event_timer_adapter *adptr)
87 {
88         int ret;
89         uint64_t interval;
90         struct timvf_ctrl_reg rctrl;
91         struct timvf_mbox_dev_info dinfo;
92         struct timvf_ring *timr = adptr->data->adapter_priv;
93
94         ret = timvf_mbox_dev_info_get(&dinfo);
95         if (ret < 0 || ret != sizeof(struct timvf_mbox_dev_info))
96                 return -EINVAL;
97
98         /* Calculate the interval cycles according to clock source. */
99         switch (timr->clk_src) {
100         case TIM_CLK_SRC_SCLK:
101                 interval = NSEC2CLK(timr->tck_nsec, dinfo.clk_freq);
102                 break;
103         case TIM_CLK_SRC_GPIO:
104                 /* GPIO doesn't work on tck_nsec. */
105                 interval = 0;
106                 break;
107         case TIM_CLK_SRC_GTI:
108                 interval = NSEC2CLK(timr->tck_nsec, dinfo.clk_freq);
109                 break;
110         case TIM_CLK_SRC_PTP:
111                 interval = NSEC2CLK(timr->tck_nsec, dinfo.clk_freq);
112                 break;
113         default:
114                 timvf_log_err("Unsupported clock source configured %d",
115                                 timr->clk_src);
116                 return -EINVAL;
117         }
118
119         /*CTRL0 register.*/
120         rctrl.rctrl0 = interval;
121
122         /*CTRL1 register.*/
123         rctrl.rctrl1 =  (uint64_t)(timr->clk_src) << 51 |
124                 1ull << 48 /* LOCK_EN (Enable hw bucket lock mechanism) */ |
125                 1ull << 47 /* ENA */ |
126                 1ull << 44 /* ENA_LDWB */ |
127                 (timr->nb_bkts - 1);
128
129         rctrl.rctrl2 = (uint64_t)(TIM_CHUNK_SIZE / 16) << 40;
130
131         timvf_write64((uintptr_t)timr->bkt,
132                         (uint8_t *)timr->vbar0 + TIM_VRING_BASE);
133         timvf_set_chunk_refill(timr);
134         if (timvf_ring_conf_set(&rctrl, timr->tim_ring_id)) {
135                 ret = -EACCES;
136                 goto error;
137         }
138
139         if (timvf_get_start_cyc(&timr->ring_start_cyc,
140                                 timr->tim_ring_id) < 0) {
141                 ret = -EACCES;
142                 goto error;
143         }
144         timr->tck_int = NSEC2CLK(timr->tck_nsec, rte_get_timer_hz());
145         timr->fast_div = rte_reciprocal_value_u64(timr->tck_int);
146         timvf_log_info("nb_bkts %d min_ns %"PRIu64" min_cyc %"PRIu64""
147                         " maxtmo %"PRIu64"\n",
148                         timr->nb_bkts, timr->tck_nsec, interval,
149                         timr->max_tout);
150
151         return 0;
152 error:
153         rte_free(timr->bkt);
154         rte_mempool_free(timr->chunk_pool);
155         return ret;
156 }
157
158 static int
159 timvf_ring_stop(const struct rte_event_timer_adapter *adptr)
160 {
161         struct timvf_ring *timr = adptr->data->adapter_priv;
162         struct timvf_ctrl_reg rctrl = {0};
163         rctrl.rctrl0 = timvf_read64((uint8_t *)timr->vbar0 + TIM_VRING_CTL0);
164         rctrl.rctrl1 = timvf_read64((uint8_t *)timr->vbar0 + TIM_VRING_CTL1);
165         rctrl.rctrl1 &= ~(1ull << 47); /* Disable */
166         rctrl.rctrl2 = timvf_read64((uint8_t *)timr->vbar0 + TIM_VRING_CTL2);
167
168         if (timvf_ring_conf_set(&rctrl, timr->tim_ring_id))
169                 return -EACCES;
170         return 0;
171 }
172
173 static int
174 timvf_ring_create(struct rte_event_timer_adapter *adptr)
175 {
176         char pool_name[25];
177         int ret;
178         uint64_t nb_timers;
179         struct rte_event_timer_adapter_conf *rcfg = &adptr->data->conf;
180         struct timvf_ring *timr;
181         struct timvf_info tinfo;
182         const char *mempool_ops;
183
184         if (timvf_info(&tinfo) < 0)
185                 return -ENODEV;
186
187         if (adptr->data->id >= tinfo.total_timvfs)
188                 return -ENODEV;
189
190         timr = rte_zmalloc("octeontx_timvf_priv",
191                         sizeof(struct timvf_ring), 0);
192         if (timr == NULL)
193                 return -ENOMEM;
194
195         adptr->data->adapter_priv = timr;
196         /* Check config parameters. */
197         if ((rcfg->clk_src != RTE_EVENT_TIMER_ADAPTER_CPU_CLK) &&
198                         (!rcfg->timer_tick_ns ||
199                          rcfg->timer_tick_ns < TIM_MIN_INTERVAL)) {
200                 timvf_log_err("Too low timer ticks");
201                 goto cfg_err;
202         }
203
204         timr->clk_src = (int) rcfg->clk_src;
205         timr->tim_ring_id = adptr->data->id;
206         timr->tck_nsec = rcfg->timer_tick_ns;
207         timr->max_tout = rcfg->max_tmo_ns;
208         timr->nb_bkts = (timr->max_tout / timr->tck_nsec);
209         timr->vbar0 = timvf_bar(timr->tim_ring_id, 0);
210         timr->bkt_pos = (uint8_t *)timr->vbar0 + TIM_VRING_REL;
211         nb_timers = rcfg->nb_timers;
212         timr->get_target_bkt = bkt_mod;
213
214         timr->nb_chunks = nb_timers / nb_chunk_slots;
215
216         timr->bkt = rte_zmalloc("octeontx_timvf_bucket",
217                         (timr->nb_bkts) * sizeof(struct tim_mem_bucket),
218                         0);
219         if (timr->bkt == NULL)
220                 goto mem_err;
221
222         snprintf(pool_name, 30, "timvf_chunk_pool%d", timr->tim_ring_id);
223         timr->chunk_pool = (void *)rte_mempool_create_empty(pool_name,
224                         timr->nb_chunks, TIM_CHUNK_SIZE, 0, 0, rte_socket_id(),
225                         0);
226
227         if (!timr->chunk_pool) {
228                 rte_free(timr->bkt);
229                 timvf_log_err("Unable to create chunkpool.");
230                 return -ENOMEM;
231         }
232
233         mempool_ops = rte_mbuf_best_mempool_ops();
234         ret = rte_mempool_set_ops_byname(timr->chunk_pool,
235                         mempool_ops, NULL);
236
237         if (ret != 0) {
238                 timvf_log_err("Unable to set chunkpool ops.");
239                 goto mem_err;
240         }
241
242         ret = rte_mempool_populate_default(timr->chunk_pool);
243         if (ret < 0) {
244                 timvf_log_err("Unable to set populate chunkpool.");
245                 goto mem_err;
246         }
247         timvf_write64(0, (uint8_t *)timr->vbar0 + TIM_VRING_BASE);
248         timvf_write64(0, (uint8_t *)timr->vbar0 + TIM_VF_NRSPERR_INT);
249         timvf_write64(0, (uint8_t *)timr->vbar0 + TIM_VF_NRSPERR_INT_W1S);
250         timvf_write64(0x7, (uint8_t *)timr->vbar0 + TIM_VF_NRSPERR_ENA_W1C);
251         timvf_write64(0x7, (uint8_t *)timr->vbar0 + TIM_VF_NRSPERR_ENA_W1S);
252
253         return 0;
254 mem_err:
255         rte_free(timr);
256         return -ENOMEM;
257 cfg_err:
258         rte_free(timr);
259         return -EINVAL;
260 }
261
262 static int
263 timvf_ring_free(struct rte_event_timer_adapter *adptr)
264 {
265         struct timvf_ring *timr = adptr->data->adapter_priv;
266         rte_mempool_free(timr->chunk_pool);
267         rte_free(timr->bkt);
268         rte_free(adptr->data->adapter_priv);
269         return 0;
270 }
271
272 static int
273 timvf_stats_get(const struct rte_event_timer_adapter *adapter,
274                 struct rte_event_timer_adapter_stats *stats)
275 {
276         struct timvf_ring *timr = adapter->data->adapter_priv;
277         uint64_t bkt_cyc = rte_rdtsc() - timr->ring_start_cyc;
278
279         stats->evtim_exp_count = timr->tim_arm_cnt;
280         stats->ev_enq_count = timr->tim_arm_cnt;
281         stats->adapter_tick_count = rte_reciprocal_divide_u64(bkt_cyc,
282                                 &timr->fast_div);
283         return 0;
284 }
285
/*
 * stats_reset callback: zero the armed-timer counter. The tick count
 * is derived from the TSC in timvf_stats_get() and is not reset here.
 */
static int
timvf_stats_reset(const struct rte_event_timer_adapter *adapter)
{
	struct timvf_ring *timr = adapter->data->adapter_priv;

	timr->tim_arm_cnt = 0;
	return 0;
}
294
/*
 * Ops table handed to the eventdev timer adapter layer. Deliberately
 * not const: timvf_timer_adapter_caps_get() patches in the arm/cancel
 * burst handlers and the optional stats callbacks at caps-get time.
 */
static struct rte_event_timer_adapter_ops timvf_ops = {
	.init		= timvf_ring_create,
	.uninit		= timvf_ring_free,
	.start		= timvf_ring_start,
	.stop		= timvf_ring_stop,
	.get_info	= timvf_ring_info_get,
};
302
303 int
304 timvf_timer_adapter_caps_get(const struct rte_eventdev *dev, uint64_t flags,
305                 uint32_t *caps, const struct rte_event_timer_adapter_ops **ops,
306                 uint8_t enable_stats)
307 {
308         RTE_SET_USED(dev);
309         RTE_SET_USED(flags);
310
311         if (enable_stats) {
312                 timvf_ops.stats_get   = timvf_stats_get;
313                 timvf_ops.stats_reset = timvf_stats_reset;
314         }
315
316         if (enable_stats)
317                 timvf_ops.arm_burst = timvf_timer_arm_burst_mp_stats;
318         else
319                 timvf_ops.arm_burst = timvf_timer_arm_burst_mp;
320
321         timvf_ops.cancel_burst = timvf_timer_cancel_burst;
322         *caps = RTE_EVENT_TIMER_ADAPTER_CAP_INTERNAL_PORT;
323         *ops = &timvf_ops;
324         return 0;
325 }