event/cnxk: add external clock support for timer
drivers/event/cnxk/cnxk_tim_evdev.c (dpdk.git)
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(C) 2021 Marvell.
 */

#include <math.h>

#include "cnxk_eventdev.h"
#include "cnxk_tim_evdev.h"

static struct event_timer_adapter_ops cnxk_tim_ops;

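/*
 * Create the chunk mempool that backs a timer ring. When NPA is enabled the
 * pool is switched to the platform (NPA-backed) mempool ops and its aura is
 * recorded for the ring (ena_dfb = 0); with NPA disabled a regular software
 * mempool is used instead (ena_dfb = 1).
 */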
static int
cnxk_tim_chnk_pool_create(struct cnxk_tim_ring *tim_ring,
                          struct rte_event_timer_adapter_conf *rcfg)
{
        unsigned int cache_sz = (tim_ring->nb_chunks / 1.5);
        unsigned int mp_flags = 0;
        char pool_name[25];
        int rc;

        cache_sz /= rte_lcore_count();
        /* Create chunk pool. */
        if (rcfg->flags & RTE_EVENT_TIMER_ADAPTER_F_SP_PUT) {
                mp_flags = RTE_MEMPOOL_F_SP_PUT | RTE_MEMPOOL_F_SC_GET;
                plt_tim_dbg("Using single producer mode");
                tim_ring->prod_type_sp = true;
        }

        snprintf(pool_name, sizeof(pool_name), "cnxk_tim_chunk_pool%d",
                 tim_ring->ring_id);

        if (cache_sz > CNXK_TIM_MAX_POOL_CACHE_SZ)
                cache_sz = CNXK_TIM_MAX_POOL_CACHE_SZ;
        cache_sz = cache_sz != 0 ? cache_sz : 2;
        tim_ring->nb_chunks += (cache_sz * rte_lcore_count());
        if (!tim_ring->disable_npa) {
                tim_ring->chunk_pool = rte_mempool_create_empty(
                        pool_name, tim_ring->nb_chunks, tim_ring->chunk_sz,
                        cache_sz, 0, rte_socket_id(), mp_flags);

                if (tim_ring->chunk_pool == NULL) {
                        plt_err("Unable to create chunkpool.");
                        return -ENOMEM;
                }

                rc = rte_mempool_set_ops_byname(tim_ring->chunk_pool,
                                                rte_mbuf_platform_mempool_ops(),
                                                NULL);
                if (rc < 0) {
                        plt_err("Unable to set chunkpool ops");
                        goto free;
                }

                rc = rte_mempool_populate_default(tim_ring->chunk_pool);
                if (rc < 0) {
                        plt_err("Unable to populate chunkpool.");
                        goto free;
                }
                tim_ring->aura = roc_npa_aura_handle_to_aura(
                        tim_ring->chunk_pool->pool_id);
                tim_ring->ena_dfb = 0;
        } else {
                tim_ring->chunk_pool = rte_mempool_create(
                        pool_name, tim_ring->nb_chunks, tim_ring->chunk_sz,
                        cache_sz, 0, NULL, NULL, NULL, NULL, rte_socket_id(),
                        mp_flags);
                if (tim_ring->chunk_pool == NULL) {
                        plt_err("Unable to create chunkpool.");
                        return -ENOMEM;
                }
                tim_ring->ena_dfb = 1;
        }

        return 0;

free:
        rte_mempool_free(tim_ring->chunk_pool);
        return rc;
}

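/*
 * Select the arm/cancel fast-path handlers generated from
 * TIM_ARM_FASTPATH_MODES. The tables are indexed as
 * [stats enabled][DFB vs FB][single vs multi producer], so each ring gets a
 * burst function specialised for how its chunk pool was created.
 */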
static void
cnxk_tim_set_fp_ops(struct cnxk_tim_ring *tim_ring)
{
        uint8_t prod_flag = !tim_ring->prod_type_sp;

        /* [STATS] [DFB/FB] [SP][MP] */
        const rte_event_timer_arm_burst_t arm_burst[2][2][2] = {
#define FP(_name, _f3, _f2, _f1, flags)                                        \
        [_f3][_f2][_f1] = cnxk_tim_arm_burst_##_name,
                TIM_ARM_FASTPATH_MODES
#undef FP
        };

        const rte_event_timer_arm_tmo_tick_burst_t arm_tmo_burst[2][2] = {
#define FP(_name, _f2, _f1, flags)                                             \
        [_f2][_f1] = cnxk_tim_arm_tmo_tick_burst_##_name,
                TIM_ARM_TMO_FASTPATH_MODES
#undef FP
        };

        cnxk_tim_ops.arm_burst =
                arm_burst[tim_ring->enable_stats][tim_ring->ena_dfb][prod_flag];
        cnxk_tim_ops.arm_tmo_tick_burst =
                arm_tmo_burst[tim_ring->enable_stats][tim_ring->ena_dfb];
        cnxk_tim_ops.cancel_burst = cnxk_tim_timer_cancel_burst;
}

static void
cnxk_tim_ring_info_get(const struct rte_event_timer_adapter *adptr,
                       struct rte_event_timer_adapter_info *adptr_info)
{
        struct cnxk_tim_ring *tim_ring = adptr->data->adapter_priv;

        adptr_info->max_tmo_ns = tim_ring->max_tout;
        adptr_info->min_resolution_ns = tim_ring->tck_nsec;
        rte_memcpy(&adptr_info->conf, &adptr->data->conf,
                   sizeof(struct rte_event_timer_adapter_conf));
}

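/*
 * Sort ref_arr in ascending order, restarting the scan after every swap, and
 * swap the companion arrays in lockstep so arr1/arr2/arr3 stay aligned with
 * their reference entry.
 */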
static inline void
sort_multi_array(double ref_arr[], uint64_t arr1[], uint64_t arr2[],
                 uint64_t arr3[], uint8_t sz)
{
        int x;

        for (x = 0; x < sz - 1; x++) {
                if (ref_arr[x] > ref_arr[x + 1]) {
                        PLT_SWAP(ref_arr[x], ref_arr[x + 1]);
                        PLT_SWAP(arr1[x], arr1[x + 1]);
                        PLT_SWAP(arr2[x], arr2[x + 1]);
                        PLT_SWAP(arr3[x], arr3[x + 1]);
                        x = -1;
                }
        }
}

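/*
 * Starting from req_tck, walk tick counts forward (mov_fwd) or backward and
 * record the first 'sz' candidates whose rounded nanosecond value is an
 * integer multiple of the clock period. For each candidate store its tick
 * count, nanosecond value, rounding error (diff) and distance from req_tck
 * (dst).
 */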
static inline void
populate_sample(uint64_t tck[], uint64_t ns[], double diff[], uint64_t dst[],
                uint64_t req_tck, uint64_t clk_freq, double tck_ns, uint8_t sz,
                bool mov_fwd)
{
        int i;

        for (i = 0; i < sz; i++) {
                tck[i] = i ? tck[i - 1] : req_tck;
                do {
                        mov_fwd ? tck[i]++ : tck[i]--;
                        ns[i] = round((double)tck[i] * tck_ns);
                        if (round((double)tck[i] * tck_ns) >
                            ((double)tck[i] * tck_ns))
                                continue;
                } while (ns[i] % (uint64_t)cnxk_tim_ns_per_tck(clk_freq));
                diff[i] = PLT_MAX((double)ns[i], (double)tck[i] * tck_ns) -
                          PLT_MIN((double)ns[i], (double)tck[i] * tck_ns);
                dst[i] = mov_fwd ? tck[i] - req_tck : req_tck - tck[i];
        }
}

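/*
 * Pick the achievable tick (req_tck/req_ns) closest to the requested
 * resolution: sample candidates above and below the request, sort them by
 * rounding error, and prefer the candidate below the request when it is
 * closer, stays above the hardware minimum interval and keeps the bucket
 * count within TIM limits; otherwise fall back to the candidate above.
 */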
static void
tim_adjust_resolution(uint64_t *req_ns, uint64_t *req_tck, double tck_ns,
                      uint64_t clk_freq, uint64_t max_tmo, uint64_t m_tck)
{
#define MAX_SAMPLES 5
        double rmax_diff[MAX_SAMPLES], rmin_diff[MAX_SAMPLES];
        uint64_t min_tck[MAX_SAMPLES], max_tck[MAX_SAMPLES];
        uint64_t min_dst[MAX_SAMPLES], max_dst[MAX_SAMPLES];
        uint64_t min_ns[MAX_SAMPLES], max_ns[MAX_SAMPLES];
        int i;

        populate_sample(max_tck, max_ns, rmax_diff, max_dst, *req_tck, clk_freq,
                        tck_ns, MAX_SAMPLES, true);
        sort_multi_array(rmax_diff, max_dst, max_tck, max_ns, MAX_SAMPLES);

        populate_sample(min_tck, min_ns, rmin_diff, min_dst, *req_tck, clk_freq,
                        tck_ns, MAX_SAMPLES, false);
        sort_multi_array(rmin_diff, min_dst, min_tck, min_ns, MAX_SAMPLES);

        for (i = 0; i < MAX_SAMPLES; i++) {
                if (min_dst[i] < max_dst[i] && min_tck[i] > m_tck &&
                    (max_tmo / min_ns[i]) <=
                            (TIM_MAX_BUCKET_SIZE - TIM_MIN_BUCKET_SIZE)) {
                        *req_tck = min_tck[i];
                        *req_ns = min_ns[i];
                        break;
                } else if ((max_tmo / max_ns[i]) <
                           (TIM_MAX_BUCKET_SIZE - TIM_MIN_BUCKET_SIZE)) {
                        *req_tck = max_tck[i];
                        *req_ns = max_ns[i];
                        break;
                }
        }
}

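/*
 * Adapter init callback (cnxk_tim_ops.init): allocate a TIM LF, derive the
 * tick resolution and bucket count from the adapter config and selected clock
 * source, create the bucket array and chunk pool, then program the ring into
 * hardware via roc_tim_lf_config().
 */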
static int
cnxk_tim_ring_create(struct rte_event_timer_adapter *adptr)
{
        struct rte_event_timer_adapter_conf *rcfg = &adptr->data->conf;
        struct cnxk_tim_evdev *dev = cnxk_tim_priv_get();
        uint64_t min_intvl_ns, min_intvl_cyc;
        struct cnxk_tim_ring *tim_ring;
        enum roc_tim_clk_src clk_src;
        uint64_t clk_freq = 0;
        int i, rc;

        if (dev == NULL)
                return -ENODEV;

        if (adptr->data->id >= dev->nb_rings)
                return -ENODEV;

        tim_ring = rte_zmalloc("cnxk_tim_prv", sizeof(struct cnxk_tim_ring), 0);
        if (tim_ring == NULL)
                return -ENOMEM;

        rc = roc_tim_lf_alloc(&dev->tim, adptr->data->id, NULL);
        if (rc < 0) {
                plt_err("Failed to create timer ring");
                goto tim_ring_free;
        }

        clk_src = cnxk_tim_convert_clk_src(rcfg->clk_src);
        if (clk_src == ROC_TIM_CLK_SRC_INVALID) {
                plt_err("Invalid clock source");
                rc = -EINVAL;
                goto tim_hw_free;
        }

        rc = cnxk_tim_get_clk_freq(dev, clk_src, &clk_freq);
        if (rc < 0) {
                plt_err("Failed to get clock frequency");
                goto tim_hw_free;
        }

        rc = roc_tim_lf_interval(&dev->tim, clk_src, clk_freq, &min_intvl_ns,
                                 &min_intvl_cyc);
        if (rc < 0) {
                plt_err("Failed to get min interval details");
                goto tim_hw_free;
        }

        if (rcfg->timer_tick_ns < min_intvl_ns) {
                if (rcfg->flags & RTE_EVENT_TIMER_ADAPTER_F_ADJUST_RES) {
                        rcfg->timer_tick_ns = min_intvl_ns;
                } else {
                        rc = -ERANGE;
                        goto tim_hw_free;
                }
        }

        if (rcfg->timer_tick_ns > rcfg->max_tmo_ns) {
                plt_err("Max timeout is less than timer tick");
                rc = -ERANGE;
                goto tim_hw_free;
        }

        tim_ring->tck_nsec =
                round(RTE_ALIGN_MUL_NEAR((long double)rcfg->timer_tick_ns,
                                         cnxk_tim_ns_per_tck(clk_freq)));
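        /*
         * Clock sources that do not run at an exact power-of-10 frequency
         * (typically the external GPIO/PTP/SYNCE clocks) may not map the
         * requested tick onto a whole number of clock cycles. Search for the
         * nearest achievable resolution; if it deviates from the request and
         * F_ADJUST_RES is not set, report -ERANGE.
         */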
        if (log10(clk_freq) - floor(log10(clk_freq)) != 0.0) {
                uint64_t req_ns, req_tck;
                double tck_ns;

                req_ns = tim_ring->tck_nsec;
                tck_ns = NSECPERSEC / clk_freq;
                req_tck = round(rcfg->timer_tick_ns / tck_ns);
                tim_adjust_resolution(&req_ns, &req_tck, tck_ns, clk_freq,
                                      rcfg->max_tmo_ns, min_intvl_cyc);
                if ((tim_ring->tck_nsec != req_ns) &&
                    !(rcfg->flags & RTE_EVENT_TIMER_ADAPTER_F_ADJUST_RES)) {
                        rc = -ERANGE;
                        goto tim_hw_free;
                }
                tim_ring->tck_nsec = ceil(req_tck * tck_ns);
        }

        tim_ring->tck_int = round((long double)tim_ring->tck_nsec /
                                  cnxk_tim_ns_per_tck(clk_freq));
        tim_ring->tck_nsec =
                ceil(tim_ring->tck_int * cnxk_tim_ns_per_tck(clk_freq));

        tim_ring->ring_id = adptr->data->id;
        tim_ring->clk_src = clk_src;
        tim_ring->max_tout = rcfg->max_tmo_ns;
        tim_ring->nb_bkts = (tim_ring->max_tout / tim_ring->tck_nsec);
        tim_ring->nb_timers = rcfg->nb_timers;
        tim_ring->chunk_sz = dev->chunk_sz;
        tim_ring->disable_npa = dev->disable_npa;
        tim_ring->enable_stats = dev->enable_stats;

        for (i = 0; i < dev->ring_ctl_cnt; i++) {
                struct cnxk_tim_ctl *ring_ctl = &dev->ring_ctl_data[i];

                if (ring_ctl->ring == tim_ring->ring_id) {
                        tim_ring->chunk_sz =
                                ring_ctl->chunk_slots ?
                                        ((uint32_t)(ring_ctl->chunk_slots + 1) *
                                         CNXK_TIM_CHUNK_ALIGNMENT) :
                                              tim_ring->chunk_sz;
                        tim_ring->enable_stats = ring_ctl->enable_stats;
                        tim_ring->disable_npa = ring_ctl->disable_npa;
                }
        }

        if (tim_ring->disable_npa) {
                tim_ring->nb_chunks =
                        tim_ring->nb_timers /
                        CNXK_TIM_NB_CHUNK_SLOTS(tim_ring->chunk_sz);
                tim_ring->nb_chunks = tim_ring->nb_chunks * tim_ring->nb_bkts;
        } else {
                tim_ring->nb_chunks = tim_ring->nb_timers;
        }

        tim_ring->nb_chunk_slots = CNXK_TIM_NB_CHUNK_SLOTS(tim_ring->chunk_sz);
        /* Create buckets. */
        tim_ring->bkt =
                rte_zmalloc("cnxk_tim_bucket",
                            (tim_ring->nb_bkts) * sizeof(struct cnxk_tim_bkt),
                            RTE_CACHE_LINE_SIZE);
        if (tim_ring->bkt == NULL) {
                rc = -ENOMEM;
                goto tim_hw_free;
        }

        rc = cnxk_tim_chnk_pool_create(tim_ring, rcfg);
        if (rc < 0)
                goto tim_bkt_free;

        rc = roc_tim_lf_config(&dev->tim, tim_ring->ring_id, clk_src, 0, 0,
                               tim_ring->nb_bkts, tim_ring->chunk_sz,
                               tim_ring->tck_int, tim_ring->tck_nsec, clk_freq);
        if (rc < 0) {
                plt_err("Failed to configure timer ring");
                goto tim_chnk_free;
        }

        tim_ring->base = roc_tim_lf_base_get(&dev->tim, tim_ring->ring_id);
        plt_write64((uint64_t)tim_ring->bkt, tim_ring->base + TIM_LF_RING_BASE);
        plt_write64(tim_ring->aura, tim_ring->base + TIM_LF_RING_AURA);

        /* Set fastpath ops. */
        cnxk_tim_set_fp_ops(tim_ring);

        /* Update SSO xae count. */
        cnxk_sso_updt_xae_cnt(cnxk_sso_pmd_priv(dev->event_dev), tim_ring,
                              RTE_EVENT_TYPE_TIMER);
        cnxk_sso_xae_reconfigure(dev->event_dev);

        plt_tim_dbg(
                "Total memory used %" PRIu64 "MB\n",
                (uint64_t)(((tim_ring->nb_chunks * tim_ring->chunk_sz) +
                            (tim_ring->nb_bkts * sizeof(struct cnxk_tim_bkt))) /
                           BIT_ULL(20)));

        adptr->data->adapter_priv = tim_ring;
        return rc;

tim_chnk_free:
        rte_mempool_free(tim_ring->chunk_pool);
tim_bkt_free:
        rte_free(tim_ring->bkt);
tim_hw_free:
        roc_tim_lf_free(&dev->tim, tim_ring->ring_id);
tim_ring_free:
        rte_free(tim_ring);
        return rc;
}

static int
cnxk_tim_ring_free(struct rte_event_timer_adapter *adptr)
{
        struct cnxk_tim_ring *tim_ring = adptr->data->adapter_priv;
        struct cnxk_tim_evdev *dev = cnxk_tim_priv_get();

        if (dev == NULL)
                return -ENODEV;

        roc_tim_lf_free(&dev->tim, tim_ring->ring_id);
        rte_free(tim_ring->bkt);
        rte_mempool_free(tim_ring->chunk_pool);
        rte_free(tim_ring);

        return 0;
}

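/*
 * Align the software notion of the ring start timestamp with the bucket the
 * hardware is currently servicing: repeatedly read the current bucket from
 * TIM_LF_RING_REL, recompute ring_start_cyc from it, and count how often the
 * software-predicted bucket disagrees with the hardware bucket.
 */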
static void
cnxk_tim_calibrate_start_tsc(struct cnxk_tim_ring *tim_ring)
{
#define CNXK_TIM_CALIB_ITER 1E6
        uint32_t real_bkt, bucket;
        int icount, ecount = 0;
        uint64_t bkt_cyc;

        for (icount = 0; icount < CNXK_TIM_CALIB_ITER; icount++) {
                real_bkt = plt_read64(tim_ring->base + TIM_LF_RING_REL) >> 44;
                bkt_cyc = cnxk_tim_cntvct();
                bucket = (bkt_cyc - tim_ring->ring_start_cyc) /
                         tim_ring->tck_int;
                bucket = bucket % (tim_ring->nb_bkts);
                tim_ring->ring_start_cyc =
                        bkt_cyc - (real_bkt * tim_ring->tck_int);
                if (bucket != real_bkt)
                        ecount++;
        }
        tim_ring->last_updt_cyc = bkt_cyc;
        plt_tim_dbg("Bucket mispredict %3.2f distance %d\n",
                    100 - (((double)(icount - ecount) / (double)icount) * 100),
                    bucket - real_bkt);
}

static int
cnxk_tim_ring_start(const struct rte_event_timer_adapter *adptr)
{
        struct cnxk_tim_ring *tim_ring = adptr->data->adapter_priv;
        struct cnxk_tim_evdev *dev = cnxk_tim_priv_get();
        int rc;

        if (dev == NULL)
                return -ENODEV;

        rc = roc_tim_lf_enable(&dev->tim, tim_ring->ring_id,
                               &tim_ring->ring_start_cyc, NULL);
        if (rc < 0)
                return rc;

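        /* Precompute reciprocals so later divisions by tck_int and nb_bkts
         * can use rte_reciprocal_divide_u64() instead of a 64-bit divide.
         */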
        tim_ring->tot_int = tim_ring->tck_int * tim_ring->nb_bkts;
        tim_ring->fast_div = rte_reciprocal_value_u64(tim_ring->tck_int);
        tim_ring->fast_bkt = rte_reciprocal_value_u64(tim_ring->nb_bkts);

        cnxk_tim_calibrate_start_tsc(tim_ring);

        return rc;
}

static int
cnxk_tim_ring_stop(const struct rte_event_timer_adapter *adptr)
{
        struct cnxk_tim_ring *tim_ring = adptr->data->adapter_priv;
        struct cnxk_tim_evdev *dev = cnxk_tim_priv_get();
        int rc;

        if (dev == NULL)
                return -ENODEV;

        rc = roc_tim_lf_disable(&dev->tim, tim_ring->ring_id);
        if (rc < 0)
                plt_err("Failed to disable timer ring");

        return rc;
}

static int
cnxk_tim_stats_get(const struct rte_event_timer_adapter *adapter,
                   struct rte_event_timer_adapter_stats *stats)
{
        struct cnxk_tim_ring *tim_ring = adapter->data->adapter_priv;
        uint64_t bkt_cyc = cnxk_tim_cntvct() - tim_ring->ring_start_cyc;

        stats->evtim_exp_count =
                __atomic_load_n(&tim_ring->arm_cnt, __ATOMIC_RELAXED);
        stats->ev_enq_count = stats->evtim_exp_count;
        stats->adapter_tick_count =
                rte_reciprocal_divide_u64(bkt_cyc, &tim_ring->fast_div);
        return 0;
}

static int
cnxk_tim_stats_reset(const struct rte_event_timer_adapter *adapter)
{
        struct cnxk_tim_ring *tim_ring = adapter->data->adapter_priv;

        __atomic_store_n(&tim_ring->arm_cnt, 0, __ATOMIC_RELAXED);
        return 0;
}

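/*
 * Entry point used by the eventdev layer: advertise the internal-port
 * capability and hand back the adapter ops table, wiring in the stats ops
 * only when stats are enabled via devargs.
 */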
int
cnxk_tim_caps_get(const struct rte_eventdev *evdev, uint64_t flags,
                  uint32_t *caps, const struct event_timer_adapter_ops **ops)
{
        struct cnxk_tim_evdev *dev = cnxk_tim_priv_get();

        RTE_SET_USED(flags);

        if (dev == NULL)
                return -ENODEV;

        cnxk_tim_ops.init = cnxk_tim_ring_create;
        cnxk_tim_ops.uninit = cnxk_tim_ring_free;
        cnxk_tim_ops.start = cnxk_tim_ring_start;
        cnxk_tim_ops.stop = cnxk_tim_ring_stop;
        cnxk_tim_ops.get_info = cnxk_tim_ring_info_get;

        if (dev->enable_stats) {
                cnxk_tim_ops.stats_get = cnxk_tim_stats_get;
                cnxk_tim_ops.stats_reset = cnxk_tim_stats_reset;
        }

        /* Store evdev pointer for later use. */
        dev->event_dev = (struct rte_eventdev *)(uintptr_t)evdev;
        *caps = RTE_EVENT_TIMER_ADAPTER_CAP_INTERNAL_PORT;
        *ops = &cnxk_tim_ops;

        return 0;
}

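/*
 * Parse one "[ring-chunk_sz-disable_npa-enable_stats]" entry: the '-'
 * separated tokens are written into the 16-bit fields of struct cnxk_tim_ctl
 * in declaration order, and the entry is appended to dev->ring_ctl_data.
 */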
static void
cnxk_tim_parse_ring_param(char *value, void *opaque)
{
        struct cnxk_tim_evdev *dev = opaque;
        struct cnxk_tim_ctl ring_ctl = {0};
        char *tok = strtok(value, "-");
        struct cnxk_tim_ctl *old_ptr;
        uint16_t *val;

        val = (uint16_t *)&ring_ctl;

        if (!strlen(value))
                return;

        while (tok != NULL && val <= &ring_ctl.enable_stats) {
                *val = atoi(tok);
                tok = strtok(NULL, "-");
                val++;
        }

        if (val != (&ring_ctl.enable_stats + 1) || tok != NULL) {
                plt_err("Invalid ring param expected [ring-chunk_sz-disable_npa-enable_stats]");
                return;
        }

        dev->ring_ctl_cnt++;
        old_ptr = dev->ring_ctl_data;
        dev->ring_ctl_data =
                rte_realloc(dev->ring_ctl_data,
                            sizeof(struct cnxk_tim_ctl) * dev->ring_ctl_cnt, 0);
        if (dev->ring_ctl_data == NULL) {
                dev->ring_ctl_data = old_ptr;
                dev->ring_ctl_cnt--;
                return;
        }

        dev->ring_ctl_data[dev->ring_ctl_cnt - 1] = ring_ctl;
}

static void
cnxk_tim_parse_ring_ctl_list(const char *value, void *opaque)
{
        char *s = strdup(value);
        char *start = NULL;
        char *end = NULL;
        char *f = s;

        if (s == NULL || !strlen(s)) {
                free(s);
                return;
        }

        while (*s) {
                if (*s == '[')
                        start = s;
                else if (*s == ']')
                        end = s;

                if (start && start < end) {
                        *end = 0;
                        cnxk_tim_parse_ring_param(start + 1, opaque);
                        start = end;
                        s = end;
                }
                s++;
        }

        free(f);
}

static int
cnxk_tim_parse_kvargs_dict(const char *key, const char *value, void *opaque)
{
        RTE_SET_USED(key);

        /* Dict format: [ring-chunk_sz-disable_npa-enable_stats]. '-' is used
         * as the separator since ',' isn't allowed in devargs. A value of 0
         * selects the default.
         */
        cnxk_tim_parse_ring_ctl_list(value, opaque);

        return 0;
}

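/*
 * Parse the external clock frequency list (GPIO-PTP-SYNCE order): each '-'
 * separated value overrides dev->ext_clk_freq[] for the corresponding clock
 * source, stopping at ROC_TIM_CLK_SRC_INVALID.
 */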
static void
cnxk_tim_parse_clk_list(const char *value, void *opaque)
{
        enum roc_tim_clk_src src[] = {ROC_TIM_CLK_SRC_GPIO, ROC_TIM_CLK_SRC_PTP,
                                      ROC_TIM_CLK_SRC_SYNCE,
                                      ROC_TIM_CLK_SRC_INVALID};
        struct cnxk_tim_evdev *dev = opaque;
        char *str = strdup(value);
        char *tok;
        int i = 0;

        if (str == NULL || !strlen(str)) {
                free(str);
                return;
        }

        tok = strtok(str, "-");
        while (tok != NULL && src[i] != ROC_TIM_CLK_SRC_INVALID) {
                dev->ext_clk_freq[src[i]] = strtoull(tok, NULL, 10);
                tok = strtok(NULL, "-");
                i++;
        }

        free(str);
}

static int
cnxk_tim_parse_kvargs_dsv(const char *key, const char *value, void *opaque)
{
        RTE_SET_USED(key);

        /* DSV format: GPIO-PTP-SYNCE-BTS. '-' is used as the separator since
         * ',' isn't allowed in devargs. A value of 0 selects the default.
         */
        cnxk_tim_parse_clk_list(value, opaque);

        return 0;
}

static void
cnxk_tim_parse_devargs(struct rte_devargs *devargs, struct cnxk_tim_evdev *dev)
{
        struct rte_kvargs *kvlist;

        if (devargs == NULL)
                return;

        kvlist = rte_kvargs_parse(devargs->args, NULL);
        if (kvlist == NULL)
                return;

        rte_kvargs_process(kvlist, CNXK_TIM_DISABLE_NPA, &parse_kvargs_flag,
                           &dev->disable_npa);
        rte_kvargs_process(kvlist, CNXK_TIM_CHNK_SLOTS, &parse_kvargs_value,
                           &dev->chunk_slots);
        rte_kvargs_process(kvlist, CNXK_TIM_STATS_ENA, &parse_kvargs_flag,
                           &dev->enable_stats);
        rte_kvargs_process(kvlist, CNXK_TIM_RINGS_LMT, &parse_kvargs_value,
                           &dev->min_ring_cnt);
        rte_kvargs_process(kvlist, CNXK_TIM_RING_CTL,
                           &cnxk_tim_parse_kvargs_dict, dev);
        rte_kvargs_process(kvlist, CNXK_TIM_EXT_CLK, &cnxk_tim_parse_kvargs_dsv,
                           dev);

        rte_kvargs_free(kvlist);
}

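/*
 * Called from the SSO (eventdev) probe path in the primary process: reserve
 * the shared TIM device memzone, apply devargs, initialise the ROC TIM layer
 * and derive the default chunk size from the optional chunk_slots devarg.
 */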
void
cnxk_tim_init(struct roc_sso *sso)
{
        const struct rte_memzone *mz;
        struct cnxk_tim_evdev *dev;
        int rc;

        if (rte_eal_process_type() != RTE_PROC_PRIMARY)
                return;

        mz = rte_memzone_reserve(RTE_STR(CNXK_TIM_EVDEV_NAME),
                                 sizeof(struct cnxk_tim_evdev), 0, 0);
        if (mz == NULL) {
                plt_tim_dbg("Unable to allocate memory for TIM Event device");
                return;
        }
        dev = mz->addr;

        cnxk_tim_parse_devargs(sso->pci_dev->device.devargs, dev);

        dev->tim.roc_sso = sso;
        dev->tim.nb_lfs = dev->min_ring_cnt;
        rc = roc_tim_init(&dev->tim);
        if (rc < 0) {
                plt_err("Failed to initialize roc tim resources");
                rte_memzone_free(mz);
                return;
        }
        dev->nb_rings = rc;

        if (dev->chunk_slots && dev->chunk_slots <= CNXK_TIM_MAX_CHUNK_SLOTS &&
            dev->chunk_slots >= CNXK_TIM_MIN_CHUNK_SLOTS) {
                dev->chunk_sz =
                        (dev->chunk_slots + 1) * CNXK_TIM_CHUNK_ALIGNMENT;
        } else {
                dev->chunk_sz = CNXK_TIM_RING_DEF_CHUNK_SZ;
        }
}

void
cnxk_tim_fini(void)
{
        struct cnxk_tim_evdev *dev = cnxk_tim_priv_get();

        if (dev == NULL || rte_eal_process_type() != RTE_PROC_PRIMARY)
                return;

        roc_tim_fini(&dev->tim);
        rte_memzone_free(rte_memzone_lookup(RTE_STR(CNXK_TIM_EVDEV_NAME)));
}