/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2017-2018 Intel Corporation.
 * All rights reserved.
 */

#include <string.h>
#include <inttypes.h>
#include <stdbool.h>

#include <rte_memzone.h>
#include <rte_errno.h>
#include <rte_malloc.h>
#include <rte_mempool.h>
#include <rte_common.h>
#include <rte_timer.h>
#include <rte_service_component.h>

#include "event_timer_adapter_pmd.h"
#include "eventdev_pmd.h"
#include "rte_event_timer_adapter.h"
#include "rte_eventdev.h"
#include "eventdev_trace.h"

#define DATA_MZ_NAME_MAX_LEN 64
#define DATA_MZ_NAME_FORMAT "rte_event_timer_adapter_data_%d"

RTE_LOG_REGISTER_SUFFIX(evtim_logtype, adapter.timer, NOTICE);
RTE_LOG_REGISTER_SUFFIX(evtim_buffer_logtype, adapter.timer, NOTICE);
RTE_LOG_REGISTER_SUFFIX(evtim_svc_logtype, adapter.timer.svc, NOTICE);

static struct rte_event_timer_adapter *adapters;

static const struct event_timer_adapter_ops swtim_ops;

#define EVTIM_LOG(level, logtype, ...) \
        rte_log(RTE_LOG_ ## level, logtype, \
                RTE_FMT("EVTIMER: %s() line %u: " RTE_FMT_HEAD(__VA_ARGS__,) \
                        "\n", __func__, __LINE__, RTE_FMT_TAIL(__VA_ARGS__,)))

#define EVTIM_LOG_ERR(...) EVTIM_LOG(ERR, evtim_logtype, __VA_ARGS__)

#ifdef RTE_LIBRTE_EVENTDEV_DEBUG
#define EVTIM_LOG_DBG(...) \
        EVTIM_LOG(DEBUG, evtim_logtype, __VA_ARGS__)
#define EVTIM_BUF_LOG_DBG(...) \
        EVTIM_LOG(DEBUG, evtim_buffer_logtype, __VA_ARGS__)
#define EVTIM_SVC_LOG_DBG(...) \
        EVTIM_LOG(DEBUG, evtim_svc_logtype, __VA_ARGS__)
#else
#define EVTIM_LOG_DBG(...) (void)0
#define EVTIM_BUF_LOG_DBG(...) (void)0
#define EVTIM_SVC_LOG_DBG(...) (void)0
#endif

static int
default_port_conf_cb(uint16_t id, uint8_t event_dev_id, uint8_t *event_port_id,
                     void *conf_arg)
{
        struct rte_event_timer_adapter *adapter;
        struct rte_eventdev *dev;
        struct rte_event_dev_config dev_conf;
        struct rte_event_port_conf *port_conf, def_port_conf = {0};
        int started;
        uint8_t port_id;
        uint8_t dev_id;
        int ret;

        RTE_SET_USED(event_dev_id);

        adapter = &adapters[id];
        dev = &rte_eventdevs[adapter->data->event_dev_id];
        dev_id = dev->data->dev_id;
        dev_conf = dev->data->dev_conf;

        started = dev->data->dev_started;
        if (started)
                rte_event_dev_stop(dev_id);

        port_id = dev_conf.nb_event_ports;
        dev_conf.nb_event_ports += 1;
        ret = rte_event_dev_configure(dev_id, &dev_conf);
        if (ret < 0) {
                EVTIM_LOG_ERR("failed to configure event dev %u", dev_id);
                if (started)
                        if (rte_event_dev_start(dev_id))
                                return -EIO;

                return ret;
        }

        if (conf_arg != NULL)
                port_conf = conf_arg;
        else {
                port_conf = &def_port_conf;
                ret = rte_event_port_default_conf_get(dev_id, port_id,
                                                      port_conf);
                if (ret < 0)
                        return ret;
        }

        ret = rte_event_port_setup(dev_id, port_id, port_conf);
        if (ret < 0) {
                EVTIM_LOG_ERR("failed to setup event port %u on event dev %u",
                              port_id, dev_id);
                return ret;
        }

        *event_port_id = port_id;

        if (started)
                ret = rte_event_dev_start(dev_id);

        return ret;
}

struct rte_event_timer_adapter *
rte_event_timer_adapter_create(const struct rte_event_timer_adapter_conf *conf)
{
        return rte_event_timer_adapter_create_ext(conf, default_port_conf_cb,
                                                  NULL);
}
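
/* Illustrative sketch (not part of this library): how an application might
 * create and start an adapter using the default port configuration callback
 * above. The event device id, tick resolution, and timer count below are
 * placeholder values, not values mandated by the API.
 */
#if 0
static struct rte_event_timer_adapter *
example_adapter_setup(uint8_t evdev_id)
{
        struct rte_event_timer_adapter_conf conf = {
                .event_dev_id = evdev_id,
                .timer_adapter_id = 0,
                .socket_id = rte_socket_id(),
                .clk_src = RTE_EVENT_TIMER_ADAPTER_CPU_CLK,
                .timer_tick_ns = 1000000,       /* 1 ms tick resolution */
                .max_tmo_ns = 100000000,        /* 100 ms maximum timeout */
                .nb_timers = 1000,
                .flags = RTE_EVENT_TIMER_ADAPTER_F_ADJUST_RES,
        };
        struct rte_event_timer_adapter *adapter;

        adapter = rte_event_timer_adapter_create(&conf);
        if (adapter == NULL)
                return NULL;

        if (rte_event_timer_adapter_start(adapter) < 0) {
                rte_event_timer_adapter_free(adapter);
                return NULL;
        }

        return adapter;
}
#endif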

struct rte_event_timer_adapter *
rte_event_timer_adapter_create_ext(
                const struct rte_event_timer_adapter_conf *conf,
                rte_event_timer_adapter_port_conf_cb_t conf_cb,
                void *conf_arg)
{
        uint16_t adapter_id;
        struct rte_event_timer_adapter *adapter;
        const struct rte_memzone *mz;
        char mz_name[DATA_MZ_NAME_MAX_LEN];
        int n, ret;
        struct rte_eventdev *dev;

        if (adapters == NULL) {
                adapters = rte_zmalloc("Eventdev",
                                       sizeof(struct rte_event_timer_adapter) *
                                               RTE_EVENT_TIMER_ADAPTER_NUM_MAX,
                                       RTE_CACHE_LINE_SIZE);
                if (adapters == NULL) {
                        rte_errno = ENOMEM;
                        return NULL;
                }
        }

        if (conf == NULL) {
                rte_errno = EINVAL;
                return NULL;
        }

        /* Check eventdev ID */
        if (!rte_event_pmd_is_valid_dev(conf->event_dev_id)) {
                rte_errno = EINVAL;
                return NULL;
        }
        dev = &rte_eventdevs[conf->event_dev_id];

        adapter_id = conf->timer_adapter_id;

        /* Check that adapter_id is in range */
        if (adapter_id >= RTE_EVENT_TIMER_ADAPTER_NUM_MAX) {
                rte_errno = EINVAL;
                return NULL;
        }

        /* Check adapter ID not already allocated */
        adapter = &adapters[adapter_id];
        if (adapter->allocated) {
                rte_errno = EEXIST;
                return NULL;
        }

        /* Create shared data area. */
        n = snprintf(mz_name, sizeof(mz_name), DATA_MZ_NAME_FORMAT, adapter_id);
        if (n >= (int)sizeof(mz_name)) {
                rte_errno = EINVAL;
                return NULL;
        }
        mz = rte_memzone_reserve(mz_name,
                                 sizeof(struct rte_event_timer_adapter_data),
                                 conf->socket_id, 0);
        if (mz == NULL)
                /* rte_errno set by rte_memzone_reserve */
                return NULL;

        adapter->data = mz->addr;
        memset(adapter->data, 0, sizeof(struct rte_event_timer_adapter_data));

        adapter->data->mz = mz;
        adapter->data->event_dev_id = conf->event_dev_id;
        adapter->data->id = adapter_id;
        adapter->data->socket_id = conf->socket_id;
        adapter->data->conf = *conf;  /* copy conf structure */

        /* Query eventdev PMD for timer adapter capabilities and ops */
        ret = dev->dev_ops->timer_adapter_caps_get(dev,
                                                   adapter->data->conf.flags,
                                                   &adapter->data->caps,
                                                   &adapter->ops);
        if (ret < 0) {
                rte_errno = -ret;
                goto free_memzone;
        }

        if (!(adapter->data->caps &
              RTE_EVENT_TIMER_ADAPTER_CAP_INTERNAL_PORT)) {
                FUNC_PTR_OR_NULL_RET_WITH_ERRNO(conf_cb, EINVAL);
                ret = conf_cb(adapter->data->id, adapter->data->event_dev_id,
                              &adapter->data->event_port_id, conf_arg);
                if (ret < 0) {
                        rte_errno = -ret;
                        goto free_memzone;
                }
        }

        /* If eventdev PMD did not provide ops, use default software
         * implementation.
         */
        if (adapter->ops == NULL)
                adapter->ops = &swtim_ops;

        /* Allow driver to do some setup */
        FUNC_PTR_OR_NULL_RET_WITH_ERRNO(adapter->ops->init, ENOTSUP);
        ret = adapter->ops->init(adapter);
        if (ret < 0) {
                rte_errno = -ret;
                goto free_memzone;
        }

        /* Set fast-path function pointers */
        adapter->arm_burst = adapter->ops->arm_burst;
        adapter->arm_tmo_tick_burst = adapter->ops->arm_tmo_tick_burst;
        adapter->cancel_burst = adapter->ops->cancel_burst;

        adapter->allocated = 1;

        rte_eventdev_trace_timer_adapter_create(adapter_id, adapter, conf,
                conf_cb);
        return adapter;

free_memzone:
        rte_memzone_free(adapter->data->mz);
        return NULL;
}

int
rte_event_timer_adapter_get_info(const struct rte_event_timer_adapter *adapter,
                struct rte_event_timer_adapter_info *adapter_info)
{
        ADAPTER_VALID_OR_ERR_RET(adapter, -EINVAL);

        if (adapter->ops->get_info)
                /* let driver set values it knows */
                adapter->ops->get_info(adapter, adapter_info);

        /* Set common values */
        adapter_info->conf = adapter->data->conf;
        adapter_info->event_dev_port_id = adapter->data->event_port_id;
        adapter_info->caps = adapter->data->caps;

        return 0;
}

int
rte_event_timer_adapter_start(const struct rte_event_timer_adapter *adapter)
{
        int ret;

        ADAPTER_VALID_OR_ERR_RET(adapter, -EINVAL);
        FUNC_PTR_OR_ERR_RET(adapter->ops->start, -EINVAL);

        if (adapter->data->started) {
                EVTIM_LOG_ERR("event timer adapter %"PRIu8" already started",
                              adapter->data->id);
                return -EALREADY;
        }

        ret = adapter->ops->start(adapter);
        if (ret < 0)
                return ret;

        adapter->data->started = 1;
        rte_eventdev_trace_timer_adapter_start(adapter);
        return 0;
}

int
rte_event_timer_adapter_stop(const struct rte_event_timer_adapter *adapter)
{
        int ret;

        ADAPTER_VALID_OR_ERR_RET(adapter, -EINVAL);
        FUNC_PTR_OR_ERR_RET(adapter->ops->stop, -EINVAL);

        if (adapter->data->started == 0) {
                EVTIM_LOG_ERR("event timer adapter %"PRIu8" already stopped",
                              adapter->data->id);
                return 0;
        }

        ret = adapter->ops->stop(adapter);
        if (ret < 0)
                return ret;

        adapter->data->started = 0;
        rte_eventdev_trace_timer_adapter_stop(adapter);
        return 0;
}

struct rte_event_timer_adapter *
rte_event_timer_adapter_lookup(uint16_t adapter_id)
{
        char name[DATA_MZ_NAME_MAX_LEN];
        const struct rte_memzone *mz;
        struct rte_event_timer_adapter_data *data;
        struct rte_event_timer_adapter *adapter;
        int ret;
        struct rte_eventdev *dev;

        if (adapters == NULL) {
                adapters = rte_zmalloc("Eventdev",
                                       sizeof(struct rte_event_timer_adapter) *
                                               RTE_EVENT_TIMER_ADAPTER_NUM_MAX,
                                       RTE_CACHE_LINE_SIZE);
                if (adapters == NULL) {
                        rte_errno = ENOMEM;
                        return NULL;
                }
        }

        if (adapters[adapter_id].allocated)
                return &adapters[adapter_id]; /* Adapter is already loaded */

        snprintf(name, DATA_MZ_NAME_MAX_LEN, DATA_MZ_NAME_FORMAT, adapter_id);
        mz = rte_memzone_lookup(name);
        if (mz == NULL) {
                rte_errno = ENOENT;
                return NULL;
        }

        data = mz->addr;

        adapter = &adapters[data->id];
        adapter->data = data;

        dev = &rte_eventdevs[adapter->data->event_dev_id];

        /* Query eventdev PMD for timer adapter capabilities and ops */
        ret = dev->dev_ops->timer_adapter_caps_get(dev,
                                                   adapter->data->conf.flags,
                                                   &adapter->data->caps,
                                                   &adapter->ops);
        if (ret < 0) {
                rte_errno = EINVAL;
                return NULL;
        }

        /* If eventdev PMD did not provide ops, use default software
         * implementation.
         */
        if (adapter->ops == NULL)
                adapter->ops = &swtim_ops;

        /* Set fast-path function pointers */
        adapter->arm_burst = adapter->ops->arm_burst;
        adapter->arm_tmo_tick_burst = adapter->ops->arm_tmo_tick_burst;
        adapter->cancel_burst = adapter->ops->cancel_burst;

        adapter->allocated = 1;

        return adapter;
}

int
rte_event_timer_adapter_free(struct rte_event_timer_adapter *adapter)
{
        int i, ret;

        ADAPTER_VALID_OR_ERR_RET(adapter, -EINVAL);
        FUNC_PTR_OR_ERR_RET(adapter->ops->uninit, -EINVAL);

        if (adapter->data->started == 1) {
                EVTIM_LOG_ERR("event timer adapter %"PRIu8" must be stopped "
                              "before freeing", adapter->data->id);
                return -EBUSY;
        }

        /* free impl priv data */
        ret = adapter->ops->uninit(adapter);
        if (ret < 0)
                return ret;

        /* free shared data area */
        ret = rte_memzone_free(adapter->data->mz);
        if (ret < 0)
                return ret;

        adapter->data = NULL;
        adapter->allocated = 0;

        ret = 0;
        for (i = 0; i < RTE_EVENT_TIMER_ADAPTER_NUM_MAX; i++)
                if (adapters[i].allocated)
                        ret = adapters[i].allocated;

        if (!ret) {
                rte_free(adapters);
                adapters = NULL;
        }

        rte_eventdev_trace_timer_adapter_free(adapter);
        return 0;
}

int
rte_event_timer_adapter_service_id_get(struct rte_event_timer_adapter *adapter,
                                       uint32_t *service_id)
{
        ADAPTER_VALID_OR_ERR_RET(adapter, -EINVAL);

        if (adapter->data->service_inited && service_id != NULL)
                *service_id = adapter->data->service_id;

        return adapter->data->service_inited ? 0 : -ESRCH;
}

int
rte_event_timer_adapter_stats_get(struct rte_event_timer_adapter *adapter,
                                  struct rte_event_timer_adapter_stats *stats)
{
        ADAPTER_VALID_OR_ERR_RET(adapter, -EINVAL);
        FUNC_PTR_OR_ERR_RET(adapter->ops->stats_get, -EINVAL);
        if (stats == NULL)
                return -EINVAL;

        return adapter->ops->stats_get(adapter, stats);
}

int
rte_event_timer_adapter_stats_reset(struct rte_event_timer_adapter *adapter)
{
        ADAPTER_VALID_OR_ERR_RET(adapter, -EINVAL);
        FUNC_PTR_OR_ERR_RET(adapter->ops->stats_reset, -EINVAL);
        return adapter->ops->stats_reset(adapter);
}

/*
 * Software event timer adapter buffer helper functions
 */

#define NSECPERSEC 1E9

/* Optimizations used to index into the buffer require that the buffer size
 * be a power of 2.
 */
#define EVENT_BUFFER_SZ 4096
#define EVENT_BUFFER_BATCHSZ 32
#define EVENT_BUFFER_MASK (EVENT_BUFFER_SZ - 1)
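
/* A worked example of the masking above (informal): with EVENT_BUFFER_SZ =
 * 4096, a free-running head counter of 4100 maps to index 4100 & 4095 = 4,
 * the same as 4100 % 4096, so the head and tail counters can wrap through
 * SIZE_MAX without an explicit modulus.
 */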

#define EXP_TIM_BUF_SZ 128

struct event_buffer {
        size_t head;
        size_t tail;
        struct rte_event events[EVENT_BUFFER_SZ];
} __rte_cache_aligned;

static inline bool
event_buffer_full(struct event_buffer *bufp)
{
        return (bufp->head - bufp->tail) == EVENT_BUFFER_SZ;
}

static inline bool
event_buffer_batch_ready(struct event_buffer *bufp)
{
        return (bufp->head - bufp->tail) >= EVENT_BUFFER_BATCHSZ;
}

static void
event_buffer_init(struct event_buffer *bufp)
{
        bufp->head = bufp->tail = 0;
        memset(&bufp->events, 0, sizeof(struct rte_event) * EVENT_BUFFER_SZ);
}

static int
event_buffer_add(struct event_buffer *bufp, struct rte_event *eventp)
{
        size_t head_idx;
        struct rte_event *buf_eventp;

        if (event_buffer_full(bufp))
                return -1;

        /* Instead of modulus, bitwise AND with mask to get head_idx. */
        head_idx = bufp->head & EVENT_BUFFER_MASK;
        buf_eventp = &bufp->events[head_idx];
        rte_memcpy(buf_eventp, eventp, sizeof(struct rte_event));

        /* Wrap automatically when overflow occurs. */
        bufp->head++;

        return 0;
}

static void
event_buffer_flush(struct event_buffer *bufp, uint8_t dev_id, uint8_t port_id,
                   uint16_t *nb_events_flushed,
                   uint16_t *nb_events_inv)
{
        struct rte_event *events = bufp->events;
        size_t head_idx, tail_idx;
        uint16_t n = 0;

        /* Instead of modulus, bitwise AND with mask to get index. */
        head_idx = bufp->head & EVENT_BUFFER_MASK;
        tail_idx = bufp->tail & EVENT_BUFFER_MASK;

        RTE_ASSERT(head_idx < EVENT_BUFFER_SZ && tail_idx < EVENT_BUFFER_SZ);

        /* Determine the largest contiguous run we can attempt to enqueue to the
         * event device.
         */
        if (head_idx > tail_idx)
                n = head_idx - tail_idx;
        else if (head_idx < tail_idx)
                n = EVENT_BUFFER_SZ - tail_idx;
        else if (event_buffer_full(bufp))
                n = EVENT_BUFFER_SZ - tail_idx;
        else {
                *nb_events_flushed = 0;
                return;
        }

        n = RTE_MIN(EVENT_BUFFER_BATCHSZ, n);
        *nb_events_inv = 0;

        *nb_events_flushed = rte_event_enqueue_burst(dev_id, port_id,
                                                     &events[tail_idx], n);
        if (*nb_events_flushed != n) {
                if (rte_errno == EINVAL) {
                        EVTIM_LOG_ERR("failed to enqueue invalid event - "
                                      "dropping it");
                        (*nb_events_inv)++;
                } else if (rte_errno == ENOSPC)
                        rte_pause();
        }

        if (*nb_events_flushed > 0)
                EVTIM_BUF_LOG_DBG("enqueued %"PRIu16" timer events to event "
                                  "device", *nb_events_flushed);

        bufp->tail = bufp->tail + *nb_events_flushed + *nb_events_inv;
}
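
/* A worked example of the contiguous-run logic above (informal): with
 * EVENT_BUFFER_SZ = 4096, tail_idx = 4094 and head_idx = 2 (the head has
 * wrapped), head_idx < tail_idx selects n = 4096 - 4094 = 2, so this call
 * enqueues only the two events at the end of the array; the two events at
 * the start of the array are picked up by the next flush.
 */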

/*
 * Software event timer adapter implementation
 */
struct swtim {
        /* Identifier of service executing timer management logic. */
        uint32_t service_id;
        /* The cycle count at which the adapter should next tick */
        uint64_t next_tick_cycles;
        /* The tick resolution used by the adapter instance. May have been
         * adjusted from what the user requested.
         */
        uint64_t timer_tick_ns;
        /* Maximum timeout in nanoseconds allowed by adapter instance. */
        uint64_t max_tmo_ns;
        /* Buffered timer expiry events to be enqueued to an event device. */
        struct event_buffer buffer;
        /* Statistics */
        struct rte_event_timer_adapter_stats stats;
        /* Mempool of timer objects */
        struct rte_mempool *tim_pool;
        /* Back pointer for convenience */
        struct rte_event_timer_adapter *adapter;
        /* Identifier of timer data instance */
        uint32_t timer_data_id;
        /* Track which cores have actually armed a timer */
        struct {
                uint16_t v;
        } __rte_cache_aligned in_use[RTE_MAX_LCORE];
        /* Track which cores' timer lists should be polled */
        unsigned int poll_lcores[RTE_MAX_LCORE];
        /* The number of lists that should be polled */
        int n_poll_lcores;
        /* Timers which have expired and can be returned to a mempool */
        struct rte_timer *expired_timers[EXP_TIM_BUF_SZ];
        /* The number of timers that can be returned to a mempool */
        size_t n_expired_timers;
};

static inline struct swtim *
swtim_pmd_priv(const struct rte_event_timer_adapter *adapter)
{
        return adapter->data->adapter_priv;
}

static void
swtim_callback(struct rte_timer *tim)
{
        struct rte_event_timer *evtim = tim->arg;
        struct rte_event_timer_adapter *adapter;
        unsigned int lcore = rte_lcore_id();
        struct swtim *sw;
        uint16_t nb_evs_flushed = 0;
        uint16_t nb_evs_invalid = 0;
        uint64_t opaque;
        int ret;
        int n_lcores;

        opaque = evtim->impl_opaque[1];
        adapter = (struct rte_event_timer_adapter *)(uintptr_t)opaque;
        sw = swtim_pmd_priv(adapter);

        ret = event_buffer_add(&sw->buffer, &evtim->ev);
        if (ret < 0) {
                /* If event buffer is full, put timer back in list with
                 * immediate expiry value, so that we process it again on the
                 * next iteration.
                 */
                ret = rte_timer_alt_reset(sw->timer_data_id, tim, 0, SINGLE,
                                          lcore, NULL, evtim);
                if (ret < 0) {
                        EVTIM_LOG_DBG("event buffer full, failed to reset "
                                      "timer with immediate expiry value");
                } else {
                        sw->stats.evtim_retry_count++;
                        EVTIM_LOG_DBG("event buffer full, resetting rte_timer "
                                      "with immediate expiry value");
                }

                if (unlikely(sw->in_use[lcore].v == 0)) {
                        sw->in_use[lcore].v = 1;
                        n_lcores = __atomic_fetch_add(&sw->n_poll_lcores, 1,
                                                     __ATOMIC_RELAXED);
                        __atomic_store_n(&sw->poll_lcores[n_lcores], lcore,
                                        __ATOMIC_RELAXED);
                }
        } else {
                EVTIM_BUF_LOG_DBG("buffered an event timer expiry event");

                /* Empty the expired-timers buffer here, if necessary, so
                 * that only older expired timers are freed.
                 */
                if (unlikely(sw->n_expired_timers == EXP_TIM_BUF_SZ)) {
                        rte_mempool_put_bulk(sw->tim_pool,
                                             (void **)sw->expired_timers,
                                             sw->n_expired_timers);
                        sw->n_expired_timers = 0;
                }

                sw->expired_timers[sw->n_expired_timers++] = tim;
                sw->stats.evtim_exp_count++;

                __atomic_store_n(&evtim->state, RTE_EVENT_TIMER_NOT_ARMED,
                                __ATOMIC_RELEASE);
        }

        if (event_buffer_batch_ready(&sw->buffer)) {
                event_buffer_flush(&sw->buffer,
                                   adapter->data->event_dev_id,
                                   adapter->data->event_port_id,
                                   &nb_evs_flushed,
                                   &nb_evs_invalid);

                sw->stats.ev_enq_count += nb_evs_flushed;
                sw->stats.ev_inv_count += nb_evs_invalid;
        }
}

static __rte_always_inline uint64_t
get_timeout_cycles(struct rte_event_timer *evtim,
                   const struct rte_event_timer_adapter *adapter)
{
        struct swtim *sw = swtim_pmd_priv(adapter);
        uint64_t timeout_ns = evtim->timeout_ticks * sw->timer_tick_ns;
        return timeout_ns * rte_get_timer_hz() / NSECPERSEC;
}
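
/* A worked numeric example of the conversion above (informal): with a 1 ms
 * tick (timer_tick_ns = 1E6), timeout_ticks = 100, and a 2 GHz timer clock
 * (rte_get_timer_hz() = 2E9), timeout_ns = 1E8 and the result is
 * 1E8 * 2E9 / 1E9 = 2E8 cycles. Note that the intermediate multiplication
 * can exceed 64 bits for very large timeouts on fast timer clocks.
 */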

/* This function returns true if one or more (adapter) ticks have occurred since
 * the last time it was called.
 */
static inline bool
swtim_did_tick(struct swtim *sw)
{
        uint64_t cycles_per_adapter_tick, start_cycles;
        uint64_t *next_tick_cyclesp;

        next_tick_cyclesp = &sw->next_tick_cycles;
        cycles_per_adapter_tick = sw->timer_tick_ns *
                        (rte_get_timer_hz() / NSECPERSEC);
        start_cycles = rte_get_timer_cycles();

        /* Note: initially, *next_tick_cyclesp == 0, so the clause below will
         * execute, and set things going.
         */

        if (start_cycles >= *next_tick_cyclesp) {
                /* Snap the current cycle count to the preceding adapter tick
                 * boundary.
                 */
                start_cycles -= start_cycles % cycles_per_adapter_tick;
                *next_tick_cyclesp = start_cycles + cycles_per_adapter_tick;

                return true;
        }

        return false;
}
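
/* A worked example of the snapping above (informal): with a 1 ms tick on a
 * 1 GHz timer clock, cycles_per_adapter_tick = 1E6. If the service first
 * runs at start_cycles = 2500000, the count is snapped back to 2000000 and
 * *next_tick_cyclesp becomes 3000000; subsequent calls return false until
 * the cycle counter passes that boundary.
 */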

/* Check that event timer timeout value is in range */
static __rte_always_inline int
check_timeout(struct rte_event_timer *evtim,
              const struct rte_event_timer_adapter *adapter)
{
        uint64_t tmo_nsec;
        struct swtim *sw = swtim_pmd_priv(adapter);

        tmo_nsec = evtim->timeout_ticks * sw->timer_tick_ns;
        if (tmo_nsec > sw->max_tmo_ns)
                return -1;
        if (tmo_nsec < sw->timer_tick_ns)
                return -2;

        return 0;
}
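
/* A worked example of the range check above (informal): with a 1 ms tick
 * and max_tmo_ns = 1 second, valid timeout_ticks values are 1 through 1000;
 * timeout_ticks = 0 returns -2 (too early) and timeout_ticks = 1001 returns
 * -1 (too late).
 */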

/* Check that event timer event queue sched type matches destination event queue
 * sched type
 */
static __rte_always_inline int
check_destination_event_queue(struct rte_event_timer *evtim,
                              const struct rte_event_timer_adapter *adapter)
{
        int ret;
        uint32_t sched_type;

        ret = rte_event_queue_attr_get(adapter->data->event_dev_id,
                                       evtim->ev.queue_id,
                                       RTE_EVENT_QUEUE_ATTR_SCHEDULE_TYPE,
                                       &sched_type);

        if ((ret == 0 && evtim->ev.sched_type == sched_type) ||
            ret == -EOVERFLOW)
                return 0;

        return -1;
}

static int
swtim_service_func(void *arg)
{
        struct rte_event_timer_adapter *adapter = arg;
        struct swtim *sw = swtim_pmd_priv(adapter);
        uint16_t nb_evs_flushed = 0;
        uint16_t nb_evs_invalid = 0;

        if (swtim_did_tick(sw)) {
                rte_timer_alt_manage(sw->timer_data_id,
                                     sw->poll_lcores,
                                     sw->n_poll_lcores,
                                     swtim_callback);

                /* Return expired timer objects back to mempool */
                rte_mempool_put_bulk(sw->tim_pool, (void **)sw->expired_timers,
                                     sw->n_expired_timers);
                sw->n_expired_timers = 0;

                event_buffer_flush(&sw->buffer,
                                   adapter->data->event_dev_id,
                                   adapter->data->event_port_id,
                                   &nb_evs_flushed,
                                   &nb_evs_invalid);

                sw->stats.ev_enq_count += nb_evs_flushed;
                sw->stats.ev_inv_count += nb_evs_invalid;
                sw->stats.adapter_tick_count++;
        }

        rte_event_maintain(adapter->data->event_dev_id,
                           adapter->data->event_port_id, 0);

        return 0;
}

/* The adapter initialization function rounds the mempool size up to the next
 * power of 2, so we can take the difference between that value and what the
 * user requested, and use the space for caches.  This avoids a scenario where a
 * user can't arm the number of timers the adapter was configured with because
 * mempool objects have been lost to caches.
 *
 * nb_actual should always be a power of 2, so we can iterate over the powers
 * of 2 to see what the largest cache size we can use is.
 */
static int
compute_msg_mempool_cache_size(uint64_t nb_requested, uint64_t nb_actual)
{
        int i;
        int size;
        int cache_size = 0;

        for (i = 0;; i++) {
                size = 1 << i;

                if (RTE_MAX_LCORE * size < (int)(nb_actual - nb_requested) &&
                    size < RTE_MEMPOOL_CACHE_MAX_SIZE &&
                    size <= nb_actual / 1.5)
                        cache_size = size;
                else
                        break;
        }

        return cache_size;
}
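
/* A worked example of the computation above (informal, assuming
 * RTE_MAX_LCORE = 128 and RTE_MEMPOOL_CACHE_MAX_SIZE = 512): for
 * nb_requested = 100000, the adapter rounds up to nb_actual = 131072,
 * leaving 31072 spare objects. size = 128 still satisfies
 * 128 * 128 = 16384 < 31072, but size = 256 gives 32768, which does not,
 * so the function returns a cache size of 128.
 */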

static int
swtim_init(struct rte_event_timer_adapter *adapter)
{
        int i, ret;
        struct swtim *sw;
        unsigned int flags;
        struct rte_service_spec service;

        /* Allocate storage for private data area */
#define SWTIM_NAMESIZE 32
        char swtim_name[SWTIM_NAMESIZE];
        snprintf(swtim_name, SWTIM_NAMESIZE, "swtim_%"PRIu8,
                        adapter->data->id);
        sw = rte_zmalloc_socket(swtim_name, sizeof(*sw), RTE_CACHE_LINE_SIZE,
                        adapter->data->socket_id);
        if (sw == NULL) {
                EVTIM_LOG_ERR("failed to allocate space for private data");
                rte_errno = ENOMEM;
                return -1;
        }

        /* Connect storage to adapter instance */
        adapter->data->adapter_priv = sw;
        sw->adapter = adapter;

        sw->timer_tick_ns = adapter->data->conf.timer_tick_ns;
        sw->max_tmo_ns = adapter->data->conf.max_tmo_ns;

        /* Create a timer pool */
        char pool_name[SWTIM_NAMESIZE];
        snprintf(pool_name, SWTIM_NAMESIZE, "swtim_pool_%"PRIu8,
                 adapter->data->id);
        /* Optimal mempool size is a power of 2 minus one */
        uint64_t nb_timers = rte_align64pow2(adapter->data->conf.nb_timers);
        int pool_size = nb_timers - 1;
        int cache_size = compute_msg_mempool_cache_size(
                                adapter->data->conf.nb_timers, nb_timers);
        flags = 0; /* pool is multi-producer, multi-consumer */
        sw->tim_pool = rte_mempool_create(pool_name, pool_size,
                        sizeof(struct rte_timer), cache_size, 0, NULL, NULL,
                        NULL, NULL, adapter->data->socket_id, flags);
        if (sw->tim_pool == NULL) {
                EVTIM_LOG_ERR("failed to create timer object mempool");
                rte_errno = ENOMEM;
                goto free_alloc;
        }

        /* Initialize the variables that track in-use timer lists */
        for (i = 0; i < RTE_MAX_LCORE; i++)
                sw->in_use[i].v = 0;

        /* Initialize the timer subsystem and allocate timer data instance */
        ret = rte_timer_subsystem_init();
        if (ret < 0) {
                if (ret != -EALREADY) {
                        EVTIM_LOG_ERR("failed to initialize timer subsystem");
                        rte_errno = -ret;
                        goto free_mempool;
                }
        }

        ret = rte_timer_data_alloc(&sw->timer_data_id);
        if (ret < 0) {
                EVTIM_LOG_ERR("failed to allocate timer data instance");
                rte_errno = -ret;
                goto free_mempool;
        }

        /* Initialize timer event buffer */
        event_buffer_init(&sw->buffer);

        /* Register a service component to run adapter logic */
        memset(&service, 0, sizeof(service));
        snprintf(service.name, RTE_SERVICE_NAME_MAX,
                 "swtim_svc_%"PRIu8, adapter->data->id);
        service.socket_id = adapter->data->socket_id;
        service.callback = swtim_service_func;
        service.callback_userdata = adapter;
        service.capabilities &= ~(RTE_SERVICE_CAP_MT_SAFE);
        ret = rte_service_component_register(&service, &sw->service_id);
        if (ret < 0) {
                EVTIM_LOG_ERR("failed to register service %s with id %"PRIu32
                              ": err = %d", service.name, sw->service_id,
                              ret);

                rte_errno = ENOSPC;
                goto free_mempool;
        }

        EVTIM_LOG_DBG("registered service %s with id %"PRIu32, service.name,
                      sw->service_id);

        adapter->data->service_id = sw->service_id;
        adapter->data->service_inited = 1;

        return 0;
free_mempool:
        rte_mempool_free(sw->tim_pool);
free_alloc:
        rte_free(sw);
        return -1;
}

static void
swtim_free_tim(struct rte_timer *tim, void *arg)
{
        struct swtim *sw = arg;

        rte_mempool_put(sw->tim_pool, tim);
}

/* Traverse the list of outstanding timers and put them back in the mempool
 * before freeing the adapter to avoid leaking the memory.
 */
static int
swtim_uninit(struct rte_event_timer_adapter *adapter)
{
        int ret;
        struct swtim *sw = swtim_pmd_priv(adapter);

        /* Free outstanding timers */
        rte_timer_stop_all(sw->timer_data_id,
                           sw->poll_lcores,
                           sw->n_poll_lcores,
                           swtim_free_tim,
                           sw);

        ret = rte_service_component_unregister(sw->service_id);
        if (ret < 0) {
                EVTIM_LOG_ERR("failed to unregister service component");
                return ret;
        }

        rte_mempool_free(sw->tim_pool);
        rte_free(sw);
        adapter->data->adapter_priv = NULL;

        return 0;
}

static inline int32_t
get_mapped_count_for_service(uint32_t service_id)
{
        int32_t core_count, i, mapped_count = 0;
        uint32_t lcore_arr[RTE_MAX_LCORE];

        core_count = rte_service_lcore_list(lcore_arr, RTE_MAX_LCORE);

        for (i = 0; i < core_count; i++)
                if (rte_service_map_lcore_get(service_id, lcore_arr[i]) == 1)
                        mapped_count++;

        return mapped_count;
}

static int
swtim_start(const struct rte_event_timer_adapter *adapter)
{
        int mapped_count;
        struct swtim *sw = swtim_pmd_priv(adapter);

        /* Mapping the service to more than one service core can introduce
         * delays while one thread is waiting to acquire a lock, so only allow
         * one core to be mapped to the service.
         *
         * Note: the service could be modified such that it spreads cores to
         * poll over multiple service instances.
         */
        mapped_count = get_mapped_count_for_service(sw->service_id);

        if (mapped_count != 1)
                return mapped_count < 1 ? -ENOENT : -ENOTSUP;

        return rte_service_component_runstate_set(sw->service_id, 1);
}
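
/* Illustrative sketch (not part of this library): how an application might
 * map exactly one service core to the adapter's service, as swtim_start()
 * requires. The lcore id is supplied by the caller; a real application
 * would pick a core it has reserved for services.
 */
#if 0
static int
example_map_service_core(struct rte_event_timer_adapter *adapter,
                         uint32_t lcore_id)
{
        uint32_t service_id;

        if (rte_event_timer_adapter_service_id_get(adapter, &service_id) < 0)
                return -1;

        if (rte_service_lcore_add(lcore_id) < 0 ||
            rte_service_map_lcore_set(service_id, lcore_id, 1) < 0)
                return -1;

        if (rte_service_runstate_set(service_id, 1) < 0)
                return -1;

        return rte_service_lcore_start(lcore_id);
}
#endif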

static int
swtim_stop(const struct rte_event_timer_adapter *adapter)
{
        int ret;
        struct swtim *sw = swtim_pmd_priv(adapter);

        ret = rte_service_component_runstate_set(sw->service_id, 0);
        if (ret < 0)
                return ret;

        /* Wait for the service to complete its final iteration */
        while (rte_service_may_be_active(sw->service_id))
                rte_pause();

        return 0;
}

static void
swtim_get_info(const struct rte_event_timer_adapter *adapter,
                struct rte_event_timer_adapter_info *adapter_info)
{
        struct swtim *sw = swtim_pmd_priv(adapter);
        adapter_info->min_resolution_ns = sw->timer_tick_ns;
        adapter_info->max_tmo_ns = sw->max_tmo_ns;
}

static int
swtim_stats_get(const struct rte_event_timer_adapter *adapter,
                struct rte_event_timer_adapter_stats *stats)
{
        struct swtim *sw = swtim_pmd_priv(adapter);
        *stats = sw->stats; /* structure copy */
        return 0;
}

static int
swtim_stats_reset(const struct rte_event_timer_adapter *adapter)
{
        struct swtim *sw = swtim_pmd_priv(adapter);
        memset(&sw->stats, 0, sizeof(sw->stats));
        return 0;
}

static uint16_t
__swtim_arm_burst(const struct rte_event_timer_adapter *adapter,
                struct rte_event_timer **evtims,
                uint16_t nb_evtims)
{
        int i, ret;
        struct swtim *sw = swtim_pmd_priv(adapter);
        uint32_t lcore_id = rte_lcore_id();
        struct rte_timer *tim, *tims[nb_evtims];
        uint64_t cycles;
        int n_lcores;
        /* Timer list for this lcore is not in use. */
        uint16_t exp_state = 0;
        enum rte_event_timer_state n_state;

#ifdef RTE_LIBRTE_EVENTDEV_DEBUG
        /* Check that the service is running. */
        if (rte_service_runstate_get(adapter->data->service_id) != 1) {
                rte_errno = EINVAL;
                return 0;
        }
#endif

        /* If this is a non-EAL thread, adjust lcore_id: arbitrarily pick the
         * timer list of the highest lcore to insert such timers into.
         */
        if (lcore_id == LCORE_ID_ANY)
                lcore_id = RTE_MAX_LCORE - 1;

        /* If this is the first time we're arming an event timer on this lcore,
         * mark this lcore as "in use"; this will cause the service
         * function to process the timer list that corresponds to this lcore.
         * The atomic compare-and-swap operation prevents a race on the
         * in_use flag between multiple non-EAL threads.
         */
        if (unlikely(__atomic_compare_exchange_n(&sw->in_use[lcore_id].v,
                        &exp_state, 1, 0,
                        __ATOMIC_RELAXED, __ATOMIC_RELAXED))) {
                EVTIM_LOG_DBG("Adding lcore id = %u to list of lcores to poll",
                              lcore_id);
                n_lcores = __atomic_fetch_add(&sw->n_poll_lcores, 1,
                                             __ATOMIC_RELAXED);
                __atomic_store_n(&sw->poll_lcores[n_lcores], lcore_id,
                                __ATOMIC_RELAXED);
        }

        ret = rte_mempool_get_bulk(sw->tim_pool, (void **)tims,
                                   nb_evtims);
        if (ret < 0) {
                rte_errno = ENOSPC;
                return 0;
        }

        for (i = 0; i < nb_evtims; i++) {
                n_state = __atomic_load_n(&evtims[i]->state, __ATOMIC_ACQUIRE);
                if (n_state == RTE_EVENT_TIMER_ARMED) {
                        rte_errno = EALREADY;
                        break;
                } else if (!(n_state == RTE_EVENT_TIMER_NOT_ARMED ||
                             n_state == RTE_EVENT_TIMER_CANCELED)) {
                        rte_errno = EINVAL;
                        break;
                }

                ret = check_timeout(evtims[i], adapter);
                if (unlikely(ret == -1)) {
                        __atomic_store_n(&evtims[i]->state,
                                        RTE_EVENT_TIMER_ERROR_TOOLATE,
                                        __ATOMIC_RELAXED);
                        rte_errno = EINVAL;
                        break;
                } else if (unlikely(ret == -2)) {
                        __atomic_store_n(&evtims[i]->state,
                                        RTE_EVENT_TIMER_ERROR_TOOEARLY,
                                        __ATOMIC_RELAXED);
                        rte_errno = EINVAL;
                        break;
                }

                if (unlikely(check_destination_event_queue(evtims[i],
                                                           adapter) < 0)) {
                        __atomic_store_n(&evtims[i]->state,
                                        RTE_EVENT_TIMER_ERROR,
                                        __ATOMIC_RELAXED);
                        rte_errno = EINVAL;
                        break;
                }

                tim = tims[i];
                rte_timer_init(tim);

                evtims[i]->impl_opaque[0] = (uintptr_t)tim;
                evtims[i]->impl_opaque[1] = (uintptr_t)adapter;

                cycles = get_timeout_cycles(evtims[i], adapter);
                ret = rte_timer_alt_reset(sw->timer_data_id, tim, cycles,
                                          SINGLE, lcore_id, NULL, evtims[i]);
                if (ret < 0) {
                        /* tim was in RUNNING or CONFIG state */
                        __atomic_store_n(&evtims[i]->state,
                                        RTE_EVENT_TIMER_ERROR,
                                        __ATOMIC_RELEASE);
                        break;
                }

                EVTIM_LOG_DBG("armed an event timer");
                /* RELEASE ordering guarantees that the adapter-specific value
                 * changes are observed before the update of state.
                 */
                __atomic_store_n(&evtims[i]->state, RTE_EVENT_TIMER_ARMED,
                                __ATOMIC_RELEASE);
        }

        if (i < nb_evtims)
                rte_mempool_put_bulk(sw->tim_pool,
                                     (void **)&tims[i], nb_evtims - i);

        return i;
}

static uint16_t
swtim_arm_burst(const struct rte_event_timer_adapter *adapter,
                struct rte_event_timer **evtims,
                uint16_t nb_evtims)
{
        return __swtim_arm_burst(adapter, evtims, nb_evtims);
}

static uint16_t
swtim_cancel_burst(const struct rte_event_timer_adapter *adapter,
                   struct rte_event_timer **evtims,
                   uint16_t nb_evtims)
{
        int i, ret;
        struct rte_timer *timp;
        uint64_t opaque;
        struct swtim *sw = swtim_pmd_priv(adapter);
        enum rte_event_timer_state n_state;

#ifdef RTE_LIBRTE_EVENTDEV_DEBUG
        /* Check that the service is running. */
        if (rte_service_runstate_get(adapter->data->service_id) != 1) {
                rte_errno = EINVAL;
                return 0;
        }
#endif

        for (i = 0; i < nb_evtims; i++) {
                /* Don't modify the event timer state in these cases */
                /* ACQUIRE ordering guarantees that the implementation-specific
                 * opaque data is accessed under the correct state.
                 */
                n_state = __atomic_load_n(&evtims[i]->state, __ATOMIC_ACQUIRE);
                if (n_state == RTE_EVENT_TIMER_CANCELED) {
                        rte_errno = EALREADY;
                        break;
                } else if (n_state != RTE_EVENT_TIMER_ARMED) {
                        rte_errno = EINVAL;
                        break;
                }

                opaque = evtims[i]->impl_opaque[0];
                timp = (struct rte_timer *)(uintptr_t)opaque;
                RTE_ASSERT(timp != NULL);

                ret = rte_timer_alt_stop(sw->timer_data_id, timp);
                if (ret < 0) {
                        /* Timer is running or being configured */
                        rte_errno = EAGAIN;
                        break;
                }

                rte_mempool_put(sw->tim_pool, (void *)timp);

                /* The RELEASE ordering here pairs with the ACQUIRE ordering
                 * above to make sure the state update is observed between
                 * threads.
                 */
                __atomic_store_n(&evtims[i]->state, RTE_EVENT_TIMER_CANCELED,
                                __ATOMIC_RELEASE);
        }

        return i;
}

static uint16_t
swtim_arm_tmo_tick_burst(const struct rte_event_timer_adapter *adapter,
                         struct rte_event_timer **evtims,
                         uint64_t timeout_ticks,
                         uint16_t nb_evtims)
{
        int i;

        for (i = 0; i < nb_evtims; i++)
                evtims[i]->timeout_ticks = timeout_ticks;

        return __swtim_arm_burst(adapter, evtims, nb_evtims);
}

static const struct event_timer_adapter_ops swtim_ops = {
        .init = swtim_init,
        .uninit = swtim_uninit,
        .start = swtim_start,
        .stop = swtim_stop,
        .get_info = swtim_get_info,
        .stats_get = swtim_stats_get,
        .stats_reset = swtim_stats_reset,
        .arm_burst = swtim_arm_burst,
        .arm_tmo_tick_burst = swtim_arm_tmo_tick_burst,
        .cancel_burst = swtim_cancel_burst,
};
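
/* Illustrative sketch (not part of this library): arming and canceling a
 * single event timer against a started adapter. The queue id, sched type,
 * and timeout below are placeholders; an application would normally embed
 * struct rte_event_timer in its own object.
 */
#if 0
static int
example_arm_and_cancel(struct rte_event_timer_adapter *adapter)
{
        struct rte_event_timer evtim = {
                .ev.op = RTE_EVENT_OP_NEW,
                .ev.queue_id = 0,
                .ev.sched_type = RTE_SCHED_TYPE_ATOMIC,
                .ev.event_type = RTE_EVENT_TYPE_TIMER,
                .state = RTE_EVENT_TIMER_NOT_ARMED,
                .timeout_ticks = 10,    /* expire 10 adapter ticks from now */
        };
        struct rte_event_timer *evtims[] = { &evtim };

        if (rte_event_timer_arm_burst(adapter, evtims, 1) != 1)
                return -rte_errno;

        /* ... later, before expiry, the timer can still be canceled */
        if (rte_event_timer_cancel_burst(adapter, evtims, 1) != 1)
                return -rte_errno;

        return 0;
}
#endif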