1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2017-2018 Intel Corporation.
10 #include <rte_memzone.h>
11 #include <rte_errno.h>
12 #include <rte_malloc.h>
13 #include <rte_mempool.h>
14 #include <rte_common.h>
15 #include <rte_timer.h>
16 #include <rte_service_component.h>
17 #include <rte_telemetry.h>
19 #include "event_timer_adapter_pmd.h"
20 #include "eventdev_pmd.h"
21 #include "rte_event_timer_adapter.h"
22 #include "rte_eventdev.h"
23 #include "eventdev_trace.h"
/* Buffer size for per-adapter shared-data memzone names. */
25 #define DATA_MZ_NAME_MAX_LEN 64
/* Memzone name template; %d is the adapter id, enabling lookup by id
 * from secondary processes.
 */
26 #define DATA_MZ_NAME_FORMAT "rte_event_timer_adapter_data_%d"
/* Separate log types for general, buffer, and service-loop messages so
 * each can be enabled independently at runtime.
 */
28 RTE_LOG_REGISTER_SUFFIX(evtim_logtype, adapter.timer, NOTICE);
29 RTE_LOG_REGISTER_SUFFIX(evtim_buffer_logtype, adapter.timer, NOTICE);
30 RTE_LOG_REGISTER_SUFFIX(evtim_svc_logtype, adapter.timer.svc, NOTICE);
/* Lazily-allocated array of RTE_EVENT_TIMER_ADAPTER_NUM_MAX adapters,
 * indexed by adapter id (allocated in create_ext()/lookup()).
 */
32 static struct rte_event_timer_adapter *adapters;
/* Forward declaration of the software fallback ops (defined below). */
34 static const struct event_timer_adapter_ops swtim_ops;
/* Core log macro: prefixes every message with the calling function name
 * and source line, and appends a newline.
 */
36 #define EVTIM_LOG(level, logtype, ...) \
37 rte_log(RTE_LOG_ ## level, logtype, \
38 RTE_FMT("EVTIMER: %s() line %u: " RTE_FMT_HEAD(__VA_ARGS__,) \
39 "\n", __func__, __LINE__, RTE_FMT_TAIL(__VA_ARGS__,)))
41 #define EVTIM_LOG_ERR(...) EVTIM_LOG(ERR, evtim_logtype, __VA_ARGS__)
/* Debug variants are real log calls only in debug builds... */
43 #ifdef RTE_LIBRTE_EVENTDEV_DEBUG
44 #define EVTIM_LOG_DBG(...) \
45 EVTIM_LOG(DEBUG, evtim_logtype, __VA_ARGS__)
46 #define EVTIM_BUF_LOG_DBG(...) \
47 EVTIM_LOG(DEBUG, evtim_buffer_logtype, __VA_ARGS__)
48 #define EVTIM_SVC_LOG_DBG(...) \
49 EVTIM_LOG(DEBUG, evtim_svc_logtype, __VA_ARGS__)
/* ...and compile to no-ops otherwise (the matching #else is elided in
 * this excerpt).
 */
51 #define EVTIM_LOG_DBG(...) (void)0
52 #define EVTIM_BUF_LOG_DBG(...) (void)0
53 #define EVTIM_SVC_LOG_DBG(...) (void)0
/* Default port-configuration callback used by rte_event_timer_adapter_create().
 * Adds one extra event port to the adapter's event device for the adapter's
 * own use: the device is stopped, reconfigured with nb_event_ports + 1, the
 * new port is set up with the device's default port config, and the device is
 * restarted if it was running before.  The new port id is returned through
 * *event_port_id.  (Local declarations and several error paths are elided in
 * this excerpt.)
 */
57 default_port_conf_cb(uint16_t id, uint8_t event_dev_id, uint8_t *event_port_id,
60 struct rte_event_timer_adapter *adapter;
61 struct rte_eventdev *dev;
62 struct rte_event_dev_config dev_conf;
63 struct rte_event_port_conf *port_conf, def_port_conf = {0};
69 RTE_SET_USED(event_dev_id);
/* Resolve the adapter and its underlying event device from the id. */
71 adapter = &adapters[id];
72 dev = &rte_eventdevs[adapter->data->event_dev_id];
73 dev_id = dev->data->dev_id;
74 dev_conf = dev->data->dev_conf;
/* Remember run state so it can be restored after reconfiguration. */
76 started = dev->data->dev_started;
78 rte_event_dev_stop(dev_id);
/* The new port takes the next index beyond the current port count. */
80 port_id = dev_conf.nb_event_ports;
81 dev_conf.nb_event_ports += 1;
82 ret = rte_event_dev_configure(dev_id, &dev_conf);
84 EVTIM_LOG_ERR("failed to configure event dev %u\n", dev_id);
/* On configure failure, attempt to restart the device if it was
 * previously running (surrounding control flow elided).
 */
86 if (rte_event_dev_start(dev_id))
95 port_conf = &def_port_conf;
96 ret = rte_event_port_default_conf_get(dev_id, port_id,
102 ret = rte_event_port_setup(dev_id, port_id, port_conf);
104 EVTIM_LOG_ERR("failed to setup event port %u on event dev %u\n",
109 *event_port_id = port_id;
/* Restart only if the device was started when we entered (the guard
 * condition is elided in this excerpt).
 */
112 ret = rte_event_dev_start(dev_id);
/* Create an adapter using the default port-configuration callback; thin
 * wrapper around rte_event_timer_adapter_create_ext().
 */
117 struct rte_event_timer_adapter *
118 rte_event_timer_adapter_create(const struct rte_event_timer_adapter_conf *conf)
120 return rte_event_timer_adapter_create_ext(conf, default_port_conf_cb,
/* Create an event timer adapter with a caller-supplied port-configuration
 * callback.  Validates the eventdev and adapter ids, reserves a memzone for
 * the shared data area (visible to secondary processes), queries the PMD for
 * capabilities/ops, invokes conf_cb when the PMD lacks an internal port, and
 * runs the driver init hook.  Returns the adapter, or NULL with rte_errno set
 * (several error paths are elided in this excerpt).
 */
124 struct rte_event_timer_adapter *
125 rte_event_timer_adapter_create_ext(
126 const struct rte_event_timer_adapter_conf *conf,
127 rte_event_timer_adapter_port_conf_cb_t conf_cb,
131 struct rte_event_timer_adapter *adapter;
132 const struct rte_memzone *mz;
133 char mz_name[DATA_MZ_NAME_MAX_LEN];
135 struct rte_eventdev *dev;
/* First call: lazily allocate the process-local adapter array. */
137 if (adapters == NULL) {
138 adapters = rte_zmalloc("Eventdev",
139 sizeof(struct rte_event_timer_adapter) *
140 RTE_EVENT_TIMER_ADAPTER_NUM_MAX,
141 RTE_CACHE_LINE_SIZE);
142 if (adapters == NULL) {
153 /* Check eventdev ID */
154 if (!rte_event_pmd_is_valid_dev(conf->event_dev_id)) {
158 dev = &rte_eventdevs[conf->event_dev_id];
160 adapter_id = conf->timer_adapter_id;
162 /* Check that adapter_id is in range */
163 if (adapter_id >= RTE_EVENT_TIMER_ADAPTER_NUM_MAX) {
168 /* Check adapter ID not already allocated */
169 adapter = &adapters[adapter_id];
170 if (adapter->allocated) {
175 /* Create shared data area. */
176 n = snprintf(mz_name, sizeof(mz_name), DATA_MZ_NAME_FORMAT, adapter_id);
177 if (n >= (int)sizeof(mz_name)) {
181 mz = rte_memzone_reserve(mz_name,
182 sizeof(struct rte_event_timer_adapter_data),
185 /* rte_errno set by rte_memzone_reserve */
188 adapter->data = mz->addr;
189 memset(adapter->data, 0, sizeof(struct rte_event_timer_adapter_data));
191 adapter->data->mz = mz;
192 adapter->data->event_dev_id = conf->event_dev_id;
193 adapter->data->id = adapter_id;
194 adapter->data->socket_id = conf->socket_id;
195 adapter->data->conf = *conf; /* copy conf structure */
197 /* Query eventdev PMD for timer adapter capabilities and ops */
198 ret = dev->dev_ops->timer_adapter_caps_get(dev,
199 adapter->data->conf.flags,
200 &adapter->data->caps,
/* Without an internal port, the adapter needs a dedicated event port;
 * delegate its creation to the callback (default or user-supplied).
 */
207 if (!(adapter->data->caps &
208 RTE_EVENT_TIMER_ADAPTER_CAP_INTERNAL_PORT)) {
209 FUNC_PTR_OR_NULL_RET_WITH_ERRNO(conf_cb, EINVAL);
210 ret = conf_cb(adapter->data->id, adapter->data->event_dev_id,
211 &adapter->data->event_port_id, conf_arg);
218 /* If eventdev PMD did not provide ops, use default software
221 if (adapter->ops == NULL)
222 adapter->ops = &swtim_ops;
224 /* Allow driver to do some setup */
225 FUNC_PTR_OR_NULL_RET_WITH_ERRNO(adapter->ops->init, ENOTSUP);
226 ret = adapter->ops->init(adapter);
232 /* Set fast-path function pointers */
233 adapter->arm_burst = adapter->ops->arm_burst;
234 adapter->arm_tmo_tick_burst = adapter->ops->arm_tmo_tick_burst;
235 adapter->cancel_burst = adapter->ops->cancel_burst;
237 adapter->allocated = 1;
239 rte_eventdev_trace_timer_adapter_create(adapter_id, adapter, conf,
/* Error-unwind path: release the shared data memzone (label elided). */
244 rte_memzone_free(adapter->data->mz);
/* Fill *adapter_info: the driver's get_info hook (optional) sets
 * driver-specific fields first, then the common fields (conf, event port id,
 * caps) are copied from the shared data area.
 */
249 rte_event_timer_adapter_get_info(const struct rte_event_timer_adapter *adapter,
250 struct rte_event_timer_adapter_info *adapter_info)
252 ADAPTER_VALID_OR_ERR_RET(adapter, -EINVAL);
254 if (adapter->ops->get_info)
255 /* let driver set values it knows */
256 adapter->ops->get_info(adapter, adapter_info);
258 /* Set common values */
259 adapter_info->conf = adapter->data->conf;
260 adapter_info->event_dev_port_id = adapter->data->event_port_id;
261 adapter_info->caps = adapter->data->caps;
/* Start the adapter via the driver hook; rejects a double start (error
 * return on the already-started path is elided in this excerpt).
 */
267 rte_event_timer_adapter_start(const struct rte_event_timer_adapter *adapter)
271 ADAPTER_VALID_OR_ERR_RET(adapter, -EINVAL);
272 FUNC_PTR_OR_ERR_RET(adapter->ops->start, -EINVAL);
274 if (adapter->data->started) {
275 EVTIM_LOG_ERR("event timer adapter %"PRIu8" already started",
280 ret = adapter->ops->start(adapter);
/* Mark started only after the driver hook succeeds. */
284 adapter->data->started = 1;
285 rte_eventdev_trace_timer_adapter_start(adapter);
/* Stop the adapter via the driver hook; rejects a double stop (error
 * return on the already-stopped path is elided in this excerpt).
 */
290 rte_event_timer_adapter_stop(const struct rte_event_timer_adapter *adapter)
294 ADAPTER_VALID_OR_ERR_RET(adapter, -EINVAL);
295 FUNC_PTR_OR_ERR_RET(adapter->ops->stop, -EINVAL);
297 if (adapter->data->started == 0) {
298 EVTIM_LOG_ERR("event timer adapter %"PRIu8" already stopped",
303 ret = adapter->ops->stop(adapter);
/* Clear the started flag only after the driver hook succeeds. */
307 adapter->data->started = 0;
308 rte_eventdev_trace_timer_adapter_stop(adapter);
/* Look up an adapter by id, typically from a secondary process: if it is not
 * already loaded in this process, find its shared data memzone by name,
 * re-query the PMD for caps/ops, and reconstruct the fast-path pointers.
 * Returns NULL if the memzone does not exist (error paths elided).
 */
312 struct rte_event_timer_adapter *
313 rte_event_timer_adapter_lookup(uint16_t adapter_id)
315 char name[DATA_MZ_NAME_MAX_LEN];
316 const struct rte_memzone *mz;
317 struct rte_event_timer_adapter_data *data;
318 struct rte_event_timer_adapter *adapter;
320 struct rte_eventdev *dev;
/* Lazily allocate the process-local adapter array, as in create. */
322 if (adapters == NULL) {
323 adapters = rte_zmalloc("Eventdev",
324 sizeof(struct rte_event_timer_adapter) *
325 RTE_EVENT_TIMER_ADAPTER_NUM_MAX,
326 RTE_CACHE_LINE_SIZE);
327 if (adapters == NULL) {
333 if (adapters[adapter_id].allocated)
334 return &adapters[adapter_id]; /* Adapter is already loaded */
/* Find the shared data area created by the primary process. */
336 snprintf(name, DATA_MZ_NAME_MAX_LEN, DATA_MZ_NAME_FORMAT, adapter_id);
337 mz = rte_memzone_lookup(name);
345 adapter = &adapters[data->id];
346 adapter->data = data;
348 dev = &rte_eventdevs[adapter->data->event_dev_id];
350 /* Query eventdev PMD for timer adapter capabilities and ops */
351 ret = dev->dev_ops->timer_adapter_caps_get(dev,
352 adapter->data->conf.flags,
353 &adapter->data->caps,
360 /* If eventdev PMD did not provide ops, use default software
363 if (adapter->ops == NULL)
364 adapter->ops = &swtim_ops;
366 /* Set fast-path function pointers */
367 adapter->arm_burst = adapter->ops->arm_burst;
368 adapter->arm_tmo_tick_burst = adapter->ops->arm_tmo_tick_burst;
369 adapter->cancel_burst = adapter->ops->cancel_burst;
371 adapter->allocated = 1;
/* Free an adapter: refuses if still started, runs the driver uninit hook,
 * releases the shared data memzone, and clears the slot.  The trailing loop
 * checks whether any adapter is still allocated — presumably so the global
 * adapters array can be freed when none remain (the free itself is elided
 * in this excerpt).
 */
377 rte_event_timer_adapter_free(struct rte_event_timer_adapter *adapter)
381 ADAPTER_VALID_OR_ERR_RET(adapter, -EINVAL);
382 FUNC_PTR_OR_ERR_RET(adapter->ops->uninit, -EINVAL);
384 if (adapter->data->started == 1) {
385 EVTIM_LOG_ERR("event timer adapter %"PRIu8" must be stopped "
386 "before freeing", adapter->data->id);
390 /* free impl priv data */
391 ret = adapter->ops->uninit(adapter);
395 /* free shared data area */
396 ret = rte_memzone_free(adapter->data->mz);
/* Clear the slot so the id can be reused. */
400 adapter->data = NULL;
401 adapter->allocated = 0;
404 for (i = 0; i < RTE_EVENT_TIMER_ADAPTER_NUM_MAX; i++)
405 if (adapters[i].allocated)
406 ret = adapters[i].allocated;
413 rte_eventdev_trace_timer_adapter_free(adapter);
/* Return the adapter's service id through *service_id when the adapter uses
 * a service core; -ESRCH when no service was initialized (e.g. the PMD has
 * an internal port).
 */
418 rte_event_timer_adapter_service_id_get(struct rte_event_timer_adapter *adapter,
419 uint32_t *service_id)
421 ADAPTER_VALID_OR_ERR_RET(adapter, -EINVAL);
423 if (adapter->data->service_inited && service_id != NULL)
424 *service_id = adapter->data->service_id;
426 return adapter->data->service_inited ? 0 : -ESRCH;
/* Retrieve adapter statistics via the driver's stats_get hook. */
430 rte_event_timer_adapter_stats_get(struct rte_event_timer_adapter *adapter,
431 struct rte_event_timer_adapter_stats *stats)
433 ADAPTER_VALID_OR_ERR_RET(adapter, -EINVAL);
434 FUNC_PTR_OR_ERR_RET(adapter->ops->stats_get, -EINVAL);
438 return adapter->ops->stats_get(adapter, stats);
/* Reset adapter statistics via the driver's stats_reset hook. */
442 rte_event_timer_adapter_stats_reset(struct rte_event_timer_adapter *adapter)
444 ADAPTER_VALID_OR_ERR_RET(adapter, -EINVAL);
445 FUNC_PTR_OR_ERR_RET(adapter->ops->stats_reset, -EINVAL);
446 return adapter->ops->stats_reset(adapter);
450 * Software event timer adapter buffer helper functions
453 #define NSECPERSEC 1E9
455 /* Optimizations used to index into the buffer require that the buffer size
/* EVENT_BUFFER_SZ must be a power of two so indices can wrap with a
 * bitwise AND against EVENT_BUFFER_MASK instead of a modulus.
 */
458 #define EVENT_BUFFER_SZ 4096
459 #define EVENT_BUFFER_BATCHSZ 32
460 #define EVENT_BUFFER_MASK (EVENT_BUFFER_SZ - 1)
/* Batch size for recycling expired rte_timer objects to the mempool. */
462 #define EXP_TIM_BUF_SZ 128
/* Ring of pending expiry events; head/tail are free-running counters
 * (declarations elided in this excerpt) masked on access.
 */
464 struct event_buffer {
467 struct rte_event events[EVENT_BUFFER_SZ];
468 } __rte_cache_aligned;
/* True when the free-running head has advanced a full buffer past tail. */
471 event_buffer_full(struct event_buffer *bufp)
473 return (bufp->head - bufp->tail) == EVENT_BUFFER_SZ;
/* True when at least one enqueue batch worth of events is buffered. */
477 event_buffer_batch_ready(struct event_buffer *bufp)
479 return (bufp->head - bufp->tail) >= EVENT_BUFFER_BATCHSZ;
/* Reset the buffer to empty and zero its event storage. */
483 event_buffer_init(struct event_buffer *bufp)
485 bufp->head = bufp->tail = 0;
486 memset(&bufp->events, 0, sizeof(struct rte_event) * EVENT_BUFFER_SZ);
/* Copy one event into the buffer at the head slot; fails when the buffer
 * is full (the failure return is elided in this excerpt).  The head
 * increment itself follows the copy (also elided).
 */
490 event_buffer_add(struct event_buffer *bufp, struct rte_event *eventp)
493 struct rte_event *buf_eventp;
495 if (event_buffer_full(bufp))
498 /* Instead of modulus, bitwise AND with mask to get head_idx. */
499 head_idx = bufp->head & EVENT_BUFFER_MASK;
500 buf_eventp = &bufp->events[head_idx];
501 rte_memcpy(buf_eventp, eventp, sizeof(struct rte_event));
503 /* Wrap automatically when overflow occurs. */
/* Enqueue up to one batch of buffered events to (dev_id, port_id).  Only the
 * contiguous run starting at tail is attempted, so a wrapped buffer takes two
 * flushes to drain fully.  On a partial enqueue: EINVAL means the event at
 * the failure point is invalid (counted via *nb_events_inv, handling elided),
 * ENOSPC means the port is backpressured and the remainder stays buffered.
 */
510 event_buffer_flush(struct event_buffer *bufp, uint8_t dev_id, uint8_t port_id,
511 uint16_t *nb_events_flushed,
512 uint16_t *nb_events_inv)
514 struct rte_event *events = bufp->events;
515 size_t head_idx, tail_idx;
518 /* Instead of modulus, bitwise AND with mask to get index. */
519 head_idx = bufp->head & EVENT_BUFFER_MASK;
520 tail_idx = bufp->tail & EVENT_BUFFER_MASK;
522 RTE_ASSERT(head_idx < EVENT_BUFFER_SZ && tail_idx < EVENT_BUFFER_SZ);
524 /* Determine the largest contiguous run we can attempt to enqueue to the
/* head_idx == tail_idx is ambiguous: full (drain to end of array) or
 * empty (nothing to flush — the empty branch is elided here).
 */
527 if (head_idx > tail_idx)
528 n = head_idx - tail_idx;
529 else if (head_idx < tail_idx)
530 n = EVENT_BUFFER_SZ - tail_idx;
531 else if (event_buffer_full(bufp))
532 n = EVENT_BUFFER_SZ - tail_idx;
534 *nb_events_flushed = 0;
538 n = RTE_MIN(EVENT_BUFFER_BATCHSZ, n);
541 *nb_events_flushed = rte_event_enqueue_burst(dev_id, port_id,
542 &events[tail_idx], n);
543 if (*nb_events_flushed != n) {
544 if (rte_errno == EINVAL) {
545 EVTIM_LOG_ERR("failed to enqueue invalid event - "
548 } else if (rte_errno == ENOSPC)
552 if (*nb_events_flushed > 0)
553 EVTIM_BUF_LOG_DBG("enqueued %"PRIu16" timer events to event "
554 "device", *nb_events_flushed);
/* Advance tail past both delivered and discarded-invalid events. */
556 bufp->tail = bufp->tail + *nb_events_flushed + *nb_events_inv;
560 * Software event timer adapter implementation
/* Private state of the software timer adapter (the struct header and the
 * service_id member are elided in this excerpt).
 */
563 /* Identifier of service executing timer management logic. */
565 /* The cycle count at which the adapter should next tick */
566 uint64_t next_tick_cycles;
567 /* The tick resolution used by adapter instance. May have been
568 * adjusted from what user requested
570 uint64_t timer_tick_ns;
571 /* Maximum timeout in nanoseconds allowed by adapter instance. */
573 /* Buffered timer expiry events to be enqueued to an event device. */
574 struct event_buffer buffer;
576 struct rte_event_timer_adapter_stats stats;
577 /* Mempool of timer objects */
578 struct rte_mempool *tim_pool;
579 /* Back pointer for convenience */
580 struct rte_event_timer_adapter *adapter;
581 /* Identifier of timer data instance */
582 uint32_t timer_data_id;
583 /* Track which cores have actually armed a timer */
/* Per-lcore "in use" flag, cache-aligned to avoid false sharing between
 * lcores arming timers concurrently (flag member elided).
 */
586 } __rte_cache_aligned in_use[RTE_MAX_LCORE];
587 /* Track which cores' timer lists should be polled */
588 unsigned int poll_lcores[RTE_MAX_LCORE];
589 /* The number of lists that should be polled */
591 /* Timers which have expired and can be returned to a mempool */
592 struct rte_timer *expired_timers[EXP_TIM_BUF_SZ];
593 /* The number of timers that can be returned to a mempool */
594 size_t n_expired_timers;
/* Accessor for the software adapter's private data. */
597 static inline struct swtim *
598 swtim_pmd_priv(const struct rte_event_timer_adapter *adapter)
600 return adapter->data->adapter_priv;
/* rte_timer expiry callback for a single event timer.  Buffers the timer's
 * event for later enqueue; if the buffer is full, re-arms the rte_timer with
 * an immediate expiry so it is retried on the next adapter tick.  On success
 * the expired rte_timer object is staged for bulk return to the mempool, and
 * the event timer transitions to NOT_ARMED.  (Several lines, including the
 * success/failure branch structure, are elided in this excerpt.)
 */
604 swtim_callback(struct rte_timer *tim)
606 struct rte_event_timer *evtim = tim->arg;
607 struct rte_event_timer_adapter *adapter;
608 unsigned int lcore = rte_lcore_id();
610 uint16_t nb_evs_flushed = 0;
611 uint16_t nb_evs_invalid = 0;
/* The adapter pointer was stashed in impl_opaque[1] at arm time. */
616 opaque = evtim->impl_opaque[1];
617 adapter = (struct rte_event_timer_adapter *)(uintptr_t)opaque;
618 sw = swtim_pmd_priv(adapter);
620 ret = event_buffer_add(&sw->buffer, &evtim->ev);
622 /* If event buffer is full, put timer back in list with
623 * immediate expiry value, so that we process it again on the
626 ret = rte_timer_alt_reset(sw->timer_data_id, tim, 0, SINGLE,
629 EVTIM_LOG_DBG("event buffer full, failed to reset "
630 "timer with immediate expiry value");
632 sw->stats.evtim_retry_count++;
633 EVTIM_LOG_DBG("event buffer full, resetting rte_timer "
634 "with immediate expiry value");
/* First retry-reset on this lcore: publish the lcore into the
 * poll list so the service will manage its timer list.  Count is
 * bumped before the slot store; the service reads both relaxed.
 */
637 if (unlikely(sw->in_use[lcore].v == 0)) {
638 sw->in_use[lcore].v = 1;
639 n_lcores = __atomic_fetch_add(&sw->n_poll_lcores, 1,
641 __atomic_store_n(&sw->poll_lcores[n_lcores], lcore,
645 EVTIM_BUF_LOG_DBG("buffered an event timer expiry event");
647 /* Empty the buffer here, if necessary, to free older expired
/* Stage buffer full: bulk-return staged timers before appending. */
650 if (unlikely(sw->n_expired_timers == EXP_TIM_BUF_SZ)) {
651 rte_mempool_put_bulk(sw->tim_pool,
652 (void **)sw->expired_timers,
653 sw->n_expired_timers);
654 sw->n_expired_timers = 0;
657 sw->expired_timers[sw->n_expired_timers++] = tim;
658 sw->stats.evtim_exp_count++;
/* RELEASE pairs with the ACQUIRE load in arm/cancel so opaque data
 * writes are visible before the state change (ordering arg elided).
 */
660 __atomic_store_n(&evtim->state, RTE_EVENT_TIMER_NOT_ARMED,
664 if (event_buffer_batch_ready(&sw->buffer)) {
665 event_buffer_flush(&sw->buffer,
666 adapter->data->event_dev_id,
667 adapter->data->event_port_id,
671 sw->stats.ev_enq_count += nb_evs_flushed;
672 sw->stats.ev_inv_count += nb_evs_invalid;
/* Convert an event timer's timeout (in adapter ticks) to TSC cycles.
 * NSECPERSEC is a double (1E9), so the arithmetic is done in floating
 * point before truncation back to uint64_t.
 */
676 static __rte_always_inline uint64_t
677 get_timeout_cycles(struct rte_event_timer *evtim,
678 const struct rte_event_timer_adapter *adapter)
680 struct swtim *sw = swtim_pmd_priv(adapter);
681 uint64_t timeout_ns = evtim->timeout_ticks * sw->timer_tick_ns;
682 return timeout_ns * rte_get_timer_hz() / NSECPERSEC;
685 /* This function returns true if one or more (adapter) ticks have occurred since
686 * the last time it was called.
689 swtim_did_tick(struct swtim *sw)
691 uint64_t cycles_per_adapter_tick, start_cycles;
692 uint64_t *next_tick_cyclesp;
694 next_tick_cyclesp = &sw->next_tick_cycles;
695 cycles_per_adapter_tick = sw->timer_tick_ns *
696 (rte_get_timer_hz() / NSECPERSEC);
697 start_cycles = rte_get_timer_cycles();
699 /* Note: initially, *next_tick_cyclesp == 0, so the clause below will
700 * execute, and set things going.
703 if (start_cycles >= *next_tick_cyclesp) {
704 /* Snap the current cycle count to the preceding adapter tick
/* Alignment keeps tick boundaries stable even if the service runs
 * late, so subsequent ticks don't drift.
 */
707 start_cycles -= start_cycles % cycles_per_adapter_tick;
708 *next_tick_cyclesp = start_cycles + cycles_per_adapter_tick;
716 /* Check that event timer timeout value is in range */
/* Returns -1 when the timeout exceeds max_tmo_ns (too late) and -2 when it
 * is below one adapter tick (too early); the return statements themselves
 * are elided in this excerpt.
 */
717 static __rte_always_inline int
718 check_timeout(struct rte_event_timer *evtim,
719 const struct rte_event_timer_adapter *adapter)
722 struct swtim *sw = swtim_pmd_priv(adapter);
724 tmo_nsec = evtim->timeout_ticks * sw->timer_tick_ns;
725 if (tmo_nsec > sw->max_tmo_ns)
727 if (tmo_nsec < sw->timer_tick_ns)
733 /* Check that event timer event queue sched type matches destination event queue
/* Queries the destination queue's schedule-type attribute and compares it
 * with the event's sched_type; an ALL_TYPES queue presumably also passes
 * (the second half of the condition is elided in this excerpt).
 */
736 static __rte_always_inline int
737 check_destination_event_queue(struct rte_event_timer *evtim,
738 const struct rte_event_timer_adapter *adapter)
743 ret = rte_event_queue_attr_get(adapter->data->event_dev_id,
745 RTE_EVENT_QUEUE_ATTR_SCHEDULE_TYPE,
748 if ((ret == 0 && evtim->ev.sched_type == sched_type) ||
/* Service-core loop body for the software adapter.  Once per adapter tick it
 * runs timer management (which fires swtim_callback for expired timers),
 * returns recycled timer objects to the mempool, flushes the event buffer,
 * and updates statistics.  It then calls rte_event_maintain() for event
 * devices that require periodic maintenance.
 */
756 swtim_service_func(void *arg)
758 struct rte_event_timer_adapter *adapter = arg;
759 struct swtim *sw = swtim_pmd_priv(adapter);
760 uint16_t nb_evs_flushed = 0;
761 uint16_t nb_evs_invalid = 0;
763 if (swtim_did_tick(sw)) {
/* Runs expired-timer callbacks for the poll-listed lcores
 * (remaining arguments elided in this excerpt).
 */
764 rte_timer_alt_manage(sw->timer_data_id,
769 /* Return expired timer objects back to mempool */
770 rte_mempool_put_bulk(sw->tim_pool, (void **)sw->expired_timers,
771 sw->n_expired_timers);
772 sw->n_expired_timers = 0;
774 event_buffer_flush(&sw->buffer,
775 adapter->data->event_dev_id,
776 adapter->data->event_port_id,
780 sw->stats.ev_enq_count += nb_evs_flushed;
781 sw->stats.ev_inv_count += nb_evs_invalid;
782 sw->stats.adapter_tick_count++;
785 rte_event_maintain(adapter->data->event_dev_id,
786 adapter->data->event_port_id, 0);
791 /* The adapter initialization function rounds the mempool size up to the next
792 * power of 2, so we can take the difference between that value and what the
793 * user requested, and use the space for caches. This avoids a scenario where a
794 * user can't arm the number of timers the adapter was configured with because
795 * mempool objects have been lost to caches.
797 * nb_actual should always be a power of 2, so we can iterate over the powers
798 * of 2 to see what the largest cache size we can use is.
801 compute_msg_mempool_cache_size(uint64_t nb_requested, uint64_t nb_actual)
/* The enclosing loop over candidate power-of-two sizes and the final
 * return are elided in this excerpt; the condition keeps total cached
 * objects within the surplus and within mempool limits.
 */
810 if (RTE_MAX_LCORE * size < (int)(nb_actual - nb_requested) &&
811 size < RTE_MEMPOOL_CACHE_MAX_SIZE &&
812 size <= nb_actual / 1.5)
/* Driver init hook for the software adapter.  Allocates the private struct
 * swtim on the adapter's socket, creates the rte_timer object mempool sized
 * to a power of two minus one (surplus donated to per-lcore caches), sets up
 * the timer subsystem and a timer-data instance, and registers the service
 * component that drives swtim_service_func.  (Several error paths and the
 * final return are elided in this excerpt.)
 */
822 swtim_init(struct rte_event_timer_adapter *adapter)
827 struct rte_service_spec service;
829 /* Allocate storage for private data area */
830 #define SWTIM_NAMESIZE 32
831 char swtim_name[SWTIM_NAMESIZE];
832 snprintf(swtim_name, SWTIM_NAMESIZE, "swtim_%"PRIu8,
834 sw = rte_zmalloc_socket(swtim_name, sizeof(*sw), RTE_CACHE_LINE_SIZE,
835 adapter->data->socket_id);
837 EVTIM_LOG_ERR("failed to allocate space for private data");
842 /* Connect storage to adapter instance */
843 adapter->data->adapter_priv = sw;
844 sw->adapter = adapter;
/* Cache conf values used on the hot path. */
846 sw->timer_tick_ns = adapter->data->conf.timer_tick_ns;
847 sw->max_tmo_ns = adapter->data->conf.max_tmo_ns;
849 /* Create a timer pool */
850 char pool_name[SWTIM_NAMESIZE];
851 snprintf(pool_name, SWTIM_NAMESIZE, "swtim_pool_%"PRIu8,
853 /* Optimal mempool size is a power of 2 minus one */
854 uint64_t nb_timers = rte_align64pow2(adapter->data->conf.nb_timers);
855 int pool_size = nb_timers - 1;
856 int cache_size = compute_msg_mempool_cache_size(
857 adapter->data->conf.nb_timers, nb_timers);
858 flags = 0; /* pool is multi-producer, multi-consumer */
859 sw->tim_pool = rte_mempool_create(pool_name, pool_size,
860 sizeof(struct rte_timer), cache_size, 0, NULL, NULL,
861 NULL, NULL, adapter->data->socket_id, flags);
862 if (sw->tim_pool == NULL) {
863 EVTIM_LOG_ERR("failed to create timer object mempool");
868 /* Initialize the variables that track in-use timer lists */
869 for (i = 0; i < RTE_MAX_LCORE; i++)
872 /* Initialize the timer subsystem and allocate timer data instance */
873 ret = rte_timer_subsystem_init();
/* -EALREADY just means another component initialized it first. */
875 if (ret != -EALREADY) {
876 EVTIM_LOG_ERR("failed to initialize timer subsystem");
882 ret = rte_timer_data_alloc(&sw->timer_data_id);
884 EVTIM_LOG_ERR("failed to allocate timer data instance");
889 /* Initialize timer event buffer */
890 event_buffer_init(&sw->buffer);
892 sw->adapter = adapter;
894 /* Register a service component to run adapter logic */
895 memset(&service, 0, sizeof(service));
896 snprintf(service.name, RTE_SERVICE_NAME_MAX,
897 "swtim_svc_%"PRIu8, adapter->data->id);
898 service.socket_id = adapter->data->socket_id;
899 service.callback = swtim_service_func;
900 service.callback_userdata = adapter;
/* Service is NOT multi-thread safe: only one core may run it. */
901 service.capabilities &= ~(RTE_SERVICE_CAP_MT_SAFE);
902 ret = rte_service_component_register(&service, &sw->service_id);
904 EVTIM_LOG_ERR("failed to register service %s with id %"PRIu32
905 ": err = %d", service.name, sw->service_id,
912 EVTIM_LOG_DBG("registered service %s with id %"PRIu32, service.name,
/* Expose the service id so callers can map a service core to it. */
915 adapter->data->service_id = sw->service_id;
916 adapter->data->service_inited = 1;
/* Error-unwind path: release the timer mempool (label elided). */
920 rte_mempool_free(sw->tim_pool);
/* rte_timer_stop_all() walk callback: return one timer object to the pool. */
927 swtim_free_tim(struct rte_timer *tim, void *arg)
929 struct swtim *sw = arg;
931 rte_mempool_put(sw->tim_pool, tim);
934 /* Traverse the list of outstanding timers and put them back in the mempool
935 * before freeing the adapter to avoid leaking the memory.
938 swtim_uninit(struct rte_event_timer_adapter *adapter)
941 struct swtim *sw = swtim_pmd_priv(adapter);
943 /* Free outstanding timers */
/* Stops every pending timer, returning each via swtim_free_tim (the
 * remaining arguments are elided in this excerpt).
 */
944 rte_timer_stop_all(sw->timer_data_id,
950 ret = rte_service_component_unregister(sw->service_id);
952 EVTIM_LOG_ERR("failed to unregister service component");
956 rte_mempool_free(sw->tim_pool);
/* Detach private data; the struct swtim itself is freed elsewhere in
 * the elided lines.
 */
958 adapter->data->adapter_priv = NULL;
/* Count how many service lcores are currently mapped to service_id
 * (the increment/return lines are elided in this excerpt).
 */
963 static inline int32_t
964 get_mapped_count_for_service(uint32_t service_id)
966 int32_t core_count, i, mapped_count = 0;
967 uint32_t lcore_arr[RTE_MAX_LCORE];
969 core_count = rte_service_lcore_list(lcore_arr, RTE_MAX_LCORE);
971 for (i = 0; i < core_count; i++)
972 if (rte_service_map_lcore_get(service_id, lcore_arr[i]) == 1)
/* Driver start hook: require exactly one service core mapped (the service
 * is not MT safe), then enable the service component run state.
 */
979 swtim_start(const struct rte_event_timer_adapter *adapter)
982 struct swtim *sw = swtim_pmd_priv(adapter);
984 /* Mapping the service to more than one service core can introduce
985 * delays while one thread is waiting to acquire a lock, so only allow
986 * one core to be mapped to the service.
988 * Note: the service could be modified such that it spreads cores to
989 * poll over multiple service instances.
991 mapped_count = get_mapped_count_for_service(sw->service_id);
/* -ENOENT: no core mapped yet; -ENOTSUP: more than one mapped. */
993 if (mapped_count != 1)
994 return mapped_count < 1 ? -ENOENT : -ENOTSUP;
996 return rte_service_component_runstate_set(sw->service_id, 1);
/* Driver stop hook: disable the service run state and busy-wait until the
 * service's final iteration has completed before returning.
 */
1000 swtim_stop(const struct rte_event_timer_adapter *adapter)
1003 struct swtim *sw = swtim_pmd_priv(adapter);
1005 ret = rte_service_component_runstate_set(sw->service_id, 0);
1009 /* Wait for the service to complete its final iteration */
1010 while (rte_service_may_be_active(sw->service_id))
/* Driver get_info hook: the minimum resolution equals the adapter tick. */
1017 swtim_get_info(const struct rte_event_timer_adapter *adapter,
1018 struct rte_event_timer_adapter_info *adapter_info)
1020 struct swtim *sw = swtim_pmd_priv(adapter);
1021 adapter_info->min_resolution_ns = sw->timer_tick_ns;
1022 adapter_info->max_tmo_ns = sw->max_tmo_ns;
/* Driver stats_get hook: copy out the accumulated counters. */
1026 swtim_stats_get(const struct rte_event_timer_adapter *adapter,
1027 struct rte_event_timer_adapter_stats *stats)
1029 struct swtim *sw = swtim_pmd_priv(adapter);
1030 *stats = sw->stats; /* structure copy */
/* Driver stats_reset hook: zero all counters. */
1035 swtim_stats_reset(const struct rte_event_timer_adapter *adapter)
1037 struct swtim *sw = swtim_pmd_priv(adapter);
1038 memset(&sw->stats, 0, sizeof(sw->stats));
/* Arm a burst of event timers.  For each timer: validate its state, its
 * timeout range, and the destination queue's sched type; then take an
 * rte_timer object from the mempool, stash back-pointers in impl_opaque,
 * and arm it in this lcore's timer list via rte_timer_alt_reset().  On a
 * mid-burst failure the unused rte_timer objects are returned to the pool.
 * Returns the number of timers successfully armed (final return and loop
 * break statements are elided in this excerpt).
 */
1043 __swtim_arm_burst(const struct rte_event_timer_adapter *adapter,
1044 struct rte_event_timer **evtims,
1048 struct swtim *sw = swtim_pmd_priv(adapter);
1049 uint32_t lcore_id = rte_lcore_id();
/* VLA sized by the burst length holds the bulk-allocated timers. */
1050 struct rte_timer *tim, *tims[nb_evtims];
1053 /* Timer list for this lcore is not in use. */
1054 uint16_t exp_state = 0;
1055 enum rte_event_timer_state n_state;
1057 #ifdef RTE_LIBRTE_EVENTDEV_DEBUG
1058 /* Check that the service is running. */
1059 if (rte_service_runstate_get(adapter->data->service_id) != 1) {
1065 /* Adjust lcore_id if non-EAL thread. Arbitrarily pick the timer list of
1066 * the highest lcore to insert such timers into
1068 if (lcore_id == LCORE_ID_ANY)
1069 lcore_id = RTE_MAX_LCORE - 1;
1071 /* If this is the first time we're arming an event timer on this lcore,
1072 * mark this lcore as "in use"; this will cause the service
1073 * function to process the timer list that corresponds to this lcore.
1074 * The atomic compare-and-swap operation can prevent the race condition
1075 * on in_use flag between multiple non-EAL threads.
1077 if (unlikely(__atomic_compare_exchange_n(&sw->in_use[lcore_id].v,
1079 __ATOMIC_RELAXED, __ATOMIC_RELAXED))) {
1080 EVTIM_LOG_DBG("Adding lcore id = %u to list of lcores to poll",
1082 n_lcores = __atomic_fetch_add(&sw->n_poll_lcores, 1,
1084 __atomic_store_n(&sw->poll_lcores[n_lcores], lcore_id,
/* Bulk-allocate one rte_timer per event timer up front. */
1088 ret = rte_mempool_get_bulk(sw->tim_pool, (void **)tims,
1095 for (i = 0; i < nb_evtims; i++) {
/* ACQUIRE pairs with the RELEASE state stores elsewhere so the
 * impl_opaque fields read below are coherent with the state.
 */
1096 n_state = __atomic_load_n(&evtims[i]->state, __ATOMIC_ACQUIRE);
1097 if (n_state == RTE_EVENT_TIMER_ARMED) {
1098 rte_errno = EALREADY;
1100 } else if (!(n_state == RTE_EVENT_TIMER_NOT_ARMED ||
1101 n_state == RTE_EVENT_TIMER_CANCELED)) {
1106 ret = check_timeout(evtims[i], adapter);
1107 if (unlikely(ret == -1)) {
1108 __atomic_store_n(&evtims[i]->state,
1109 RTE_EVENT_TIMER_ERROR_TOOLATE,
1113 } else if (unlikely(ret == -2)) {
1114 __atomic_store_n(&evtims[i]->state,
1115 RTE_EVENT_TIMER_ERROR_TOOEARLY,
1121 if (unlikely(check_destination_event_queue(evtims[i],
1123 __atomic_store_n(&evtims[i]->state,
1124 RTE_EVENT_TIMER_ERROR,
/* Link the rte_timer and the event timer both ways for the
 * expiry callback and for cancel.
 */
1131 rte_timer_init(tim);
1133 evtims[i]->impl_opaque[0] = (uintptr_t)tim;
1134 evtims[i]->impl_opaque[1] = (uintptr_t)adapter;
1136 cycles = get_timeout_cycles(evtims[i], adapter);
1137 ret = rte_timer_alt_reset(sw->timer_data_id, tim, cycles,
1138 SINGLE, lcore_id, NULL, evtims[i]);
1140 /* tim was in RUNNING or CONFIG state */
1141 __atomic_store_n(&evtims[i]->state,
1142 RTE_EVENT_TIMER_ERROR,
1147 EVTIM_LOG_DBG("armed an event timer");
1148 /* RELEASE ordering guarantees the adapter specific value
1149 * changes observed before the update of state.
1151 __atomic_store_n(&evtims[i]->state, RTE_EVENT_TIMER_ARMED,
/* Return the timers that were never armed to the mempool. */
1156 rte_mempool_put_bulk(sw->tim_pool,
1157 (void **)&tims[i], nb_evtims - i);
/* Public arm_burst entry point; delegates to the shared helper. */
1163 swtim_arm_burst(const struct rte_event_timer_adapter *adapter,
1164 struct rte_event_timer **evtims,
1167 return __swtim_arm_burst(adapter, evtims, nb_evtims);
/* Cancel a burst of armed event timers.  Each ARMED timer's rte_timer (from
 * impl_opaque[0]) is stopped via rte_timer_alt_stop(), returned to the pool,
 * and the event timer transitions to CANCELED.  Returns the number canceled
 * (final return and loop-break statements are elided in this excerpt).
 */
1171 swtim_cancel_burst(const struct rte_event_timer_adapter *adapter,
1172 struct rte_event_timer **evtims,
1176 struct rte_timer *timp;
1178 struct swtim *sw = swtim_pmd_priv(adapter);
1179 enum rte_event_timer_state n_state;
1181 #ifdef RTE_LIBRTE_EVENTDEV_DEBUG
1182 /* Check that the service is running. */
1183 if (rte_service_runstate_get(adapter->data->service_id) != 1) {
1189 for (i = 0; i < nb_evtims; i++) {
1190 /* Don't modify the event timer state in these cases */
1191 /* ACQUIRE ordering guarantees the access of implementation
1192 * specific opaque data under the correct state.
1194 n_state = __atomic_load_n(&evtims[i]->state, __ATOMIC_ACQUIRE);
1195 if (n_state == RTE_EVENT_TIMER_CANCELED) {
1196 rte_errno = EALREADY;
1198 } else if (n_state != RTE_EVENT_TIMER_ARMED) {
1203 opaque = evtims[i]->impl_opaque[0];
1204 timp = (struct rte_timer *)(uintptr_t)opaque;
1205 RTE_ASSERT(timp != NULL);
1207 ret = rte_timer_alt_stop(sw->timer_data_id, timp);
1209 /* Timer is running or being configured */
1214 rte_mempool_put(sw->tim_pool, (void **)timp);
1216 /* The RELEASE ordering here pairs with atomic ordering
1217 * to make sure the state update data observed between
1220 __atomic_store_n(&evtims[i]->state, RTE_EVENT_TIMER_CANCELED,
/* Arm a burst of timers that all share the same timeout: stamp each timer's
 * timeout_ticks with the common value, then reuse the regular arm path.
 */
1228 swtim_arm_tmo_tick_burst(const struct rte_event_timer_adapter *adapter,
1229 struct rte_event_timer **evtims,
1230 uint64_t timeout_ticks,
1235 for (i = 0; i < nb_evtims; i++)
1236 evtims[i]->timeout_ticks = timeout_ticks;
1238 return __swtim_arm_burst(adapter, evtims, nb_evtims);
/* Ops table for the software fallback adapter, installed when the eventdev
 * PMD provides no timer-adapter ops (the .init/.stop initializers are
 * elided in this excerpt).
 */
1241 static const struct event_timer_adapter_ops swtim_ops = {
1243 .uninit = swtim_uninit,
1244 .start = swtim_start,
1246 .get_info = swtim_get_info,
1247 .stats_get = swtim_stats_get,
1248 .stats_reset = swtim_stats_reset,
1249 .arm_burst = swtim_arm_burst,
1250 .arm_tmo_tick_burst = swtim_arm_tmo_tick_burst,
1251 .cancel_burst = swtim_cancel_burst,
/* Telemetry handler for /eventdev/ta_info.  Parses the adapter id from the
 * parameter string, validates it, and emits the adapter's configuration and
 * limits as a telemetry dictionary.
 */
1255 handle_ta_info(const char *cmd __rte_unused, const char *params,
1256 struct rte_tel_data *d)
1258 struct rte_event_timer_adapter_info adapter_info;
1259 struct rte_event_timer_adapter *adapter;
1260 uint16_t adapter_id;
/* Reject missing or non-numeric parameters before atoi(). */
1263 if (params == NULL || strlen(params) == 0 || !isdigit(*params))
1266 adapter_id = atoi(params);
1268 if (adapter_id >= RTE_EVENT_TIMER_ADAPTER_NUM_MAX) {
1269 EVTIM_LOG_ERR("Invalid timer adapter id %u", adapter_id);
1273 adapter = &adapters[adapter_id];
1275 ret = rte_event_timer_adapter_get_info(adapter, &adapter_info);
1277 EVTIM_LOG_ERR("Failed to get info for timer adapter id %u", adapter_id);
1281 rte_tel_data_start_dict(d);
1282 rte_tel_data_add_dict_u64(d, "timer_adapter_id", adapter_id);
1283 rte_tel_data_add_dict_u64(d, "min_resolution_ns", adapter_info.min_resolution_ns);
1284 rte_tel_data_add_dict_u64(d, "max_tmo_ns", adapter_info.max_tmo_ns);
1285 rte_tel_data_add_dict_u64(d, "event_dev_id", adapter_info.conf.event_dev_id);
1286 rte_tel_data_add_dict_u64(d, "socket_id", adapter_info.conf.socket_id);
1287 rte_tel_data_add_dict_u64(d, "clk_src", adapter_info.conf.clk_src);
1288 rte_tel_data_add_dict_u64(d, "timer_tick_ns", adapter_info.conf.timer_tick_ns);
1289 rte_tel_data_add_dict_u64(d, "nb_timers", adapter_info.conf.nb_timers);
1290 rte_tel_data_add_dict_u64(d, "flags", adapter_info.conf.flags);
/* Telemetry handler for /eventdev/ta_stats.  Parses and validates the
 * adapter id, then emits the adapter's statistics counters as a telemetry
 * dictionary.
 */
1296 handle_ta_stats(const char *cmd __rte_unused, const char *params,
1297 struct rte_tel_data *d)
1299 struct rte_event_timer_adapter_stats stats;
1300 struct rte_event_timer_adapter *adapter;
1301 uint16_t adapter_id;
/* Reject missing or non-numeric parameters before atoi(). */
1304 if (params == NULL || strlen(params) == 0 || !isdigit(*params))
1307 adapter_id = atoi(params);
1309 if (adapter_id >= RTE_EVENT_TIMER_ADAPTER_NUM_MAX) {
1310 EVTIM_LOG_ERR("Invalid timer adapter id %u", adapter_id);
1314 adapter = &adapters[adapter_id];
1316 ret = rte_event_timer_adapter_stats_get(adapter, &stats);
1318 EVTIM_LOG_ERR("Failed to get stats for timer adapter id %u", adapter_id);
1322 rte_tel_data_start_dict(d);
1323 rte_tel_data_add_dict_u64(d, "timer_adapter_id", adapter_id);
1324 rte_tel_data_add_dict_u64(d, "evtim_exp_count", stats.evtim_exp_count);
1325 rte_tel_data_add_dict_u64(d, "ev_enq_count", stats.ev_enq_count);
1326 rte_tel_data_add_dict_u64(d, "ev_inv_count", stats.ev_inv_count);
1327 rte_tel_data_add_dict_u64(d, "evtim_retry_count", stats.evtim_retry_count);
1328 rte_tel_data_add_dict_u64(d, "adapter_tick_count", stats.adapter_tick_count);
/* Constructor: register the timer-adapter telemetry endpoints at startup
 * (handler arguments are elided in this excerpt).
 */
1333 RTE_INIT(ta_init_telemetry)
1335 rte_telemetry_register_cmd("/eventdev/ta_info",
1337 "Returns Timer adapter info. Parameter: Timer adapter id");
1339 rte_telemetry_register_cmd("/eventdev/ta_stats",
1341 "Returns Timer adapter stats. Parameter: Timer adapter id");