1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2017-2018 Intel Corporation.
11 #include <rte_memzone.h>
12 #include <rte_memory.h>
14 #include <rte_errno.h>
15 #include <rte_malloc.h>
17 #include <rte_mempool.h>
18 #include <rte_common.h>
19 #include <rte_timer.h>
20 #include <rte_service_component.h>
21 #include <rte_cycles.h>
23 #include "rte_eventdev.h"
24 #include "eventdev_pmd.h"
25 #include "rte_eventdev_trace.h"
26 #include "rte_event_timer_adapter.h"
27 #include "rte_event_timer_adapter_pmd.h"
29 #define DATA_MZ_NAME_MAX_LEN 64
30 #define DATA_MZ_NAME_FORMAT "rte_event_timer_adapter_data_%d"
32 RTE_LOG_REGISTER_SUFFIX(evtim_logtype, adapter.timer, NOTICE);
33 RTE_LOG_REGISTER_SUFFIX(evtim_buffer_logtype, adapter.timer, NOTICE);
34 RTE_LOG_REGISTER_SUFFIX(evtim_svc_logtype, adapter.timer.svc, NOTICE);
36 static struct rte_event_timer_adapter adapters[RTE_EVENT_TIMER_ADAPTER_NUM_MAX];
38 static const struct rte_event_timer_adapter_ops swtim_ops;
40 #define EVTIM_LOG(level, logtype, ...) \
41 rte_log(RTE_LOG_ ## level, logtype, \
42 RTE_FMT("EVTIMER: %s() line %u: " RTE_FMT_HEAD(__VA_ARGS__,) \
43 "\n", __func__, __LINE__, RTE_FMT_TAIL(__VA_ARGS__,)))
45 #define EVTIM_LOG_ERR(...) EVTIM_LOG(ERR, evtim_logtype, __VA_ARGS__)
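/* Usage sketch (illustrative, not part of the original sources): a call such
 * as EVTIM_LOG_ERR("invalid adapter id %u", id) expands to
 * rte_log(RTE_LOG_ERR, evtim_logtype, ...) and emits a line of the form
 * "EVTIMER: <function>() line <N>: invalid adapter id 3"; the trailing
 * newline is appended by the macro itself.
 */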
47 #ifdef RTE_LIBRTE_EVENTDEV_DEBUG
48 #define EVTIM_LOG_DBG(...) \
49 EVTIM_LOG(DEBUG, evtim_logtype, __VA_ARGS__)
50 #define EVTIM_BUF_LOG_DBG(...) \
51 EVTIM_LOG(DEBUG, evtim_buffer_logtype, __VA_ARGS__)
52 #define EVTIM_SVC_LOG_DBG(...) \
53 EVTIM_LOG(DEBUG, evtim_svc_logtype, __VA_ARGS__)
55 #define EVTIM_LOG_DBG(...) (void)0
56 #define EVTIM_BUF_LOG_DBG(...) (void)0
57 #define EVTIM_SVC_LOG_DBG(...) (void)0
61 default_port_conf_cb(uint16_t id, uint8_t event_dev_id, uint8_t *event_port_id,
64 struct rte_event_timer_adapter *adapter;
65 struct rte_eventdev *dev;
66 struct rte_event_dev_config dev_conf;
67 struct rte_event_port_conf *port_conf, def_port_conf = {0};
73 RTE_SET_USED(event_dev_id);
75 adapter = &adapters[id];
76 dev = &rte_eventdevs[adapter->data->event_dev_id];
77 dev_id = dev->data->dev_id;
78 dev_conf = dev->data->dev_conf;
80 started = dev->data->dev_started;
82 rte_event_dev_stop(dev_id);
84 port_id = dev_conf.nb_event_ports;
85 dev_conf.nb_event_ports += 1;
86 ret = rte_event_dev_configure(dev_id, &dev_conf);
88 EVTIM_LOG_ERR("failed to configure event dev %u", dev_id);
90 if (rte_event_dev_start(dev_id))
99 port_conf = &def_port_conf;
100 ret = rte_event_port_default_conf_get(dev_id, port_id,
106 ret = rte_event_port_setup(dev_id, port_id, port_conf);
108 EVTIM_LOG_ERR("failed to setup event port %u on event dev %u",
113 *event_port_id = port_id;
116 ret = rte_event_dev_start(dev_id);
121 struct rte_event_timer_adapter *
122 rte_event_timer_adapter_create(const struct rte_event_timer_adapter_conf *conf)
124 return rte_event_timer_adapter_create_ext(conf, default_port_conf_cb,
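/* Caller-side sketch (illustrative values; field names follow
 * struct rte_event_timer_adapter_conf). A timer_tick_ns of 1000000 gives a
 * 1 ms resolution and a max_tmo_ns of 10000000000 a 10 s ceiling:
 *
 *     struct rte_event_timer_adapter_conf conf = {
 *             .event_dev_id = 0,
 *             .timer_adapter_id = 0,
 *             .socket_id = rte_socket_id(),
 *             .clk_src = RTE_EVENT_TIMER_ADAPTER_CPU_CLK,
 *             .timer_tick_ns = 1000000,
 *             .max_tmo_ns = 10000000000ULL,
 *             .nb_timers = 10000,
 *     };
 *     struct rte_event_timer_adapter *adapter =
 *             rte_event_timer_adapter_create(&conf);
 *
 * A NULL return leaves the failure reason in rte_errno, as set below.
 */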
128 struct rte_event_timer_adapter *
129 rte_event_timer_adapter_create_ext(
130 const struct rte_event_timer_adapter_conf *conf,
131 rte_event_timer_adapter_port_conf_cb_t conf_cb,
135 struct rte_event_timer_adapter *adapter;
136 const struct rte_memzone *mz;
137 char mz_name[DATA_MZ_NAME_MAX_LEN];
139 struct rte_eventdev *dev;
146 /* Check eventdev ID */
147 if (!rte_event_pmd_is_valid_dev(conf->event_dev_id)) {
151 dev = &rte_eventdevs[conf->event_dev_id];
153 adapter_id = conf->timer_adapter_id;
155 /* Check that adapter_id is in range */
156 if (adapter_id >= RTE_EVENT_TIMER_ADAPTER_NUM_MAX) {
161 /* Check adapter ID not already allocated */
162 adapter = &adapters[adapter_id];
163 if (adapter->allocated) {
168 /* Create shared data area. */
169 n = snprintf(mz_name, sizeof(mz_name), DATA_MZ_NAME_FORMAT, adapter_id);
170 if (n >= (int)sizeof(mz_name)) {
174 mz = rte_memzone_reserve(mz_name,
175 sizeof(struct rte_event_timer_adapter_data),
178 /* rte_errno set by rte_memzone_reserve */
181 adapter->data = mz->addr;
182 memset(adapter->data, 0, sizeof(struct rte_event_timer_adapter_data));
184 adapter->data->mz = mz;
185 adapter->data->event_dev_id = conf->event_dev_id;
186 adapter->data->id = adapter_id;
187 adapter->data->socket_id = conf->socket_id;
188 adapter->data->conf = *conf; /* copy conf structure */
190 /* Query eventdev PMD for timer adapter capabilities and ops */
191 ret = dev->dev_ops->timer_adapter_caps_get(dev,
192 adapter->data->conf.flags,
193 &adapter->data->caps,
200 if (!(adapter->data->caps &
201 RTE_EVENT_TIMER_ADAPTER_CAP_INTERNAL_PORT)) {
202 FUNC_PTR_OR_NULL_RET_WITH_ERRNO(conf_cb, EINVAL);
203 ret = conf_cb(adapter->data->id, adapter->data->event_dev_id,
204 &adapter->data->event_port_id, conf_arg);
211 /* If eventdev PMD did not provide ops, use default software
214 if (adapter->ops == NULL)
215 adapter->ops = &swtim_ops;
217 /* Allow driver to do some setup */
218 FUNC_PTR_OR_NULL_RET_WITH_ERRNO(adapter->ops->init, ENOTSUP);
219 ret = adapter->ops->init(adapter);
225 /* Set fast-path function pointers */
226 adapter->arm_burst = adapter->ops->arm_burst;
227 adapter->arm_tmo_tick_burst = adapter->ops->arm_tmo_tick_burst;
228 adapter->cancel_burst = adapter->ops->cancel_burst;
230 adapter->allocated = 1;
232 rte_eventdev_trace_timer_adapter_create(adapter_id, adapter, conf,
237 rte_memzone_free(adapter->data->mz);
242 rte_event_timer_adapter_get_info(const struct rte_event_timer_adapter *adapter,
243 struct rte_event_timer_adapter_info *adapter_info)
245 ADAPTER_VALID_OR_ERR_RET(adapter, -EINVAL);
247 if (adapter->ops->get_info)
248 /* let driver set values it knows */
249 adapter->ops->get_info(adapter, adapter_info);
251 /* Set common values */
252 adapter_info->conf = adapter->data->conf;
253 adapter_info->event_dev_port_id = adapter->data->event_port_id;
254 adapter_info->caps = adapter->data->caps;
260 rte_event_timer_adapter_start(const struct rte_event_timer_adapter *adapter)
264 ADAPTER_VALID_OR_ERR_RET(adapter, -EINVAL);
265 FUNC_PTR_OR_ERR_RET(adapter->ops->start, -EINVAL);
267 if (adapter->data->started) {
268 EVTIM_LOG_ERR("event timer adapter %"PRIu8" already started",
273 ret = adapter->ops->start(adapter);
277 adapter->data->started = 1;
278 rte_eventdev_trace_timer_adapter_start(adapter);
283 rte_event_timer_adapter_stop(const struct rte_event_timer_adapter *adapter)
287 ADAPTER_VALID_OR_ERR_RET(adapter, -EINVAL);
288 FUNC_PTR_OR_ERR_RET(adapter->ops->stop, -EINVAL);
290 if (adapter->data->started == 0) {
291 EVTIM_LOG_ERR("event timer adapter %"PRIu8" already stopped",
296 ret = adapter->ops->stop(adapter);
300 adapter->data->started = 0;
301 rte_eventdev_trace_timer_adapter_stop(adapter);
305 struct rte_event_timer_adapter *
306 rte_event_timer_adapter_lookup(uint16_t adapter_id)
308 char name[DATA_MZ_NAME_MAX_LEN];
309 const struct rte_memzone *mz;
310 struct rte_event_timer_adapter_data *data;
311 struct rte_event_timer_adapter *adapter;
313 struct rte_eventdev *dev;
315 if (adapters[adapter_id].allocated)
316 return &adapters[adapter_id]; /* Adapter is already loaded */
318 snprintf(name, DATA_MZ_NAME_MAX_LEN, DATA_MZ_NAME_FORMAT, adapter_id);
319 mz = rte_memzone_lookup(name);
327 adapter = &adapters[data->id];
328 adapter->data = data;
330 dev = &rte_eventdevs[adapter->data->event_dev_id];
332 /* Query eventdev PMD for timer adapter capabilities and ops */
333 ret = dev->dev_ops->timer_adapter_caps_get(dev,
334 adapter->data->conf.flags,
335 &adapter->data->caps,
342 /* If eventdev PMD did not provide ops, use default software
345 if (adapter->ops == NULL)
346 adapter->ops = &swtim_ops;
348 /* Set fast-path function pointers */
349 adapter->arm_burst = adapter->ops->arm_burst;
350 adapter->arm_tmo_tick_burst = adapter->ops->arm_tmo_tick_burst;
351 adapter->cancel_burst = adapter->ops->cancel_burst;
353 adapter->allocated = 1;
359 rte_event_timer_adapter_free(struct rte_event_timer_adapter *adapter)
363 ADAPTER_VALID_OR_ERR_RET(adapter, -EINVAL);
364 FUNC_PTR_OR_ERR_RET(adapter->ops->uninit, -EINVAL);
366 if (adapter->data->started == 1) {
367 EVTIM_LOG_ERR("event timer adapter %"PRIu8" must be stopped "
368 "before freeing", adapter->data->id);
372 /* free impl priv data */
373 ret = adapter->ops->uninit(adapter);
377 /* free shared data area */
378 ret = rte_memzone_free(adapter->data->mz);
382 adapter->data = NULL;
383 adapter->allocated = 0;
385 rte_eventdev_trace_timer_adapter_free(adapter);
390 rte_event_timer_adapter_service_id_get(struct rte_event_timer_adapter *adapter,
391 uint32_t *service_id)
393 ADAPTER_VALID_OR_ERR_RET(adapter, -EINVAL);
395 if (adapter->data->service_inited && service_id != NULL)
396 *service_id = adapter->data->service_id;
398 return adapter->data->service_inited ? 0 : -ESRCH;
402 rte_event_timer_adapter_stats_get(struct rte_event_timer_adapter *adapter,
403 struct rte_event_timer_adapter_stats *stats)
405 ADAPTER_VALID_OR_ERR_RET(adapter, -EINVAL);
406 FUNC_PTR_OR_ERR_RET(adapter->ops->stats_get, -EINVAL);
410 return adapter->ops->stats_get(adapter, stats);
414 rte_event_timer_adapter_stats_reset(struct rte_event_timer_adapter *adapter)
416 ADAPTER_VALID_OR_ERR_RET(adapter, -EINVAL);
417 FUNC_PTR_OR_ERR_RET(adapter->ops->stats_reset, -EINVAL);
418 return adapter->ops->stats_reset(adapter);
422 * Software event timer adapter buffer helper functions
425 #define NSECPERSEC 1E9
427 /* Optimizations used to index into the buffer require that the buffer size
430 #define EVENT_BUFFER_SZ 4096
431 #define EVENT_BUFFER_BATCHSZ 32
432 #define EVENT_BUFFER_MASK (EVENT_BUFFER_SZ - 1)
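/* Illustrative note (relies on EVENT_BUFFER_SZ being a power of two): head
 * and tail are free-running counters, so an array index is recovered with a
 * single AND rather than a modulus, e.g. 4100 & EVENT_BUFFER_MASK == 4, the
 * same result as 4100 % EVENT_BUFFER_SZ but cheaper on the fast path.
 */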
434 #define EXP_TIM_BUF_SZ 128
436 struct event_buffer {
439 struct rte_event events[EVENT_BUFFER_SZ];
440 } __rte_cache_aligned;
443 event_buffer_full(struct event_buffer *bufp)
445 return (bufp->head - bufp->tail) == EVENT_BUFFER_SZ;
449 event_buffer_batch_ready(struct event_buffer *bufp)
451 return (bufp->head - bufp->tail) >= EVENT_BUFFER_BATCHSZ;
455 event_buffer_init(struct event_buffer *bufp)
457 bufp->head = bufp->tail = 0;
458 memset(&bufp->events, 0, sizeof(struct rte_event) * EVENT_BUFFER_SZ);
462 event_buffer_add(struct event_buffer *bufp, struct rte_event *eventp)
465 struct rte_event *buf_eventp;
467 if (event_buffer_full(bufp))
470 /* Instead of modulus, bitwise AND with mask to get head_idx. */
471 head_idx = bufp->head & EVENT_BUFFER_MASK;
472 buf_eventp = &bufp->events[head_idx];
473 rte_memcpy(buf_eventp, eventp, sizeof(struct rte_event));
475 /* Wrap automatically when overflow occurs. */
482 event_buffer_flush(struct event_buffer *bufp, uint8_t dev_id, uint8_t port_id,
483 uint16_t *nb_events_flushed,
484 uint16_t *nb_events_inv)
486 struct rte_event *events = bufp->events;
487 size_t head_idx, tail_idx;
490 /* Instead of modulus, bitwise AND with mask to get index. */
491 head_idx = bufp->head & EVENT_BUFFER_MASK;
492 tail_idx = bufp->tail & EVENT_BUFFER_MASK;
494 RTE_ASSERT(head_idx < EVENT_BUFFER_SZ && tail_idx < EVENT_BUFFER_SZ);
496 /* Determine the largest contiguous run we can attempt to enqueue to the
499 if (head_idx > tail_idx)
500 n = head_idx - tail_idx;
501 else if (head_idx < tail_idx)
502 n = EVENT_BUFFER_SZ - tail_idx;
503 else if (event_buffer_full(bufp))
504 n = EVENT_BUFFER_SZ - tail_idx;
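/* Worked example (illustrative): with head_idx == 10 and tail_idx == 4090
 * the valid events wrap past the end of the array, so only the contiguous
 * run of n == EVENT_BUFFER_SZ - 4090 == 6 events is enqueued now; the events
 * at the start of the array are handled by a subsequent flush.
 */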
506 *nb_events_flushed = 0;
510 n = RTE_MIN(EVENT_BUFFER_BATCHSZ, n);
513 *nb_events_flushed = rte_event_enqueue_burst(dev_id, port_id,
514 &events[tail_idx], n);
515 if (*nb_events_flushed != n) {
516 if (rte_errno == EINVAL) {
517 EVTIM_LOG_ERR("failed to enqueue invalid event - "
520 } else if (rte_errno == ENOSPC)
524 if (*nb_events_flushed > 0)
525 EVTIM_BUF_LOG_DBG("enqueued %"PRIu16" timer events to event "
526 "device", *nb_events_flushed);
528 bufp->tail = bufp->tail + *nb_events_flushed + *nb_events_inv;
532 * Software event timer adapter implementation
535 /* Identifier of service executing timer management logic. */
537 /* The cycle count at which the adapter should next tick */
538 uint64_t next_tick_cycles;
539 /* The tick resolution used by the adapter instance. May have been
540 * adjusted from what the user requested
542 uint64_t timer_tick_ns;
543 /* Maximum timeout in nanoseconds allowed by adapter instance. */
545 /* Buffered timer expiry events to be enqueued to an event device. */
546 struct event_buffer buffer;
548 struct rte_event_timer_adapter_stats stats;
549 /* Mempool of timer objects */
550 struct rte_mempool *tim_pool;
551 /* Back pointer for convenience */
552 struct rte_event_timer_adapter *adapter;
553 /* Identifier of timer data instance */
554 uint32_t timer_data_id;
555 /* Track which cores have actually armed a timer */
558 } __rte_cache_aligned in_use[RTE_MAX_LCORE];
559 /* Track which cores' timer lists should be polled */
560 unsigned int poll_lcores[RTE_MAX_LCORE];
561 /* The number of lists that should be polled */
563 /* Timers which have expired and can be returned to a mempool */
564 struct rte_timer *expired_timers[EXP_TIM_BUF_SZ];
565 /* The number of timers that can be returned to a mempool */
566 size_t n_expired_timers;
569 static inline struct swtim *
570 swtim_pmd_priv(const struct rte_event_timer_adapter *adapter)
572 return adapter->data->adapter_priv;
576 swtim_callback(struct rte_timer *tim)
578 struct rte_event_timer *evtim = tim->arg;
579 struct rte_event_timer_adapter *adapter;
580 unsigned int lcore = rte_lcore_id();
582 uint16_t nb_evs_flushed = 0;
583 uint16_t nb_evs_invalid = 0;
588 opaque = evtim->impl_opaque[1];
589 adapter = (struct rte_event_timer_adapter *)(uintptr_t)opaque;
590 sw = swtim_pmd_priv(adapter);
592 ret = event_buffer_add(&sw->buffer, &evtim->ev);
594 /* If event buffer is full, put timer back in list with
595 * immediate expiry value, so that we process it again on the
598 ret = rte_timer_alt_reset(sw->timer_data_id, tim, 0, SINGLE,
601 EVTIM_LOG_DBG("event buffer full, failed to reset "
602 "timer with immediate expiry value");
604 sw->stats.evtim_retry_count++;
605 EVTIM_LOG_DBG("event buffer full, resetting rte_timer "
606 "with immediate expiry value");
609 if (unlikely(sw->in_use[lcore].v == 0)) {
610 sw->in_use[lcore].v = 1;
611 n_lcores = __atomic_fetch_add(&sw->n_poll_lcores, 1,
613 __atomic_store_n(&sw->poll_lcores[n_lcores], lcore,
617 EVTIM_BUF_LOG_DBG("buffered an event timer expiry event");
619 /* Empty the buffer here, if necessary, to free older expired
622 if (unlikely(sw->n_expired_timers == EXP_TIM_BUF_SZ)) {
623 rte_mempool_put_bulk(sw->tim_pool,
624 (void **)sw->expired_timers,
625 sw->n_expired_timers);
626 sw->n_expired_timers = 0;
629 sw->expired_timers[sw->n_expired_timers++] = tim;
630 sw->stats.evtim_exp_count++;
632 __atomic_store_n(&evtim->state, RTE_EVENT_TIMER_NOT_ARMED,
636 if (event_buffer_batch_ready(&sw->buffer)) {
637 event_buffer_flush(&sw->buffer,
638 adapter->data->event_dev_id,
639 adapter->data->event_port_id,
643 sw->stats.ev_enq_count += nb_evs_flushed;
644 sw->stats.ev_inv_count += nb_evs_invalid;
648 static __rte_always_inline uint64_t
649 get_timeout_cycles(struct rte_event_timer *evtim,
650 const struct rte_event_timer_adapter *adapter)
652 struct swtim *sw = swtim_pmd_priv(adapter);
653 uint64_t timeout_ns = evtim->timeout_ticks * sw->timer_tick_ns;
654 return timeout_ns * rte_get_timer_hz() / NSECPERSEC;
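/* Worked example (illustrative values): with timer_tick_ns == 1000000 (1 ms),
 * timeout_ticks == 100 and rte_get_timer_hz() == 2000000000 (2 GHz),
 * timeout_ns == 100000000 and the conversion yields
 * 100000000 * 2000000000 / 1E9 == 200000000 TSC cycles.
 */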
657 /* This function returns true if one or more (adapter) ticks have occurred since
658 * the last time it was called.
661 swtim_did_tick(struct swtim *sw)
663 uint64_t cycles_per_adapter_tick, start_cycles;
664 uint64_t *next_tick_cyclesp;
666 next_tick_cyclesp = &sw->next_tick_cycles;
667 cycles_per_adapter_tick = sw->timer_tick_ns *
668 (rte_get_timer_hz() / NSECPERSEC);
669 start_cycles = rte_get_timer_cycles();
671 /* Note: initially, *next_tick_cyclesp == 0, so the clause below will
672 * execute, and set things going.
675 if (start_cycles >= *next_tick_cyclesp) {
676 /* Snap the current cycle count to the preceding adapter tick
679 start_cycles -= start_cycles % cycles_per_adapter_tick;
680 *next_tick_cyclesp = start_cycles + cycles_per_adapter_tick;
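/* Illustrative trace (assumed values): timer_tick_ns == 1000000 and
 * rte_get_timer_hz() == 2000000000 give cycles_per_adapter_tick == 2000000;
 * a start_cycles of 5300000 is snapped back to 4000000 and the next tick is
 * then due at 6000000 cycles.
 */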
688 /* Check that event timer timeout value is in range */
689 static __rte_always_inline int
690 check_timeout(struct rte_event_timer *evtim,
691 const struct rte_event_timer_adapter *adapter)
694 struct swtim *sw = swtim_pmd_priv(adapter);
696 tmo_nsec = evtim->timeout_ticks * sw->timer_tick_ns;
697 if (tmo_nsec > sw->max_tmo_ns)
699 if (tmo_nsec < sw->timer_tick_ns)
705 /* Check that event timer event queue sched type matches destination event queue
708 static __rte_always_inline int
709 check_destination_event_queue(struct rte_event_timer *evtim,
710 const struct rte_event_timer_adapter *adapter)
715 ret = rte_event_queue_attr_get(adapter->data->event_dev_id,
717 RTE_EVENT_QUEUE_ATTR_SCHEDULE_TYPE,
720 if ((ret == 0 && evtim->ev.sched_type == sched_type) ||
728 swtim_service_func(void *arg)
730 struct rte_event_timer_adapter *adapter = arg;
731 struct swtim *sw = swtim_pmd_priv(adapter);
732 uint16_t nb_evs_flushed = 0;
733 uint16_t nb_evs_invalid = 0;
735 if (swtim_did_tick(sw)) {
736 rte_timer_alt_manage(sw->timer_data_id,
741 /* Return expired timer objects back to mempool */
742 rte_mempool_put_bulk(sw->tim_pool, (void **)sw->expired_timers,
743 sw->n_expired_timers);
744 sw->n_expired_timers = 0;
746 event_buffer_flush(&sw->buffer,
747 adapter->data->event_dev_id,
748 adapter->data->event_port_id,
752 sw->stats.ev_enq_count += nb_evs_flushed;
753 sw->stats.ev_inv_count += nb_evs_invalid;
754 sw->stats.adapter_tick_count++;
760 /* The adapter initialization function rounds the mempool size up to the next
761 * power of 2, so we can take the difference between that value and what the
762 * user requested, and use the space for caches. This avoids a scenario where a
763 * user can't arm the number of timers the adapter was configured with because
764 * mempool objects have been lost to caches.
766 * nb_actual should always be a power of 2, so we can iterate over the powers
767 * of 2 to see what the largest cache size we can use is.
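/* Worked example (illustrative; assumes RTE_MAX_LCORE == 128): a request for
 * 10000 timers is rounded up to nb_actual == 16384, leaving 6384 spare
 * objects. The largest power of two for which 128 * size stays below that
 * slack (and within RTE_MEMPOOL_CACHE_MAX_SIZE) is 32, so a per-lcore cache
 * of 32 objects can be used without eating into the requested capacity.
 */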
770 compute_msg_mempool_cache_size(uint64_t nb_requested, uint64_t nb_actual)
779 if (RTE_MAX_LCORE * size < (int)(nb_actual - nb_requested) &&
780 size < RTE_MEMPOOL_CACHE_MAX_SIZE &&
781 size <= nb_actual / 1.5)
791 swtim_init(struct rte_event_timer_adapter *adapter)
796 struct rte_service_spec service;
798 /* Allocate storage for private data area */
799 #define SWTIM_NAMESIZE 32
800 char swtim_name[SWTIM_NAMESIZE];
801 snprintf(swtim_name, SWTIM_NAMESIZE, "swtim_%"PRIu8,
803 sw = rte_zmalloc_socket(swtim_name, sizeof(*sw), RTE_CACHE_LINE_SIZE,
804 adapter->data->socket_id);
806 EVTIM_LOG_ERR("failed to allocate space for private data");
811 /* Connect storage to adapter instance */
812 adapter->data->adapter_priv = sw;
813 sw->adapter = adapter;
815 sw->timer_tick_ns = adapter->data->conf.timer_tick_ns;
816 sw->max_tmo_ns = adapter->data->conf.max_tmo_ns;
818 /* Create a timer pool */
819 char pool_name[SWTIM_NAMESIZE];
820 snprintf(pool_name, SWTIM_NAMESIZE, "swtim_pool_%"PRIu8,
822 /* Optimal mempool size is a power of 2 minus one */
823 uint64_t nb_timers = rte_align64pow2(adapter->data->conf.nb_timers);
824 int pool_size = nb_timers - 1;
825 int cache_size = compute_msg_mempool_cache_size(
826 adapter->data->conf.nb_timers, nb_timers);
827 flags = 0; /* pool is multi-producer, multi-consumer */
828 sw->tim_pool = rte_mempool_create(pool_name, pool_size,
829 sizeof(struct rte_timer), cache_size, 0, NULL, NULL,
830 NULL, NULL, adapter->data->socket_id, flags);
831 if (sw->tim_pool == NULL) {
832 EVTIM_LOG_ERR("failed to create timer object mempool");
837 /* Initialize the variables that track in-use timer lists */
838 for (i = 0; i < RTE_MAX_LCORE; i++)
841 /* Initialize the timer subsystem and allocate timer data instance */
842 ret = rte_timer_subsystem_init();
844 if (ret != -EALREADY) {
845 EVTIM_LOG_ERR("failed to initialize timer subsystem");
851 ret = rte_timer_data_alloc(&sw->timer_data_id);
853 EVTIM_LOG_ERR("failed to allocate timer data instance");
858 /* Initialize timer event buffer */
859 event_buffer_init(&sw->buffer);
861 sw->adapter = adapter;
863 /* Register a service component to run adapter logic */
864 memset(&service, 0, sizeof(service));
865 snprintf(service.name, RTE_SERVICE_NAME_MAX,
866 "swtim_svc_%"PRIu8, adapter->data->id);
867 service.socket_id = adapter->data->socket_id;
868 service.callback = swtim_service_func;
869 service.callback_userdata = adapter;
870 service.capabilities &= ~(RTE_SERVICE_CAP_MT_SAFE);
871 ret = rte_service_component_register(&service, &sw->service_id);
873 EVTIM_LOG_ERR("failed to register service %s with id %"PRIu32
874 ": err = %d", service.name, sw->service_id,
881 EVTIM_LOG_DBG("registered service %s with id %"PRIu32, service.name,
884 adapter->data->service_id = sw->service_id;
885 adapter->data->service_inited = 1;
889 rte_mempool_free(sw->tim_pool);
896 swtim_free_tim(struct rte_timer *tim, void *arg)
898 struct swtim *sw = arg;
900 rte_mempool_put(sw->tim_pool, tim);
903 /* Traverse the list of outstanding timers and put them back in the mempool
904 * before freeing the adapter to avoid leaking the memory.
907 swtim_uninit(struct rte_event_timer_adapter *adapter)
910 struct swtim *sw = swtim_pmd_priv(adapter);
912 /* Free outstanding timers */
913 rte_timer_stop_all(sw->timer_data_id,
919 ret = rte_service_component_unregister(sw->service_id);
921 EVTIM_LOG_ERR("failed to unregister service component");
925 rte_mempool_free(sw->tim_pool);
927 adapter->data->adapter_priv = NULL;
932 static inline int32_t
933 get_mapped_count_for_service(uint32_t service_id)
935 int32_t core_count, i, mapped_count = 0;
936 uint32_t lcore_arr[RTE_MAX_LCORE];
938 core_count = rte_service_lcore_list(lcore_arr, RTE_MAX_LCORE);
940 for (i = 0; i < core_count; i++)
941 if (rte_service_map_lcore_get(service_id, lcore_arr[i]) == 1)
948 swtim_start(const struct rte_event_timer_adapter *adapter)
951 struct swtim *sw = swtim_pmd_priv(adapter);
953 /* Mapping the service to more than one service core can introduce
954 * delays while one thread is waiting to acquire a lock, so only allow
955 * one core to be mapped to the service.
957 * Note: the service could be modified such that it spreads cores to
958 * poll over multiple service instances.
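/* Application-side sketch (illustrative; assumes lcore 2 is available as a
 * service core):
 *
 *     uint32_t service_id;
 *     rte_event_timer_adapter_service_id_get(adapter, &service_id);
 *     rte_service_lcore_add(2);
 *     rte_service_map_lcore_set(service_id, 2, 1);
 *     rte_service_runstate_set(service_id, 1);
 *     rte_service_lcore_start(2);
 *
 * Mapping the service to exactly one lcore in this way satisfies the check
 * below.
 */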
960 mapped_count = get_mapped_count_for_service(sw->service_id);
962 if (mapped_count != 1)
963 return mapped_count < 1 ? -ENOENT : -ENOTSUP;
965 return rte_service_component_runstate_set(sw->service_id, 1);
969 swtim_stop(const struct rte_event_timer_adapter *adapter)
972 struct swtim *sw = swtim_pmd_priv(adapter);
974 ret = rte_service_component_runstate_set(sw->service_id, 0);
978 /* Wait for the service to complete its final iteration */
979 while (rte_service_may_be_active(sw->service_id))
986 swtim_get_info(const struct rte_event_timer_adapter *adapter,
987 struct rte_event_timer_adapter_info *adapter_info)
989 struct swtim *sw = swtim_pmd_priv(adapter);
990 adapter_info->min_resolution_ns = sw->timer_tick_ns;
991 adapter_info->max_tmo_ns = sw->max_tmo_ns;
995 swtim_stats_get(const struct rte_event_timer_adapter *adapter,
996 struct rte_event_timer_adapter_stats *stats)
998 struct swtim *sw = swtim_pmd_priv(adapter);
999 *stats = sw->stats; /* structure copy */
1004 swtim_stats_reset(const struct rte_event_timer_adapter *adapter)
1006 struct swtim *sw = swtim_pmd_priv(adapter);
1007 memset(&sw->stats, 0, sizeof(sw->stats));
1012 __swtim_arm_burst(const struct rte_event_timer_adapter *adapter,
1013 struct rte_event_timer **evtims,
1017 struct swtim *sw = swtim_pmd_priv(adapter);
1018 uint32_t lcore_id = rte_lcore_id();
1019 struct rte_timer *tim, *tims[nb_evtims];
1022 /* Timer list for this lcore is not in use. */
1023 uint16_t exp_state = 0;
1024 enum rte_event_timer_state n_state;
1026 #ifdef RTE_LIBRTE_EVENTDEV_DEBUG
1027 /* Check that the service is running. */
1028 if (rte_service_runstate_get(adapter->data->service_id) != 1) {
1034 /* Adjust lcore_id if non-EAL thread. Arbitrarily pick the timer list of
1035 * the highest lcore to insert such timers into
1037 if (lcore_id == LCORE_ID_ANY)
1038 lcore_id = RTE_MAX_LCORE - 1;
1040 /* If this is the first time we're arming an event timer on this lcore,
1041 * mark this lcore as "in use"; this will cause the service
1042 * function to process the timer list that corresponds to this lcore.
1043 * The atomic compare-and-swap operation can prevent the race condition
1044 * on in_use flag between multiple non-EAL threads.
1046 if (unlikely(__atomic_compare_exchange_n(&sw->in_use[lcore_id].v,
1048 __ATOMIC_RELAXED, __ATOMIC_RELAXED))) {
1049 EVTIM_LOG_DBG("Adding lcore id = %u to list of lcores to poll",
1051 n_lcores = __atomic_fetch_add(&sw->n_poll_lcores, 1,
1053 __atomic_store_n(&sw->poll_lcores[n_lcores], lcore_id,
1057 ret = rte_mempool_get_bulk(sw->tim_pool, (void **)tims,
1064 for (i = 0; i < nb_evtims; i++) {
1065 n_state = __atomic_load_n(&evtims[i]->state, __ATOMIC_ACQUIRE);
1066 if (n_state == RTE_EVENT_TIMER_ARMED) {
1067 rte_errno = EALREADY;
1069 } else if (!(n_state == RTE_EVENT_TIMER_NOT_ARMED ||
1070 n_state == RTE_EVENT_TIMER_CANCELED)) {
1075 ret = check_timeout(evtims[i], adapter);
1076 if (unlikely(ret == -1)) {
1077 __atomic_store_n(&evtims[i]->state,
1078 RTE_EVENT_TIMER_ERROR_TOOLATE,
1082 } else if (unlikely(ret == -2)) {
1083 __atomic_store_n(&evtims[i]->state,
1084 RTE_EVENT_TIMER_ERROR_TOOEARLY,
1090 if (unlikely(check_destination_event_queue(evtims[i],
1092 __atomic_store_n(&evtims[i]->state,
1093 RTE_EVENT_TIMER_ERROR,
1100 rte_timer_init(tim);
1102 evtims[i]->impl_opaque[0] = (uintptr_t)tim;
1103 evtims[i]->impl_opaque[1] = (uintptr_t)adapter;
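/* Note: these opaque slots are read back later - impl_opaque[1] in
 * swtim_callback() to recover the adapter, impl_opaque[0] in
 * swtim_cancel_burst() to recover the rte_timer - avoiding extra lookups on
 * the fast path.
 */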
1105 cycles = get_timeout_cycles(evtims[i], adapter);
1106 ret = rte_timer_alt_reset(sw->timer_data_id, tim, cycles,
1107 SINGLE, lcore_id, NULL, evtims[i]);
1109 /* tim was in RUNNING or CONFIG state */
1110 __atomic_store_n(&evtims[i]->state,
1111 RTE_EVENT_TIMER_ERROR,
1116 EVTIM_LOG_DBG("armed an event timer");
1117 /* RELEASE ordering guarantees that the adapter-specific value
1118 * changes are observed before the update of state.
1120 __atomic_store_n(&evtims[i]->state, RTE_EVENT_TIMER_ARMED,
1125 rte_mempool_put_bulk(sw->tim_pool,
1126 (void **)&tims[i], nb_evtims - i);
1132 swtim_arm_burst(const struct rte_event_timer_adapter *adapter,
1133 struct rte_event_timer **evtims,
1136 return __swtim_arm_burst(adapter, evtims, nb_evtims);
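/* Caller-side sketch (illustrative; assumes the adapter has been created and
 * started and that event queue 0 is configured with RTE_SCHED_TYPE_ATOMIC):
 *
 *     struct rte_event_timer evtim = {
 *             .ev.op = RTE_EVENT_OP_NEW,
 *             .ev.queue_id = 0,
 *             .ev.sched_type = RTE_SCHED_TYPE_ATOMIC,
 *             .ev.event_type = RTE_EVENT_TYPE_TIMER,
 *             .state = RTE_EVENT_TIMER_NOT_ARMED,
 *             .timeout_ticks = 100,
 *     };
 *     struct rte_event_timer *evtim_p = &evtim;
 *     uint16_t armed = rte_event_timer_arm_burst(adapter, &evtim_p, 1);
 *
 * armed is 1 on success; on failure rte_errno and evtim.state describe the
 * error, mirroring the checks in __swtim_arm_burst() above.
 */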
1140 swtim_cancel_burst(const struct rte_event_timer_adapter *adapter,
1141 struct rte_event_timer **evtims,
1145 struct rte_timer *timp;
1147 struct swtim *sw = swtim_pmd_priv(adapter);
1148 enum rte_event_timer_state n_state;
1150 #ifdef RTE_LIBRTE_EVENTDEV_DEBUG
1151 /* Check that the service is running. */
1152 if (rte_service_runstate_get(adapter->data->service_id) != 1) {
1158 for (i = 0; i < nb_evtims; i++) {
1159 /* Don't modify the event timer state in these cases */
1160 /* ACQUIRE ordering guarantees that the implementation-specific
1161 * opaque data is accessed only under the correct state.
1163 n_state = __atomic_load_n(&evtims[i]->state, __ATOMIC_ACQUIRE);
1164 if (n_state == RTE_EVENT_TIMER_CANCELED) {
1165 rte_errno = EALREADY;
1167 } else if (n_state != RTE_EVENT_TIMER_ARMED) {
1172 opaque = evtims[i]->impl_opaque[0];
1173 timp = (struct rte_timer *)(uintptr_t)opaque;
1174 RTE_ASSERT(timp != NULL);
1176 ret = rte_timer_alt_stop(sw->timer_data_id, timp);
1178 /* Timer is running or being configured */
1183 rte_mempool_put(sw->tim_pool, (void **)timp);
1185 /* The RELEASE ordering here pairs with the ACQUIRE load of the state
1186 * above, so that the update is observed consistently between
1189 __atomic_store_n(&evtims[i]->state, RTE_EVENT_TIMER_CANCELED,
1197 swtim_arm_tmo_tick_burst(const struct rte_event_timer_adapter *adapter,
1198 struct rte_event_timer **evtims,
1199 uint64_t timeout_ticks,
1204 for (i = 0; i < nb_evtims; i++)
1205 evtims[i]->timeout_ticks = timeout_ticks;
1207 return __swtim_arm_burst(adapter, evtims, nb_evtims);
1210 static const struct rte_event_timer_adapter_ops swtim_ops = {
1212 .uninit = swtim_uninit,
1213 .start = swtim_start,
1215 .get_info = swtim_get_info,
1216 .stats_get = swtim_stats_get,
1217 .stats_reset = swtim_stats_reset,
1218 .arm_burst = swtim_arm_burst,
1219 .arm_tmo_tick_burst = swtim_arm_tmo_tick_burst,
1220 .cancel_burst = swtim_cancel_burst,