1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2017-2018 Intel Corporation.
11 #include <rte_memzone.h>
12 #include <rte_memory.h>
14 #include <rte_errno.h>
15 #include <rte_malloc.h>
17 #include <rte_mempool.h>
18 #include <rte_common.h>
19 #include <rte_timer.h>
20 #include <rte_service_component.h>
21 #include <rte_cycles.h>
23 #include "rte_eventdev.h"
24 #include "rte_eventdev_pmd.h"
25 #include "rte_event_timer_adapter.h"
26 #include "rte_event_timer_adapter_pmd.h"
28 #define DATA_MZ_NAME_MAX_LEN 64
29 #define DATA_MZ_NAME_FORMAT "rte_event_timer_adapter_data_%d"
31 static int evtim_logtype;
32 static int evtim_svc_logtype;
33 static int evtim_buffer_logtype;
35 static struct rte_event_timer_adapter adapters[RTE_EVENT_TIMER_ADAPTER_NUM_MAX];
37 static const struct rte_event_timer_adapter_ops swtim_ops;
39 #define EVTIM_LOG(level, logtype, ...) \
40 rte_log(RTE_LOG_ ## level, logtype, \
41 RTE_FMT("EVTIMER: %s() line %u: " RTE_FMT_HEAD(__VA_ARGS__,) \
42 "\n", __func__, __LINE__, RTE_FMT_TAIL(__VA_ARGS__,)))
44 #define EVTIM_LOG_ERR(...) EVTIM_LOG(ERR, evtim_logtype, __VA_ARGS__)
46 #ifdef RTE_LIBRTE_EVENTDEV_DEBUG
47 #define EVTIM_LOG_DBG(...) \
48 EVTIM_LOG(DEBUG, evtim_logtype, __VA_ARGS__)
49 #define EVTIM_BUF_LOG_DBG(...) \
50 EVTIM_LOG(DEBUG, evtim_buffer_logtype, __VA_ARGS__)
51 #define EVTIM_SVC_LOG_DBG(...) \
52 EVTIM_LOG(DEBUG, evtim_svc_logtype, __VA_ARGS__)
54 #define EVTIM_LOG_DBG(...) (void)0
55 #define EVTIM_BUF_LOG_DBG(...) (void)0
56 #define EVTIM_SVC_LOG_DBG(...) (void)0
/* Default port-configuration callback: used when the eventdev lacks an
 * internal timer port. Stops the device, reconfigures it with one extra
 * event port reserved for the adapter, sets that port up, and restarts
 * the device.
 * NOTE(review): several original lines (return type, error paths, local
 * declarations) are elided from this excerpt; comments cover only the
 * visible code.
 */
60 default_port_conf_cb(uint16_t id, uint8_t event_dev_id, uint8_t *event_port_id,
63 struct rte_event_timer_adapter *adapter;
64 struct rte_eventdev *dev;
65 struct rte_event_dev_config dev_conf;
66 struct rte_event_port_conf *port_conf, def_port_conf = {0};
72 RTE_SET_USED(event_dev_id);
74 adapter = &adapters[id];
75 dev = &rte_eventdevs[adapter->data->event_dev_id];
76 dev_id = dev->data->dev_id;
77 dev_conf = dev->data->dev_conf;
/* Remember whether the device was running so it can be restarted below. */
79 started = dev->data->dev_started;
81 rte_event_dev_stop(dev_id);
/* Claim the next port index, then grow the port count by one. */
83 port_id = dev_conf.nb_event_ports;
84 dev_conf.nb_event_ports += 1;
85 ret = rte_event_dev_configure(dev_id, &dev_conf);
87 EVTIM_LOG_ERR("failed to configure event dev %u\n", dev_id);
89 if (rte_event_dev_start(dev_id))
/* No explicit port conf supplied: fall back to the PMD's defaults. */
98 port_conf = &def_port_conf;
99 ret = rte_event_port_default_conf_get(dev_id, port_id,
105 ret = rte_event_port_setup(dev_id, port_id, port_conf);
107 EVTIM_LOG_ERR("failed to setup event port %u on event dev %u\n",
112 *event_port_id = port_id;
115 ret = rte_event_dev_start(dev_id);
/* Convenience wrapper: create an adapter using the default
 * port-configuration callback with no callback argument.
 */
120 struct rte_event_timer_adapter *
121 rte_event_timer_adapter_create(const struct rte_event_timer_adapter_conf *conf)
123 return rte_event_timer_adapter_create_ext(conf, default_port_conf_cb,
/* Create an event timer adapter: validate conf, reserve a shared-memory
 * data area (memzone) named after the adapter ID, query the eventdev PMD
 * for capabilities/ops, invoke the port-configuration callback when the
 * device has no internal port, run the driver init hook, and publish the
 * fast-path function pointers.
 * NOTE(review): error-return lines are elided from this excerpt.
 */
127 struct rte_event_timer_adapter *
128 rte_event_timer_adapter_create_ext(
129 const struct rte_event_timer_adapter_conf *conf,
130 rte_event_timer_adapter_port_conf_cb_t conf_cb,
134 struct rte_event_timer_adapter *adapter;
135 const struct rte_memzone *mz;
136 char mz_name[DATA_MZ_NAME_MAX_LEN];
138 struct rte_eventdev *dev;
145 /* Check eventdev ID */
146 if (!rte_event_pmd_is_valid_dev(conf->event_dev_id)) {
150 dev = &rte_eventdevs[conf->event_dev_id];
152 adapter_id = conf->timer_adapter_id;
154 /* Check that adapter_id is in range */
155 if (adapter_id >= RTE_EVENT_TIMER_ADAPTER_NUM_MAX) {
160 /* Check adapter ID not already allocated */
161 adapter = &adapters[adapter_id];
162 if (adapter->allocated) {
167 /* Create shared data area. */
168 n = snprintf(mz_name, sizeof(mz_name), DATA_MZ_NAME_FORMAT, adapter_id);
169 if (n >= (int)sizeof(mz_name)) {
173 mz = rte_memzone_reserve(mz_name,
174 sizeof(struct rte_event_timer_adapter_data),
177 /* rte_errno set by rte_memzone_reserve */
180 adapter->data = mz->addr;
181 memset(adapter->data, 0, sizeof(struct rte_event_timer_adapter_data));
183 adapter->data->mz = mz;
184 adapter->data->event_dev_id = conf->event_dev_id;
185 adapter->data->id = adapter_id;
186 adapter->data->socket_id = conf->socket_id;
187 adapter->data->conf = *conf; /* copy conf structure */
189 /* Query eventdev PMD for timer adapter capabilities and ops */
190 ret = dev->dev_ops->timer_adapter_caps_get(dev,
191 adapter->data->conf.flags,
192 &adapter->data->caps,
/* Device has no internal port: the callback must supply an event port. */
199 if (!(adapter->data->caps &
200 RTE_EVENT_TIMER_ADAPTER_CAP_INTERNAL_PORT)) {
201 FUNC_PTR_OR_NULL_RET_WITH_ERRNO(conf_cb, EINVAL);
202 ret = conf_cb(adapter->data->id, adapter->data->event_dev_id,
203 &adapter->data->event_port_id, conf_arg);
210 /* If eventdev PMD did not provide ops, use default software
213 if (adapter->ops == NULL)
214 adapter->ops = &swtim_ops;
216 /* Allow driver to do some setup */
217 FUNC_PTR_OR_NULL_RET_WITH_ERRNO(adapter->ops->init, ENOTSUP);
218 ret = adapter->ops->init(adapter);
224 /* Set fast-path function pointers */
225 adapter->arm_burst = adapter->ops->arm_burst;
226 adapter->arm_tmo_tick_burst = adapter->ops->arm_tmo_tick_burst;
227 adapter->cancel_burst = adapter->ops->cancel_burst;
229 adapter->allocated = 1;
/* Error path: release the shared data memzone reserved above. */
234 rte_memzone_free(adapter->data->mz);
/* Fill adapter_info: let the driver set what it knows first, then copy
 * the common fields (conf, event port, caps) from shared data.
 */
239 rte_event_timer_adapter_get_info(const struct rte_event_timer_adapter *adapter,
240 struct rte_event_timer_adapter_info *adapter_info)
242 ADAPTER_VALID_OR_ERR_RET(adapter, -EINVAL);
244 if (adapter->ops->get_info)
245 /* let driver set values it knows */
246 adapter->ops->get_info(adapter, adapter_info);
248 /* Set common values */
249 adapter_info->conf = adapter->data->conf;
250 adapter_info->event_dev_port_id = adapter->data->event_port_id;
251 adapter_info->caps = adapter->data->caps;
/* Start the adapter via its driver's start op; errors out if it is
 * already started, and marks it started on success.
 */
257 rte_event_timer_adapter_start(const struct rte_event_timer_adapter *adapter)
261 ADAPTER_VALID_OR_ERR_RET(adapter, -EINVAL);
262 FUNC_PTR_OR_ERR_RET(adapter->ops->start, -EINVAL);
264 if (adapter->data->started) {
265 EVTIM_LOG_ERR("event timer adapter %"PRIu8" already started",
270 ret = adapter->ops->start(adapter);
274 adapter->data->started = 1;
/* Stop the adapter via its driver's stop op; errors out if it is
 * already stopped, and clears the started flag on success.
 */
280 rte_event_timer_adapter_stop(const struct rte_event_timer_adapter *adapter)
284 ADAPTER_VALID_OR_ERR_RET(adapter, -EINVAL);
285 FUNC_PTR_OR_ERR_RET(adapter->ops->stop, -EINVAL);
287 if (adapter->data->started == 0) {
288 EVTIM_LOG_ERR("event timer adapter %"PRIu8" already stopped",
293 ret = adapter->ops->stop(adapter);
297 adapter->data->started = 0;
/* Re-attach to an adapter created elsewhere (e.g. another process):
 * return the local struct if already loaded, otherwise look up the
 * shared memzone by name, rebind the local adapter to that data, and
 * re-query the PMD for caps/ops and fast-path pointers.
 */
302 struct rte_event_timer_adapter *
303 rte_event_timer_adapter_lookup(uint16_t adapter_id)
305 char name[DATA_MZ_NAME_MAX_LEN];
306 const struct rte_memzone *mz;
307 struct rte_event_timer_adapter_data *data;
308 struct rte_event_timer_adapter *adapter;
310 struct rte_eventdev *dev;
312 if (adapters[adapter_id].allocated)
313 return &adapters[adapter_id]; /* Adapter is already loaded */
315 snprintf(name, DATA_MZ_NAME_MAX_LEN, DATA_MZ_NAME_FORMAT, adapter_id);
316 mz = rte_memzone_lookup(name);
/* Bind the local slot (indexed by the ID stored in shared data). */
324 adapter = &adapters[data->id];
325 adapter->data = data;
327 dev = &rte_eventdevs[adapter->data->event_dev_id];
329 /* Query eventdev PMD for timer adapter capabilities and ops */
330 ret = dev->dev_ops->timer_adapter_caps_get(dev,
331 adapter->data->conf.flags,
332 &adapter->data->caps,
339 /* If eventdev PMD did not provide ops, use default software
342 if (adapter->ops == NULL)
343 adapter->ops = &swtim_ops;
345 /* Set fast-path function pointers */
346 adapter->arm_burst = adapter->ops->arm_burst;
347 adapter->arm_tmo_tick_burst = adapter->ops->arm_tmo_tick_burst;
348 adapter->cancel_burst = adapter->ops->cancel_burst;
350 adapter->allocated = 1;
/* Free an adapter: refuse while started, release driver private data
 * via the uninit op, free the shared memzone, and clear the local slot.
 */
356 rte_event_timer_adapter_free(struct rte_event_timer_adapter *adapter)
360 ADAPTER_VALID_OR_ERR_RET(adapter, -EINVAL);
361 FUNC_PTR_OR_ERR_RET(adapter->ops->uninit, -EINVAL);
363 if (adapter->data->started == 1) {
364 EVTIM_LOG_ERR("event timer adapter %"PRIu8" must be stopped "
365 "before freeing", adapter->data->id);
369 /* free impl priv data */
370 ret = adapter->ops->uninit(adapter);
374 /* free shared data area */
375 ret = rte_memzone_free(adapter->data->mz);
/* Mark the slot reusable for a future create/lookup. */
379 adapter->data = NULL;
380 adapter->allocated = 0;
/* Report the service ID backing a software-serviced adapter; returns
 * -ESRCH when no service was registered for this adapter.
 */
386 rte_event_timer_adapter_service_id_get(struct rte_event_timer_adapter *adapter,
387 uint32_t *service_id)
389 ADAPTER_VALID_OR_ERR_RET(adapter, -EINVAL);
391 if (adapter->data->service_inited && service_id != NULL)
392 *service_id = adapter->data->service_id;
394 return adapter->data->service_inited ? 0 : -ESRCH;
/* Delegate statistics retrieval to the driver's stats_get op. */
398 rte_event_timer_adapter_stats_get(struct rte_event_timer_adapter *adapter,
399 struct rte_event_timer_adapter_stats *stats)
401 ADAPTER_VALID_OR_ERR_RET(adapter, -EINVAL);
402 FUNC_PTR_OR_ERR_RET(adapter->ops->stats_get, -EINVAL);
406 return adapter->ops->stats_get(adapter, stats);
/* Delegate statistics reset to the driver's stats_reset op. */
410 rte_event_timer_adapter_stats_reset(struct rte_event_timer_adapter *adapter)
412 ADAPTER_VALID_OR_ERR_RET(adapter, -EINVAL);
413 FUNC_PTR_OR_ERR_RET(adapter->ops->stats_reset, -EINVAL);
414 return adapter->ops->stats_reset(adapter);
418 * Software event timer adapter buffer helper functions
421 #define NSECPERSEC 1E9
423 /* Optimizations used to index into the buffer require that the buffer size
426 #define EVENT_BUFFER_SZ 4096
427 #define EVENT_BUFFER_BATCHSZ 32
428 #define EVENT_BUFFER_MASK (EVENT_BUFFER_SZ - 1)
430 #define EXP_TIM_BUF_SZ 128
/* Ring buffer of timer-expiry events awaiting enqueue to an eventdev.
 * Sized to a power of two so indices can be masked instead of taken
 * modulo EVENT_BUFFER_SZ.
 * NOTE(review): the head/tail member declarations are elided from this
 * excerpt; the accessors below treat them as free-running counters.
 */
432 struct event_buffer {
435 struct rte_event events[EVENT_BUFFER_SZ];
436 } __rte_cache_aligned;
/* Full when head has advanced a whole buffer length beyond tail. */
439 event_buffer_full(struct event_buffer *bufp)
441 return (bufp->head - bufp->tail) == EVENT_BUFFER_SZ;
/* True when at least one full batch of events is waiting to be flushed. */
445 event_buffer_batch_ready(struct event_buffer *bufp)
447 return (bufp->head - bufp->tail) >= EVENT_BUFFER_BATCHSZ;
/* Reset the ring to empty and zero its event storage. */
451 event_buffer_init(struct event_buffer *bufp)
453 bufp->head = bufp->tail = 0;
454 memset(&bufp->events, 0, sizeof(struct rte_event) * EVENT_BUFFER_SZ);
/* Copy one event into the ring at the current head position; the early
 * return (elided here) rejects the add when the buffer is full.
 */
458 event_buffer_add(struct event_buffer *bufp, struct rte_event *eventp)
461 struct rte_event *buf_eventp;
463 if (event_buffer_full(bufp))
466 /* Instead of modulus, bitwise AND with mask to get head_idx. */
467 head_idx = bufp->head & EVENT_BUFFER_MASK;
468 buf_eventp = &bufp->events[head_idx];
469 rte_memcpy(buf_eventp, eventp, sizeof(struct rte_event));
471 /* Wrap automatically when overflow occurs. */
/* Enqueue the largest contiguous run of buffered events (capped at one
 * batch) to the given eventdev port. Reports how many were flushed and
 * how many were invalid, then advances tail past both.
 */
478 event_buffer_flush(struct event_buffer *bufp, uint8_t dev_id, uint8_t port_id,
479 uint16_t *nb_events_flushed,
480 uint16_t *nb_events_inv)
482 struct rte_event *events = bufp->events;
483 size_t head_idx, tail_idx;
486 /* Instead of modulus, bitwise AND with mask to get index. */
487 head_idx = bufp->head & EVENT_BUFFER_MASK;
488 tail_idx = bufp->tail & EVENT_BUFFER_MASK;
490 RTE_ASSERT(head_idx < EVENT_BUFFER_SZ && tail_idx < EVENT_BUFFER_SZ);
492 /* Determine the largest contigous run we can attempt to enqueue to the
/* When the run wraps past the array end, only flush up to the end;
 * the remainder is picked up on a later call.
 */
495 if (head_idx > tail_idx)
496 n = head_idx - tail_idx;
497 else if (head_idx < tail_idx)
498 n = EVENT_BUFFER_SZ - tail_idx;
499 else if (event_buffer_full(bufp))
500 n = EVENT_BUFFER_SZ - tail_idx;
/* head == tail and not full: nothing buffered, nothing to flush. */
502 *nb_events_flushed = 0;
506 n = RTE_MIN(EVENT_BUFFER_BATCHSZ, n);
509 *nb_events_flushed = rte_event_enqueue_burst(dev_id, port_id,
510 &events[tail_idx], n);
511 if (*nb_events_flushed != n) {
512 if (rte_errno == EINVAL) {
513 EVTIM_LOG_ERR("failed to enqueue invalid event - "
516 } else if (rte_errno == ENOSPC)
520 if (*nb_events_flushed > 0)
521 EVTIM_BUF_LOG_DBG("enqueued %"PRIu16" timer events to event "
522 "device", *nb_events_flushed);
/* Consume both successfully-enqueued and invalid (skipped) events. */
524 bufp->tail = bufp->tail + *nb_events_flushed + *nb_events_inv;
528 * Software event timer adapter implementation
/* Private state of the software timer adapter implementation.
 * NOTE(review): the "struct swtim {" opening line and several member
 * declarations (e.g. service_id, max_tmo_ns, n_poll_lcores, the in_use
 * atomic) are elided from this excerpt.
 */
531 /* Identifier of service executing timer management logic. */
533 /* The cycle count at which the adapter should next tick */
534 uint64_t next_tick_cycles;
535 /* The tick resolution used by adapter instance. May have been
536 * adjusted from what user requested
538 uint64_t timer_tick_ns;
539 /* Maximum timeout in nanoseconds allowed by adapter instance. */
541 /* Buffered timer expiry events to be enqueued to an event device. */
542 struct event_buffer buffer;
/* Statistics exposed via the stats_get/stats_reset ops. */
544 struct rte_event_timer_adapter_stats stats;
545 /* Mempool of timer objects */
546 struct rte_mempool *tim_pool;
547 /* Back pointer for convenience */
548 struct rte_event_timer_adapter *adapter;
549 /* Identifier of timer data instance */
550 uint32_t timer_data_id;
551 /* Track which cores have actually armed a timer */
554 } __rte_cache_aligned in_use[RTE_MAX_LCORE];
555 /* Track which cores' timer lists should be polled */
556 unsigned int poll_lcores[RTE_MAX_LCORE];
557 /* The number of lists that should be polled */
559 /* Timers which have expired and can be returned to a mempool */
560 struct rte_timer *expired_timers[EXP_TIM_BUF_SZ];
561 /* The number of timers that can be returned to a mempool */
562 size_t n_expired_timers;
/* Accessor for the swtim private data hung off the adapter's shared data. */
565 static inline struct swtim *
566 swtim_pmd_priv(const struct rte_event_timer_adapter *adapter)
568 return adapter->data->adapter_priv;
/* rte_timer expiry callback for a single event timer: buffer its event
 * for enqueue; if the buffer is full, re-arm the timer with immediate
 * expiry so it is retried on the next tick. On success, recycle the
 * rte_timer object (bulk-returning to the mempool when the expired
 * buffer fills) and flush a batch of events when one is ready.
 */
572 swtim_callback(struct rte_timer *tim)
574 struct rte_event_timer *evtim = tim->arg;
575 struct rte_event_timer_adapter *adapter;
576 unsigned int lcore = rte_lcore_id();
578 uint16_t nb_evs_flushed = 0;
579 uint16_t nb_evs_invalid = 0;
/* The adapter pointer was stashed in impl_opaque[1] when armed. */
583 opaque = evtim->impl_opaque[1];
584 adapter = (struct rte_event_timer_adapter *)(uintptr_t)opaque;
585 sw = swtim_pmd_priv(adapter);
587 ret = event_buffer_add(&sw->buffer, &evtim->ev);
589 /* If event buffer is full, put timer back in list with
590 * immediate expiry value, so that we process it again on the
593 ret = rte_timer_alt_reset(sw->timer_data_id, tim, 0, SINGLE,
596 EVTIM_LOG_DBG("event buffer full, failed to reset "
597 "timer with immediate expiry value");
599 sw->stats.evtim_retry_count++;
600 EVTIM_LOG_DBG("event buffer full, resetting rte_timer "
601 "with immediate expiry value");
/* First use of this lcore's timer list: add it to the poll set. */
604 if (unlikely(rte_atomic16_test_and_set(&sw->in_use[lcore].v)))
605 sw->poll_lcores[sw->n_poll_lcores++] = lcore;
607 EVTIM_BUF_LOG_DBG("buffered an event timer expiry event");
609 /* Empty the buffer here, if necessary, to free older expired
612 if (unlikely(sw->n_expired_timers == EXP_TIM_BUF_SZ)) {
613 rte_mempool_put_bulk(sw->tim_pool,
614 (void **)sw->expired_timers,
615 sw->n_expired_timers);
616 sw->n_expired_timers = 0;
619 sw->expired_timers[sw->n_expired_timers++] = tim;
620 sw->stats.evtim_exp_count++;
622 evtim->state = RTE_EVENT_TIMER_NOT_ARMED;
625 if (event_buffer_batch_ready(&sw->buffer)) {
626 event_buffer_flush(&sw->buffer,
627 adapter->data->event_dev_id,
628 adapter->data->event_port_id,
632 sw->stats.ev_enq_count += nb_evs_flushed;
633 sw->stats.ev_inv_count += nb_evs_invalid;
/* Convert an event timer's tick count into TSC cycles, via nanoseconds.
 * NOTE(review): timeout_ns * rte_get_timer_hz() can overflow uint64_t
 * for very large timeouts — presumably bounded by check_timeout()
 * against sw->max_tmo_ns; confirm upstream callers always validate.
 */
637 static __rte_always_inline uint64_t
638 get_timeout_cycles(struct rte_event_timer *evtim,
639 const struct rte_event_timer_adapter *adapter)
641 struct swtim *sw = swtim_pmd_priv(adapter);
642 uint64_t timeout_ns = evtim->timeout_ticks * sw->timer_tick_ns;
643 return timeout_ns * rte_get_timer_hz() / NSECPERSEC;
646 /* This function returns true if one or more (adapter) ticks have occurred since
647 * the last time it was called.
650 swtim_did_tick(struct swtim *sw)
652 uint64_t cycles_per_adapter_tick, start_cycles;
653 uint64_t *next_tick_cyclesp;
655 next_tick_cyclesp = &sw->next_tick_cycles;
656 cycles_per_adapter_tick = sw->timer_tick_ns *
657 (rte_get_timer_hz() / NSECPERSEC);
658 start_cycles = rte_get_timer_cycles();
660 /* Note: initially, *next_tick_cyclesp == 0, so the clause below will
661 * execute, and set things going.
664 if (start_cycles >= *next_tick_cyclesp) {
665 /* Snap the current cycle count to the preceding adapter tick
668 start_cycles -= start_cycles % cycles_per_adapter_tick;
/* Schedule the next tick one full adapter tick after the snap point. */
669 *next_tick_cyclesp = start_cycles + cycles_per_adapter_tick;
677 /* Check that event timer timeout value is in range */
678 static __rte_always_inline int
679 check_timeout(struct rte_event_timer *evtim,
680 const struct rte_event_timer_adapter *adapter)
683 struct swtim *sw = swtim_pmd_priv(adapter);
/* Convert ticks to ns and compare against the adapter's bounds.
 * NOTE(review): the return statements are elided here; the arm path
 * treats -1 as "too late" (past max) and -2 as "too early" (below one
 * tick) — see __swtim_arm_burst.
 */
685 tmo_nsec = evtim->timeout_ticks * sw->timer_tick_ns;
686 if (tmo_nsec > sw->max_tmo_ns)
688 if (tmo_nsec < sw->timer_tick_ns)
694 /* Check that event timer event queue sched type matches destination event queue
697 static __rte_always_inline int
698 check_destination_event_queue(struct rte_event_timer *evtim,
699 const struct rte_event_timer_adapter *adapter)
/* Query the destination queue's schedule type and compare it against
 * the event carried by the timer.
 */
704 ret = rte_event_queue_attr_get(adapter->data->event_dev_id,
706 RTE_EVENT_QUEUE_ATTR_SCHEDULE_TYPE,
709 if ((ret == 0 && evtim->ev.sched_type == sched_type) ||
/* Service-core function driving the software adapter: on each adapter
 * tick, run expired timers via rte_timer_alt_manage (which invokes
 * swtim_callback), recycle expired timer objects, flush any remaining
 * buffered events, and update statistics.
 */
717 swtim_service_func(void *arg)
719 struct rte_event_timer_adapter *adapter = arg;
720 struct swtim *sw = swtim_pmd_priv(adapter);
721 uint16_t nb_evs_flushed = 0;
722 uint16_t nb_evs_invalid = 0;
724 if (swtim_did_tick(sw)) {
725 rte_timer_alt_manage(sw->timer_data_id,
730 /* Return expired timer objects back to mempool */
731 rte_mempool_put_bulk(sw->tim_pool, (void **)sw->expired_timers,
732 sw->n_expired_timers);
733 sw->n_expired_timers = 0;
735 event_buffer_flush(&sw->buffer,
736 adapter->data->event_dev_id,
737 adapter->data->event_port_id,
741 sw->stats.ev_enq_count += nb_evs_flushed;
742 sw->stats.ev_inv_count += nb_evs_invalid;
743 sw->stats.adapter_tick_count++;
749 /* The adapter initialization function rounds the mempool size up to the next
750 * power of 2, so we can take the difference between that value and what the
751 * user requested, and use the space for caches. This avoids a scenario where a
752 * user can't arm the number of timers the adapter was configured with because
753 * mempool objects have been lost to caches.
755 * nb_actual should always be a power of 2, so we can iterate over the powers
756 * of 2 to see what the largest cache size we can use is.
759 compute_msg_mempool_cache_size(uint64_t nb_requested, uint64_t nb_actual)
/* Candidate cache size is acceptable while the total cached objects
 * across all lcores fit in the rounding slack, and the per-lcore size
 * stays within the mempool cache limit and below ~2/3 of the pool.
 * NOTE(review): the loop header and return are elided from this excerpt.
 */
768 if (RTE_MAX_LCORE * size < (int)(nb_actual - nb_requested) &&
769 size < RTE_MEMPOOL_CACHE_MAX_SIZE &&
770 size <= nb_actual / 1.5)
/* Driver init op for the software adapter: allocate per-adapter private
 * data on the configured socket, create the rte_timer object mempool,
 * initialize the timer subsystem and a timer-data instance, set up the
 * event buffer, and register a service component to run the adapter
 * logic, publishing its service ID in shared data.
 */
780 swtim_init(struct rte_event_timer_adapter *adapter)
785 struct rte_service_spec service;
787 /* Allocate storage for private data area */
788 #define SWTIM_NAMESIZE 32
789 char swtim_name[SWTIM_NAMESIZE];
790 snprintf(swtim_name, SWTIM_NAMESIZE, "swtim_%"PRIu8,
792 sw = rte_zmalloc_socket(swtim_name, sizeof(*sw), RTE_CACHE_LINE_SIZE,
793 adapter->data->socket_id);
795 EVTIM_LOG_ERR("failed to allocate space for private data");
800 /* Connect storage to adapter instance */
801 adapter->data->adapter_priv = sw;
802 sw->adapter = adapter;
804 sw->timer_tick_ns = adapter->data->conf.timer_tick_ns;
805 sw->max_tmo_ns = adapter->data->conf.max_tmo_ns;
807 /* Create a timer pool */
808 char pool_name[SWTIM_NAMESIZE];
809 snprintf(pool_name, SWTIM_NAMESIZE, "swtim_pool_%"PRIu8,
811 /* Optimal mempool size is a power of 2 minus one */
812 uint64_t nb_timers = rte_align64pow2(adapter->data->conf.nb_timers);
813 int pool_size = nb_timers - 1;
/* Rounding slack is donated to per-lcore caches; see
 * compute_msg_mempool_cache_size().
 */
814 int cache_size = compute_msg_mempool_cache_size(
815 adapter->data->conf.nb_timers, nb_timers);
816 flags = 0; /* pool is multi-producer, multi-consumer */
817 sw->tim_pool = rte_mempool_create(pool_name, pool_size,
818 sizeof(struct rte_timer), cache_size, 0, NULL, NULL,
819 NULL, NULL, adapter->data->socket_id, flags);
820 if (sw->tim_pool == NULL) {
821 EVTIM_LOG_ERR("failed to create timer object mempool");
826 /* Initialize the variables that track in-use timer lists */
827 for (i = 0; i < RTE_MAX_LCORE; i++)
828 rte_atomic16_init(&sw->in_use[i].v);
830 /* Initialize the timer subsystem and allocate timer data instance */
831 ret = rte_timer_subsystem_init();
/* -EALREADY just means another user initialized it first; not an error. */
833 if (ret != -EALREADY) {
834 EVTIM_LOG_ERR("failed to initialize timer subsystem");
840 ret = rte_timer_data_alloc(&sw->timer_data_id);
842 EVTIM_LOG_ERR("failed to allocate timer data instance");
847 /* Initialize timer event buffer */
848 event_buffer_init(&sw->buffer);
850 sw->adapter = adapter;
852 /* Register a service component to run adapter logic */
853 memset(&service, 0, sizeof(service));
854 snprintf(service.name, RTE_SERVICE_NAME_MAX,
855 "swtim_svc_%"PRIu8, adapter->data->id);
856 service.socket_id = adapter->data->socket_id;
857 service.callback = swtim_service_func;
858 service.callback_userdata = adapter;
/* Service is not multi-thread safe: it must run on a single core. */
859 service.capabilities &= ~(RTE_SERVICE_CAP_MT_SAFE);
860 ret = rte_service_component_register(&service, &sw->service_id);
862 EVTIM_LOG_ERR("failed to register service %s with id %"PRIu32
863 ": err = %d", service.name, sw->service_id,
870 EVTIM_LOG_DBG("registered service %s with id %"PRIu32, service.name,
873 adapter->data->service_id = sw->service_id;
874 adapter->data->service_inited = 1;
/* Error path: release the timer mempool created above. */
878 rte_mempool_free(sw->tim_pool);
/* Per-timer callback for rte_timer_stop_all(): return the timer object
 * to the adapter's mempool.
 */
885 swtim_free_tim(struct rte_timer *tim, void *arg)
887 struct swtim *sw = arg;
889 rte_mempool_put(sw->tim_pool, tim);
892 /* Traverse the list of outstanding timers and put them back in the mempool
893 * before freeing the adapter to avoid leaking the memory.
896 swtim_uninit(struct rte_event_timer_adapter *adapter)
899 struct swtim *sw = swtim_pmd_priv(adapter);
901 /* Free outstanding timers */
902 rte_timer_stop_all(sw->timer_data_id,
/* Tear down the service component before releasing its resources. */
908 ret = rte_service_component_unregister(sw->service_id);
910 EVTIM_LOG_ERR("failed to unregister service component");
914 rte_mempool_free(sw->tim_pool);
/* Detach private data; freeing of sw itself is elided from this excerpt. */
916 adapter->data->adapter_priv = NULL;
/* Count how many service lcores currently have the given service mapped. */
921 static inline int32_t
922 get_mapped_count_for_service(uint32_t service_id)
924 int32_t core_count, i, mapped_count = 0;
925 uint32_t lcore_arr[RTE_MAX_LCORE];
927 core_count = rte_service_lcore_list(lcore_arr, RTE_MAX_LCORE);
929 for (i = 0; i < core_count; i++)
930 if (rte_service_map_lcore_get(service_id, lcore_arr[i]) == 1)
/* Driver start op: require exactly one mapped service core, then set the
 * service component's runstate to running.
 */
937 swtim_start(const struct rte_event_timer_adapter *adapter)
940 struct swtim *sw = swtim_pmd_priv(adapter);
942 /* Mapping the service to more than one service core can introduce
943 * delays while one thread is waiting to acquire a lock, so only allow
944 * one core to be mapped to the service.
946 * Note: the service could be modified such that it spreads cores to
947 * poll over multiple service instances.
949 mapped_count = get_mapped_count_for_service(sw->service_id);
/* -ENOENT: no core mapped; -ENOTSUP: more than one core mapped. */
951 if (mapped_count != 1)
952 return mapped_count < 1 ? -ENOENT : -ENOTSUP;
954 return rte_service_component_runstate_set(sw->service_id, 1);
/* Driver stop op: clear the service runstate, then busy-wait until the
 * service has finished any in-flight iteration.
 */
958 swtim_stop(const struct rte_event_timer_adapter *adapter)
961 struct swtim *sw = swtim_pmd_priv(adapter);
963 ret = rte_service_component_runstate_set(sw->service_id, 0);
967 /* Wait for the service to complete its final iteration */
968 while (rte_service_may_be_active(sw->service_id))
/* Driver get_info op: report the tick resolution and maximum timeout. */
975 swtim_get_info(const struct rte_event_timer_adapter *adapter,
976 struct rte_event_timer_adapter_info *adapter_info)
978 struct swtim *sw = swtim_pmd_priv(adapter);
979 adapter_info->min_resolution_ns = sw->timer_tick_ns;
980 adapter_info->max_tmo_ns = sw->max_tmo_ns;
/* Driver stats_get op: copy out the accumulated statistics. */
984 swtim_stats_get(const struct rte_event_timer_adapter *adapter,
985 struct rte_event_timer_adapter_stats *stats)
987 struct swtim *sw = swtim_pmd_priv(adapter);
988 *stats = sw->stats; /* structure copy */
/* Driver stats_reset op: zero the accumulated statistics. */
993 swtim_stats_reset(const struct rte_event_timer_adapter *adapter)
995 struct swtim *sw = swtim_pmd_priv(adapter);
996 memset(&sw->stats, 0, sizeof(sw->stats));
/* Arm a burst of event timers: bulk-allocate rte_timer objects from the
 * pool, validate each event timer's state/timeout/destination queue,
 * stash the timer and adapter pointers in impl_opaque, and reset each
 * timer into this lcore's list. On bulk-allocation shortfall, unused
 * timer objects are returned (see the put_bulk at the end).
 * NOTE(review): several per-timer error-path lines (rte_errno settings,
 * break statements) are elided from this excerpt.
 */
1001 __swtim_arm_burst(const struct rte_event_timer_adapter *adapter,
1002 struct rte_event_timer **evtims,
1006 struct swtim *sw = swtim_pmd_priv(adapter);
1007 uint32_t lcore_id = rte_lcore_id();
1008 struct rte_timer *tim, *tims[nb_evtims];
1011 #ifdef RTE_LIBRTE_EVENTDEV_DEBUG
1012 /* Check that the service is running. */
1013 if (rte_service_runstate_get(adapter->data->service_id) != 1) {
1019 /* Adjust lcore_id if non-EAL thread. Arbitrarily pick the timer list of
1020 * the highest lcore to insert such timers into
1022 if (lcore_id == LCORE_ID_ANY)
1023 lcore_id = RTE_MAX_LCORE - 1;
1025 /* If this is the first time we're arming an event timer on this lcore,
1026 * mark this lcore as "in use"; this will cause the service
1027 * function to process the timer list that corresponds to this lcore.
1029 if (unlikely(rte_atomic16_test_and_set(&sw->in_use[lcore_id].v))) {
1030 EVTIM_LOG_DBG("Adding lcore id = %u to list of lcores to poll",
1032 sw->poll_lcores[sw->n_poll_lcores] = lcore_id;
1033 ++sw->n_poll_lcores;
1036 ret = rte_mempool_get_bulk(sw->tim_pool, (void **)tims,
1043 for (i = 0; i < nb_evtims; i++) {
1044 /* Don't modify the event timer state in these cases */
1045 if (evtims[i]->state == RTE_EVENT_TIMER_ARMED) {
1046 rte_errno = EALREADY;
1048 } else if (!(evtims[i]->state == RTE_EVENT_TIMER_NOT_ARMED ||
1049 evtims[i]->state == RTE_EVENT_TIMER_CANCELED)) {
/* check_timeout(): -1 = past max timeout, -2 = below one tick. */
1054 ret = check_timeout(evtims[i], adapter);
1055 if (unlikely(ret == -1)) {
1056 evtims[i]->state = RTE_EVENT_TIMER_ERROR_TOOLATE;
1059 } else if (unlikely(ret == -2)) {
1060 evtims[i]->state = RTE_EVENT_TIMER_ERROR_TOOEARLY;
1065 if (unlikely(check_destination_event_queue(evtims[i],
1067 evtims[i]->state = RTE_EVENT_TIMER_ERROR;
1073 rte_timer_init(tim);
/* Stash pointers for swtim_callback and swtim_cancel_burst. */
1075 evtims[i]->impl_opaque[0] = (uintptr_t)tim;
1076 evtims[i]->impl_opaque[1] = (uintptr_t)adapter;
1078 cycles = get_timeout_cycles(evtims[i], adapter);
1079 ret = rte_timer_alt_reset(sw->timer_data_id, tim, cycles,
1080 SINGLE, lcore_id, NULL, evtims[i]);
1082 /* tim was in RUNNING or CONFIG state */
1083 evtims[i]->state = RTE_EVENT_TIMER_ERROR;
1088 EVTIM_LOG_DBG("armed an event timer");
1089 evtims[i]->state = RTE_EVENT_TIMER_ARMED;
/* Return the timer objects that were allocated but never armed. */
1093 rte_mempool_put_bulk(sw->tim_pool,
1094 (void **)&tims[i], nb_evtims - i);
/* Fast-path arm_burst op: thin wrapper over __swtim_arm_burst(). */
1100 swtim_arm_burst(const struct rte_event_timer_adapter *adapter,
1101 struct rte_event_timer **evtims,
1104 return __swtim_arm_burst(adapter, evtims, nb_evtims);
/* Cancel a burst of armed event timers: recover the rte_timer pointer
 * from impl_opaque[0], stop it, return it to the pool, and mark the
 * event timer CANCELED.
 */
1108 swtim_cancel_burst(const struct rte_event_timer_adapter *adapter,
1109 struct rte_event_timer **evtims,
1113 struct rte_timer *timp;
1115 struct swtim *sw = swtim_pmd_priv(adapter);
1117 #ifdef RTE_LIBRTE_EVENTDEV_DEBUG
1118 /* Check that the service is running. */
1119 if (rte_service_runstate_get(adapter->data->service_id) != 1) {
1125 for (i = 0; i < nb_evtims; i++) {
1126 /* Don't modify the event timer state in these cases */
1127 if (evtims[i]->state == RTE_EVENT_TIMER_CANCELED) {
1128 rte_errno = EALREADY;
1130 } else if (evtims[i]->state != RTE_EVENT_TIMER_ARMED) {
1137 opaque = evtims[i]->impl_opaque[0];
1138 timp = (struct rte_timer *)(uintptr_t)opaque;
1139 RTE_ASSERT(timp != NULL);
1141 ret = rte_timer_alt_stop(sw->timer_data_id, timp);
1143 /* Timer is running or being configured */
/* NOTE(review): rte_mempool_put() takes a void *object; the (void **)
 * cast below looks wrong (should presumably be (void *)timp) — it is
 * harmless in effect since the pointer value is the same, but verify
 * against the upstream fix.
 */
1148 rte_mempool_put(sw->tim_pool, (void **)timp);
1150 evtims[i]->state = RTE_EVENT_TIMER_CANCELED;
/* Clear stashed pointers so stale handles are not reused. */
1151 evtims[i]->impl_opaque[0] = 0;
1152 evtims[i]->impl_opaque[1] = 0;
/* Fast-path arm_tmo_tick_burst op: stamp the common timeout onto every
 * event timer, then arm them all via the shared burst path.
 */
1161 swtim_arm_tmo_tick_burst(const struct rte_event_timer_adapter *adapter,
1162 struct rte_event_timer **evtims,
1163 uint64_t timeout_ticks,
1168 for (i = 0; i < nb_evtims; i++)
1169 evtims[i]->timeout_ticks = timeout_ticks;
1171 return __swtim_arm_burst(adapter, evtims, nb_evtims);
/* Ops table for the default software timer adapter, used when the
 * eventdev PMD supplies no adapter ops.
 * NOTE(review): the .init and .stop member lines are elided from this
 * excerpt; swtim_init/swtim_stop above are their apparent targets.
 */
1174 static const struct rte_event_timer_adapter_ops swtim_ops = {
1176 .uninit = swtim_uninit,
1177 .start = swtim_start,
1179 .get_info = swtim_get_info,
1180 .stats_get = swtim_stats_get,
1181 .stats_reset = swtim_stats_reset,
1182 .arm_burst = swtim_arm_burst,
1183 .arm_tmo_tick_burst = swtim_arm_tmo_tick_burst,
1184 .cancel_burst = swtim_cancel_burst,
/* Constructor: register the three adapter log types (general, event
 * buffer, service) and default each to NOTICE level.
 */
1187 RTE_INIT(event_timer_adapter_init_log)
1189 evtim_logtype = rte_log_register("lib.eventdev.adapter.timer");
1190 if (evtim_logtype >= 0)
1191 rte_log_set_level(evtim_logtype, RTE_LOG_NOTICE);
1193 evtim_buffer_logtype = rte_log_register("lib.eventdev.adapter.timer."
1195 if (evtim_buffer_logtype >= 0)
1196 rte_log_set_level(evtim_buffer_logtype, RTE_LOG_NOTICE);
1198 evtim_svc_logtype = rte_log_register("lib.eventdev.adapter.timer.svc");
1199 if (evtim_svc_logtype >= 0)
1200 rte_log_set_level(evtim_svc_logtype, RTE_LOG_NOTICE);