1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2017-2018 Intel Corporation.
10 #include <rte_memzone.h>
11 #include <rte_errno.h>
12 #include <rte_malloc.h>
13 #include <rte_mempool.h>
14 #include <rte_common.h>
15 #include <rte_timer.h>
16 #include <rte_service_component.h>
18 #include "event_timer_adapter_pmd.h"
19 #include "eventdev_pmd.h"
20 #include "rte_event_timer_adapter.h"
21 #include "rte_eventdev.h"
22 #include "eventdev_trace.h"
24 #define DATA_MZ_NAME_MAX_LEN 64
25 #define DATA_MZ_NAME_FORMAT "rte_event_timer_adapter_data_%d"
27 RTE_LOG_REGISTER_SUFFIX(evtim_logtype, adapter.timer, NOTICE);
28 RTE_LOG_REGISTER_SUFFIX(evtim_buffer_logtype, adapter.timer, NOTICE);
29 RTE_LOG_REGISTER_SUFFIX(evtim_svc_logtype, adapter.timer.svc, NOTICE);
/* Lazily-allocated global array of adapter instances; sized
 * RTE_EVENT_TIMER_ADAPTER_NUM_MAX at first create/lookup (see below).
 */
31 static struct rte_event_timer_adapter *adapters;
/* Default software (service-core based) adapter ops, defined at file end. */
33 static const struct event_timer_adapter_ops swtim_ops;
/* Logging helper: prefixes every message with "EVTIMER:", the calling
 * function name, and the source line number.
 */
35 #define EVTIM_LOG(level, logtype, ...) \
36 rte_log(RTE_LOG_ ## level, logtype, \
37 RTE_FMT("EVTIMER: %s() line %u: " RTE_FMT_HEAD(__VA_ARGS__,) \
38 "\n", __func__, __LINE__, RTE_FMT_TAIL(__VA_ARGS__,)))
40 #define EVTIM_LOG_ERR(...) EVTIM_LOG(ERR, evtim_logtype, __VA_ARGS__)
/* Debug variants are real log calls only under RTE_LIBRTE_EVENTDEV_DEBUG;
 * otherwise they compile to no-ops.
 * NOTE(review): the #else/#endif lines between the two sets are elided in
 * this extract — confirm against the full file.
 */
42 #ifdef RTE_LIBRTE_EVENTDEV_DEBUG
43 #define EVTIM_LOG_DBG(...) \
44 EVTIM_LOG(DEBUG, evtim_logtype, __VA_ARGS__)
45 #define EVTIM_BUF_LOG_DBG(...) \
46 EVTIM_LOG(DEBUG, evtim_buffer_logtype, __VA_ARGS__)
47 #define EVTIM_SVC_LOG_DBG(...) \
48 EVTIM_LOG(DEBUG, evtim_svc_logtype, __VA_ARGS__)
50 #define EVTIM_LOG_DBG(...) (void)0
51 #define EVTIM_BUF_LOG_DBG(...) (void)0
52 #define EVTIM_SVC_LOG_DBG(...) (void)0
/* Default port-configuration callback used by rte_event_timer_adapter_create():
 * stops the event device, reconfigures it with one extra event port, sets up
 * that port for the adapter's use, and restarts the device if it was running.
 * NOTE(review): several lines (return type, locals such as dev_id/started/ret,
 * error-path returns, closing brace) are elided in this extract.
 */
56 default_port_conf_cb(uint16_t id, uint8_t event_dev_id, uint8_t *event_port_id,
59 struct rte_event_timer_adapter *adapter;
60 struct rte_eventdev *dev;
61 struct rte_event_dev_config dev_conf;
62 struct rte_event_port_conf *port_conf, def_port_conf = {0};
68 RTE_SET_USED(event_dev_id);
70 adapter = &adapters[id];
71 dev = &rte_eventdevs[adapter->data->event_dev_id];
72 dev_id = dev->data->dev_id;
73 dev_conf = dev->data->dev_conf;
/* Remember whether the device was running so it can be restarted below. */
75 started = dev->data->dev_started;
77 rte_event_dev_stop(dev_id);
/* The new port takes the next free port id; grow the port count by one. */
79 port_id = dev_conf.nb_event_ports;
80 dev_conf.nb_event_ports += 1;
81 ret = rte_event_dev_configure(dev_id, &dev_conf);
83 EVTIM_LOG_ERR("failed to configure event dev %u\n", dev_id);
/* NOTE(review): restart-on-failure recovery path partially elided here. */
85 if (rte_event_dev_start(dev_id))
/* Use caller-provided port config if any, else the device default. */
94 port_conf = &def_port_conf;
95 ret = rte_event_port_default_conf_get(dev_id, port_id,
101 ret = rte_event_port_setup(dev_id, port_id, port_conf);
103 EVTIM_LOG_ERR("failed to setup event port %u on event dev %u\n",
108 *event_port_id = port_id;
/* Restart the device only if it was started when we entered (see line 75). */
111 ret = rte_event_dev_start(dev_id);
/* Public create API: delegates to the _ext variant with the default
 * port-configuration callback (default_port_conf_cb above).
 * NOTE(review): the trailing argument(s) and closing brace are elided.
 */
116 struct rte_event_timer_adapter *
117 rte_event_timer_adapter_create(const struct rte_event_timer_adapter_conf *conf)
119 return rte_event_timer_adapter_create_ext(conf, default_port_conf_cb,
/* Create an event timer adapter with a caller-supplied port-configuration
 * callback. Validates the eventdev and adapter ids, reserves a shared
 * memzone for adapter data, queries PMD capabilities, optionally creates an
 * event port via conf_cb, runs the driver init hook, and publishes the
 * fast-path function pointers.
 * NOTE(review): error-return lines, rte_errno assignments, and the function's
 * closing brace are elided in this extract.
 */
123 struct rte_event_timer_adapter *
124 rte_event_timer_adapter_create_ext(
125 const struct rte_event_timer_adapter_conf *conf,
126 rte_event_timer_adapter_port_conf_cb_t conf_cb,
130 struct rte_event_timer_adapter *adapter;
131 const struct rte_memzone *mz;
132 char mz_name[DATA_MZ_NAME_MAX_LEN];
134 struct rte_eventdev *dev;
/* First-use allocation of the global adapter table (zeroed). */
136 if (adapters == NULL) {
137 adapters = rte_zmalloc("Eventdev",
138 sizeof(struct rte_event_timer_adapter) *
139 RTE_EVENT_TIMER_ADAPTER_NUM_MAX,
140 RTE_CACHE_LINE_SIZE);
141 if (adapters == NULL) {
152 /* Check eventdev ID */
153 if (!rte_event_pmd_is_valid_dev(conf->event_dev_id)) {
157 dev = &rte_eventdevs[conf->event_dev_id];
159 adapter_id = conf->timer_adapter_id;
161 /* Check that adapter_id is in range */
162 if (adapter_id >= RTE_EVENT_TIMER_ADAPTER_NUM_MAX) {
167 /* Check adapter ID not already allocated */
168 adapter = &adapters[adapter_id];
169 if (adapter->allocated) {
174 /* Create shared data area. */
175 n = snprintf(mz_name, sizeof(mz_name), DATA_MZ_NAME_FORMAT, adapter_id);
176 if (n >= (int)sizeof(mz_name)) {
180 mz = rte_memzone_reserve(mz_name,
181 sizeof(struct rte_event_timer_adapter_data),
184 /* rte_errno set by rte_memzone_reserve */
187 adapter->data = mz->addr;
188 memset(adapter->data, 0, sizeof(struct rte_event_timer_adapter_data));
190 adapter->data->mz = mz;
191 adapter->data->event_dev_id = conf->event_dev_id;
192 adapter->data->id = adapter_id;
193 adapter->data->socket_id = conf->socket_id;
194 adapter->data->conf = *conf; /* copy conf structure */
196 /* Query eventdev PMD for timer adapter capabilities and ops */
197 ret = dev->dev_ops->timer_adapter_caps_get(dev,
198 adapter->data->conf.flags,
199 &adapter->data->caps,
/* Without the INTERNAL_PORT capability, a dedicated event port must be
 * provisioned via the caller's conf_cb.
 */
206 if (!(adapter->data->caps &
207 RTE_EVENT_TIMER_ADAPTER_CAP_INTERNAL_PORT)) {
208 FUNC_PTR_OR_NULL_RET_WITH_ERRNO(conf_cb, EINVAL);
209 ret = conf_cb(adapter->data->id, adapter->data->event_dev_id,
210 &adapter->data->event_port_id, conf_arg);
217 /* If eventdev PMD did not provide ops, use default software
220 if (adapter->ops == NULL)
221 adapter->ops = &swtim_ops;
223 /* Allow driver to do some setup */
224 FUNC_PTR_OR_NULL_RET_WITH_ERRNO(adapter->ops->init, ENOTSUP);
225 ret = adapter->ops->init(adapter);
231 /* Set fast-path function pointers */
232 adapter->arm_burst = adapter->ops->arm_burst;
233 adapter->arm_tmo_tick_burst = adapter->ops->arm_tmo_tick_burst;
234 adapter->cancel_burst = adapter->ops->cancel_burst;
236 adapter->allocated = 1;
238 rte_eventdev_trace_timer_adapter_create(adapter_id, adapter, conf,
/* Error-path cleanup: release the shared data memzone. */
243 rte_memzone_free(adapter->data->mz);
/* Fill adapter_info: lets the driver populate driver-specific fields first
 * (optional get_info op), then sets the common conf/port/caps values.
 * NOTE(review): return type, NULL-check of adapter_info, return statement,
 * and closing brace are elided in this extract.
 */
248 rte_event_timer_adapter_get_info(const struct rte_event_timer_adapter *adapter,
249 struct rte_event_timer_adapter_info *adapter_info)
251 ADAPTER_VALID_OR_ERR_RET(adapter, -EINVAL);
253 if (adapter->ops->get_info)
254 /* let driver set values it knows */
255 adapter->ops->get_info(adapter, adapter_info);
257 /* Set common values */
258 adapter_info->conf = adapter->data->conf;
259 adapter_info->event_dev_port_id = adapter->data->event_port_id;
260 adapter_info->caps = adapter->data->caps;
/* Start the adapter via the driver's start op; rejects a double start and
 * records the started flag on success.
 * NOTE(review): return types, error returns, and closing braces of both
 * functions are elided in this extract.
 */
266 rte_event_timer_adapter_start(const struct rte_event_timer_adapter *adapter)
270 ADAPTER_VALID_OR_ERR_RET(adapter, -EINVAL);
271 FUNC_PTR_OR_ERR_RET(adapter->ops->start, -EINVAL);
273 if (adapter->data->started) {
274 EVTIM_LOG_ERR("event timer adapter %"PRIu8" already started",
279 ret = adapter->ops->start(adapter);
283 adapter->data->started = 1;
284 rte_eventdev_trace_timer_adapter_start(adapter);
/* Stop the adapter via the driver's stop op; rejects a double stop and
 * clears the started flag on success.
 */
289 rte_event_timer_adapter_stop(const struct rte_event_timer_adapter *adapter)
293 ADAPTER_VALID_OR_ERR_RET(adapter, -EINVAL);
294 FUNC_PTR_OR_ERR_RET(adapter->ops->stop, -EINVAL);
296 if (adapter->data->started == 0) {
297 EVTIM_LOG_ERR("event timer adapter %"PRIu8" already stopped",
302 ret = adapter->ops->stop(adapter);
306 adapter->data->started = 0;
307 rte_eventdev_trace_timer_adapter_stop(adapter);
/* Look up an adapter by id, typically from a secondary process: finds the
 * shared-data memzone reserved by create(), re-attaches the local adapter
 * struct to it, re-queries PMD caps/ops, and republishes the fast-path
 * pointers.
 * NOTE(review): NULL-memzone handling, error returns, the final return
 * statement, and the closing brace are elided in this extract.
 */
311 struct rte_event_timer_adapter *
312 rte_event_timer_adapter_lookup(uint16_t adapter_id)
314 char name[DATA_MZ_NAME_MAX_LEN];
315 const struct rte_memzone *mz;
316 struct rte_event_timer_adapter_data *data;
317 struct rte_event_timer_adapter *adapter;
319 struct rte_eventdev *dev;
/* First-use allocation of the global adapter table (mirrors create_ext). */
321 if (adapters == NULL) {
322 adapters = rte_zmalloc("Eventdev",
323 sizeof(struct rte_event_timer_adapter) *
324 RTE_EVENT_TIMER_ADAPTER_NUM_MAX,
325 RTE_CACHE_LINE_SIZE);
326 if (adapters == NULL) {
332 if (adapters[adapter_id].allocated)
333 return &adapters[adapter_id]; /* Adapter is already loaded */
335 snprintf(name, DATA_MZ_NAME_MAX_LEN, DATA_MZ_NAME_FORMAT, adapter_id);
336 mz = rte_memzone_lookup(name);
/* Re-attach the process-local adapter entry to the shared data area. */
344 adapter = &adapters[data->id];
345 adapter->data = data;
347 dev = &rte_eventdevs[adapter->data->event_dev_id];
349 /* Query eventdev PMD for timer adapter capabilities and ops */
350 ret = dev->dev_ops->timer_adapter_caps_get(dev,
351 adapter->data->conf.flags,
352 &adapter->data->caps,
359 /* If eventdev PMD did not provide ops, use default software
362 if (adapter->ops == NULL)
363 adapter->ops = &swtim_ops;
365 /* Set fast-path function pointers */
366 adapter->arm_burst = adapter->ops->arm_burst;
367 adapter->arm_tmo_tick_burst = adapter->ops->arm_tmo_tick_burst;
368 adapter->cancel_burst = adapter->ops->cancel_burst;
370 adapter->allocated = 1;
/* Free a stopped adapter: runs the driver uninit hook, frees the shared-data
 * memzone, and clears the local slot. The loop over all adapter slots
 * appears to check whether any adapter is still allocated (presumably to
 * decide whether the global table can be freed — TODO confirm against the
 * elided lines that follow it).
 * NOTE(review): return type, locals, several error returns, and the closing
 * brace are elided in this extract.
 */
376 rte_event_timer_adapter_free(struct rte_event_timer_adapter *adapter)
380 ADAPTER_VALID_OR_ERR_RET(adapter, -EINVAL);
381 FUNC_PTR_OR_ERR_RET(adapter->ops->uninit, -EINVAL);
/* Refuse to free a running adapter; caller must stop it first. */
383 if (adapter->data->started == 1) {
384 EVTIM_LOG_ERR("event timer adapter %"PRIu8" must be stopped "
385 "before freeing", adapter->data->id);
389 /* free impl priv data */
390 ret = adapter->ops->uninit(adapter);
394 /* free shared data area */
395 ret = rte_memzone_free(adapter->data->mz);
399 adapter->data = NULL;
400 adapter->allocated = 0;
403 for (i = 0; i < RTE_EVENT_TIMER_ADAPTER_NUM_MAX; i++)
404 if (adapters[i].allocated)
405 ret = adapters[i].allocated;
412 rte_eventdev_trace_timer_adapter_free(adapter);
/* Report the service id backing this adapter, if the software implementation
 * registered one; returns -ESRCH when no service was initialized.
 * NOTE(review): return types and closing braces of all three functions below
 * are elided in this extract.
 */
417 rte_event_timer_adapter_service_id_get(struct rte_event_timer_adapter *adapter,
418 uint32_t *service_id)
420 ADAPTER_VALID_OR_ERR_RET(adapter, -EINVAL);
422 if (adapter->data->service_inited && service_id != NULL)
423 *service_id = adapter->data->service_id;
425 return adapter->data->service_inited ? 0 : -ESRCH;
/* Fetch adapter statistics through the driver's stats_get op. */
429 rte_event_timer_adapter_stats_get(struct rte_event_timer_adapter *adapter,
430 struct rte_event_timer_adapter_stats *stats)
432 ADAPTER_VALID_OR_ERR_RET(adapter, -EINVAL);
433 FUNC_PTR_OR_ERR_RET(adapter->ops->stats_get, -EINVAL);
437 return adapter->ops->stats_get(adapter, stats);
/* Reset adapter statistics through the driver's stats_reset op. */
441 rte_event_timer_adapter_stats_reset(struct rte_event_timer_adapter *adapter)
443 ADAPTER_VALID_OR_ERR_RET(adapter, -EINVAL);
444 FUNC_PTR_OR_ERR_RET(adapter->ops->stats_reset, -EINVAL);
445 return adapter->ops->stats_reset(adapter);
449 * Software event timer adapter buffer helper functions
452 #define NSECPERSEC 1E9
454 /* Optimizations used to index into the buffer require that the buffer size
/* EVENT_BUFFER_SZ must stay a power of two so head/tail can be masked into
 * indices with EVENT_BUFFER_MASK instead of a modulus (see event_buffer_add).
 */
457 #define EVENT_BUFFER_SZ 4096
458 #define EVENT_BUFFER_BATCHSZ 32
459 #define EVENT_BUFFER_MASK (EVENT_BUFFER_SZ - 1)
461 #define EXP_TIM_BUF_SZ 128
/* Ring of expiry events awaiting enqueue to the event device; head/tail are
 * free-running counters (fields elided in this extract — TODO confirm types).
 */
463 struct event_buffer {
466 struct rte_event events[EVENT_BUFFER_SZ];
467 } __rte_cache_aligned;
/* True when the ring holds EVENT_BUFFER_SZ entries; relies on head/tail being
 * free-running counters whose difference is the fill level.
 * NOTE(review): return types, braces, and some lines of these four helpers
 * are elided in this extract.
 */
470 event_buffer_full(struct event_buffer *bufp)
472 return (bufp->head - bufp->tail) == EVENT_BUFFER_SZ;
/* True once a full enqueue batch (EVENT_BUFFER_BATCHSZ) has accumulated. */
476 event_buffer_batch_ready(struct event_buffer *bufp)
478 return (bufp->head - bufp->tail) >= EVENT_BUFFER_BATCHSZ;
/* Reset the ring to empty and zero the event storage. */
482 event_buffer_init(struct event_buffer *bufp)
484 bufp->head = bufp->tail = 0;
485 memset(&bufp->events, 0, sizeof(struct rte_event) * EVENT_BUFFER_SZ);
/* Copy one event into the ring at the masked head index; fails when full.
 * NOTE(review): the head increment and return statements are elided here.
 */
489 event_buffer_add(struct event_buffer *bufp, struct rte_event *eventp)
492 struct rte_event *buf_eventp;
494 if (event_buffer_full(bufp))
497 /* Instead of modulus, bitwise AND with mask to get head_idx. */
498 head_idx = bufp->head & EVENT_BUFFER_MASK;
499 buf_eventp = &bufp->events[head_idx];
500 rte_memcpy(buf_eventp, eventp, sizeof(struct rte_event));
502 /* Wrap automatically when overflow occurs. */
/* Enqueue the largest contiguous run of buffered events to the event device
 * port. Reports how many were flushed and how many were dropped as invalid;
 * the tail advances by flushed + invalid so neither is retried (invalid
 * events are skipped, ENOSPC events remain for the next attempt).
 * NOTE(review): return type, some locals, and parts of the errno-handling
 * branches are elided in this extract.
 */
509 event_buffer_flush(struct event_buffer *bufp, uint8_t dev_id, uint8_t port_id,
510 uint16_t *nb_events_flushed,
511 uint16_t *nb_events_inv)
513 struct rte_event *events = bufp->events;
514 size_t head_idx, tail_idx;
517 /* Instead of modulus, bitwise AND with mask to get index. */
518 head_idx = bufp->head & EVENT_BUFFER_MASK;
519 tail_idx = bufp->tail & EVENT_BUFFER_MASK;
521 RTE_ASSERT(head_idx < EVENT_BUFFER_SZ && tail_idx < EVENT_BUFFER_SZ);
523 /* Determine the largest contiguous run we can attempt to enqueue to the
/* head==tail is ambiguous (empty or full); disambiguate via fill level. */
526 if (head_idx > tail_idx)
527 n = head_idx - tail_idx;
528 else if (head_idx < tail_idx)
529 n = EVENT_BUFFER_SZ - tail_idx;
530 else if (event_buffer_full(bufp))
531 n = EVENT_BUFFER_SZ - tail_idx;
533 *nb_events_flushed = 0;
/* Cap each flush at one batch worth of events. */
537 n = RTE_MIN(EVENT_BUFFER_BATCHSZ, n);
540 *nb_events_flushed = rte_event_enqueue_burst(dev_id, port_id,
541 &events[tail_idx], n);
542 if (*nb_events_flushed != n) {
543 if (rte_errno == EINVAL) {
544 EVTIM_LOG_ERR("failed to enqueue invalid event - "
547 } else if (rte_errno == ENOSPC)
551 if (*nb_events_flushed > 0)
552 EVTIM_BUF_LOG_DBG("enqueued %"PRIu16" timer events to event "
553 "device", *nb_events_flushed);
/* Consume both successfully-flushed and invalid (skipped) events. */
555 bufp->tail = bufp->tail + *nb_events_flushed + *nb_events_inv;
559 * Software event timer adapter implementation
/* Private state of the software timer adapter (one per adapter instance).
 * NOTE(review): several fields (service_id, max_tmo_ns, n_poll_lcores, and
 * the in_use element type) are elided in this extract.
 */
562 /* Identifier of service executing timer management logic. */
564 /* The cycle count at which the adapter should next tick */
565 uint64_t next_tick_cycles;
566 /* The tick resolution used by adapter instance. May have been
567 * adjusted from what user requested
569 uint64_t timer_tick_ns;
570 /* Maximum timeout in nanoseconds allowed by adapter instance. */
572 /* Buffered timer expiry events to be enqueued to an event device. */
573 struct event_buffer buffer;
575 struct rte_event_timer_adapter_stats stats;
576 /* Mempool of timer objects */
577 struct rte_mempool *tim_pool;
578 /* Back pointer for convenience */
579 struct rte_event_timer_adapter *adapter;
580 /* Identifier of timer data instance */
581 uint32_t timer_data_id;
582 /* Track which cores have actually armed a timer */
585 } __rte_cache_aligned in_use[RTE_MAX_LCORE];
586 /* Track which cores' timer lists should be polled */
587 unsigned int poll_lcores[RTE_MAX_LCORE];
588 /* The number of lists that should be polled */
590 /* Timers which have expired and can be returned to a mempool */
591 struct rte_timer *expired_timers[EXP_TIM_BUF_SZ];
592 /* The number of timers that can be returned to a mempool */
593 size_t n_expired_timers;
/* Accessor for the software adapter's private data. */
596 static inline struct swtim *
597 swtim_pmd_priv(const struct rte_event_timer_adapter *adapter)
599 return adapter->data->adapter_priv;
/* rte_timer expiry callback: buffers the event timer's expiry event for
 * enqueue to the event device. If the buffer is full, the rte_timer is
 * re-armed with an immediate expiry so the event is retried on the next
 * service pass; otherwise the timer object is queued for bulk return to the
 * mempool and the event timer is marked NOT_ARMED.
 * NOTE(review): return type, some locals (sw, opaque, ret, n_lcores), and
 * branch/closing braces are elided in this extract.
 */
603 swtim_callback(struct rte_timer *tim)
605 struct rte_event_timer *evtim = tim->arg;
606 struct rte_event_timer_adapter *adapter;
607 unsigned int lcore = rte_lcore_id();
609 uint16_t nb_evs_flushed = 0;
610 uint16_t nb_evs_invalid = 0;
/* impl_opaque[1] stores the owning adapter pointer (set at arm time). */
615 opaque = evtim->impl_opaque[1];
616 adapter = (struct rte_event_timer_adapter *)(uintptr_t)opaque;
617 sw = swtim_pmd_priv(adapter);
619 ret = event_buffer_add(&sw->buffer, &evtim->ev);
621 /* If event buffer is full, put timer back in list with
622 * immediate expiry value, so that we process it again on the
625 ret = rte_timer_alt_reset(sw->timer_data_id, tim, 0, SINGLE,
628 EVTIM_LOG_DBG("event buffer full, failed to reset "
629 "timer with immediate expiry value");
631 sw->stats.evtim_retry_count++;
632 EVTIM_LOG_DBG("event buffer full, resetting rte_timer "
633 "with immediate expiry value");
/* First expiry processed on this lcore: publish it in poll_lcores so
 * the service function polls this core's timer list.
 */
636 if (unlikely(sw->in_use[lcore].v == 0)) {
637 sw->in_use[lcore].v = 1;
638 n_lcores = __atomic_fetch_add(&sw->n_poll_lcores, 1,
640 __atomic_store_n(&sw->poll_lcores[n_lcores], lcore,
644 EVTIM_BUF_LOG_DBG("buffered an event timer expiry event");
646 /* Empty the buffer here, if necessary, to free older expired
649 if (unlikely(sw->n_expired_timers == EXP_TIM_BUF_SZ)) {
650 rte_mempool_put_bulk(sw->tim_pool,
651 (void **)sw->expired_timers,
652 sw->n_expired_timers);
653 sw->n_expired_timers = 0;
656 sw->expired_timers[sw->n_expired_timers++] = tim;
657 sw->stats.evtim_exp_count++;
/* Release ordering pairs with the ACQUIRE load in the arm/cancel paths. */
659 __atomic_store_n(&evtim->state, RTE_EVENT_TIMER_NOT_ARMED,
/* Opportunistically flush once a full batch of expiries is buffered. */
663 if (event_buffer_batch_ready(&sw->buffer)) {
664 event_buffer_flush(&sw->buffer,
665 adapter->data->event_dev_id,
666 adapter->data->event_port_id,
670 sw->stats.ev_enq_count += nb_evs_flushed;
671 sw->stats.ev_inv_count += nb_evs_invalid;
/* Convert an event timer's timeout from adapter ticks to TSC cycles:
 * ticks * tick_ns gives nanoseconds, then scaled by timer_hz / NSECPERSEC.
 * NOTE(review): closing brace elided in this extract.
 */
675 static __rte_always_inline uint64_t
676 get_timeout_cycles(struct rte_event_timer *evtim,
677 const struct rte_event_timer_adapter *adapter)
679 struct swtim *sw = swtim_pmd_priv(adapter);
680 uint64_t timeout_ns = evtim->timeout_ticks * sw->timer_tick_ns;
681 return timeout_ns * rte_get_timer_hz() / NSECPERSEC;
684 /* This function returns true if one or more (adapter) ticks have occurred since
685 * the last time it was called.
688 swtim_did_tick(struct swtim *sw)
690 uint64_t cycles_per_adapter_tick, start_cycles;
691 uint64_t *next_tick_cyclesp;
693 next_tick_cyclesp = &sw->next_tick_cycles;
694 cycles_per_adapter_tick = sw->timer_tick_ns *
695 (rte_get_timer_hz() / NSECPERSEC);
696 start_cycles = rte_get_timer_cycles();
698 /* Note: initially, *next_tick_cyclesp == 0, so the clause below will
699 * execute, and set things going.
702 if (start_cycles >= *next_tick_cyclesp) {
703 /* Snap the current cycle count to the preceding adapter tick
706 start_cycles -= start_cycles % cycles_per_adapter_tick;
707 *next_tick_cyclesp = start_cycles + cycles_per_adapter_tick;
715 /* Check that event timer timeout value is in range */
/* Returns distinct error codes for too-late vs. too-early timeouts; the arm
 * path maps these to TOOLATE/TOOEARLY states. NOTE(review): the actual
 * return-value lines are elided — confirm -1/-2 convention in the full file.
 */
716 static __rte_always_inline int
717 check_timeout(struct rte_event_timer *evtim,
718 const struct rte_event_timer_adapter *adapter)
721 struct swtim *sw = swtim_pmd_priv(adapter);
723 tmo_nsec = evtim->timeout_ticks * sw->timer_tick_ns;
724 if (tmo_nsec > sw->max_tmo_ns)
726 if (tmo_nsec < sw->timer_tick_ns)
732 /* Check that event timer event queue sched type matches destination event queue
735 static __rte_always_inline int
736 check_destination_event_queue(struct rte_event_timer *evtim,
737 const struct rte_event_timer_adapter *adapter)
742 ret = rte_event_queue_attr_get(adapter->data->event_dev_id,
744 RTE_EVENT_QUEUE_ATTR_SCHEDULE_TYPE,
747 if ((ret == 0 && evtim->ev.sched_type == sched_type) ||
/* Service-core function driving the software adapter: on each adapter tick,
 * manage expired timers (which invokes swtim_callback per expiry), bulk-return
 * expired timer objects to the mempool, flush buffered events to the event
 * device, and update stats. Always calls rte_event_maintain for devices that
 * require it.
 * NOTE(review): return type, rte_timer_alt_manage arguments, and closing
 * braces are elided in this extract.
 */
755 swtim_service_func(void *arg)
757 struct rte_event_timer_adapter *adapter = arg;
758 struct swtim *sw = swtim_pmd_priv(adapter);
759 uint16_t nb_evs_flushed = 0;
760 uint16_t nb_evs_invalid = 0;
762 if (swtim_did_tick(sw)) {
763 rte_timer_alt_manage(sw->timer_data_id,
768 /* Return expired timer objects back to mempool */
769 rte_mempool_put_bulk(sw->tim_pool, (void **)sw->expired_timers,
770 sw->n_expired_timers);
771 sw->n_expired_timers = 0;
773 event_buffer_flush(&sw->buffer,
774 adapter->data->event_dev_id,
775 adapter->data->event_port_id,
779 sw->stats.ev_enq_count += nb_evs_flushed;
780 sw->stats.ev_inv_count += nb_evs_invalid;
781 sw->stats.adapter_tick_count++;
784 rte_event_maintain(adapter->data->event_dev_id,
785 adapter->data->event_port_id, 0);
790 /* The adapter initialization function rounds the mempool size up to the next
791 * power of 2, so we can take the difference between that value and what the
792 * user requested, and use the space for caches. This avoids a scenario where a
793 * user can't arm the number of timers the adapter was configured with because
794 * mempool objects have been lost to caches.
796 * nb_actual should always be a power of 2, so we can iterate over the powers
797 * of 2 to see what the largest cache size we can use is.
/* NOTE(review): signature line, the loop header iterating `size` over powers
 * of 2, and the return statement are elided in this extract.
 */
800 compute_msg_mempool_cache_size(uint64_t nb_requested, uint64_t nb_actual)
/* A candidate cache size is acceptable only if the total cached objects
 * (RTE_MAX_LCORE * size) fit in the rounding slack, it does not exceed the
 * mempool API's cap, and it stays within the recommended size/1.5 ratio.
 */
809 if (RTE_MAX_LCORE * size < (int)(nb_actual - nb_requested) &&
810 size < RTE_MEMPOOL_CACHE_MAX_SIZE &&
811 size <= nb_actual / 1.5)
/* init op of the software adapter: allocates private state on the adapter's
 * socket, creates the rte_timer object mempool (power-of-2 sized, with the
 * computed per-lcore cache), initializes the timer subsystem and a timer-data
 * instance, initializes the event buffer, and registers the service component
 * that runs swtim_service_func.
 * NOTE(review): return type, several error-path returns/frees, and the
 * closing brace are elided in this extract.
 */
821 swtim_init(struct rte_event_timer_adapter *adapter)
826 struct rte_service_spec service;
828 /* Allocate storage for private data area */
829 #define SWTIM_NAMESIZE 32
830 char swtim_name[SWTIM_NAMESIZE];
831 snprintf(swtim_name, SWTIM_NAMESIZE, "swtim_%"PRIu8,
833 sw = rte_zmalloc_socket(swtim_name, sizeof(*sw), RTE_CACHE_LINE_SIZE,
834 adapter->data->socket_id);
836 EVTIM_LOG_ERR("failed to allocate space for private data");
841 /* Connect storage to adapter instance */
842 adapter->data->adapter_priv = sw;
843 sw->adapter = adapter;
845 sw->timer_tick_ns = adapter->data->conf.timer_tick_ns;
846 sw->max_tmo_ns = adapter->data->conf.max_tmo_ns;
848 /* Create a timer pool */
849 char pool_name[SWTIM_NAMESIZE];
850 snprintf(pool_name, SWTIM_NAMESIZE, "swtim_pool_%"PRIu8,
852 /* Optimal mempool size is a power of 2 minus one */
853 uint64_t nb_timers = rte_align64pow2(adapter->data->conf.nb_timers);
854 int pool_size = nb_timers - 1;
855 int cache_size = compute_msg_mempool_cache_size(
856 adapter->data->conf.nb_timers, nb_timers);
857 flags = 0; /* pool is multi-producer, multi-consumer */
858 sw->tim_pool = rte_mempool_create(pool_name, pool_size,
859 sizeof(struct rte_timer), cache_size, 0, NULL, NULL,
860 NULL, NULL, adapter->data->socket_id, flags);
861 if (sw->tim_pool == NULL) {
862 EVTIM_LOG_ERR("failed to create timer object mempool");
867 /* Initialize the variables that track in-use timer lists */
868 for (i = 0; i < RTE_MAX_LCORE; i++)
871 /* Initialize the timer subsystem and allocate timer data instance */
872 ret = rte_timer_subsystem_init();
/* -EALREADY just means another component initialized it first; not fatal. */
874 if (ret != -EALREADY) {
875 EVTIM_LOG_ERR("failed to initialize timer subsystem");
881 ret = rte_timer_data_alloc(&sw->timer_data_id);
883 EVTIM_LOG_ERR("failed to allocate timer data instance");
888 /* Initialize timer event buffer */
889 event_buffer_init(&sw->buffer);
891 sw->adapter = adapter;
893 /* Register a service component to run adapter logic */
894 memset(&service, 0, sizeof(service));
895 snprintf(service.name, RTE_SERVICE_NAME_MAX,
896 "swtim_svc_%"PRIu8, adapter->data->id);
897 service.socket_id = adapter->data->socket_id;
898 service.callback = swtim_service_func;
899 service.callback_userdata = adapter;
/* The service is not MT-safe: swtim_start enforces a single mapped core. */
900 service.capabilities &= ~(RTE_SERVICE_CAP_MT_SAFE);
901 ret = rte_service_component_register(&service, &sw->service_id);
903 EVTIM_LOG_ERR("failed to register service %s with id %"PRIu32
904 ": err = %d", service.name, sw->service_id,
911 EVTIM_LOG_DBG("registered service %s with id %"PRIu32, service.name,
914 adapter->data->service_id = sw->service_id;
915 adapter->data->service_inited = 1;
/* Error-path cleanup: release the timer object mempool. */
919 rte_mempool_free(sw->tim_pool);
/* rte_timer_stop_all callback: return one timer object to the pool. */
926 swtim_free_tim(struct rte_timer *tim, void *arg)
928 struct swtim *sw = arg;
930 rte_mempool_put(sw->tim_pool, tim);
933 /* Traverse the list of outstanding timers and put them back in the mempool
934 * before freeing the adapter to avoid leaking the memory.
/* uninit op: stop all outstanding timers (returning them via swtim_free_tim),
 * unregister the service component, and free the mempool and private data.
 * NOTE(review): return type, rte_timer_stop_all arguments, the rte_free of
 * sw, and the closing brace are elided in this extract.
 */
937 swtim_uninit(struct rte_event_timer_adapter *adapter)
940 struct swtim *sw = swtim_pmd_priv(adapter);
942 /* Free outstanding timers */
943 rte_timer_stop_all(sw->timer_data_id,
949 ret = rte_service_component_unregister(sw->service_id);
951 EVTIM_LOG_ERR("failed to unregister service component");
955 rte_mempool_free(sw->tim_pool);
957 adapter->data->adapter_priv = NULL;
/* Count how many service lcores are currently mapped to the given service. */
962 static inline int32_t
963 get_mapped_count_for_service(uint32_t service_id)
965 int32_t core_count, i, mapped_count = 0;
966 uint32_t lcore_arr[RTE_MAX_LCORE];
968 core_count = rte_service_lcore_list(lcore_arr, RTE_MAX_LCORE);
970 for (i = 0; i < core_count; i++)
971 if (rte_service_map_lcore_get(service_id, lcore_arr[i]) == 1)
/* start op: requires exactly one mapped service core (the service is not
 * MT-safe), then enables the service component's runstate.
 * NOTE(review): return types and closing braces of the functions below are
 * elided in this extract.
 */
978 swtim_start(const struct rte_event_timer_adapter *adapter)
981 struct swtim *sw = swtim_pmd_priv(adapter);
983 /* Mapping the service to more than one service core can introduce
984 * delays while one thread is waiting to acquire a lock, so only allow
985 * one core to be mapped to the service.
987 * Note: the service could be modified such that it spreads cores to
988 * poll over multiple service instances.
990 mapped_count = get_mapped_count_for_service(sw->service_id);
/* -ENOENT: no core mapped at all; -ENOTSUP: more than one core mapped. */
992 if (mapped_count != 1)
993 return mapped_count < 1 ? -ENOENT : -ENOTSUP;
995 return rte_service_component_runstate_set(sw->service_id, 1);
/* stop op: disable the service runstate and wait for the in-flight service
 * iteration to finish before returning.
 */
999 swtim_stop(const struct rte_event_timer_adapter *adapter)
1002 struct swtim *sw = swtim_pmd_priv(adapter);
1004 ret = rte_service_component_runstate_set(sw->service_id, 0);
1008 /* Wait for the service to complete its final iteration */
1009 while (rte_service_may_be_active(sw->service_id))
/* get_info op: report the tick resolution and maximum timeout in use. */
1016 swtim_get_info(const struct rte_event_timer_adapter *adapter,
1017 struct rte_event_timer_adapter_info *adapter_info)
1019 struct swtim *sw = swtim_pmd_priv(adapter);
1020 adapter_info->min_resolution_ns = sw->timer_tick_ns;
1021 adapter_info->max_tmo_ns = sw->max_tmo_ns;
/* stats_get op: copy out the adapter's statistics structure. */
1025 swtim_stats_get(const struct rte_event_timer_adapter *adapter,
1026 struct rte_event_timer_adapter_stats *stats)
1028 struct swtim *sw = swtim_pmd_priv(adapter);
1029 *stats = sw->stats; /* structure copy */
/* stats_reset op: zero the adapter's statistics. */
1034 swtim_stats_reset(const struct rte_event_timer_adapter *adapter)
1036 struct swtim *sw = swtim_pmd_priv(adapter);
1037 memset(&sw->stats, 0, sizeof(sw->stats));
/* Arm a burst of event timers: registers the calling lcore for polling (CAS
 * on the in_use flag guards against racing non-EAL threads), bulk-allocates
 * rte_timer objects, validates each event timer's state/timeout/destination
 * queue, and arms each via rte_timer_alt_reset. Unused timer objects are
 * bulk-returned to the pool on early exit.
 * NOTE(review): return type, several locals (i, ret, cycles, n_lcores),
 * break/continue statements in the validation branches, the final return,
 * and closing braces are elided in this extract.
 */
1042 __swtim_arm_burst(const struct rte_event_timer_adapter *adapter,
1043 struct rte_event_timer **evtims,
1047 struct swtim *sw = swtim_pmd_priv(adapter);
1048 uint32_t lcore_id = rte_lcore_id();
1049 struct rte_timer *tim, *tims[nb_evtims];
1052 /* Timer list for this lcore is not in use. */
1053 uint16_t exp_state = 0;
1054 enum rte_event_timer_state n_state;
1056 #ifdef RTE_LIBRTE_EVENTDEV_DEBUG
1057 /* Check that the service is running. */
1058 if (rte_service_runstate_get(adapter->data->service_id) != 1) {
1064 /* Adjust lcore_id if non-EAL thread. Arbitrarily pick the timer list of
1065 * the highest lcore to insert such timers into
1067 if (lcore_id == LCORE_ID_ANY)
1068 lcore_id = RTE_MAX_LCORE - 1;
1070 /* If this is the first time we're arming an event timer on this lcore,
1071 * mark this lcore as "in use"; this will cause the service
1072 * function to process the timer list that corresponds to this lcore.
1073 * The atomic compare-and-swap operation can prevent the race condition
1074 * on in_use flag between multiple non-EAL threads.
1076 if (unlikely(__atomic_compare_exchange_n(&sw->in_use[lcore_id].v,
1078 __ATOMIC_RELAXED, __ATOMIC_RELAXED))) {
1079 EVTIM_LOG_DBG("Adding lcore id = %u to list of lcores to poll",
1081 n_lcores = __atomic_fetch_add(&sw->n_poll_lcores, 1,
1083 __atomic_store_n(&sw->poll_lcores[n_lcores], lcore_id,
/* Grab all timer objects up front; avoids per-timer pool accesses. */
1087 ret = rte_mempool_get_bulk(sw->tim_pool, (void **)tims,
1094 for (i = 0; i < nb_evtims; i++) {
/* ACQUIRE pairs with the RELEASE state stores in arm/cancel/expiry. */
1095 n_state = __atomic_load_n(&evtims[i]->state, __ATOMIC_ACQUIRE);
1096 if (n_state == RTE_EVENT_TIMER_ARMED) {
1097 rte_errno = EALREADY;
1099 } else if (!(n_state == RTE_EVENT_TIMER_NOT_ARMED ||
1100 n_state == RTE_EVENT_TIMER_CANCELED)) {
1105 ret = check_timeout(evtims[i], adapter);
1106 if (unlikely(ret == -1)) {
1107 __atomic_store_n(&evtims[i]->state,
1108 RTE_EVENT_TIMER_ERROR_TOOLATE,
1112 } else if (unlikely(ret == -2)) {
1113 __atomic_store_n(&evtims[i]->state,
1114 RTE_EVENT_TIMER_ERROR_TOOEARLY,
1120 if (unlikely(check_destination_event_queue(evtims[i],
1122 __atomic_store_n(&evtims[i]->state,
1123 RTE_EVENT_TIMER_ERROR,
/* Stash the rte_timer and adapter pointers so that the expiry callback
 * and cancel path can recover them from the event timer.
 */
1130 rte_timer_init(tim);
1132 evtims[i]->impl_opaque[0] = (uintptr_t)tim;
1133 evtims[i]->impl_opaque[1] = (uintptr_t)adapter;
1135 cycles = get_timeout_cycles(evtims[i], adapter);
1136 ret = rte_timer_alt_reset(sw->timer_data_id, tim, cycles,
1137 SINGLE, lcore_id, NULL, evtims[i]);
1139 /* tim was in RUNNING or CONFIG state */
1140 __atomic_store_n(&evtims[i]->state,
1141 RTE_EVENT_TIMER_ERROR,
1146 EVTIM_LOG_DBG("armed an event timer");
1147 /* RELEASE ordering guarantees the adapter specific value
1148 * changes observed before the update of state.
1150 __atomic_store_n(&evtims[i]->state, RTE_EVENT_TIMER_ARMED,
/* Return the timer objects that were allocated but never armed. */
1155 rte_mempool_put_bulk(sw->tim_pool,
1156 (void **)&tims[i], nb_evtims - i);
/* arm_burst op: thin wrapper over __swtim_arm_burst. */
1162 swtim_arm_burst(const struct rte_event_timer_adapter *adapter,
1163 struct rte_event_timer **evtims,
1166 return __swtim_arm_burst(adapter, evtims, nb_evtims);
/* cancel_burst op: stop each ARMED event timer's underlying rte_timer,
 * return the timer object to the pool, and mark the event timer CANCELED.
 * Returns the number canceled (elided), stopping early on the first timer
 * that is already canceled, not armed, or currently running/being configured.
 * NOTE(review): return type, locals (i, ret, opaque), break statements, the
 * final return, and closing braces are elided in this extract.
 */
1170 swtim_cancel_burst(const struct rte_event_timer_adapter *adapter,
1171 struct rte_event_timer **evtims,
1175 struct rte_timer *timp;
1177 struct swtim *sw = swtim_pmd_priv(adapter);
1178 enum rte_event_timer_state n_state;
1180 #ifdef RTE_LIBRTE_EVENTDEV_DEBUG
1181 /* Check that the service is running. */
1182 if (rte_service_runstate_get(adapter->data->service_id) != 1) {
1188 for (i = 0; i < nb_evtims; i++) {
1189 /* Don't modify the event timer state in these cases */
1190 /* ACQUIRE ordering guarantees the access of implementation
1191 * specific opaque data under the correct state.
1193 n_state = __atomic_load_n(&evtims[i]->state, __ATOMIC_ACQUIRE);
1194 if (n_state == RTE_EVENT_TIMER_CANCELED) {
1195 rte_errno = EALREADY;
1197 } else if (n_state != RTE_EVENT_TIMER_ARMED) {
/* impl_opaque[0] holds the rte_timer pointer stored at arm time. */
1202 opaque = evtims[i]->impl_opaque[0];
1203 timp = (struct rte_timer *)(uintptr_t)opaque;
1204 RTE_ASSERT(timp != NULL);
1206 ret = rte_timer_alt_stop(sw->timer_data_id, timp);
1208 /* Timer is running or being configured */
1213 rte_mempool_put(sw->tim_pool, (void **)timp);
1215 /* The RELEASE ordering here pairs with atomic ordering
1216 * to make sure the state update data observed between
1219 __atomic_store_n(&evtims[i]->state, RTE_EVENT_TIMER_CANCELED,
/* arm_tmo_tick_burst op: apply one common timeout to every event timer in
 * the burst, then delegate to __swtim_arm_burst.
 */
1227 swtim_arm_tmo_tick_burst(const struct rte_event_timer_adapter *adapter,
1228 struct rte_event_timer **evtims,
1229 uint64_t timeout_ticks,
1234 for (i = 0; i < nb_evtims; i++)
1235 evtims[i]->timeout_ticks = timeout_ticks;
1237 return __swtim_arm_burst(adapter, evtims, nb_evtims);
1240 static const struct event_timer_adapter_ops swtim_ops = {
1242 .uninit = swtim_uninit,
1243 .start = swtim_start,
1245 .get_info = swtim_get_info,
1246 .stats_get = swtim_stats_get,
1247 .stats_reset = swtim_stats_reset,
1248 .arm_burst = swtim_arm_burst,
1249 .arm_tmo_tick_burst = swtim_arm_tmo_tick_burst,
1250 .cancel_burst = swtim_cancel_burst,