/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2017-2018 Intel Corporation.
 * All rights reserved.
 */
#include <string.h>
#include <inttypes.h>
#include <stdbool.h>
#include <sys/queue.h>

#include <rte_memzone.h>
#include <rte_memory.h>
#include <rte_dev.h>
#include <rte_errno.h>
#include <rte_malloc.h>
#include <rte_ring.h>
#include <rte_mempool.h>
#include <rte_common.h>
#include <rte_timer.h>
#include <rte_service_component.h>
#include <rte_cycles.h>
#include "rte_eventdev.h"
#include "rte_eventdev_pmd.h"
#include "rte_event_timer_adapter.h"
#include "rte_event_timer_adapter_pmd.h"
#define DATA_MZ_NAME_MAX_LEN 64
#define DATA_MZ_NAME_FORMAT "rte_event_timer_adapter_data_%d"
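/* E.g. adapter id 7 yields the memzone name "rte_event_timer_adapter_data_7";
 * create_ext() reserves this zone and lookup() finds it again, which is what
 * allows another process or thread to attach to an existing adapter.
 */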
static int evtim_logtype;
static int evtim_svc_logtype;
static int evtim_buffer_logtype;

static struct rte_event_timer_adapter adapters[RTE_EVENT_TIMER_ADAPTER_NUM_MAX];

static const struct rte_event_timer_adapter_ops sw_event_adapter_timer_ops;
#define EVTIM_LOG(level, logtype, ...) \
    rte_log(RTE_LOG_ ## level, logtype, \
            RTE_FMT("EVTIMER: %s() line %u: " RTE_FMT_HEAD(__VA_ARGS__,) \
                    "\n", __func__, __LINE__, RTE_FMT_TAIL(__VA_ARGS__,)))
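/* Example: EVTIM_LOG_ERR("id %d in use", 5) invoked from foo() at line 80
 * produces the message "EVTIMER: foo() line 80: id 5 in use".
 */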
#define EVTIM_LOG_ERR(...) EVTIM_LOG(ERR, evtim_logtype, __VA_ARGS__)
#ifdef RTE_LIBRTE_EVENTDEV_DEBUG
#define EVTIM_LOG_DBG(...) \
    EVTIM_LOG(DEBUG, evtim_logtype, __VA_ARGS__)
#define EVTIM_BUF_LOG_DBG(...) \
    EVTIM_LOG(DEBUG, evtim_buffer_logtype, __VA_ARGS__)
#define EVTIM_SVC_LOG_DBG(...) \
    EVTIM_LOG(DEBUG, evtim_svc_logtype, __VA_ARGS__)
#else
#define EVTIM_LOG_DBG(...) (void)0
#define EVTIM_BUF_LOG_DBG(...) (void)0
#define EVTIM_SVC_LOG_DBG(...) (void)0
#endif
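/* Default port configuration callback: give the adapter its own producer port
 * on the event device by stopping the device if it is running, growing
 * nb_event_ports by one, reconfiguring, setting up the new port, and
 * restarting the device if it was running before.
 */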
static int
default_port_conf_cb(uint16_t id, uint8_t event_dev_id, uint8_t *event_port_id,
                     void *conf_arg)
{
    struct rte_event_timer_adapter *adapter;
    struct rte_eventdev *dev;
    struct rte_event_dev_config dev_conf;
    struct rte_event_port_conf *port_conf, def_port_conf = {0};
    int started;
    uint8_t port_id;
    uint8_t dev_id;
    int ret;

    RTE_SET_USED(event_dev_id);

    adapter = &adapters[id];
    dev = &rte_eventdevs[adapter->data->event_dev_id];
    dev_id = dev->data->dev_id;
    dev_conf = dev->data->dev_conf;

    started = dev->data->dev_started;
    if (started)
        rte_event_dev_stop(dev_id);

    port_id = dev_conf.nb_event_ports;
    dev_conf.nb_event_ports += 1;
    ret = rte_event_dev_configure(dev_id, &dev_conf);
    if (ret < 0) {
        EVTIM_LOG_ERR("failed to configure event dev %u\n", dev_id);
        if (started)
            if (rte_event_dev_start(dev_id))
                return -EIO;

        return ret;
    }

    if (conf_arg != NULL)
        port_conf = conf_arg;
    else {
        port_conf = &def_port_conf;
        ret = rte_event_port_default_conf_get(dev_id, port_id,
                                              port_conf);
        if (ret < 0)
            return ret;
    }

    ret = rte_event_port_setup(dev_id, port_id, port_conf);
    if (ret < 0) {
        EVTIM_LOG_ERR("failed to setup event port %u on event dev %u\n",
                      port_id, dev_id);
        return ret;
    }

    *event_port_id = port_id;

    if (started)
        ret = rte_event_dev_start(dev_id);

    return ret;
}
struct rte_event_timer_adapter * __rte_experimental
rte_event_timer_adapter_create(const struct rte_event_timer_adapter_conf *conf)
{
    return rte_event_timer_adapter_create_ext(conf, default_port_conf_cb,
                                              NULL);
}
struct rte_event_timer_adapter * __rte_experimental
rte_event_timer_adapter_create_ext(
        const struct rte_event_timer_adapter_conf *conf,
        rte_event_timer_adapter_port_conf_cb_t conf_cb,
        void *conf_arg)
{
    uint16_t adapter_id;
    struct rte_event_timer_adapter *adapter;
    const struct rte_memzone *mz;
    char mz_name[DATA_MZ_NAME_MAX_LEN];
    int n, ret;
    struct rte_eventdev *dev;

    if (conf == NULL) {
        rte_errno = EINVAL;
        return NULL;
    }

    /* Check eventdev ID */
    if (!rte_event_pmd_is_valid_dev(conf->event_dev_id)) {
        rte_errno = EINVAL;
        return NULL;
    }
    dev = &rte_eventdevs[conf->event_dev_id];

    adapter_id = conf->timer_adapter_id;

    /* Check that adapter_id is in range */
    if (adapter_id >= RTE_EVENT_TIMER_ADAPTER_NUM_MAX) {
        rte_errno = EINVAL;
        return NULL;
    }

    /* Check adapter ID not already allocated */
    adapter = &adapters[adapter_id];
    if (adapter->allocated) {
        rte_errno = EEXIST;
        return NULL;
    }

    /* Create shared data area. */
    n = snprintf(mz_name, sizeof(mz_name), DATA_MZ_NAME_FORMAT, adapter_id);
    if (n >= (int)sizeof(mz_name)) {
        rte_errno = EINVAL;
        return NULL;
    }
    mz = rte_memzone_reserve(mz_name,
                             sizeof(struct rte_event_timer_adapter_data),
                             conf->socket_id, 0);
    if (mz == NULL)
        /* rte_errno set by rte_memzone_reserve */
        return NULL;

    adapter->data = mz->addr;
    memset(adapter->data, 0, sizeof(struct rte_event_timer_adapter_data));

    adapter->data->mz = mz;
    adapter->data->event_dev_id = conf->event_dev_id;
    adapter->data->id = adapter_id;
    adapter->data->socket_id = conf->socket_id;
    adapter->data->conf = *conf;  /* copy conf structure */

    /* Query eventdev PMD for timer adapter capabilities and ops */
    ret = dev->dev_ops->timer_adapter_caps_get(dev,
                                               adapter->data->conf.flags,
                                               &adapter->data->caps,
                                               &adapter->ops);
    if (ret < 0) {
        rte_errno = EINVAL;
        goto free_memzone;
    }

    if (!(adapter->data->caps &
          RTE_EVENT_TIMER_ADAPTER_CAP_INTERNAL_PORT)) {
        FUNC_PTR_OR_NULL_RET_WITH_ERRNO(conf_cb, -EINVAL);
        ret = conf_cb(adapter->data->id, adapter->data->event_dev_id,
                      &adapter->data->event_port_id, conf_arg);
        if (ret < 0) {
            rte_errno = EINVAL;
            goto free_memzone;
        }
    }

    /* If eventdev PMD did not provide ops, use default software
     * implementation.
     */
    if (adapter->ops == NULL)
        adapter->ops = &sw_event_adapter_timer_ops;

    /* Allow driver to do some setup */
    FUNC_PTR_OR_NULL_RET_WITH_ERRNO(adapter->ops->init, -ENOTSUP);
    ret = adapter->ops->init(adapter);
    if (ret < 0) {
        rte_errno = EINVAL;
        goto free_memzone;
    }

    /* Set fast-path function pointers */
    adapter->arm_burst = adapter->ops->arm_burst;
    adapter->arm_tmo_tick_burst = adapter->ops->arm_tmo_tick_burst;
    adapter->cancel_burst = adapter->ops->cancel_burst;

    adapter->allocated = 1;

    return adapter;

free_memzone:
    rte_memzone_free(adapter->data->mz);
    return NULL;
}
int __rte_experimental
rte_event_timer_adapter_get_info(const struct rte_event_timer_adapter *adapter,
        struct rte_event_timer_adapter_info *adapter_info)
{
    ADAPTER_VALID_OR_ERR_RET(adapter, -EINVAL);

    if (adapter->ops->get_info)
        /* let driver set values it knows */
        adapter->ops->get_info(adapter, adapter_info);

    /* Set common values */
    adapter_info->conf = adapter->data->conf;
    adapter_info->event_dev_port_id = adapter->data->event_port_id;
    adapter_info->caps = adapter->data->caps;

    return 0;
}
int __rte_experimental
rte_event_timer_adapter_start(const struct rte_event_timer_adapter *adapter)
{
    int ret;

    ADAPTER_VALID_OR_ERR_RET(adapter, -EINVAL);
    FUNC_PTR_OR_ERR_RET(adapter->ops->start, -EINVAL);

    ret = adapter->ops->start(adapter);
    if (ret < 0)
        return ret;

    adapter->data->started = 1;

    return 0;
}
int __rte_experimental
rte_event_timer_adapter_stop(const struct rte_event_timer_adapter *adapter)
{
    int ret;

    ADAPTER_VALID_OR_ERR_RET(adapter, -EINVAL);
    FUNC_PTR_OR_ERR_RET(adapter->ops->stop, -EINVAL);

    if (adapter->data->started == 0) {
        EVTIM_LOG_ERR("event timer adapter %"PRIu8" already stopped",
                      adapter->data->id);
        return 0;
    }

    ret = adapter->ops->stop(adapter);
    if (ret < 0)
        return ret;

    adapter->data->started = 0;

    return 0;
}
struct rte_event_timer_adapter * __rte_experimental
rte_event_timer_adapter_lookup(uint16_t adapter_id)
{
    char name[DATA_MZ_NAME_MAX_LEN];
    const struct rte_memzone *mz;
    struct rte_event_timer_adapter_data *data;
    struct rte_event_timer_adapter *adapter;
    int ret;
    struct rte_eventdev *dev;

    if (adapters[adapter_id].allocated)
        return &adapters[adapter_id]; /* Adapter is already loaded */

    snprintf(name, DATA_MZ_NAME_MAX_LEN, DATA_MZ_NAME_FORMAT, adapter_id);
    mz = rte_memzone_lookup(name);
    if (mz == NULL) {
        rte_errno = ENOENT;
        return NULL;
    }

    data = mz->addr;

    adapter = &adapters[data->id];
    adapter->data = data;

    dev = &rte_eventdevs[adapter->data->event_dev_id];

    /* Query eventdev PMD for timer adapter capabilities and ops */
    ret = dev->dev_ops->timer_adapter_caps_get(dev,
                                               adapter->data->conf.flags,
                                               &adapter->data->caps,
                                               &adapter->ops);
    if (ret < 0) {
        rte_errno = EINVAL;
        return NULL;
    }

    /* If eventdev PMD did not provide ops, use default software
     * implementation.
     */
    if (adapter->ops == NULL)
        adapter->ops = &sw_event_adapter_timer_ops;

    /* Set fast-path function pointers */
    adapter->arm_burst = adapter->ops->arm_burst;
    adapter->arm_tmo_tick_burst = adapter->ops->arm_tmo_tick_burst;
    adapter->cancel_burst = adapter->ops->cancel_burst;

    adapter->allocated = 1;

    return adapter;
}
int __rte_experimental
rte_event_timer_adapter_free(struct rte_event_timer_adapter *adapter)
{
    int ret;

    ADAPTER_VALID_OR_ERR_RET(adapter, -EINVAL);
    FUNC_PTR_OR_ERR_RET(adapter->ops->uninit, -EINVAL);

    if (adapter->data->started == 1) {
        EVTIM_LOG_ERR("event timer adapter %"PRIu8" must be stopped "
                      "before freeing", adapter->data->id);
        return -EBUSY;
    }

    /* free impl priv data */
    ret = adapter->ops->uninit(adapter);
    if (ret < 0)
        return ret;

    /* free shared data area */
    ret = rte_memzone_free(adapter->data->mz);
    if (ret < 0)
        return ret;

    adapter->data = NULL;
    adapter->allocated = 0;

    return 0;
}
int __rte_experimental
rte_event_timer_adapter_service_id_get(struct rte_event_timer_adapter *adapter,
        uint32_t *service_id)
{
    ADAPTER_VALID_OR_ERR_RET(adapter, -EINVAL);

    if (adapter->data->service_inited && service_id != NULL)
        *service_id = adapter->data->service_id;

    return adapter->data->service_inited ? 0 : -ESRCH;
}
int __rte_experimental
rte_event_timer_adapter_stats_get(struct rte_event_timer_adapter *adapter,
        struct rte_event_timer_adapter_stats *stats)
{
    ADAPTER_VALID_OR_ERR_RET(adapter, -EINVAL);
    FUNC_PTR_OR_ERR_RET(adapter->ops->stats_get, -EINVAL);
    if (stats == NULL)
        return -EINVAL;

    return adapter->ops->stats_get(adapter, stats);
}
int __rte_experimental
rte_event_timer_adapter_stats_reset(struct rte_event_timer_adapter *adapter)
{
    ADAPTER_VALID_OR_ERR_RET(adapter, -EINVAL);
    FUNC_PTR_OR_ERR_RET(adapter->ops->stats_reset, -EINVAL);
    return adapter->ops->stats_reset(adapter);
}
/*
 * Software event timer adapter buffer helper functions
 */

#define NSECPERSEC 1E9

/* Optimizations used to index into the buffer require that the buffer size
 * be a power of 2.
 */
#define EVENT_BUFFER_SZ 4096
#define EVENT_BUFFER_BATCHSZ 32
#define EVENT_BUFFER_MASK (EVENT_BUFFER_SZ - 1)

struct event_buffer {
    uint16_t head;
    uint16_t tail;
    struct rte_event events[EVENT_BUFFER_SZ];
} __rte_cache_aligned;
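/* head and tail are free-running 16-bit counters; an index is obtained by
 * masking with EVENT_BUFFER_MASK, which only works because EVENT_BUFFER_SZ
 * is a power of 2 (and 65536 is a multiple of it, so unsigned wraparound of
 * the counters does not corrupt head - tail). For example, head = 4100 and
 * tail = 4094 give indexes 4 and 4094, and head - tail = 6 valid events.
 */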
static inline bool
event_buffer_full(struct event_buffer *bufp)
{
    return (bufp->head - bufp->tail) == EVENT_BUFFER_SZ;
}

static inline bool
event_buffer_batch_ready(struct event_buffer *bufp)
{
    return (bufp->head - bufp->tail) >= EVENT_BUFFER_BATCHSZ;
}
static void
event_buffer_init(struct event_buffer *bufp)
{
    bufp->head = bufp->tail = 0;
    memset(&bufp->events, 0, sizeof(struct rte_event) * EVENT_BUFFER_SZ);
}
static int
event_buffer_add(struct event_buffer *bufp, struct rte_event *eventp)
{
    uint16_t head_idx;
    struct rte_event *buf_eventp;

    if (event_buffer_full(bufp))
        return -1;

    /* Instead of modulus, bitwise AND with mask to get head_idx. */
    head_idx = bufp->head & EVENT_BUFFER_MASK;
    buf_eventp = &bufp->events[head_idx];
    rte_memcpy(buf_eventp, eventp, sizeof(struct rte_event));

    /* Wrap automatically when overflow occurs. */
    bufp->head++;

    return 0;
}
static void
event_buffer_flush(struct event_buffer *bufp, uint8_t dev_id, uint8_t port_id,
                   uint16_t *nb_events_flushed,
                   uint16_t *nb_events_inv)
{
    uint16_t head_idx, tail_idx, n = 0;
    struct rte_event *events = bufp->events;

    /* Instead of modulus, bitwise AND with mask to get index. */
    head_idx = bufp->head & EVENT_BUFFER_MASK;
    tail_idx = bufp->tail & EVENT_BUFFER_MASK;

    /* Determine the largest contiguous run we can attempt to enqueue to the
     * event device.
     */
    if (head_idx > tail_idx)
        n = head_idx - tail_idx;
    else if (head_idx < tail_idx)
        n = EVENT_BUFFER_SZ - tail_idx;
    else {
        *nb_events_flushed = 0;
        return;
    }

    *nb_events_inv = 0;
    *nb_events_flushed = rte_event_enqueue_burst(dev_id, port_id,
                                                 &events[tail_idx], n);
    if (*nb_events_flushed != n && rte_errno == -EINVAL) {
        EVTIM_LOG_ERR("failed to enqueue invalid event - dropping it");
        (*nb_events_inv)++;
    }

    bufp->tail = bufp->tail + *nb_events_flushed + *nb_events_inv;
}
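/* Since the enqueue is done from a linear array, a buffer whose valid region
 * wraps past the end of the array takes two flushes to drain: e.g. with tail
 * index 4094 and head index 4, the first call enqueues entries 4094-4095
 * (n = 2) and the next enqueues entries 0-3 (n = 4).
 */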
/*
 * Software event timer adapter implementation
 */

struct rte_event_timer_adapter_sw_data {
    /* List of messages for outstanding timers */
    TAILQ_HEAD(, msg) msgs_tailq_head;
    /* Lock to guard tailq and armed count */
    rte_spinlock_t msgs_tailq_sl;
    /* Identifier of service executing timer management logic. */
    uint32_t service_id;
    /* The cycle count at which the adapter should next tick */
    uint64_t next_tick_cycles;
    /* Incremented as the service moves through phases of an iteration */
    volatile int service_phase;
    /* The tick resolution used by adapter instance. May have been
     * adjusted from what user requested
     */
    uint64_t timer_tick_ns;
    /* Maximum timeout in nanoseconds allowed by adapter instance. */
    uint64_t max_tmo_ns;
    /* Ring containing messages to arm or cancel event timers */
    struct rte_ring *msg_ring;
    /* Mempool containing msg objects */
    struct rte_mempool *msg_pool;
    /* Buffered timer expiry events to be enqueued to an event device. */
    struct event_buffer buffer;
    /* Statistics */
    struct rte_event_timer_adapter_stats stats;
    /* The number of threads currently adding to the message ring */
    rte_atomic16_t message_producer_count;
};
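/* The service_phase field and the message_producer_count counter coordinate
 * producer threads with the service function below: arm/cancel threads
 * increment message_producer_count before enqueuing to msg_ring, and they
 * spin while service_phase == 2, the phase in which the service runs timers
 * and may write event timer state. The service keeps draining msg_ring while
 * producers are active (phase 1), and phase 0 marks the end of an iteration,
 * which the stop path uses to quiesce the adapter.
 */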
enum msg_type {MSG_TYPE_ARM, MSG_TYPE_CANCEL};

struct msg {
    enum msg_type type;
    struct rte_event_timer *evtim;
    struct rte_timer tim;
    TAILQ_ENTRY(msg) msgs;
};
static void
sw_event_timer_cb(struct rte_timer *tim, void *arg)
{
    int ret;
    uint16_t nb_evs_flushed = 0;
    uint16_t nb_evs_invalid = 0;
    uint64_t opaque;
    struct rte_event_timer *evtim;
    struct rte_event_timer_adapter *adapter;
    struct rte_event_timer_adapter_sw_data *sw_data;

    evtim = arg;
    opaque = evtim->impl_opaque[1];
    adapter = (struct rte_event_timer_adapter *)(uintptr_t)opaque;
    sw_data = adapter->data->adapter_priv;

    ret = event_buffer_add(&sw_data->buffer, &evtim->ev);
    if (ret < 0) {
        /* If event buffer is full, put timer back in list with
         * immediate expiry value, so that we process it again on the
         * next iteration.
         */
        rte_timer_reset_sync(tim, 0, SINGLE, rte_lcore_id(),
                             sw_event_timer_cb, evtim);

        sw_data->stats.evtim_retry_count++;
        EVTIM_LOG_DBG("event buffer full, resetting rte_timer with "
                      "immediate expiry value");
    } else {
        struct msg *m = container_of(tim, struct msg, tim);
        TAILQ_REMOVE(&sw_data->msgs_tailq_head, m, msgs);
        EVTIM_BUF_LOG_DBG("buffered an event timer expiry event");
        evtim->state = RTE_EVENT_TIMER_NOT_ARMED;

        /* Free the msg object containing the rte_timer now that
         * we've buffered its event successfully.
         */
        rte_mempool_put(sw_data->msg_pool, m);

        /* Bump the count when we successfully add an expiry event to
         * the buffer.
         */
        sw_data->stats.evtim_exp_count++;
    }

    if (event_buffer_batch_ready(&sw_data->buffer)) {
        event_buffer_flush(&sw_data->buffer,
                           adapter->data->event_dev_id,
                           adapter->data->event_port_id,
                           &nb_evs_flushed,
                           &nb_evs_invalid);

        sw_data->stats.ev_enq_count += nb_evs_flushed;
        sw_data->stats.ev_inv_count += nb_evs_invalid;
    }
}
static __rte_always_inline uint64_t
get_timeout_cycles(struct rte_event_timer *evtim,
                   struct rte_event_timer_adapter *adapter)
{
    uint64_t timeout_ns;
    struct rte_event_timer_adapter_sw_data *sw_data;

    sw_data = adapter->data->adapter_priv;
    timeout_ns = evtim->timeout_ticks * sw_data->timer_tick_ns;
    return timeout_ns * rte_get_timer_hz() / NSECPERSEC;
}
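/* E.g. with a 100 us tick (timer_tick_ns = 1E5) and a 2 GHz timer, an event
 * timer armed with timeout_ticks = 10 is 1E6 ns away, which converts to
 * 1E6 * 2E9 / 1E9 = 2,000,000 timer cycles.
 */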
/* This function returns true if one or more (adapter) ticks have occurred since
 * the last time it was called.
 */
static inline bool
adapter_did_tick(struct rte_event_timer_adapter *adapter)
{
    uint64_t cycles_per_adapter_tick, start_cycles;
    uint64_t *next_tick_cyclesp;
    struct rte_event_timer_adapter_sw_data *sw_data;

    sw_data = adapter->data->adapter_priv;
    next_tick_cyclesp = &sw_data->next_tick_cycles;

    cycles_per_adapter_tick = sw_data->timer_tick_ns *
            (rte_get_timer_hz() / NSECPERSEC);

    start_cycles = rte_get_timer_cycles();

    /* Note: initially, *next_tick_cyclesp == 0, so the clause below will
     * execute, and set things going.
     */

    if (start_cycles >= *next_tick_cyclesp) {
        /* Snap the current cycle count to the preceding adapter tick
         * boundary.
         */
        start_cycles -= start_cycles % cycles_per_adapter_tick;

        *next_tick_cyclesp = start_cycles + cycles_per_adapter_tick;

        return true;
    }

    return false;
}
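/* E.g. with cycles_per_adapter_tick = 2,000,000: a first call at cycle
 * 5,300,000 snaps to 4,000,000 and sets the next tick at 6,000,000; calls
 * before cycle 6,000,000 then return false, so the expiry and flush work in
 * the service function runs at most once per adapter tick.
 */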
/* Check that event timer timeout value is in range */
static __rte_always_inline int
check_timeout(struct rte_event_timer *evtim,
              const struct rte_event_timer_adapter *adapter)
{
    uint64_t tmo_nsec;
    struct rte_event_timer_adapter_sw_data *sw_data;

    sw_data = adapter->data->adapter_priv;
    tmo_nsec = evtim->timeout_ticks * sw_data->timer_tick_ns;

    if (tmo_nsec > sw_data->max_tmo_ns)
        return -1;

    if (tmo_nsec < sw_data->timer_tick_ns)
        return -2;

    return 0;
}
/* Check that event timer event queue sched type matches destination event queue
 * sched type
 */
static __rte_always_inline int
check_destination_event_queue(struct rte_event_timer *evtim,
                              const struct rte_event_timer_adapter *adapter)
{
    int ret;
    uint32_t sched_type;

    ret = rte_event_queue_attr_get(adapter->data->event_dev_id,
                                   evtim->ev.queue_id,
                                   RTE_EVENT_QUEUE_ATTR_SCHEDULE_TYPE,
                                   &sched_type);

    if ((ret < 0 && ret != -EOVERFLOW) ||
        evtim->ev.sched_type != sched_type)
        return -1;

    return 0;
}
#define NB_OBJS 32
static int
sw_event_timer_adapter_service_func(void *arg)
{
    int i, num_msgs;
    uint64_t cycles, opaque;
    uint16_t nb_evs_flushed = 0;
    uint16_t nb_evs_invalid = 0;
    struct rte_event_timer_adapter *adapter;
    struct rte_event_timer_adapter_sw_data *sw_data;
    struct rte_event_timer *evtim = NULL;
    struct rte_timer *tim = NULL;
    struct msg *msg, *msgs[NB_OBJS];

    adapter = arg;
    sw_data = adapter->data->adapter_priv;

    sw_data->service_phase = 1;
    rte_smp_wmb();

    while (rte_atomic16_read(&sw_data->message_producer_count) > 0 ||
           !rte_ring_empty(sw_data->msg_ring)) {

        num_msgs = rte_ring_dequeue_burst(sw_data->msg_ring,
                                          (void **)msgs, NB_OBJS, NULL);

        for (i = 0; i < num_msgs; i++) {
            int ret = 0;

            msg = msgs[i];
            evtim = msg->evtim;

            switch (msg->type) {
            case MSG_TYPE_ARM:
                EVTIM_SVC_LOG_DBG("dequeued ARM message from "
                                  "ring");
                tim = &msg->tim;
                rte_timer_init(tim);
                cycles = get_timeout_cycles(evtim,
                                            adapter);
                ret = rte_timer_reset(tim, cycles, SINGLE,
                                      rte_lcore_id(),
                                      sw_event_timer_cb,
                                      evtim);
                RTE_ASSERT(ret == 0);

                evtim->impl_opaque[0] = (uintptr_t)tim;
                evtim->impl_opaque[1] = (uintptr_t)adapter;

                TAILQ_INSERT_TAIL(&sw_data->msgs_tailq_head,
                                  msg,
                                  msgs);
                break;
            case MSG_TYPE_CANCEL:
                EVTIM_SVC_LOG_DBG("dequeued CANCEL message "
                                  "from ring");
                opaque = evtim->impl_opaque[0];
                tim = (struct rte_timer *)(uintptr_t)opaque;
                RTE_ASSERT(tim != NULL);

                ret = rte_timer_stop(tim);
                RTE_ASSERT(ret == 0);

                /* Free the msg object for the original arm
                 * request.
                 */
                struct msg *m;
                m = container_of(tim, struct msg, tim);
                TAILQ_REMOVE(&sw_data->msgs_tailq_head, m,
                             msgs);
                rte_mempool_put(sw_data->msg_pool, m);

                /* Free the msg object for the current msg */
                rte_mempool_put(sw_data->msg_pool, msg);

                evtim->impl_opaque[0] = 0;
                evtim->impl_opaque[1] = 0;

                break;
            }
        }
    }

    sw_data->service_phase = 2;
    rte_smp_wmb();

    if (adapter_did_tick(adapter)) {
        rte_timer_manage();

        event_buffer_flush(&sw_data->buffer,
                           adapter->data->event_dev_id,
                           adapter->data->event_port_id,
                           &nb_evs_flushed, &nb_evs_invalid);

        sw_data->stats.ev_enq_count += nb_evs_flushed;
        sw_data->stats.ev_inv_count += nb_evs_invalid;
        sw_data->stats.adapter_tick_count++;
    }

    sw_data->service_phase = 0;
    rte_smp_wmb();

    return 0;
}
/* The adapter initialization function rounds the mempool size up to the next
 * power of 2, so we can take the difference between that value and what the
 * user requested, and use the space for caches. This avoids a scenario where a
 * user can't arm the number of timers the adapter was configured with because
 * mempool objects have been lost to caches.
 *
 * nb_actual should always be a power of 2, so we can iterate over the powers
 * of 2 to see what the largest cache size we can use is.
 */
static int
compute_msg_mempool_cache_size(uint64_t nb_requested, uint64_t nb_actual)
{
    int i;
    int size;
    int cache_size = 0;

    for (i = 0;; i++) {
        size = 1 << i;

        if (RTE_MAX_LCORE * size < (int)(nb_actual - nb_requested) &&
            size < RTE_MEMPOOL_CACHE_MAX_SIZE &&
            size <= nb_actual / 1.5)
            cache_size = size;
        else
            break;
    }

    return cache_size;
}
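/* Worked example, assuming RTE_MAX_LCORE is 128: for nb_requested = 10000,
 * nb_actual = rte_align64pow2(10000) = 16384, so the surplus is 6384 objects.
 * Doubling from 1, the largest size with 128 * size < 6384 is 32, so up to
 * 32 objects per lcore cache can come out of the surplus rather than out of
 * the objects the user asked for.
 */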
#define SW_MIN_INTERVAL 1E5

static int
sw_event_timer_adapter_init(struct rte_event_timer_adapter *adapter)
{
    int ret;
    struct rte_event_timer_adapter_sw_data *sw_data;
    uint64_t nb_timers;
    unsigned int flags;
    struct rte_service_spec service;
    static bool timer_subsystem_inited; // static initialized to false

    /* Allocate storage for SW implementation data */
    char priv_data_name[RTE_RING_NAMESIZE];
    snprintf(priv_data_name, RTE_RING_NAMESIZE, "sw_evtim_adap_priv_%"PRIu8,
             adapter->data->id);
    adapter->data->adapter_priv = rte_zmalloc_socket(
            priv_data_name,
            sizeof(struct rte_event_timer_adapter_sw_data),
            RTE_CACHE_LINE_SIZE,
            adapter->data->socket_id);
    if (adapter->data->adapter_priv == NULL) {
        EVTIM_LOG_ERR("failed to allocate space for private data");
        rte_errno = ENOMEM;
        return -1;
    }

    if (adapter->data->conf.timer_tick_ns < SW_MIN_INTERVAL) {
        EVTIM_LOG_ERR("failed to create adapter with requested tick "
                      "interval");
        rte_errno = EINVAL;
        goto free_priv_data;
    }

    sw_data = adapter->data->adapter_priv;

    sw_data->timer_tick_ns = adapter->data->conf.timer_tick_ns;
    sw_data->max_tmo_ns = adapter->data->conf.max_tmo_ns;

    TAILQ_INIT(&sw_data->msgs_tailq_head);
    rte_spinlock_init(&sw_data->msgs_tailq_sl);
    rte_atomic16_init(&sw_data->message_producer_count);

    /* Rings require power of 2, so round up to next such value */
    nb_timers = rte_align64pow2(adapter->data->conf.nb_timers);

    char msg_ring_name[RTE_RING_NAMESIZE];
    snprintf(msg_ring_name, RTE_RING_NAMESIZE,
             "sw_evtim_adap_msg_ring_%"PRIu8, adapter->data->id);
    flags = adapter->data->conf.flags & RTE_EVENT_TIMER_ADAPTER_F_SP_PUT ?
            RING_F_SP_ENQ | RING_F_SC_DEQ :
            0;
    sw_data->msg_ring = rte_ring_create(msg_ring_name, nb_timers,
                                        adapter->data->socket_id, flags);
    if (sw_data->msg_ring == NULL) {
        EVTIM_LOG_ERR("failed to create message ring");
        rte_errno = ENOMEM;
        goto free_priv_data;
    }

    char pool_name[RTE_RING_NAMESIZE];
    snprintf(pool_name, RTE_RING_NAMESIZE, "sw_evtim_adap_msg_pool_%"PRIu8,
             adapter->data->id);

    /* Both the arming/canceling thread and the service thread will do puts
     * to the mempool, but if the SP_PUT flag is enabled, we can specify
     * single-consumer get for the mempool.
     */
    flags = adapter->data->conf.flags & RTE_EVENT_TIMER_ADAPTER_F_SP_PUT ?
            MEMPOOL_F_SC_GET : 0;

    /* The usable size of a ring is count - 1, so subtract one here to
     * make the counts agree.
     */
    int pool_size = nb_timers - 1;
    int cache_size = compute_msg_mempool_cache_size(
            adapter->data->conf.nb_timers, nb_timers);
    sw_data->msg_pool = rte_mempool_create(pool_name, pool_size,
                                           sizeof(struct msg), cache_size,
                                           0, NULL, NULL, NULL, NULL,
                                           adapter->data->socket_id, flags);
    if (sw_data->msg_pool == NULL) {
        EVTIM_LOG_ERR("failed to create message object mempool");
        rte_errno = ENOMEM;
        goto free_msg_ring;
    }

    event_buffer_init(&sw_data->buffer);

    /* Register a service component to run adapter logic */
    memset(&service, 0, sizeof(service));
    snprintf(service.name, RTE_SERVICE_NAME_MAX,
             "sw_evimer_adap_svc_%"PRIu8, adapter->data->id);
    service.socket_id = adapter->data->socket_id;
    service.callback = sw_event_timer_adapter_service_func;
    service.callback_userdata = adapter;
    service.capabilities &= ~(RTE_SERVICE_CAP_MT_SAFE);
    ret = rte_service_component_register(&service, &sw_data->service_id);
    if (ret < 0) {
        EVTIM_LOG_ERR("failed to register service %s with id %"PRIu32
                      ": err = %d", service.name, sw_data->service_id,
                      ret);

        rte_errno = ENOSPC;
        goto free_msg_pool;
    }

    EVTIM_LOG_DBG("registered service %s with id %"PRIu32, service.name,
                  sw_data->service_id);

    adapter->data->service_id = sw_data->service_id;
    adapter->data->service_inited = 1;

    if (!timer_subsystem_inited) {
        rte_timer_subsystem_init();
        timer_subsystem_inited = true;
    }

    return 0;

free_msg_pool:
    rte_mempool_free(sw_data->msg_pool);
free_msg_ring:
    rte_ring_free(sw_data->msg_ring);
free_priv_data:
    rte_free(adapter->data->adapter_priv);
    return -1;
}
static int
sw_event_timer_adapter_uninit(struct rte_event_timer_adapter *adapter)
{
    int ret;
    struct msg *m1, *m2;
    struct rte_event_timer_adapter_sw_data *sw_data =
            adapter->data->adapter_priv;

    rte_spinlock_lock(&sw_data->msgs_tailq_sl);

    /* Cancel outstanding rte_timers and free msg objects */
    m1 = TAILQ_FIRST(&sw_data->msgs_tailq_head);
    while (m1 != NULL) {
        EVTIM_LOG_DBG("freeing outstanding timer");
        m2 = TAILQ_NEXT(m1, msgs);

        rte_timer_stop_sync(&m1->tim);
        rte_mempool_put(sw_data->msg_pool, m1);

        m1 = m2;
    }

    rte_spinlock_unlock(&sw_data->msgs_tailq_sl);

    ret = rte_service_component_unregister(sw_data->service_id);
    if (ret < 0) {
        EVTIM_LOG_ERR("failed to unregister service component");
        return ret;
    }

    rte_ring_free(sw_data->msg_ring);
    rte_mempool_free(sw_data->msg_pool);
    rte_free(adapter->data->adapter_priv);

    return 0;
}
static inline int32_t
get_mapped_count_for_service(uint32_t service_id)
{
    int32_t core_count, i, mapped_count = 0;
    uint32_t lcore_arr[RTE_MAX_LCORE];

    core_count = rte_service_lcore_list(lcore_arr, RTE_MAX_LCORE);

    for (i = 0; i < core_count; i++)
        if (rte_service_map_lcore_get(service_id, lcore_arr[i]) == 1)
            mapped_count++;

    return mapped_count;
}
static int
sw_event_timer_adapter_start(const struct rte_event_timer_adapter *adapter)
{
    int mapped_count;
    struct rte_event_timer_adapter_sw_data *sw_data;

    sw_data = adapter->data->adapter_priv;

    /* Mapping the service to more than one service core can introduce
     * delays while one thread is waiting to acquire a lock, so only allow
     * one core to be mapped to the service.
     */
    mapped_count = get_mapped_count_for_service(sw_data->service_id);

    if (mapped_count == 1)
        return rte_service_component_runstate_set(sw_data->service_id,
                                                  1);

    return mapped_count < 1 ? -ENOENT : -ENOTSUP;
}
static int
sw_event_timer_adapter_stop(const struct rte_event_timer_adapter *adapter)
{
    int ret;
    struct rte_event_timer_adapter_sw_data *sw_data =
            adapter->data->adapter_priv;

    ret = rte_service_component_runstate_set(sw_data->service_id, 0);
    if (ret < 0)
        return ret;

    /* Wait for the service to complete its final iteration before
     * stopping.
     */
    while (sw_data->service_phase != 0)
        rte_pause();

    rte_smp_rmb();

    return 0;
}
static void
sw_event_timer_adapter_get_info(const struct rte_event_timer_adapter *adapter,
        struct rte_event_timer_adapter_info *adapter_info)
{
    struct rte_event_timer_adapter_sw_data *sw_data;
    sw_data = adapter->data->adapter_priv;

    adapter_info->min_resolution_ns = sw_data->timer_tick_ns;
    adapter_info->max_tmo_ns = sw_data->max_tmo_ns;
}
static int
sw_event_timer_adapter_stats_get(const struct rte_event_timer_adapter *adapter,
        struct rte_event_timer_adapter_stats *stats)
{
    struct rte_event_timer_adapter_sw_data *sw_data;
    sw_data = adapter->data->adapter_priv;
    *stats = sw_data->stats;
    return 0;
}
static int
sw_event_timer_adapter_stats_reset(
        const struct rte_event_timer_adapter *adapter)
{
    struct rte_event_timer_adapter_sw_data *sw_data;
    sw_data = adapter->data->adapter_priv;
    memset(&sw_data->stats, 0, sizeof(sw_data->stats));
    return 0;
}
static __rte_always_inline uint16_t
__sw_event_timer_arm_burst(const struct rte_event_timer_adapter *adapter,
                           struct rte_event_timer **evtims,
                           uint16_t nb_evtims)
{
    uint16_t i;
    int ret;
    struct rte_event_timer_adapter_sw_data *sw_data;
    struct msg *msgs[nb_evtims];

#ifdef RTE_LIBRTE_EVENTDEV_DEBUG
    /* Check that the service is running. */
    if (rte_service_runstate_get(adapter->data->service_id) != 1) {
        rte_errno = EINVAL;
        return 0;
    }
#endif

    sw_data = adapter->data->adapter_priv;

    ret = rte_mempool_get_bulk(sw_data->msg_pool, (void **)msgs, nb_evtims);
    if (ret < 0) {
        rte_errno = ENOSPC;
        return 0;
    }

    /* Let the service know we're producing messages for it to process */
    rte_atomic16_inc(&sw_data->message_producer_count);

    /* If the service is managing timers, wait for it to finish */
    while (sw_data->service_phase == 2)
        rte_pause();

    rte_smp_rmb();

    for (i = 0; i < nb_evtims; i++) {
        /* Don't modify the event timer state in these cases */
        if (evtims[i]->state == RTE_EVENT_TIMER_ARMED) {
            rte_errno = EALREADY;
            break;
        } else if (!(evtims[i]->state == RTE_EVENT_TIMER_NOT_ARMED ||
            evtims[i]->state == RTE_EVENT_TIMER_CANCELED)) {
            rte_errno = EINVAL;
            break;
        }

        ret = check_timeout(evtims[i], adapter);
        if (ret == -1) {
            evtims[i]->state = RTE_EVENT_TIMER_ERROR_TOOLATE;
            rte_errno = EINVAL;
            break;
        }
        if (ret == -2) {
            evtims[i]->state = RTE_EVENT_TIMER_ERROR_TOOEARLY;
            rte_errno = EINVAL;
            break;
        }

        if (check_destination_event_queue(evtims[i], adapter) < 0) {
            evtims[i]->state = RTE_EVENT_TIMER_ERROR;
            rte_errno = EINVAL;
            break;
        }

        /* Checks passed, set up a message to enqueue */
        msgs[i]->type = MSG_TYPE_ARM;
        msgs[i]->evtim = evtims[i];

        /* Set the payload pointer if not set. */
        if (evtims[i]->ev.event_ptr == NULL)
            evtims[i]->ev.event_ptr = evtims[i];

        /* msg objects that get enqueued successfully will be freed
         * either by a future cancel operation or by the timer
         * expiration callback.
         */
        if (rte_ring_enqueue(sw_data->msg_ring, msgs[i]) < 0) {
            rte_errno = ENOSPC;
            break;
        }

        EVTIM_LOG_DBG("enqueued ARM message to ring");

        evtims[i]->state = RTE_EVENT_TIMER_ARMED;
    }

    /* Let the service know we're done producing messages */
    rte_atomic16_dec(&sw_data->message_producer_count);

    if (i < nb_evtims)
        rte_mempool_put_bulk(sw_data->msg_pool, (void **)&msgs[i],
                             nb_evtims - i);

    return i;
}
static uint16_t
sw_event_timer_arm_burst(const struct rte_event_timer_adapter *adapter,
                         struct rte_event_timer **evtims,
                         uint16_t nb_evtims)
{
    return __sw_event_timer_arm_burst(adapter, evtims, nb_evtims);
}
static uint16_t
sw_event_timer_cancel_burst(const struct rte_event_timer_adapter *adapter,
                            struct rte_event_timer **evtims,
                            uint16_t nb_evtims)
{
    uint16_t i;
    int ret;
    struct rte_event_timer_adapter_sw_data *sw_data;
    struct msg *msgs[nb_evtims];

#ifdef RTE_LIBRTE_EVENTDEV_DEBUG
    /* Check that the service is running. */
    if (rte_service_runstate_get(adapter->data->service_id) != 1) {
        rte_errno = EINVAL;
        return 0;
    }
#endif

    sw_data = adapter->data->adapter_priv;

    ret = rte_mempool_get_bulk(sw_data->msg_pool, (void **)msgs, nb_evtims);
    if (ret < 0) {
        rte_errno = ENOSPC;
        return 0;
    }

    /* Let the service know we're producing messages for it to process */
    rte_atomic16_inc(&sw_data->message_producer_count);

    /* If the service could be modifying event timer states, wait */
    while (sw_data->service_phase == 2)
        rte_pause();

    rte_smp_rmb();

    for (i = 0; i < nb_evtims; i++) {
        /* Don't modify the event timer state in these cases */
        if (evtims[i]->state == RTE_EVENT_TIMER_CANCELED) {
            rte_errno = EALREADY;
            break;
        } else if (evtims[i]->state != RTE_EVENT_TIMER_ARMED) {
            rte_errno = EINVAL;
            break;
        }

        msgs[i]->type = MSG_TYPE_CANCEL;
        msgs[i]->evtim = evtims[i];

        if (rte_ring_enqueue(sw_data->msg_ring, msgs[i]) < 0) {
            rte_errno = ENOSPC;
            break;
        }

        EVTIM_LOG_DBG("enqueued CANCEL message to ring");

        evtims[i]->state = RTE_EVENT_TIMER_CANCELED;
    }

    /* Let the service know we're done producing messages */
    rte_atomic16_dec(&sw_data->message_producer_count);

    if (i < nb_evtims)
        rte_mempool_put_bulk(sw_data->msg_pool, (void **)&msgs[i],
                             nb_evtims - i);

    return i;
}
static uint16_t
sw_event_timer_arm_tmo_tick_burst(const struct rte_event_timer_adapter *adapter,
                                  struct rte_event_timer **evtims,
                                  uint64_t timeout_ticks,
                                  uint16_t nb_evtims)
{
    int i;

    for (i = 0; i < nb_evtims; i++)
        evtims[i]->timeout_ticks = timeout_ticks;

    return __sw_event_timer_arm_burst(adapter, evtims, nb_evtims);
}
static const struct rte_event_timer_adapter_ops sw_event_adapter_timer_ops = {
    .init = sw_event_timer_adapter_init,
    .uninit = sw_event_timer_adapter_uninit,
    .start = sw_event_timer_adapter_start,
    .stop = sw_event_timer_adapter_stop,
    .get_info = sw_event_timer_adapter_get_info,
    .stats_get = sw_event_timer_adapter_stats_get,
    .stats_reset = sw_event_timer_adapter_stats_reset,
    .arm_burst = sw_event_timer_arm_burst,
    .arm_tmo_tick_burst = sw_event_timer_arm_tmo_tick_burst,
    .cancel_burst = sw_event_timer_cancel_burst,
};
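/* Minimal usage sketch for the API implemented above (illustrative only; it
 * assumes event device 0 is already configured with a queue 0, and error
 * handling is omitted):
 *
 *     const struct rte_event_timer_adapter_conf conf = {
 *         .event_dev_id = 0,
 *         .timer_adapter_id = 0,
 *         .socket_id = rte_socket_id(),
 *         .timer_tick_ns = 1000000,      // 1 ms resolution
 *         .max_tmo_ns = 10000000000,     // 10 s max timeout
 *         .nb_timers = 1024,
 *         .flags = RTE_EVENT_TIMER_ADAPTER_F_ADJUST_RES,
 *     };
 *     struct rte_event_timer_adapter *adap =
 *         rte_event_timer_adapter_create(&conf);
 *     rte_event_timer_adapter_start(adap);
 *
 *     struct rte_event_timer evtim = {
 *         .ev.op = RTE_EVENT_OP_NEW,
 *         .ev.queue_id = 0,
 *         .ev.sched_type = RTE_SCHED_TYPE_ATOMIC,
 *         .ev.event_type = RTE_EVENT_TYPE_TIMER,
 *         .state = RTE_EVENT_TIMER_NOT_ARMED,
 *         .timeout_ticks = 100,          // 100 ticks of 1 ms = 100 ms
 *     };
 *     struct rte_event_timer *evtims[] = { &evtim };
 *     rte_event_timer_adapter_arm_burst(adap, evtims, 1);
 */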
RTE_INIT(event_timer_adapter_init_log)
{
    evtim_logtype = rte_log_register("lib.eventdev.adapter.timer");
    if (evtim_logtype >= 0)
        rte_log_set_level(evtim_logtype, RTE_LOG_NOTICE);

    evtim_buffer_logtype = rte_log_register("lib.eventdev.adapter.timer."
                                            "buffer");
    if (evtim_buffer_logtype >= 0)
        rte_log_set_level(evtim_buffer_logtype, RTE_LOG_NOTICE);

    evtim_svc_logtype = rte_log_register("lib.eventdev.adapter.timer.svc");
    if (evtim_svc_logtype >= 0)
        rte_log_set_level(evtim_svc_logtype, RTE_LOG_NOTICE);
}