1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(C) 2019 Marvell International Ltd.
5 #include "otx2_tim_evdev.h"
6 #include "otx2_tim_worker.h"
/*
 * tim_arm_checks() - validate an event timer before it is armed.
 * Rejects timers whose state is not NOT_ARMED (non-zero) and timeout
 * ticks that are zero or beyond the ring's bucket count.
 * NOTE(review): this excerpt omits interleaved lines (return statements,
 * closing braces); comments describe only the visible logic.
 */
9 tim_arm_checks(const struct otx2_tim_ring * const tim_ring,
10 struct rte_event_timer * const tim)
/* Non-zero state means the timer is already armed/canceled/errored. */
12 if (unlikely(tim->state)) {
13 tim->state = RTE_EVENT_TIMER_ERROR;
/*
 * Valid timeouts lie in (0, nb_bkts): zero is too early to schedule,
 * anything at or past the bucket count cannot fit in one ring rotation.
 */
18 if (unlikely(!tim->timeout_ticks ||
19 tim->timeout_ticks >= tim_ring->nb_bkts)) {
20 tim->state = tim->timeout_ticks ? RTE_EVENT_TIMER_ERROR_TOOLATE
21 : RTE_EVENT_TIMER_ERROR_TOOEARLY;
/*
 * tim_format_event() - convert an event timer into a TIM bucket entry.
 * w0 repacks fields of the 32/64-bit event word: the bits selected by
 * mask 0xFFC000000000 are shifted down by 6 and OR'd with the low
 * 36 bits (mask 0xFFFFFFFFF); wqe carries the raw 64-bit event payload.
 * NOTE(review): the exact field layout presumably matches the hardware
 * TIM entry format — confirm against the OCTEON TX2 TIM LF spec.
 */
33 tim_format_event(const struct rte_event_timer * const tim,
34 struct otx2_tim_ent * const entry)
36 entry->w0 = (tim->ev.event & 0xFFC000000000) >> 6 |
37 (tim->ev.event & 0xFFFFFFFFF);
38 entry->wqe = tim->ev.u64;
/*
 * tim_sync_start_cyc() - periodically resynchronize the software notion
 * of the ring start timestamp with the hardware ring position.
 * Only runs the (expensive) device register read when more than one full
 * ring interval (tot_int) of cycles has elapsed since the last update.
 */
42 tim_sync_start_cyc(struct otx2_tim_ring *tim_ring)
44 uint64_t cur_cyc = tim_cntvct();
47 if (cur_cyc - tim_ring->last_updt_cyc > tim_ring->tot_int) {
/* Current relative bucket index: TIM_LF_RING_REL bits [63:44]. */
48 real_bkt = otx2_read64(tim_ring->base + TIM_LF_RING_REL) >> 44;
/* Re-read the counter after the register access to narrow the skew. */
49 cur_cyc = tim_cntvct();
/* Back-compute when bucket 0 started: now minus elapsed bucket time. */
51 tim_ring->ring_start_cyc = cur_cyc -
52 (real_bkt * tim_ring->tck_int);
53 tim_ring->last_updt_cyc = cur_cyc;
/*
 * tim_timer_arm_burst() - arm a burst of event timers, one entry at a time.
 * Flags select the single-producer (OTX2_TIM_SP) or multi-producer
 * (OTX2_TIM_MP) insert path; callers reach this through the FP()
 * fastpath wrappers below with compile-time-constant flags.
 * NOTE(review): excerpt omits the loop's error-exit lines and the final
 * return; comments cover only the visible statements.
 */
58 static __rte_always_inline uint16_t
59 tim_timer_arm_burst(const struct rte_event_timer_adapter *adptr,
60 struct rte_event_timer **tim,
61 const uint16_t nb_timers,
64 struct otx2_tim_ring *tim_ring = adptr->data->adapter_priv;
65 struct otx2_tim_ent entry;
/* Refresh ring_start_cyc before computing any bucket positions. */
69 tim_sync_start_cyc(tim_ring);
70 for (index = 0; index < nb_timers; index++) {
/* Skip/stop on invalid timers; checks also set tim->state on error. */
71 if (tim_arm_checks(tim_ring, tim[index]))
74 tim_format_event(tim[index], &entry);
/* SP and MP are mutually exclusive flag bits chosen at template time. */
75 if (flags & OTX2_TIM_SP)
76 ret = tim_add_entry_sp(tim_ring,
77 tim[index]->timeout_ticks,
78 tim[index], &entry, flags);
79 if (flags & OTX2_TIM_MP)
80 ret = tim_add_entry_mp(tim_ring,
81 tim[index]->timeout_ticks,
82 tim[index], &entry, flags);
/* Account successfully armed timers; relaxed order is enough for stats. */
90 if (flags & OTX2_TIM_ENA_STATS)
91 __atomic_fetch_add(&tim_ring->arm_cnt, index, __ATOMIC_RELAXED);
/*
 * tim_timer_arm_tmo_brst() - arm a burst of timers that all share one
 * timeout tick, inserting them into the same bucket in chunks of up to
 * OTX2_TIM_MAX_BURST entries per hardware burst-add.
 * NOTE(review): excerpt omits early-return lines, the inner loop
 * increment, and the final return; comments cover visible logic only.
 */
96 static __rte_always_inline uint16_t
97 tim_timer_arm_tmo_brst(const struct rte_event_timer_adapter *adptr,
98 struct rte_event_timer **tim,
99 const uint64_t timeout_tick,
100 const uint16_t nb_timers, const uint8_t flags)
102 struct otx2_tim_ent entry[OTX2_TIM_MAX_BURST] __rte_cache_aligned;
103 struct otx2_tim_ring *tim_ring = adptr->data->adapter_priv;
104 uint16_t set_timers = 0;
105 uint16_t arr_idx = 0;
/*
 * Shared timeout is validated once up front; on failure every timer in
 * the burst gets the same TOOEARLY/TOOLATE state.
 */
109 if (unlikely(!timeout_tick || timeout_tick >= tim_ring->nb_bkts)) {
110 const enum rte_event_timer_state state = timeout_tick ?
111 RTE_EVENT_TIMER_ERROR_TOOLATE :
112 RTE_EVENT_TIMER_ERROR_TOOEARLY;
113 for (idx = 0; idx < nb_timers; idx++)
114 tim[idx]->state = state;
/* Refresh ring_start_cyc before computing the target bucket. */
120 tim_sync_start_cyc(tim_ring);
121 while (arr_idx < nb_timers) {
/* Stage up to OTX2_TIM_MAX_BURST formatted entries for one burst add. */
122 for (idx = 0; idx < OTX2_TIM_MAX_BURST && (arr_idx < nb_timers);
124 tim_format_event(tim[arr_idx], &entry[idx]);
126 ret = tim_add_entry_brst(tim_ring, timeout_tick,
127 &tim[set_timers], entry, idx, flags);
/* Account successfully armed timers; relaxed order is enough for stats. */
132 if (flags & OTX2_TIM_ENA_STATS)
133 __atomic_fetch_add(&tim_ring->arm_cnt, set_timers,
/*
 * Instantiate one non-inlined arm-burst entry point per flag combination.
 * TIM_ARM_FASTPATH_MODES expands FP() once per (name, flags) tuple, so
 * tim_timer_arm_burst() is specialized with compile-time-constant flags
 * and the SP/MP/stats branches fold away.
 */
139 #define FP(_name, _f3, _f2, _f1, _flags) \
140 uint16_t __rte_noinline \
141 otx2_tim_arm_burst_ ## _name(const struct rte_event_timer_adapter *adptr, \
142 struct rte_event_timer **tim, \
143 const uint16_t nb_timers) \
145 return tim_timer_arm_burst(adptr, tim, nb_timers, _flags); \
147 TIM_ARM_FASTPATH_MODES
/*
 * Instantiate one non-inlined shared-timeout burst entry point per flag
 * combination, mirroring the arm-burst specialization above but driven
 * by TIM_ARM_TMO_FASTPATH_MODES (one fewer flag dimension).
 */
150 #define FP(_name, _f2, _f1, _flags) \
151 uint16_t __rte_noinline \
152 otx2_tim_arm_tmo_tick_burst_ ## _name( \
153 const struct rte_event_timer_adapter *adptr, \
154 struct rte_event_timer **tim, \
155 const uint64_t timeout_tick, \
156 const uint16_t nb_timers) \
158 return tim_timer_arm_tmo_brst(adptr, tim, timeout_tick, \
159 nb_timers, _flags); \
161 TIM_ARM_TMO_FASTPATH_MODES
165 otx2_tim_timer_cancel_burst(const struct rte_event_timer_adapter *adptr,
166 struct rte_event_timer **tim,
167 const uint16_t nb_timers)
173 rte_atomic_thread_fence(__ATOMIC_ACQUIRE);
174 for (index = 0; index < nb_timers; index++) {
175 if (tim[index]->state == RTE_EVENT_TIMER_CANCELED) {
176 rte_errno = EALREADY;
180 if (tim[index]->state != RTE_EVENT_TIMER_ARMED) {
184 ret = tim_rm_entry(tim[index]);