1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(C) 2021 Marvell.
5 #include "cnxk_tim_evdev.h"
6 #include "cnxk_tim_worker.h"
/*
 * Validate an event timer before arming it on @tim_ring.
 * A timer must be in the NOT_ARMED state and its timeout_ticks must be
 * non-zero and no larger than the number of ring buckets; on failure
 * tim->state is set to the matching RTE_EVENT_TIMER_ERROR* value.
 * NOTE(review): the error-return statements and rte_errno handling are
 * on lines not visible in this excerpt — confirm against the full file.
 */
9 cnxk_tim_arm_checks(const struct cnxk_tim_ring *const tim_ring,
10 struct rte_event_timer *const tim)
/* Any non-NOT_ARMED state (armed/canceled/error) is rejected. */
12 if (unlikely(tim->state)) {
13 tim->state = RTE_EVENT_TIMER_ERROR;
/* Zero ticks -> too early; ticks beyond the ring span -> too late. */
18 if (unlikely(!tim->timeout_ticks ||
19 tim->timeout_ticks > tim_ring->nb_bkts)) {
20 tim->state = tim->timeout_ticks ?
21 RTE_EVENT_TIMER_ERROR_TOOLATE :
22 RTE_EVENT_TIMER_ERROR_TOOEARLY;
/*
 * Translate the rte_event carried by @tim into the hardware TIM bucket
 * entry layout in @entry: w0 packs bits [47:38] of the event word
 * shifted down by 6 together with its low 36 bits; wqe holds the raw
 * 64-bit event payload delivered when the timer expires.
 */
34 cnxk_tim_format_event(const struct rte_event_timer *const tim,
35 struct cnxk_tim_ent *const entry)
37 entry->w0 = (tim->ev.event & 0xFFC000000000) >> 6 |
38 (tim->ev.event & 0xFFFFFFFFF);
39 entry->wqe = tim->ev.u64;
/*
 * Re-synchronize the software view of the ring start timestamp with
 * the hardware's current bucket position.  The (slow) MMIO read is
 * only done when more than one full ring rotation (tot_int) of cycles
 * has elapsed since the last update, keeping the arm fast path cheap.
 */
43 cnxk_tim_sync_start_cyc(struct cnxk_tim_ring *tim_ring)
45 uint64_t cur_cyc = cnxk_tim_cntvct();
48 if (cur_cyc - tim_ring->last_updt_cyc > tim_ring->tot_int) {
/* Current bucket index is in the top bits [63:44] of RING_REL. */
49 real_bkt = plt_read64(tim_ring->base + TIM_LF_RING_REL) >> 44;
/* Re-read the counter after the MMIO read so the math is current. */
50 cur_cyc = cnxk_tim_cntvct();
/* Back-compute ring start so bucket arithmetic maps "now" to real_bkt. */
52 tim_ring->ring_start_cyc =
53 cur_cyc - (real_bkt * tim_ring->tck_int);
54 tim_ring->last_updt_cyc = cur_cyc;
/*
 * Arm up to @nb_timers event timers in one call.
 * @flags is a compile-time constant baked in by the FP() fast-path
 * instantiation below, so the SP/MP branches are resolved at build
 * time and exactly one insertion routine survives in each variant.
 * Returns the number of timers successfully armed (the return path
 * and error handling are on lines not visible in this excerpt).
 */
58 static __rte_always_inline uint16_t
59 cnxk_tim_timer_arm_burst(const struct rte_event_timer_adapter *adptr,
60 struct rte_event_timer **tim, const uint16_t nb_timers,
63 struct cnxk_tim_ring *tim_ring = adptr->data->adapter_priv;
64 struct cnxk_tim_ent entry;
/* Refresh the ring-start timestamp before any timeout arithmetic. */
68 cnxk_tim_sync_start_cyc(tim_ring);
69 for (index = 0; index < nb_timers; index++) {
/* Skip (and error-mark) timers with a bad state or timeout. */
70 if (cnxk_tim_arm_checks(tim_ring, tim[index]))
/* Pack the event into the hardware TIM entry format. */
73 cnxk_tim_format_event(tim[index], &entry);
74 if (flags & CNXK_TIM_SP)
75 ret = cnxk_tim_add_entry_sp(tim_ring,
76 tim[index]->timeout_ticks,
77 tim[index], &entry, flags);
78 if (flags & CNXK_TIM_MP)
79 ret = cnxk_tim_add_entry_mp(tim_ring,
80 tim[index]->timeout_ticks,
81 tim[index], &entry, flags);
/* Account every timer armed in this call with one relaxed add. */
89 if (flags & CNXK_TIM_ENA_STATS)
90 __atomic_fetch_add(&tim_ring->arm_cnt, index, __ATOMIC_RELAXED);
/*
 * Instantiate one non-inlined arm-burst entry point per fast-path
 * flag combination: TIM_ARM_FASTPATH_MODES expands FP() once for each
 * (name, flags) tuple, and each expansion forwards to the always-
 * inline worker above with its _flags as a compile-time constant.
 */
95 #define FP(_name, _f3, _f2, _f1, _flags) \
96 uint16_t __rte_noinline cnxk_tim_arm_burst_##_name( \
97 const struct rte_event_timer_adapter *adptr, \
98 struct rte_event_timer **tim, const uint16_t nb_timers) \
100 return cnxk_tim_timer_arm_burst(adptr, tim, nb_timers, \
103 TIM_ARM_FASTPATH_MODES
/*
 * Arm up to @nb_timers event timers that all share one expiry tick
 * (@timeout_tick), batching insertions in chunks of CNXK_TIM_MAX_BURST
 * entries.  @flags is a compile-time constant from the FP() expansion
 * below.  Returns the number of timers set (the return path and the
 * per-chunk error handling are on lines not visible in this excerpt).
 */
106 static __rte_always_inline uint16_t
107 cnxk_tim_timer_arm_tmo_brst(const struct rte_event_timer_adapter *adptr,
108 struct rte_event_timer **tim,
109 const uint64_t timeout_tick,
110 const uint16_t nb_timers, const uint8_t flags)
112 struct cnxk_tim_ent entry[CNXK_TIM_MAX_BURST] __rte_cache_aligned;
113 struct cnxk_tim_ring *tim_ring = adptr->data->adapter_priv;
114 uint16_t set_timers = 0;
115 uint16_t arr_idx = 0;
/* A shared timeout that is zero or beyond the ring span fails the
 * whole burst up front with the matching error state on every timer.
 */
119 if (unlikely(!timeout_tick || timeout_tick > tim_ring->nb_bkts)) {
120 const enum rte_event_timer_state state =
121 timeout_tick ? RTE_EVENT_TIMER_ERROR_TOOLATE :
122 RTE_EVENT_TIMER_ERROR_TOOEARLY;
123 for (idx = 0; idx < nb_timers; idx++)
124 tim[idx]->state = state;
/* Refresh the ring-start timestamp before any timeout arithmetic. */
130 cnxk_tim_sync_start_cyc(tim_ring);
131 while (arr_idx < nb_timers) {
/* Stage up to CNXK_TIM_MAX_BURST formatted entries per iteration. */
132 for (idx = 0; idx < CNXK_TIM_MAX_BURST && (arr_idx < nb_timers);
134 cnxk_tim_format_event(tim[arr_idx], &entry[idx]);
/* Insert the staged chunk into the shared bucket in one shot. */
136 ret = cnxk_tim_add_entry_brst(tim_ring, timeout_tick,
137 &tim[set_timers], entry, idx,
/* Account every timer set in this call with one relaxed add. */
144 if (flags & CNXK_TIM_ENA_STATS)
145 __atomic_fetch_add(&tim_ring->arm_cnt, set_timers,
/*
 * Instantiate one non-inlined same-timeout burst entry point per
 * fast-path flag combination: TIM_ARM_TMO_FASTPATH_MODES expands FP()
 * once per (name, flags) tuple, each forwarding to the always-inline
 * worker above with _flags as a compile-time constant.
 */
151 #define FP(_name, _f2, _f1, _flags) \
152 uint16_t __rte_noinline cnxk_tim_arm_tmo_tick_burst_##_name( \
153 const struct rte_event_timer_adapter *adptr, \
154 struct rte_event_timer **tim, const uint64_t timeout_tick, \
155 const uint16_t nb_timers) \
157 return cnxk_tim_timer_arm_tmo_brst(adptr, tim, timeout_tick, \
158 nb_timers, _flags); \
160 TIM_ARM_TMO_FASTPATH_MODES
164 cnxk_tim_timer_cancel_burst(const struct rte_event_timer_adapter *adptr,
165 struct rte_event_timer **tim,
166 const uint16_t nb_timers)
172 rte_atomic_thread_fence(__ATOMIC_ACQUIRE);
173 for (index = 0; index < nb_timers; index++) {
174 if (tim[index]->state == RTE_EVENT_TIMER_CANCELED) {
175 rte_errno = EALREADY;
179 if (tim[index]->state != RTE_EVENT_TIMER_ARMED) {
183 ret = cnxk_tim_rm_entry(tim[index]);