/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(C) 2019 Marvell International Ltd.
 */

#include "otx2_tim_evdev.h"
#include "otx2_tim_worker.h"

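/* Pre-arm checks: the timer must be in the NOT_ARMED state and its
 * timeout_ticks must fall within the ring's bucket range.
 */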
static inline int
tim_arm_checks(const struct otx2_tim_ring * const tim_ring,
	       struct rte_event_timer * const tim)
{
	if (unlikely(tim->state)) {
		tim->state = RTE_EVENT_TIMER_ERROR;
		rte_errno = EALREADY;
		goto fail;
	}

	if (unlikely(!tim->timeout_ticks ||
		     tim->timeout_ticks >= tim_ring->nb_bkts)) {
		tim->state = tim->timeout_ticks ? RTE_EVENT_TIMER_ERROR_TOOLATE
			: RTE_EVENT_TIMER_ERROR_TOOEARLY;
		rte_errno = EINVAL;
		goto fail;
	}

	return 0;

fail:
	return -EINVAL;
}

static inline void
tim_format_event(const struct rte_event_timer * const tim,
		 struct otx2_tim_ent * const entry)
{
	entry->w0 = (tim->ev.event & 0xFFC000000000) >> 6 |
		(tim->ev.event & 0xFFFFFFFFF);
	entry->wqe = tim->ev.u64;
}

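/* Arm a burst of timers, adding one bucket entry at a time. Arming stops at
 * the first failure; the number of successfully armed timers is returned.
 */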
static __rte_always_inline uint16_t
tim_timer_arm_burst(const struct rte_event_timer_adapter *adptr,
		    struct rte_event_timer **tim,
		    const uint16_t nb_timers,
		    const uint8_t flags)
{
	struct otx2_tim_ring *tim_ring = adptr->data->adapter_priv;
	struct otx2_tim_ent entry;
	uint16_t index;
	int ret;

	for (index = 0; index < nb_timers; index++) {
		if (tim_arm_checks(tim_ring, tim[index]))
			break;

		tim_format_event(tim[index], &entry);
		if (flags & OTX2_TIM_SP)
			ret = tim_add_entry_sp(tim_ring,
					       tim[index]->timeout_ticks,
					       tim[index], &entry, flags);
		if (flags & OTX2_TIM_MP)
			ret = tim_add_entry_mp(tim_ring,
					       tim[index]->timeout_ticks,
					       tim[index], &entry, flags);

		if (unlikely(ret)) {
			rte_errno = -ret;
			break;
		}
	}

	if (flags & OTX2_TIM_ENA_STATS)
		rte_atomic64_add(&tim_ring->arm_cnt, index);

	return index;
}

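/* Arm a burst of timers that share a common timeout tick, filling bucket
 * entries in chunks of up to OTX2_TIM_MAX_BURST.
 */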
static __rte_always_inline uint16_t
tim_timer_arm_tmo_brst(const struct rte_event_timer_adapter *adptr,
		       struct rte_event_timer **tim,
		       const uint64_t timeout_tick,
		       const uint16_t nb_timers, const uint8_t flags)
{
	struct otx2_tim_ent entry[OTX2_TIM_MAX_BURST] __rte_cache_aligned;
	struct otx2_tim_ring *tim_ring = adptr->data->adapter_priv;
	uint16_t set_timers = 0;
	uint16_t arr_idx = 0;
	uint16_t idx;
	int ret;

	if (unlikely(!timeout_tick || timeout_tick >= tim_ring->nb_bkts)) {
		const enum rte_event_timer_state state = timeout_tick ?
			RTE_EVENT_TIMER_ERROR_TOOLATE :
			RTE_EVENT_TIMER_ERROR_TOOEARLY;
		for (idx = 0; idx < nb_timers; idx++)
			tim[idx]->state = state;

		rte_errno = EINVAL;
		return 0;
	}

	while (arr_idx < nb_timers) {
		for (idx = 0; idx < OTX2_TIM_MAX_BURST && (arr_idx < nb_timers);
		     idx++, arr_idx++) {
			tim_format_event(tim[arr_idx], &entry[idx]);
		}
		ret = tim_add_entry_brst(tim_ring, timeout_tick,
					 &tim[set_timers], entry, idx, flags);
		set_timers += ret;
		if (ret != idx)
			break;
	}

	if (flags & OTX2_TIM_ENA_STATS)
		rte_atomic64_add(&tim_ring->arm_cnt, set_timers);

	return set_timers;
}

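/* Generate the flag-specialized fastpath handlers declared through the
 * TIM_ARM_FASTPATH_MODES and TIM_ARM_TMO_FASTPATH_MODES mode lists.
 */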
#define FP(_name, _f4, _f3, _f2, _f1, _flags) \
uint16_t __rte_noinline \
otx2_tim_arm_burst_ ## _name(const struct rte_event_timer_adapter *adptr, \
			     struct rte_event_timer **tim, \
			     const uint16_t nb_timers) \
{ \
	return tim_timer_arm_burst(adptr, tim, nb_timers, _flags); \
}
TIM_ARM_FASTPATH_MODES
#undef FP

#define FP(_name, _f3, _f2, _f1, _flags) \
uint16_t __rte_noinline \
otx2_tim_arm_tmo_tick_burst_ ## _name( \
		const struct rte_event_timer_adapter *adptr, \
		struct rte_event_timer **tim, \
		const uint64_t timeout_tick, \
		const uint16_t nb_timers) \
{ \
	return tim_timer_arm_tmo_brst(adptr, tim, timeout_tick, \
				      nb_timers, _flags); \
}
TIM_ARM_TMO_FASTPATH_MODES
#undef FP

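/* Cancel a burst of armed timers; iteration stops at the first timer that is
 * already canceled, is not armed, or cannot be removed from its bucket.
 */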
uint16_t
otx2_tim_timer_cancel_burst(const struct rte_event_timer_adapter *adptr,
			    struct rte_event_timer **tim,
			    const uint16_t nb_timers)
{
	uint16_t index;
	int ret;

	RTE_SET_USED(adptr);
	for (index = 0; index < nb_timers; index++) {
		if (tim[index]->state == RTE_EVENT_TIMER_CANCELED) {
			rte_errno = EALREADY;
			break;
		}

		if (tim[index]->state != RTE_EVENT_TIMER_ARMED) {
			rte_errno = EINVAL;
			break;
		}

		ret = tim_rm_entry(tim[index]);
		if (ret) {
			rte_errno = -ret;
			break;
		}
	}

	return index;
}