1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2017 Cavium, Inc
5 #include "timvf_worker.h"
/*
 * Validate a single event timer before it is armed on @timr.
 * On failure the timer's state is set to the matching
 * RTE_EVENT_TIMER_ERROR_* code so the caller can report per-timer errors.
 * NOTE(review): this chunk is truncated — the opening brace, the return
 * statements, and the closing brace are not visible here; confirm the
 * success/failure return convention against the full source.
 */
8 timvf_timer_reg_checks(const struct timvf_ring * const timr,
9 struct rte_event_timer * const tim)
/* A timer being armed must be in state 0 (NOT_ARMED); any non-zero
 * state means it is already armed/canceled and is rejected as ERROR. */
11 if (unlikely(tim->state)) {
12 tim->state = RTE_EVENT_TIMER_ERROR;
/* timeout_ticks must be non-zero and strictly below the ring's bucket
 * count: 0 is too early, >= nb_bkts is too late for this wheel. */
17 if (unlikely(!tim->timeout_ticks ||
18 tim->timeout_ticks >= timr->nb_bkts)) {
19 tim->state = tim->timeout_ticks ? RTE_EVENT_TIMER_ERROR_TOOLATE
20 : RTE_EVENT_TIMER_ERROR_TOOEARLY;
/*
 * Convert an rte_event_timer's event payload into the hardware TIM
 * memory-entry layout expected by the timvf ring.
 * NOTE(review): the bit masks/shift pack fields of tim->ev.event into
 * entry->w0 per the OCTEONTX TIM entry format — confirm the exact field
 * layout against the hardware spec; this chunk is truncated.
 */
31 timvf_format_event(const struct rte_event_timer * const tim,
32 struct tim_mem_entry * const entry)
/* w0: upper event bits (0xFFC000000000 mask) shifted down by 6,
 * OR'd with the low 36 bits (0xFFFFFFFFF) kept in place. */
34 entry->w0 = (tim->ev.event & 0xFFC000000000) >> 6 |
35 (tim->ev.event & 0xFFFFFFFFF);
/* wqe: the 64-bit user event payload, delivered back on expiry. */
36 entry->wqe = tim->ev.u64;
/*
 * Cancel up to @nb_timers previously armed event timers.
 * Presumably returns the number of timers successfully canceled, per the
 * rte_event_timer_adapter cancel-burst contract — TODO confirm; the loop
 * body (error handling, continue/break, return) is truncated here.
 */
40 timvf_timer_cancel_burst(const struct rte_event_timer_adapter *adptr,
41 struct rte_event_timer **tim, const uint16_t nb_timers)
47 for (index = 0; index < nb_timers; index++) {
/* Already-canceled timers cannot be canceled again. */
48 if (tim[index]->state == RTE_EVENT_TIMER_CANCELED) {
/* Only ARMED timers are eligible for cancellation. */
53 if (tim[index]->state != RTE_EVENT_TIMER_ARMED) {
/* Remove the timer's entry from the ring bucket. */
57 ret = timvf_rem_entry(tim[index]);
/*
 * Arm a burst of timers on the ring — single-producer variant (no
 * cross-thread synchronization on ring insertion, per the _sp suffix).
 * Each timer is validated, formatted into a TIM entry, and inserted with
 * its own timeout_ticks. NOTE(review): the failure paths after the checks
 * and the function's return are truncated in this chunk.
 */
67 timvf_timer_arm_burst_sp(const struct rte_event_timer_adapter *adptr,
68 struct rte_event_timer **tim, const uint16_t nb_timers)
/* Scratch hardware entry, re-filled for each timer in the burst. */
72 struct tim_mem_entry entry;
73 struct timvf_ring *timr = adptr->data->adapter_priv;
74 for (index = 0; index < nb_timers; index++) {
/* Skip (and error-mark) timers that fail state/tick validation. */
75 if (timvf_timer_reg_checks(timr, tim[index]))
78 timvf_format_event(tim[index], &entry);
79 ret = timvf_add_entry_sp(timr, tim[index]->timeout_ticks,
/*
 * Stats-enabled wrapper around timvf_timer_arm_burst_sp(): arms the burst
 * and accumulates the number of successfully armed timers into the ring's
 * tim_arm_cnt counter. NOTE(review): the return statement is truncated;
 * presumably returns ret unchanged — confirm against the full source.
 */
91 timvf_timer_arm_burst_sp_stats(const struct rte_event_timer_adapter *adptr,
92 struct rte_event_timer **tim, const uint16_t nb_timers)
95 struct timvf_ring *timr = adptr->data->adapter_priv;
97 ret = timvf_timer_arm_burst_sp(adptr, tim, nb_timers);
98 timr->tim_arm_cnt += ret;
/*
 * Arm a burst of timers on the ring — multi-producer variant (uses the
 * _mp insertion helper, which presumably synchronizes concurrent arms —
 * TODO confirm in timvf_add_entry_mp). Mirrors the _sp variant above:
 * validate, format, insert per timer. Failure paths and the return are
 * truncated in this chunk.
 */
104 timvf_timer_arm_burst_mp(const struct rte_event_timer_adapter *adptr,
105 struct rte_event_timer **tim, const uint16_t nb_timers)
/* Scratch hardware entry, re-filled for each timer in the burst. */
109 struct tim_mem_entry entry;
110 struct timvf_ring *timr = adptr->data->adapter_priv;
111 for (index = 0; index < nb_timers; index++) {
/* Skip (and error-mark) timers that fail state/tick validation. */
112 if (timvf_timer_reg_checks(timr, tim[index]))
114 timvf_format_event(tim[index], &entry);
115 ret = timvf_add_entry_mp(timr, tim[index]->timeout_ticks,
/*
 * Stats-enabled wrapper around timvf_timer_arm_burst_mp(): arms the burst
 * and accumulates the number of successfully armed timers into the ring's
 * tim_arm_cnt counter. NOTE(review): the return statement is truncated;
 * presumably returns ret unchanged — confirm against the full source.
 */
127 timvf_timer_arm_burst_mp_stats(const struct rte_event_timer_adapter *adptr,
128 struct rte_event_timer **tim, const uint16_t nb_timers)
131 struct timvf_ring *timr = adptr->data->adapter_priv;
133 ret = timvf_timer_arm_burst_mp(adptr, tim, nb_timers);
134 timr->tim_arm_cnt += ret;
/*
 * Arm a burst of timers that all share one common expiry, @timeout_tick.
 * Because the expiry is shared, timers are batched into TIMVF_MAX_BURST
 * sized groups and inserted with a single bulk call per group, instead of
 * one insertion per timer. NOTE(review): the inner-loop increment, the
 * bulk-insert error handling, and the final return are all truncated in
 * this chunk — do not infer the partial-failure semantics from here.
 */
140 timvf_timer_arm_tmo_brst(const struct rte_event_timer_adapter *adptr,
141 struct rte_event_timer **tim, const uint64_t timeout_tick,
142 const uint16_t nb_timers)
/* Count of timers handed to the hardware so far (also the offset of the
 * next batch within tim[]). */
145 uint16_t set_timers = 0;
147 uint16_t arr_idx = 0;
148 struct timvf_ring *timr = adptr->data->adapter_priv;
/* Per-batch staging buffer for formatted hardware entries; cache-aligned
 * for the bulk copy into the ring. */
149 struct tim_mem_entry entry[TIMVF_MAX_BURST] __rte_cache_aligned;
/* The shared timeout is validated once up front; on failure every timer
 * in the burst is marked with the same TOOEARLY/TOOLATE state. */
151 if (unlikely(!timeout_tick || timeout_tick >= timr->nb_bkts)) {
152 const enum rte_event_timer_state state = timeout_tick ?
153 RTE_EVENT_TIMER_ERROR_TOOLATE :
154 RTE_EVENT_TIMER_ERROR_TOOEARLY;
155 for (idx = 0; idx < nb_timers; idx++)
156 tim[idx]->state = state;
/* Process the burst in chunks of at most TIMVF_MAX_BURST entries. */
161 while (arr_idx < nb_timers) {
162 for (idx = 0; idx < TIMVF_MAX_BURST && (arr_idx < nb_timers);
164 timvf_format_event(tim[arr_idx], &entry[idx]);
/* Bulk-insert the staged chunk at the shared timeout. */
166 ret = timvf_add_entry_brst(timr, timeout_tick, &tim[set_timers],
/*
 * Stats-enabled wrapper around timvf_timer_arm_tmo_brst(): arms the
 * common-timeout burst and accumulates the number of timers actually set
 * into the ring's tim_arm_cnt counter. NOTE(review): the return statement
 * is truncated; presumably returns set_timers — confirm against the full
 * source.
 */
178 timvf_timer_arm_tmo_brst_stats(const struct rte_event_timer_adapter *adptr,
179 struct rte_event_timer **tim, const uint64_t timeout_tick,
180 const uint16_t nb_timers)
183 struct timvf_ring *timr = adptr->data->adapter_priv;
185 set_timers = timvf_timer_arm_tmo_brst(adptr, tim, timeout_tick,
187 timr->tim_arm_cnt += set_timers;
/*
 * Select the ring's chunk-refill strategy: the FPA (hardware free-pool
 * allocator) path when @use_fpa is non-zero, otherwise the generic
 * (software mempool) path. NOTE(review): the if/else around the two
 * assignments is truncated out of this chunk.
 */
193 timvf_set_chunk_refill(struct timvf_ring * const timr, uint8_t use_fpa)
196 timr->refill_chunk = timvf_refill_chunk_fpa;
198 timr->refill_chunk = timvf_refill_chunk_generic;