diff --git a/drivers/event/cnxk/cnxk_tim_evdev.h b/drivers/event/cnxk/cnxk_tim_evdev.h
 TIM_ARM_TMO_FASTPATH_MODES
 #undef FP
+uint16_t
+cnxk_tim_timer_cancel_burst(const struct rte_event_timer_adapter *adptr,
+			    struct rte_event_timer **tim,
+			    const uint16_t nb_timers);
+
 int cnxk_tim_caps_get(const struct rte_eventdev *dev, uint64_t flags,
 		      uint32_t *caps,
 		      const struct rte_event_timer_adapter_ops **ops);
diff --git a/drivers/event/cnxk/cnxk_tim_worker.c b/drivers/event/cnxk/cnxk_tim_worker.c
 	}
 TIM_ARM_TMO_FASTPATH_MODES
 #undef FP
+
+uint16_t
+cnxk_tim_timer_cancel_burst(const struct rte_event_timer_adapter *adptr,
+			    struct rte_event_timer **tim,
+			    const uint16_t nb_timers)
+{
+	uint16_t index;
+	int ret;
+
+	RTE_SET_USED(adptr);
+	rte_atomic_thread_fence(__ATOMIC_ACQUIRE);
+	for (index = 0; index < nb_timers; index++) {
+		if (tim[index]->state == RTE_EVENT_TIMER_CANCELED) {
+			rte_errno = EALREADY;
+			break;
+		}
+
+		if (tim[index]->state != RTE_EVENT_TIMER_ARMED) {
+			rte_errno = EINVAL;
+			break;
+		}
+		ret = cnxk_tim_rm_entry(tim[index]);
+		if (ret) {
+			rte_errno = -ret;
+			break;
+		}
+	}
+
+	return index;
+}
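
Applications reach this driver op through the generic event timer adapter API rather than calling it directly. The sketch below is illustrative only and not part of the patch: it assumes an already-configured adapter (adptr) and a timer previously armed via rte_event_timer_arm_burst() (tim; both hypothetical names), and shows how the per-call rte_errno contract of the loop above surfaces to the caller.

#include <stdio.h>

#include <rte_errno.h>
#include <rte_event_timer_adapter.h>

/* Illustrative sketch (not part of the patch): cancel one armed timer
 * and report the errno set by the driver loop above.
 */
static void
cancel_one(const struct rte_event_timer_adapter *adptr,
	   struct rte_event_timer *tim)
{
	struct rte_event_timer *tims[] = { tim };

	if (rte_event_timer_cancel_burst(adptr, tims, 1) != 1) {
		/* EALREADY: timer was already canceled.
		 * EINVAL:   timer was not in the ARMED state.
		 * ENOENT:   entry already fired or left its bucket.
		 */
		printf("cancel failed: %s\n", rte_strerror(rte_errno));
		return;
	}

	/* On success the driver has set tim->state to
	 * RTE_EVENT_TIMER_CANCELED and cleared its impl_opaque[] words.
	 */
}
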
diff --git a/drivers/event/cnxk/cnxk_tim_worker.h b/drivers/event/cnxk/cnxk_tim_worker.h
 	return nb_timers;
 }
+static int
+cnxk_tim_rm_entry(struct rte_event_timer *tim)
+{
+	struct cnxk_tim_ent *entry;
+	struct cnxk_tim_bkt *bkt;
+	uint64_t lock_sema;
+
+	if (tim->impl_opaque[1] == 0 || tim->impl_opaque[0] == 0)
+		return -ENOENT;
+
+	entry = (struct cnxk_tim_ent *)(uintptr_t)tim->impl_opaque[0];
+	if (entry->wqe != tim->ev.u64) {
+		tim->impl_opaque[0] = 0;
+		tim->impl_opaque[1] = 0;
+		return -ENOENT;
+	}
+
+	bkt = (struct cnxk_tim_bkt *)(uintptr_t)tim->impl_opaque[1];
+	lock_sema = cnxk_tim_bkt_inc_lock(bkt);
+	if (cnxk_tim_bkt_get_hbt(lock_sema) ||
+	    !cnxk_tim_bkt_get_nent(lock_sema)) {
+		tim->impl_opaque[0] = 0;
+		tim->impl_opaque[1] = 0;
+		cnxk_tim_bkt_dec_lock(bkt);
+		return -ENOENT;
+	}
+
+	entry->w0 = 0;
+	entry->wqe = 0;
+	tim->state = RTE_EVENT_TIMER_CANCELED;
+	tim->impl_opaque[0] = 0;
+	tim->impl_opaque[1] = 0;
+	cnxk_tim_bkt_dec_lock(bkt);
+
+	return 0;
+}
+
 #endif /* __CNXK_TIM_WORKER_H__ */
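
The excerpt does not show how the new op gets published to the adapter layer. Presumably the driver hooks it into the ops table returned from cnxk_tim_caps_get(), along the lines of the sketch below; the table name cnxk_tim_ops and the helper wrapping the assignment are assumptions, not shown in these hunks.

#include <rte_event_timer_adapter.h>

/* Assumed wiring (not shown in this excerpt): publish the new
 * fastpath op through the ops table that cnxk_tim_caps_get()
 * returns to the adapter layer.
 */
static struct rte_event_timer_adapter_ops cnxk_tim_ops;

static void
cnxk_tim_wire_cancel_op(void)
{
	cnxk_tim_ops.cancel_burst = cnxk_tim_timer_cancel_burst;
}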