event/octeontx2: optimize timer arm routine
[dpdk.git] / drivers / event / octeontx2 / otx2_tim_worker.h
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(C) 2019 Marvell International Ltd.
3  */
4
5 #ifndef __OTX2_TIM_WORKER_H__
6 #define __OTX2_TIM_WORKER_H__
7
8 #include "otx2_tim_evdev.h"
9
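/*
 * Fast-path helpers for the OCTEON TX2 (otx2) TIM event timer PMD.  Each
 * timer-ring bucket carries a 64-bit control word (w1) packing the software
 * lock count, the free-slot remainder of the current chunk, the hardware
 * HBT/BSK status bits and the number of armed entries; the accessors below
 * read or update those fields atomically.
 *
 * Illustrative usage sketch (hedged - names come from the generic
 * rte_event_timer_adapter API, not from this file): an application arms a
 * timer roughly as
 *
 *     struct rte_event_timer tim = {
 *             .ev.queue_id = 0,
 *             .ev.sched_type = RTE_SCHED_TYPE_ATOMIC,
 *             .timeout_ticks = 10,
 *             .state = RTE_EVENT_TIMER_NOT_ARMED,
 *     };
 *     struct rte_event_timer *timp = &tim;
 *     rte_event_timer_arm_burst(adapter, &timp, 1);
 *
 * which eventually lands in one of the tim_add_entry_*() routines below.
 */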
10 static inline uint8_t
11 tim_bkt_fetch_lock(uint64_t w1)
12 {
13         return (w1 >> TIM_BUCKET_W1_S_LOCK) &
14                 TIM_BUCKET_W1_M_LOCK;
15 }
16
17 static inline int16_t
18 tim_bkt_fetch_rem(uint64_t w1)
19 {
20         return (w1 >> TIM_BUCKET_W1_S_CHUNK_REMAINDER) &
21                 TIM_BUCKET_W1_M_CHUNK_REMAINDER;
22 }
23
24 static inline int16_t
25 tim_bkt_get_rem(struct otx2_tim_bkt *bktp)
26 {
27         return __atomic_load_n(&bktp->chunk_remainder, __ATOMIC_ACQUIRE);
28 }
29
30 static inline void
31 tim_bkt_set_rem(struct otx2_tim_bkt *bktp, uint16_t v)
32 {
33         __atomic_store_n(&bktp->chunk_remainder, v, __ATOMIC_RELAXED);
34 }
35
36 static inline void
37 tim_bkt_sub_rem(struct otx2_tim_bkt *bktp, uint16_t v)
38 {
39         __atomic_fetch_sub(&bktp->chunk_remainder, v, __ATOMIC_RELAXED);
40 }
41
42 static inline uint8_t
43 tim_bkt_get_hbt(uint64_t w1)
44 {
45         return (w1 >> TIM_BUCKET_W1_S_HBT) & TIM_BUCKET_W1_M_HBT;
46 }
47
48 static inline uint8_t
49 tim_bkt_get_bsk(uint64_t w1)
50 {
51         return (w1 >> TIM_BUCKET_W1_S_BSK) & TIM_BUCKET_W1_M_BSK;
52 }
53
54 static inline uint64_t
55 tim_bkt_clr_bsk(struct otx2_tim_bkt *bktp)
56 {
57         /* Clear everything except lock. */
58         const uint64_t v = TIM_BUCKET_W1_M_LOCK << TIM_BUCKET_W1_S_LOCK;
59
60         return __atomic_fetch_and(&bktp->w1, v, __ATOMIC_ACQ_REL);
61 }
62
63 static inline uint64_t
64 tim_bkt_fetch_sema_lock(struct otx2_tim_bkt *bktp)
65 {
66         return __atomic_fetch_add(&bktp->w1, TIM_BUCKET_SEMA_WLOCK,
67                         __ATOMIC_ACQUIRE);
68 }
69
70 static inline uint64_t
71 tim_bkt_fetch_sema(struct otx2_tim_bkt *bktp)
72 {
73         return __atomic_fetch_add(&bktp->w1, TIM_BUCKET_SEMA, __ATOMIC_RELAXED);
74 }
75
76 static inline uint64_t
77 tim_bkt_inc_lock(struct otx2_tim_bkt *bktp)
78 {
79         const uint64_t v = 1ull << TIM_BUCKET_W1_S_LOCK;
80
81         return __atomic_fetch_add(&bktp->w1, v, __ATOMIC_ACQUIRE);
82 }
83
84 static inline void
85 tim_bkt_dec_lock(struct otx2_tim_bkt *bktp)
86 {
87         __atomic_fetch_sub(&bktp->lock, 1, __ATOMIC_RELEASE);
88 }
89
90 static inline void
91 tim_bkt_dec_lock_relaxed(struct otx2_tim_bkt *bktp)
92 {
93         __atomic_fetch_sub(&bktp->lock, 1, __ATOMIC_RELAXED);
94 }
95
96 static inline uint32_t
97 tim_bkt_get_nent(uint64_t w1)
98 {
99         return (w1 >> TIM_BUCKET_W1_S_NUM_ENTRIES) &
100                 TIM_BUCKET_W1_M_NUM_ENTRIES;
101 }
102
103 static inline void
104 tim_bkt_inc_nent(struct otx2_tim_bkt *bktp)
105 {
106         __atomic_add_fetch(&bktp->nb_entry, 1, __ATOMIC_RELAXED);
107 }
108
109 static inline void
110 tim_bkt_add_nent(struct otx2_tim_bkt *bktp, uint32_t v)
111 {
112         __atomic_add_fetch(&bktp->nb_entry, v, __ATOMIC_RELAXED);
113 }
114
115 static inline uint64_t
116 tim_bkt_clr_nent(struct otx2_tim_bkt *bktp)
117 {
118         const uint64_t v = ~(TIM_BUCKET_W1_M_NUM_ENTRIES <<
119                         TIM_BUCKET_W1_S_NUM_ENTRIES);
120
121         return __atomic_and_fetch(&bktp->w1, v, __ATOMIC_ACQ_REL);
122 }
123
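/* Fast n % d using a precomputed reciprocal, avoiding a hardware divide on
 * the arm fast path.
 */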
124 static inline uint64_t
125 tim_bkt_fast_mod(uint64_t n, uint64_t d, struct rte_reciprocal_u64 R)
126 {
127         return (n - (d * rte_reciprocal_divide_u64(n, &R)));
128 }
129
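/* Translate the expiry into wheel coordinates: TSC cycles elapsed since ring
 * start divided by the bucket interval (reciprocal divide), plus the
 * requested relative bucket, modulo the number of buckets.  The "mirror"
 * bucket half a wheel away is returned as well; it is the bucket whose
 * current_chunk field is used while arming, presumably so the frequently
 * written chunk pointer stays off the control word the hardware is scanning.
 */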
130 static __rte_always_inline void
131 tim_get_target_bucket(struct otx2_tim_ring *const tim_ring,
132                       const uint32_t rel_bkt, struct otx2_tim_bkt **bkt,
133                       struct otx2_tim_bkt **mirr_bkt)
134 {
135         const uint64_t bkt_cyc = rte_rdtsc() - tim_ring->ring_start_cyc;
136         uint64_t bucket =
137                 rte_reciprocal_divide_u64(bkt_cyc, &tim_ring->fast_div) +
138                 rel_bkt;
139         uint64_t mirr_bucket = 0;
140
141         bucket =
142                 tim_bkt_fast_mod(bucket, tim_ring->nb_bkts, tim_ring->fast_bkt);
143         mirr_bucket = tim_bkt_fast_mod(bucket + (tim_ring->nb_bkts >> 1),
144                                        tim_ring->nb_bkts, tim_ring->fast_bkt);
145         *bkt = &tim_ring->bkt[bucket];
146         *mirr_bkt = &tim_ring->bkt[mirr_bucket];
147 }
148
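/* Walk the chunk list hanging off an already-traversed bucket and return
 * every chunk except the first to the mempool, in bulk batches of up to
 * TIM_MAX_OUTSTANDING_OBJ.  The first chunk is kept and handed back to the
 * caller for reuse.
 */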
149 static struct otx2_tim_ent *
150 tim_clr_bkt(struct otx2_tim_ring * const tim_ring,
151             struct otx2_tim_bkt * const bkt)
152 {
153 #define TIM_MAX_OUTSTANDING_OBJ         64
154         void *pend_chunks[TIM_MAX_OUTSTANDING_OBJ];
155         struct otx2_tim_ent *chunk;
156         struct otx2_tim_ent *pnext;
157         uint8_t objs = 0;
158
159
160         chunk = ((struct otx2_tim_ent *)(uintptr_t)bkt->first_chunk);
161         chunk = (struct otx2_tim_ent *)(uintptr_t)(chunk +
162                         tim_ring->nb_chunk_slots)->w0;
163         while (chunk) {
164                 pnext = (struct otx2_tim_ent *)(uintptr_t)
165                         ((chunk + tim_ring->nb_chunk_slots)->w0);
166                 if (objs == TIM_MAX_OUTSTANDING_OBJ) {
167                         rte_mempool_put_bulk(tim_ring->chunk_pool, pend_chunks,
168                                              objs);
169                         objs = 0;
170                 }
171                 pend_chunks[objs++] = chunk;
172                 chunk = pnext;
173         }
174
175         if (objs)
176                 rte_mempool_put_bulk(tim_ring->chunk_pool, pend_chunks,
177                                 objs);
178
179         return (struct otx2_tim_ent *)(uintptr_t)bkt->first_chunk;
180 }
181
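/* Chunk allocation used when OTX2_TIM_ENA_FB is set: if the bucket is in use
 * or has no chunk yet, pull a fresh chunk from the pool and link it in; if
 * the bucket is empty but still owns chunks from a previous rotation, trim
 * the list with tim_clr_bkt() and reuse its first chunk.  Returns NULL when
 * the mempool is exhausted.
 */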
182 static struct otx2_tim_ent *
183 tim_refill_chunk(struct otx2_tim_bkt * const bkt,
184                  struct otx2_tim_bkt * const mirr_bkt,
185                  struct otx2_tim_ring * const tim_ring)
186 {
187         struct otx2_tim_ent *chunk;
188
189         if (bkt->nb_entry || !bkt->first_chunk) {
190                 if (unlikely(rte_mempool_get(tim_ring->chunk_pool,
191                                              (void **)&chunk)))
192                         return NULL;
193                 if (bkt->nb_entry) {
194                         *(uint64_t *)(((struct otx2_tim_ent *)
195                                                 mirr_bkt->current_chunk) +
196                                         tim_ring->nb_chunk_slots) =
197                                 (uintptr_t)chunk;
198                 } else {
199                         bkt->first_chunk = (uintptr_t)chunk;
200                 }
201         } else {
202                 chunk = tim_clr_bkt(tim_ring, bkt);
203                 bkt->first_chunk = (uintptr_t)chunk;
204         }
205         *(uint64_t *)(chunk + tim_ring->nb_chunk_slots) = 0;
206
207         return chunk;
208 }
209
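/* Chunk allocation used when OTX2_TIM_ENA_DFB is set: always take a fresh
 * chunk from the pool, terminate its link slot and attach it to the bucket
 * (or make it the first chunk of an empty bucket).  Returns NULL when the
 * mempool is exhausted.
 */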
210 static struct otx2_tim_ent *
211 tim_insert_chunk(struct otx2_tim_bkt * const bkt,
212                  struct otx2_tim_bkt * const mirr_bkt,
213                  struct otx2_tim_ring * const tim_ring)
214 {
215         struct otx2_tim_ent *chunk;
216
217         if (unlikely(rte_mempool_get(tim_ring->chunk_pool, (void **)&chunk)))
218                 return NULL;
219
220         *(uint64_t *)(chunk + tim_ring->nb_chunk_slots) = 0;
221         if (bkt->nb_entry) {
222                 *(uint64_t *)(((struct otx2_tim_ent *)(uintptr_t)
223                                         mirr_bkt->current_chunk) +
224                                 tim_ring->nb_chunk_slots) = (uintptr_t)chunk;
225         } else {
226                 bkt->first_chunk = (uintptr_t)chunk;
227         }
228         return chunk;
229 }
230
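/* Arm a single timer, single-producer flavour: the caller guarantees no
 * other thread arms into this ring, so the chunk remainder can be updated
 * with plain stores once the bucket semaphore has been taken.
 */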
231 static __rte_always_inline int
232 tim_add_entry_sp(struct otx2_tim_ring * const tim_ring,
233                  const uint32_t rel_bkt,
234                  struct rte_event_timer * const tim,
235                  const struct otx2_tim_ent * const pent,
236                  const uint8_t flags)
237 {
238         struct otx2_tim_bkt *mirr_bkt;
239         struct otx2_tim_ent *chunk;
240         struct otx2_tim_bkt *bkt;
241         uint64_t lock_sema;
242         int16_t rem;
243
244 __retry:
245         tim_get_target_bucket(tim_ring, rel_bkt, &bkt, &mirr_bkt);
246
247         /* Get the bucket semaphore. */
248         lock_sema = tim_bkt_fetch_sema_lock(bkt);
249
250         /* Bucket related checks. */
251         if (unlikely(tim_bkt_get_hbt(lock_sema))) {
252                 if (tim_bkt_get_nent(lock_sema) != 0) {
253                         uint64_t hbt_state;
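                        /* The hardware is traversing this bucket (HBT set in
                         * the snapshot) and it still holds entries: wait for
                         * bit 33 of w1 (the HBT field) to clear.  On arm64
                         * this uses LDXR+WFE so the core sleeps until w1 is
                         * written instead of busy polling.  If bit 34 (BSK)
                         * is then clear, the hardware serviced the bucket
                         * rather than skipping it, so the computed target is
                         * stale: drop the lock and retry.
                         */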
254 #ifdef RTE_ARCH_ARM64
255                         asm volatile("          ldxr %[hbt], [%[w1]]    \n"
256                                      "          tbz %[hbt], 33, dne%=   \n"
257                                      "          sevl                    \n"
258                                      "rty%=:    wfe                     \n"
259                                      "          ldxr %[hbt], [%[w1]]    \n"
260                                      "          tbnz %[hbt], 33, rty%=  \n"
261                                      "dne%=:                            \n"
262                                      : [hbt] "=&r"(hbt_state)
263                                      : [w1] "r"((&bkt->w1))
264                                      : "memory");
265 #else
266                         do {
267                                 hbt_state = __atomic_load_n(&bkt->w1,
268                                                             __ATOMIC_RELAXED);
269                         } while (hbt_state & BIT_ULL(33));
270 #endif
271
272                         if (!(hbt_state & BIT_ULL(34))) {
273                                 tim_bkt_dec_lock(bkt);
274                                 goto __retry;
275                         }
276                 }
277         }
278         /* Insert the work. */
279         rem = tim_bkt_fetch_rem(lock_sema);
280
281         if (!rem) {
282                 if (flags & OTX2_TIM_ENA_FB)
283                         chunk = tim_refill_chunk(bkt, mirr_bkt, tim_ring);
284                 if (flags & OTX2_TIM_ENA_DFB)
285                         chunk = tim_insert_chunk(bkt, mirr_bkt, tim_ring);
286
287                 if (unlikely(chunk == NULL)) {
288                         bkt->chunk_remainder = 0;
289                         tim->impl_opaque[0] = 0;
290                         tim->impl_opaque[1] = 0;
291                         tim->state = RTE_EVENT_TIMER_ERROR;
292                         tim_bkt_dec_lock(bkt);
293                         return -ENOMEM;
294                 }
295                 mirr_bkt->current_chunk = (uintptr_t)chunk;
296                 bkt->chunk_remainder = tim_ring->nb_chunk_slots - 1;
297         } else {
298                 chunk = (struct otx2_tim_ent *)mirr_bkt->current_chunk;
299                 chunk += tim_ring->nb_chunk_slots - rem;
300         }
301
302         /* Copy work entry. */
303         *chunk = *pent;
304
305         tim->impl_opaque[0] = (uintptr_t)chunk;
306         tim->impl_opaque[1] = (uintptr_t)bkt;
307         __atomic_store_n(&tim->state, RTE_EVENT_TIMER_ARMED, __ATOMIC_RELEASE);
308         tim_bkt_inc_nent(bkt);
309         tim_bkt_dec_lock_relaxed(bkt);
310
311         return 0;
312 }
313
314 static __rte_always_inline int
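/* Arm a single timer, multi-producer flavour: the bucket semaphore and the
 * chunk remainder are manipulated atomically so several lcores may arm into
 * the same bucket concurrently.
 */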
315 tim_add_entry_mp(struct otx2_tim_ring * const tim_ring,
316                  const uint32_t rel_bkt,
317                  struct rte_event_timer * const tim,
318                  const struct otx2_tim_ent * const pent,
319                  const uint8_t flags)
320 {
321         struct otx2_tim_bkt *mirr_bkt;
322         struct otx2_tim_ent *chunk;
323         struct otx2_tim_bkt *bkt;
324         uint64_t lock_sema;
325         int16_t rem;
326
327 __retry:
328         tim_get_target_bucket(tim_ring, rel_bkt, &bkt, &mirr_bkt);
329         /* Get the bucket semaphore. */
330         lock_sema = tim_bkt_fetch_sema_lock(bkt);
331
332         /* Bucket related checks. */
333         if (unlikely(tim_bkt_get_hbt(lock_sema))) {
334                 if (tim_bkt_get_nent(lock_sema) != 0) {
335                         uint64_t hbt_state;
336 #ifdef RTE_ARCH_ARM64
337                         asm volatile("          ldxr %[hbt], [%[w1]]    \n"
338                                      "          tbz %[hbt], 33, dne%=   \n"
339                                      "          sevl                    \n"
340                                      "rty%=:    wfe                     \n"
341                                      "          ldxr %[hbt], [%[w1]]    \n"
342                                      "          tbnz %[hbt], 33, rty%=  \n"
343                                      "dne%=:                            \n"
344                                      : [hbt] "=&r"(hbt_state)
345                                      : [w1] "r"((&bkt->w1))
346                                      : "memory");
347 #else
348                         do {
349                                 hbt_state = __atomic_load_n(&bkt->w1,
350                                                             __ATOMIC_RELAXED);
351                         } while (hbt_state & BIT_ULL(33));
352 #endif
353
354                         if (!(hbt_state & BIT_ULL(34))) {
355                                 tim_bkt_dec_lock(bkt);
356                                 goto __retry;
357                         }
358                 }
359         }
360
361         rem = tim_bkt_fetch_rem(lock_sema);
362         if (rem < 0) {
363                 tim_bkt_dec_lock(bkt);
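                /* A negative remainder means another producer is currently
                 * allocating and linking a new chunk for this bucket: release
                 * our lock reference and wait (LDXR+WFE on arm64, a relaxed
                 * poll elsewhere) until the remainder becomes non-negative,
                 * then retry the whole arm.
                 */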
364 #ifdef RTE_ARCH_ARM64
365                 uint64_t w1;
366                 asm volatile("          ldxr %[w1], [%[crem]]   \n"
367                              "          tbz %[w1], 63, dne%=            \n"
368                              "          sevl                            \n"
369                              "rty%=:    wfe                             \n"
370                              "          ldxr %[w1], [%[crem]]   \n"
371                              "          tbnz %[w1], 63, rty%=           \n"
372                              "dne%=:                                    \n"
373                              : [w1] "=&r"(w1)
374                              : [crem] "r"(&bkt->w1)
375                              : "memory");
376 #else
377                 while (__atomic_load_n((int64_t *)&bkt->w1, __ATOMIC_RELAXED) <
378                        0)
379                         ;
380 #endif
381                 goto __retry;
382         } else if (!rem) {
383                 /* Only one thread can be here. */
384                 if (flags & OTX2_TIM_ENA_FB)
385                         chunk = tim_refill_chunk(bkt, mirr_bkt, tim_ring);
386                 if (flags & OTX2_TIM_ENA_DFB)
387                         chunk = tim_insert_chunk(bkt, mirr_bkt, tim_ring);
388
389                 if (unlikely(chunk == NULL)) {
390                         tim->impl_opaque[0] = 0;
391                         tim->impl_opaque[1] = 0;
392                         tim->state = RTE_EVENT_TIMER_ERROR;
393                         tim_bkt_set_rem(bkt, 0);
394                         tim_bkt_dec_lock(bkt);
395                         return -ENOMEM;
396                 }
397                 *chunk = *pent;
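                /* Before publishing the new chunk, wait until this thread is
                 * the only lock holder, i.e. every producer still working on
                 * the previous chunk has dropped its reference; only then are
                 * current_chunk and the remainder made visible.
                 */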
398                 if (tim_bkt_fetch_lock(lock_sema)) {
399                         do {
400                                 lock_sema = __atomic_load_n(&bkt->w1,
401                                                             __ATOMIC_RELAXED);
402                         } while (tim_bkt_fetch_lock(lock_sema) - 1);
403                         rte_atomic_thread_fence(__ATOMIC_ACQUIRE);
404                 }
405                 mirr_bkt->current_chunk = (uintptr_t)chunk;
406                 __atomic_store_n(&bkt->chunk_remainder,
407                                 tim_ring->nb_chunk_slots - 1, __ATOMIC_RELEASE);
408         } else {
409                 chunk = (struct otx2_tim_ent *)mirr_bkt->current_chunk;
410                 chunk += tim_ring->nb_chunk_slots - rem;
411                 *chunk = *pent;
412         }
413
414         tim->impl_opaque[0] = (uintptr_t)chunk;
415         tim->impl_opaque[1] = (uintptr_t)bkt;
416         __atomic_store_n(&tim->state, RTE_EVENT_TIMER_ARMED, __ATOMIC_RELEASE);
417         tim_bkt_inc_nent(bkt);
418         tim_bkt_dec_lock_relaxed(bkt);
419
420         return 0;
421 }
422
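/* Copy the entries in [index, cpy_lmt) into consecutive chunk slots, record
 * the per-timer handles (chunk slot and bucket) later used by tim_rm_entry()
 * and mark each timer as armed.
 */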
423 static inline uint16_t
424 tim_cpy_wrk(uint16_t index, uint16_t cpy_lmt,
425             struct otx2_tim_ent *chunk,
426             struct rte_event_timer ** const tim,
427             const struct otx2_tim_ent * const ents,
428             const struct otx2_tim_bkt * const bkt)
429 {
430         for (; index < cpy_lmt; index++) {
431                 *chunk = *(ents + index);
432                 tim[index]->impl_opaque[0] = (uintptr_t)chunk++;
433                 tim[index]->impl_opaque[1] = (uintptr_t)bkt;
434                 tim[index]->state = RTE_EVENT_TIMER_ARMED;
435         }
436
437         return index;
438 }
439
440 /* Burst mode functions */
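/* Arm nb_timers timers that all expire in the same bucket.  The whole burst
 * is serialised on the bucket lock; when the current chunk cannot hold the
 * remaining entries, a new chunk is linked in and the copy continues there.
 * Returns nb_timers on success; on chunk allocation failure rte_errno is set
 * to ENOMEM and a partial count is returned.
 */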
441 static inline int
442 tim_add_entry_brst(struct otx2_tim_ring * const tim_ring,
443                    const uint16_t rel_bkt,
444                    struct rte_event_timer ** const tim,
445                    const struct otx2_tim_ent *ents,
446                    const uint16_t nb_timers, const uint8_t flags)
447 {
448         struct otx2_tim_ent *chunk = NULL;
449         struct otx2_tim_bkt *mirr_bkt;
450         struct otx2_tim_bkt *bkt;
451         uint16_t chunk_remainder;
452         uint16_t index = 0;
453         uint64_t lock_sema;
454         int16_t rem, crem;
455         uint8_t lock_cnt;
456
457 __retry:
458         tim_get_target_bucket(tim_ring, rel_bkt, &bkt, &mirr_bkt);
459
460         /* Only one thread is allowed beyond this point. */
461         lock_sema = tim_bkt_inc_lock(bkt);
462         lock_cnt = (uint8_t)
463                 ((lock_sema >> TIM_BUCKET_W1_S_LOCK) & TIM_BUCKET_W1_M_LOCK);
464
465         if (lock_cnt) {
466                 tim_bkt_dec_lock(bkt);
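                /* Another producer already owns the bucket: drop our
                 * reference and wait for the lock byte to drain to zero
                 * (LDXRB+WFE on arm64) before recomputing the target bucket
                 * and retrying.
                 */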
467 #ifdef RTE_ARCH_ARM64
468                 asm volatile("          ldxrb %w[lock_cnt], [%[lock]]   \n"
469                              "          tst %w[lock_cnt], 255           \n"
470                              "          beq dne%=                       \n"
471                              "          sevl                            \n"
472                              "rty%=:    wfe                             \n"
473                              "          ldxrb %w[lock_cnt], [%[lock]]   \n"
474                              "          tst %w[lock_cnt], 255           \n"
475                              "          bne rty%=                       \n"
476                              "dne%=:                                    \n"
477                              : [lock_cnt] "=&r"(lock_cnt)
478                              : [lock] "r"(&bkt->lock)
479                              : "memory");
480 #else
481                 while (__atomic_load_n(&bkt->lock, __ATOMIC_RELAXED))
482                         ;
483 #endif
484                 goto __retry;
485         }
486
487         /* Bucket related checks. */
488         if (unlikely(tim_bkt_get_hbt(lock_sema))) {
489                 if (tim_bkt_get_nent(lock_sema) != 0) {
490                         uint64_t hbt_state;
491 #ifdef RTE_ARCH_ARM64
492                         asm volatile("          ldxr %[hbt], [%[w1]]    \n"
493                                      "          tbz %[hbt], 33, dne%=   \n"
494                                      "          sevl                    \n"
495                                      "rty%=:    wfe                     \n"
496                                      "          ldxr %[hbt], [%[w1]]    \n"
497                                      "          tbnz %[hbt], 33, rty%=  \n"
498                                      "dne%=:                            \n"
499                                      : [hbt] "=&r"(hbt_state)
500                                      : [w1] "r"((&bkt->w1))
501                                      : "memory");
502 #else
503                         do {
504                                 hbt_state = __atomic_load_n(&bkt->w1,
505                                                             __ATOMIC_RELAXED);
506                         } while (hbt_state & BIT_ULL(33));
507 #endif
508
509                         if (!(hbt_state & BIT_ULL(34))) {
510                                 tim_bkt_dec_lock(bkt);
511                                 goto __retry;
512                         }
513                 }
514         }
515
516         chunk_remainder = tim_bkt_fetch_rem(lock_sema);
517         rem = chunk_remainder - nb_timers;
518         if (rem < 0) {
519                 crem = tim_ring->nb_chunk_slots - chunk_remainder;
520                 if (chunk_remainder && crem) {
521                         chunk = ((struct otx2_tim_ent *)
522                                         mirr_bkt->current_chunk) + crem;
523
524                         index = tim_cpy_wrk(index, chunk_remainder, chunk, tim,
525                                             ents, bkt);
526                         tim_bkt_sub_rem(bkt, chunk_remainder);
527                         tim_bkt_add_nent(bkt, chunk_remainder);
528                 }
529
530                 if (flags & OTX2_TIM_ENA_FB)
531                         chunk = tim_refill_chunk(bkt, mirr_bkt, tim_ring);
532                 if (flags & OTX2_TIM_ENA_DFB)
533                         chunk = tim_insert_chunk(bkt, mirr_bkt, tim_ring);
534
535                 if (unlikely(chunk == NULL)) {
536                         tim_bkt_dec_lock(bkt);
537                         rte_errno = ENOMEM;
538                         tim[index]->state = RTE_EVENT_TIMER_ERROR;
539                         return crem;
540                 }
541                 *(uint64_t *)(chunk + tim_ring->nb_chunk_slots) = 0;
542                 mirr_bkt->current_chunk = (uintptr_t)chunk;
543                 tim_cpy_wrk(index, nb_timers, chunk, tim, ents, bkt);
544
545                 rem = nb_timers - chunk_remainder;
546                 tim_bkt_set_rem(bkt, tim_ring->nb_chunk_slots - rem);
547                 tim_bkt_add_nent(bkt, rem);
548         } else {
549                 chunk = (struct otx2_tim_ent *)mirr_bkt->current_chunk;
550                 chunk += (tim_ring->nb_chunk_slots - chunk_remainder);
551
552                 tim_cpy_wrk(index, nb_timers, chunk, tim, ents, bkt);
553                 tim_bkt_sub_rem(bkt, nb_timers);
554                 tim_bkt_add_nent(bkt, nb_timers);
555         }
556
557         tim_bkt_dec_lock(bkt);
558
559         return nb_timers;
560 }
561
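/* Cancel a previously armed timer.  The impl_opaque[] words written at arm
 * time locate the chunk slot and bucket; the cancel fails with -ENOENT if
 * the timer was never armed, has already fired, or the hardware is busy on
 * its bucket.
 */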
562 static int
563 tim_rm_entry(struct rte_event_timer *tim)
564 {
565         struct otx2_tim_ent *entry;
566         struct otx2_tim_bkt *bkt;
567         uint64_t lock_sema;
568
569         if (tim->impl_opaque[1] == 0 || tim->impl_opaque[0] == 0)
570                 return -ENOENT;
571
572         entry = (struct otx2_tim_ent *)(uintptr_t)tim->impl_opaque[0];
573         if (entry->wqe != tim->ev.u64) {
574                 tim->impl_opaque[0] = 0;
575                 tim->impl_opaque[1] = 0;
576                 return -ENOENT;
577         }
578
579         bkt = (struct otx2_tim_bkt *)(uintptr_t)tim->impl_opaque[1];
580         lock_sema = tim_bkt_inc_lock(bkt);
581         if (tim_bkt_get_hbt(lock_sema) || !tim_bkt_get_nent(lock_sema)) {
582                 tim->impl_opaque[0] = 0;
583                 tim->impl_opaque[1] = 0;
584                 tim_bkt_dec_lock(bkt);
585                 return -ENOENT;
586         }
587
588         entry->w0 = 0;
589         entry->wqe = 0;
590         tim->state = RTE_EVENT_TIMER_CANCELED;
591         tim->impl_opaque[0] = 0;
592         tim->impl_opaque[1] = 0;
593         tim_bkt_dec_lock(bkt);
594
595         return 0;
596 }
597
598 #endif /* __OTX2_TIM_WORKER_H__ */