+/*
+ * Arm a burst of event timers that all share the same expiry tick.
+ *
+ * Validates timeout_tick against the ring, then inserts the timers in
+ * chunks of at most OTX2_TIM_MAX_BURST entries via tim_add_entry_brst().
+ * Returns the number of timers successfully armed; on a validation
+ * failure every timer's state is set accordingly, rte_errno is set to
+ * EINVAL and 0 is returned.
+ */
+static __rte_always_inline uint16_t
+tim_timer_arm_tmo_brst(const struct rte_event_timer_adapter *adptr,
+ struct rte_event_timer **tim,
+ const uint64_t timeout_tick,
+ const uint16_t nb_timers, const uint8_t flags)
+{
+ struct otx2_tim_ent entry[OTX2_TIM_MAX_BURST] __rte_cache_aligned;
+ struct otx2_tim_ring *tim_ring = adptr->data->adapter_priv;
+ uint16_t set_timers = 0;
+ uint16_t arr_idx = 0;
+ uint16_t idx;
+ int ret;
+
+ /*
+ * A zero tick is too early to arm; a tick at or beyond the number of
+ * buckets would wrap past the ring and is too late. Mark every timer
+ * in the burst with the matching error state and arm none of them.
+ */
+ if (unlikely(!timeout_tick || timeout_tick >= tim_ring->nb_bkts)) {
+ const enum rte_event_timer_state state = timeout_tick ?
+ RTE_EVENT_TIMER_ERROR_TOOLATE :
+ RTE_EVENT_TIMER_ERROR_TOOEARLY;
+ for (idx = 0; idx < nb_timers; idx++)
+ tim[idx]->state = state;
+
+ rte_errno = EINVAL;
+ return 0;
+ }
+
+ /* Process the burst in batches of up to OTX2_TIM_MAX_BURST. */
+ while (arr_idx < nb_timers) {
+ /* Stage this batch of timers into the local entry array. */
+ for (idx = 0; idx < OTX2_TIM_MAX_BURST && (arr_idx < nb_timers);
+ idx++, arr_idx++) {
+ tim_format_event(tim[arr_idx], &entry[idx]);
+ }
+ /*
+ * set_timers == arr_idx - idx here, so &tim[set_timers] is the
+ * first timer of the batch just staged. ret is the count this
+ * call managed to insert (inferred from its use below).
+ */
+ ret = tim_add_entry_brst(tim_ring, timeout_tick,
+ &tim[set_timers], entry, idx, flags);
+ set_timers += ret;
+ /* Partial insert: stop and report how many were armed. */
+ if (ret != idx)
+ break;
+ }
+ /* Statistics are maintained only when the stats flag is compiled in. */
+ if (flags & OTX2_TIM_ENA_STATS)
+ rte_atomic64_add(&tim_ring->arm_cnt, set_timers);
+
+ return set_timers;
+}
+
+#define FP(_name, _f4, _f3, _f2, _f1, _flags) \