Add the event timer arm timeout tick burst function. All the timers
requested to be armed through this call share the same timeout.
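
For reference, a minimal usage sketch from the application side
(adapter creation/start and per-timer event fields are assumed to be
set up beforehand; arm_same_timeout is an illustrative helper, not
part of this patch):

	#include <rte_errno.h>
	#include <rte_event_timer_adapter.h>

	/* Arm nb timers that all expire tmo_ticks ticks from now. */
	static int
	arm_same_timeout(const struct rte_event_timer_adapter *adptr,
			 struct rte_event_timer **evtims, uint16_t nb,
			 uint64_t tmo_ticks)
	{
		uint16_t armed;

		/* One timeout for the whole burst. */
		armed = rte_event_timer_arm_tmo_tick_burst(adptr, evtims,
							   tmo_ticks, nb);
		if (armed != nb)
			return -rte_errno; /* e.g. EINVAL or ENOMEM */

		return 0;
	}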
Signed-off-by: Pavan Nikhilesh <pbhagavatula@marvell.com>
#undef FP
};
+ const rte_event_timer_arm_tmo_tick_burst_t arm_tmo_burst[2][2] = {
+#define FP(_name, _f2, _f1, _flags) \
+ [_f2][_f1] = otx2_tim_arm_tmo_tick_burst_ ## _name,
+TIM_ARM_TMO_FASTPATH_MODES
+#undef FP
+ };
+
otx2_tim_ops.arm_burst = arm_burst[tim_ring->optimized]
[tim_ring->ena_dfb][prod_flag];
+ otx2_tim_ops.arm_tmo_tick_burst = arm_tmo_burst[tim_ring->optimized]
+ [tim_ring->ena_dfb];
}
static void
#define OTX2_TIM_MAX_BUCKETS (0xFFFFF)
#define OTX2_TIM_RING_DEF_CHUNK_SZ (4096)
#define OTX2_TIM_CHUNK_ALIGNMENT (16)
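+/* Maximum number of chunk-aligned timer entries that fit in one cache
+ * line; sizes the on-stack staging array used by the burst arm path.
+ */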
+#define OTX2_TIM_MAX_BURST (RTE_CACHE_LINE_SIZE / \
+ OTX2_TIM_CHUNK_ALIGNMENT)
#define OTX2_TIM_NB_CHUNK_SLOTS(sz) (((sz) / OTX2_TIM_CHUNK_ALIGNMENT) - 1)
#define OTX2_TIM_MIN_CHUNK_SLOTS (0x1)
#define OTX2_TIM_MAX_CHUNK_SLOTS (0x1FFE)
TIM_ARM_FASTPATH_MODES
#undef FP
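+/* FP(name, f2, f1, flags): f2 selects the bucket index computation
+ * (0 = modulo, 1 = bitwise AND for power-of-two bucket counts), f1
+ * selects the chunk handling (0 = don't free buffer, 1 = free buffer).
+ */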
+#define TIM_ARM_TMO_FASTPATH_MODES \
+FP(mod, 0, 0, OTX2_TIM_BKT_MOD | OTX2_TIM_ENA_DFB) \
+FP(mod_fb, 0, 1, OTX2_TIM_BKT_MOD | OTX2_TIM_ENA_FB) \
+FP(and, 1, 0, OTX2_TIM_BKT_AND | OTX2_TIM_ENA_DFB) \
+FP(and_fb, 1, 1, OTX2_TIM_BKT_AND | OTX2_TIM_ENA_FB)
+
+#define FP(_name, _f2, _f1, _flags) \
+uint16_t otx2_tim_arm_tmo_tick_burst_ ## _name( \
+ const struct rte_event_timer_adapter *adptr, \
+ struct rte_event_timer **tim, \
+ const uint64_t timeout_tick, const uint16_t nb_timers);
+TIM_ARM_TMO_FASTPATH_MODES
+#undef FP
+
int otx2_tim_caps_get(const struct rte_eventdev *dev, uint64_t flags,
uint32_t *caps,
const struct rte_event_timer_adapter_ops **ops);
return index;
}
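+/* Arm a burst of timers that all share the same timeout tick, staging
+ * up to OTX2_TIM_MAX_BURST entries per bucket insertion.
+ */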
+static __rte_always_inline uint16_t
+tim_timer_arm_tmo_brst(const struct rte_event_timer_adapter *adptr,
+ struct rte_event_timer **tim,
+ const uint64_t timeout_tick,
+ const uint16_t nb_timers, const uint8_t flags)
+{
+ struct otx2_tim_ent entry[OTX2_TIM_MAX_BURST] __rte_cache_aligned;
+ struct otx2_tim_ring *tim_ring = adptr->data->adapter_priv;
+ uint16_t set_timers = 0;
+ uint16_t arr_idx = 0;
+ uint16_t idx;
+ int ret;
+
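+	/* The shared timeout must be at least one tick ahead and must
+	 * fit within the bucket ring.
+	 */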
+ if (unlikely(!timeout_tick || timeout_tick >= tim_ring->nb_bkts)) {
+ const enum rte_event_timer_state state = timeout_tick ?
+ RTE_EVENT_TIMER_ERROR_TOOLATE :
+ RTE_EVENT_TIMER_ERROR_TOOEARLY;
+ for (idx = 0; idx < nb_timers; idx++)
+ tim[idx]->state = state;
+
+ rte_errno = EINVAL;
+ return 0;
+ }
+
+ while (arr_idx < nb_timers) {
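+		/* Format up to a cache line of entries, then insert
+		 * them into the target bucket under a single bucket
+		 * lock.
+		 */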
+ for (idx = 0; idx < OTX2_TIM_MAX_BURST && (arr_idx < nb_timers);
+ idx++, arr_idx++) {
+ tim_format_event(tim[arr_idx], &entry[idx]);
+ }
+ ret = tim_add_entry_brst(tim_ring, timeout_tick,
+ &tim[set_timers], entry, idx, flags);
+ set_timers += ret;
+ if (ret != idx)
+ break;
+ }
+
+ return set_timers;
+}
+
#define FP(_name, _f3, _f2, _f1, _flags) \
uint16_t __rte_noinline \
otx2_tim_arm_burst_ ## _name(const struct rte_event_timer_adapter *adptr, \
}
TIM_ARM_FASTPATH_MODES
#undef FP
+
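+/* Instantiate one otx2_tim_arm_tmo_tick_burst_<name>() entry point per
+ * fast-path mode; the flag set is a compile-time constant so the
+ * inlined tim_timer_arm_tmo_brst() body is specialized per mode.
+ */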
+#define FP(_name, _f2, _f1, _flags) \
+uint16_t __rte_noinline \
+otx2_tim_arm_tmo_tick_burst_ ## _name( \
+ const struct rte_event_timer_adapter *adptr, \
+ struct rte_event_timer **tim, \
+ const uint64_t timeout_tick, \
+ const uint16_t nb_timers) \
+{ \
+ return tim_timer_arm_tmo_brst(adptr, tim, timeout_tick, \
+ nb_timers, _flags); \
+}
+TIM_ARM_TMO_FASTPATH_MODES
+#undef FP
return 0;
}
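+/* Copy formatted entries [index, cpy_lmt) into the bucket chunk and
+ * mark the corresponding timers armed.
+ */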
+static inline uint16_t
+tim_cpy_wrk(uint16_t index, uint16_t cpy_lmt,
+ struct otx2_tim_ent *chunk,
+ struct rte_event_timer ** const tim,
+ const struct otx2_tim_ent * const ents,
+ const struct otx2_tim_bkt * const bkt)
+{
+ for (; index < cpy_lmt; index++) {
+ *chunk = *(ents + index);
+ tim[index]->impl_opaque[0] = (uintptr_t)chunk++;
+ tim[index]->impl_opaque[1] = (uintptr_t)bkt;
+ tim[index]->state = RTE_EVENT_TIMER_ARMED;
+ }
+
+ return index;
+}
+
+/* Burst mode functions */
+static inline int
+tim_add_entry_brst(struct otx2_tim_ring * const tim_ring,
+ const uint16_t rel_bkt,
+ struct rte_event_timer ** const tim,
+ const struct otx2_tim_ent *ents,
+ const uint16_t nb_timers, const uint8_t flags)
+{
+ struct otx2_tim_ent *chunk;
+ struct otx2_tim_bkt *bkt;
+ uint16_t chunk_remainder;
+ uint16_t index = 0;
+ uint64_t lock_sema;
+ int16_t rem, crem;
+ uint8_t lock_cnt;
+
+__retry:
+ bkt = tim_get_target_bucket(tim_ring, rel_bkt, flags);
+
+ /* Only one thread beyond this. */
+ lock_sema = tim_bkt_inc_lock(bkt);
+ lock_cnt = (uint8_t)
+ ((lock_sema >> TIM_BUCKET_W1_S_LOCK) & TIM_BUCKET_W1_M_LOCK);
+
+ if (lock_cnt) {
+ tim_bkt_dec_lock(bkt);
+ goto __retry;
+ }
+
+ /* Bucket related checks. */
+ if (unlikely(tim_bkt_get_hbt(lock_sema))) {
+ tim_bkt_dec_lock(bkt);
+ goto __retry;
+ }
+
+ chunk_remainder = tim_bkt_fetch_rem(lock_sema);
+ rem = chunk_remainder - nb_timers;
+ if (rem < 0) {
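+		/* The burst does not fit in the current chunk: fill the
+		 * remaining slots, then move to a fresh chunk for the
+		 * rest.
+		 */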
+ crem = tim_ring->nb_chunk_slots - chunk_remainder;
+ if (chunk_remainder && crem) {
+ chunk = ((struct otx2_tim_ent *)
+ (uintptr_t)bkt->current_chunk) + crem;
+
+ index = tim_cpy_wrk(index, chunk_remainder, chunk, tim,
+ ents, bkt);
+ tim_bkt_sub_rem(bkt, chunk_remainder);
+ tim_bkt_add_nent(bkt, chunk_remainder);
+ }
+
+ if (flags & OTX2_TIM_ENA_FB)
+ chunk = tim_refill_chunk(bkt, tim_ring);
+ if (flags & OTX2_TIM_ENA_DFB)
+ chunk = tim_insert_chunk(bkt, tim_ring);
+
+ if (unlikely(chunk == NULL)) {
+ tim_bkt_dec_lock(bkt);
+ rte_errno = ENOMEM;
+ tim[index]->state = RTE_EVENT_TIMER_ERROR;
+ return crem;
+ }
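+		/* Clear the next-chunk link word at the end of the new
+		 * chunk.
+		 */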
+ *(uint64_t *)(chunk + tim_ring->nb_chunk_slots) = 0;
+ bkt->current_chunk = (uintptr_t)chunk;
+ tim_cpy_wrk(index, nb_timers, chunk, tim, ents, bkt);
+
+ rem = nb_timers - chunk_remainder;
+ tim_bkt_set_rem(bkt, tim_ring->nb_chunk_slots - rem);
+ tim_bkt_add_nent(bkt, rem);
+ } else {
+ chunk = (struct otx2_tim_ent *)(uintptr_t)bkt->current_chunk;
+ chunk += (tim_ring->nb_chunk_slots - chunk_remainder);
+
+ tim_cpy_wrk(index, nb_timers, chunk, tim, ents, bkt);
+ tim_bkt_sub_rem(bkt, nb_timers);
+ tim_bkt_add_nent(bkt, nb_timers);
+ }
+
+ tim_bkt_dec_lock(bkt);
+
+ return nb_timers;
+}
+
#endif /* __OTX2_TIM_WORKER_H__ */