event/cnxk: add timer arm routine
author    Pavan Nikhilesh <pbhagavatula@marvell.com>
Tue, 4 May 2021 00:27:20 +0000 (05:57 +0530)
committer Jerin Jacob <jerinj@marvell.com>
Tue, 4 May 2021 06:10:32 +0000 (08:10 +0200)
Add event timer arm routine. Burst fast-path variants are generated
for single- and multi-producer rings in both FB (free buffer) and
DFB (don't free buffer) chunk modes; the matching variant is installed
when the timer ring is created.
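For context, a minimal sketch of how an application reaches this path
through the public adapter API. This is illustrative only: adptr is
assumed to be an event timer adapter already created on this driver,
and ev_cookie and handle_arm_failure are hypothetical application
names.

    struct rte_event_timer tim = {
        .ev.op = RTE_EVENT_OP_NEW,
        .ev.queue_id = 0,
        .ev.sched_type = RTE_SCHED_TYPE_ATOMIC,
        .ev.event_type = RTE_EVENT_TYPE_TIMER,
        .ev.event_ptr = ev_cookie,
        .state = RTE_EVENT_TIMER_NOT_ARMED,
        /* Must be >= 1 and <= the ring's bucket count. */
        .timeout_ticks = 10,
    };
    struct rte_event_timer *timp = &tim;

    /* Dispatches through cnxk_tim_ops.arm_burst to the
     * cnxk_tim_arm_burst_* variant installed by
     * cnxk_tim_set_fp_ops(). */
    if (rte_event_timer_arm_burst(adptr, &timp, 1) != 1)
        handle_arm_failure(rte_errno);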

Signed-off-by: Pavan Nikhilesh <pbhagavatula@marvell.com>
Signed-off-by: Shijith Thotton <sthotton@marvell.com>
drivers/event/cnxk/cnxk_tim_evdev.c
drivers/event/cnxk/cnxk_tim_evdev.h
drivers/event/cnxk/cnxk_tim_worker.c
drivers/event/cnxk/cnxk_tim_worker.h

drivers/event/cnxk/cnxk_tim_evdev.c
index e06fe2f..ecc952a 100644
@@ -76,6 +76,21 @@ free:
        return rc;
 }
 
+static void
+cnxk_tim_set_fp_ops(struct cnxk_tim_ring *tim_ring)
+{
+       uint8_t prod_flag = !tim_ring->prod_type_sp;
+
+       /* Indexed as [DFB/FB][SP/MP]. */
+       const rte_event_timer_arm_burst_t arm_burst[2][2] = {
+#define FP(_name, _f2, _f1, flags) [_f2][_f1] = cnxk_tim_arm_burst_##_name,
+               TIM_ARM_FASTPATH_MODES
+#undef FP
+       };
+
+       cnxk_tim_ops.arm_burst = arm_burst[tim_ring->ena_dfb][prod_flag];
+}
+
 static void
 cnxk_tim_ring_info_get(const struct rte_event_timer_adapter *adptr,
                       struct rte_event_timer_adapter_info *adptr_info)
@@ -173,6 +188,9 @@ cnxk_tim_ring_create(struct rte_event_timer_adapter *adptr)
        plt_write64((uint64_t)tim_ring->bkt, tim_ring->base + TIM_LF_RING_BASE);
        plt_write64(tim_ring->aura, tim_ring->base + TIM_LF_RING_AURA);
 
+       /* Set fastpath ops. */
+       cnxk_tim_set_fp_ops(tim_ring);
+
        /* Update SSO xae count. */
        cnxk_sso_updt_xae_cnt(cnxk_sso_pmd_priv(dev->event_dev), tim_ring,
                              RTE_EVENT_TYPE_TIMER);
drivers/event/cnxk/cnxk_tim_evdev.h
index f689541..1f2aad1 100644
@@ -14,6 +14,7 @@
 #include <rte_event_timer_adapter.h>
 #include <rte_malloc.h>
 #include <rte_memzone.h>
+#include <rte_reciprocal.h>
 
 #include "roc_api.h"
 
 #define CNXK_TIM_CHNK_SLOTS  "tim_chnk_slots"
 #define CNXK_TIM_RINGS_LMT   "tim_rings_lmt"
 
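+/* Fast-path dispatch flags: producer mode (SP/MP) and chunk buffer
+ * mode (FB = free buffer, DFB = don't free buffer).
+ */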
+#define CNXK_TIM_SP      0x1
+#define CNXK_TIM_MP      0x2
+#define CNXK_TIM_ENA_FB  0x10
+#define CNXK_TIM_ENA_DFB 0x20
+
 #define TIM_BUCKET_W1_S_CHUNK_REMAINDER (48)
 #define TIM_BUCKET_W1_M_CHUNK_REMAINDER                                        \
        ((1ULL << (64 - TIM_BUCKET_W1_S_CHUNK_REMAINDER)) - 1)
@@ -107,10 +113,14 @@ struct cnxk_tim_ring {
        uintptr_t base;
        uint16_t nb_chunk_slots;
        uint32_t nb_bkts;
+       uint64_t last_updt_cyc;
+       uint64_t ring_start_cyc;
        uint64_t tck_int;
        uint64_t tot_int;
        struct cnxk_tim_bkt *bkt;
        struct rte_mempool *chunk_pool;
+       struct rte_reciprocal_u64 fast_div;
+       struct rte_reciprocal_u64 fast_bkt;
        uint64_t arm_cnt;
        uint8_t prod_type_sp;
        uint8_t disable_npa;
@@ -201,6 +211,19 @@ cnxk_tim_cntfrq(void)
 }
 #endif
 
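+/* FP(name, DFB(0)/FB(1) index, SP(0)/MP(1) index, arm-time flags) */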
+#define TIM_ARM_FASTPATH_MODES                                                 \
+       FP(sp, 0, 0, CNXK_TIM_ENA_DFB | CNXK_TIM_SP)                           \
+       FP(mp, 0, 1, CNXK_TIM_ENA_DFB | CNXK_TIM_MP)                           \
+       FP(fb_sp, 1, 0, CNXK_TIM_ENA_FB | CNXK_TIM_SP)                         \
+       FP(fb_mp, 1, 1, CNXK_TIM_ENA_FB | CNXK_TIM_MP)
+
+#define FP(_name, _f2, _f1, flags)                                             \
+       uint16_t cnxk_tim_arm_burst_##_name(                                   \
+               const struct rte_event_timer_adapter *adptr,                   \
+               struct rte_event_timer **tim, const uint16_t nb_timers);
+TIM_ARM_FASTPATH_MODES
+#undef FP
+
 int cnxk_tim_caps_get(const struct rte_eventdev *dev, uint64_t flags,
                      uint32_t *caps,
                      const struct rte_event_timer_adapter_ops **ops);
drivers/event/cnxk/cnxk_tim_worker.c
index 49ee852..268f845 100644
@@ -4,3 +4,98 @@
 
 #include "cnxk_tim_evdev.h"
 #include "cnxk_tim_worker.h"
+
+static inline int
+cnxk_tim_arm_checks(const struct cnxk_tim_ring *const tim_ring,
+                   struct rte_event_timer *const tim)
+{
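+       /* Reject timers that are not in the NOT_ARMED state, and
+        * timeouts of zero (too early) or beyond the ring's bucket
+        * count (too late).
+        */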
+       if (unlikely(tim->state)) {
+               tim->state = RTE_EVENT_TIMER_ERROR;
+               rte_errno = EALREADY;
+               goto fail;
+       }
+
+       if (unlikely(!tim->timeout_ticks ||
+                    tim->timeout_ticks > tim_ring->nb_bkts)) {
+               tim->state = tim->timeout_ticks ?
+                                          RTE_EVENT_TIMER_ERROR_TOOLATE :
+                                          RTE_EVENT_TIMER_ERROR_TOOEARLY;
+               rte_errno = EINVAL;
+               goto fail;
+       }
+
+       return 0;
+
+fail:
+       return -EINVAL;
+}
+
+static inline void
+cnxk_tim_format_event(const struct rte_event_timer *const tim,
+                     struct cnxk_tim_ent *const entry)
+{
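+       /* Repack the rte_event bits into the w0 layout expected by the
+        * TIM hardware; the WQE word carries the application's 64-bit
+        * event payload.
+        */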
+       entry->w0 = (tim->ev.event & 0xFFC000000000) >> 6 |
+                   (tim->ev.event & 0xFFFFFFFFF);
+       entry->wqe = tim->ev.u64;
+}
+
+static inline void
+cnxk_tim_sync_start_cyc(struct cnxk_tim_ring *tim_ring)
+{
+       uint64_t cur_cyc = cnxk_tim_cntvct();
+       uint32_t real_bkt;
+
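+       /* Re-sync at most once per full ring rotation: re-derive
+        * ring_start_cyc from the current bucket index hardware reports
+        * in TIM_LF_RING_REL.
+        */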
+       if (cur_cyc - tim_ring->last_updt_cyc > tim_ring->tot_int) {
+               real_bkt = plt_read64(tim_ring->base + TIM_LF_RING_REL) >> 44;
+               cur_cyc = cnxk_tim_cntvct();
+
+               tim_ring->ring_start_cyc =
+                       cur_cyc - (real_bkt * tim_ring->tck_int);
+               tim_ring->last_updt_cyc = cur_cyc;
+       }
+}
+
+static __rte_always_inline uint16_t
+cnxk_tim_timer_arm_burst(const struct rte_event_timer_adapter *adptr,
+                        struct rte_event_timer **tim, const uint16_t nb_timers,
+                        const uint8_t flags)
+{
+       struct cnxk_tim_ring *tim_ring = adptr->data->adapter_priv;
+       struct cnxk_tim_ent entry;
+       uint16_t index;
+       int ret;
+
+       cnxk_tim_sync_start_cyc(tim_ring);
+       for (index = 0; index < nb_timers; index++) {
+               if (cnxk_tim_arm_checks(tim_ring, tim[index]))
+                       break;
+
+               cnxk_tim_format_event(tim[index], &entry);
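+               /* flags is a compile-time constant per generated
+                * variant, so only the matching branch survives.
+                */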
+               if (flags & CNXK_TIM_SP)
+                       ret = cnxk_tim_add_entry_sp(tim_ring,
+                                                   tim[index]->timeout_ticks,
+                                                   tim[index], &entry, flags);
+               if (flags & CNXK_TIM_MP)
+                       ret = cnxk_tim_add_entry_mp(tim_ring,
+                                                   tim[index]->timeout_ticks,
+                                                   tim[index], &entry, flags);
+
+               if (unlikely(ret)) {
+                       rte_errno = -ret;
+                       break;
+               }
+       }
+
+       return index;
+}
+
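+/* Generate one __rte_noinline arm-burst entry point per
+ * (buffer mode, producer mode) combination.
+ */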
+#define FP(_name, _f2, _f1, _flags)                                            \
+       uint16_t __rte_noinline cnxk_tim_arm_burst_##_name(                    \
+               const struct rte_event_timer_adapter *adptr,                   \
+               struct rte_event_timer **tim, const uint16_t nb_timers)        \
+       {                                                                      \
+               return cnxk_tim_timer_arm_burst(adptr, tim, nb_timers,         \
+                                               _flags);                       \
+       }
+TIM_ARM_FASTPATH_MODES
+#undef FP
drivers/event/cnxk/cnxk_tim_worker.h
index d56e673..de8464e 100644
@@ -120,4 +120,304 @@ cnxk_tim_bkt_clr_nent(struct cnxk_tim_bkt *bktp)
        return __atomic_and_fetch(&bktp->w1, v, __ATOMIC_ACQ_REL);
 }
 
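+/* Compute n % d with a precomputed reciprocal, avoiding a hardware
+ * divide on the fast path.
+ */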
+static inline uint64_t
+cnxk_tim_bkt_fast_mod(uint64_t n, uint64_t d, struct rte_reciprocal_u64 R)
+{
+       return (n - (d * rte_reciprocal_divide_u64(n, &R)));
+}
+
+static __rte_always_inline void
+cnxk_tim_get_target_bucket(struct cnxk_tim_ring *const tim_ring,
+                          const uint32_t rel_bkt, struct cnxk_tim_bkt **bkt,
+                          struct cnxk_tim_bkt **mirr_bkt)
+{
+       const uint64_t bkt_cyc = cnxk_tim_cntvct() - tim_ring->ring_start_cyc;
+       uint64_t bucket =
+               rte_reciprocal_divide_u64(bkt_cyc, &tim_ring->fast_div) +
+               rel_bkt;
+       uint64_t mirr_bucket = 0;
+
+       bucket = cnxk_tim_bkt_fast_mod(bucket, tim_ring->nb_bkts,
+                                      tim_ring->fast_bkt);
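+       /* The mirror bucket sits half a ring rotation away; its
+        * current_chunk field tracks the chunk being filled for the
+        * target bucket.
+        */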
+       mirr_bucket =
+               cnxk_tim_bkt_fast_mod(bucket + (tim_ring->nb_bkts >> 1),
+                                     tim_ring->nb_bkts, tim_ring->fast_bkt);
+       *bkt = &tim_ring->bkt[bucket];
+       *mirr_bkt = &tim_ring->bkt[mirr_bucket];
+}
+
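+/* Walk an expired bucket's chunk list, returning every chunk after the
+ * first to the mempool; the first chunk is kept for reuse.
+ */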
+static struct cnxk_tim_ent *
+cnxk_tim_clr_bkt(struct cnxk_tim_ring *const tim_ring,
+                struct cnxk_tim_bkt *const bkt)
+{
+#define TIM_MAX_OUTSTANDING_OBJ 64
+       void *pend_chunks[TIM_MAX_OUTSTANDING_OBJ];
+       struct cnxk_tim_ent *chunk;
+       struct cnxk_tim_ent *pnext;
+       uint8_t objs = 0;
+
+       chunk = ((struct cnxk_tim_ent *)(uintptr_t)bkt->first_chunk);
+       chunk = (struct cnxk_tim_ent *)(uintptr_t)(chunk +
+                                                  tim_ring->nb_chunk_slots)
+                       ->w0;
+       while (chunk) {
+               pnext = (struct cnxk_tim_ent *)(uintptr_t)(
+                       (chunk + tim_ring->nb_chunk_slots)->w0);
+               if (objs == TIM_MAX_OUTSTANDING_OBJ) {
+                       rte_mempool_put_bulk(tim_ring->chunk_pool, pend_chunks,
+                                            objs);
+                       objs = 0;
+               }
+               pend_chunks[objs++] = chunk;
+               chunk = pnext;
+       }
+
+       if (objs)
+               rte_mempool_put_bulk(tim_ring->chunk_pool, pend_chunks, objs);
+
+       return (struct cnxk_tim_ent *)(uintptr_t)bkt->first_chunk;
+}
+
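+/* Get a chunk to append to: allocate from the mempool when the bucket
+ * is live (or has no chain yet); otherwise recycle the expired
+ * bucket's own chain.
+ */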
+static struct cnxk_tim_ent *
+cnxk_tim_refill_chunk(struct cnxk_tim_bkt *const bkt,
+                     struct cnxk_tim_bkt *const mirr_bkt,
+                     struct cnxk_tim_ring *const tim_ring)
+{
+       struct cnxk_tim_ent *chunk;
+
+       if (bkt->nb_entry || !bkt->first_chunk) {
+               if (unlikely(rte_mempool_get(tim_ring->chunk_pool,
+                                            (void **)&chunk)))
+                       return NULL;
+               if (bkt->nb_entry) {
+                       *(uint64_t *)(((struct cnxk_tim_ent *)
+                                              mirr_bkt->current_chunk) +
+                                     tim_ring->nb_chunk_slots) =
+                               (uintptr_t)chunk;
+               } else {
+                       bkt->first_chunk = (uintptr_t)chunk;
+               }
+       } else {
+               chunk = cnxk_tim_clr_bkt(tim_ring, bkt);
+               bkt->first_chunk = (uintptr_t)chunk;
+       }
+       *(uint64_t *)(chunk + tim_ring->nb_chunk_slots) = 0;
+
+       return chunk;
+}
+
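+/* Allocate a fresh chunk and link it at the tail of the bucket's
+ * chunk list.
+ */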
+static struct cnxk_tim_ent *
+cnxk_tim_insert_chunk(struct cnxk_tim_bkt *const bkt,
+                     struct cnxk_tim_bkt *const mirr_bkt,
+                     struct cnxk_tim_ring *const tim_ring)
+{
+       struct cnxk_tim_ent *chunk;
+
+       if (unlikely(rte_mempool_get(tim_ring->chunk_pool, (void **)&chunk)))
+               return NULL;
+
+       *(uint64_t *)(chunk + tim_ring->nb_chunk_slots) = 0;
+       if (bkt->nb_entry) {
+               *(uint64_t *)(((struct cnxk_tim_ent *)(uintptr_t)
+                                      mirr_bkt->current_chunk) +
+                             tim_ring->nb_chunk_slots) = (uintptr_t)chunk;
+       } else {
+               bkt->first_chunk = (uintptr_t)chunk;
+       }
+       return chunk;
+}
+
+static __rte_always_inline int
+cnxk_tim_add_entry_sp(struct cnxk_tim_ring *const tim_ring,
+                     const uint32_t rel_bkt, struct rte_event_timer *const tim,
+                     const struct cnxk_tim_ent *const pent,
+                     const uint8_t flags)
+{
+       struct cnxk_tim_bkt *mirr_bkt;
+       struct cnxk_tim_ent *chunk;
+       struct cnxk_tim_bkt *bkt;
+       uint64_t lock_sema;
+       int16_t rem;
+
+__retry:
+       cnxk_tim_get_target_bucket(tim_ring, rel_bkt, &bkt, &mirr_bkt);
+
+       /* Get bucket semaphore. */
+       lock_sema = cnxk_tim_bkt_fetch_sema_lock(bkt);
+
+       /* Bucket related checks. */
+       if (unlikely(cnxk_tim_bkt_get_hbt(lock_sema))) {
+               if (cnxk_tim_bkt_get_nent(lock_sema) != 0) {
+                       uint64_t hbt_state;
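+                       /* Hardware is traversing this bucket (bit 33
+                        * of w1); wait for it to finish. If the bucket
+                        * was actually consumed rather than skipped
+                        * (bit 34 clear), the target is stale: retry.
+                        */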
+#ifdef RTE_ARCH_ARM64
+                       asm volatile(PLT_CPU_FEATURE_PREAMBLE
+                                    "          ldxr %[hbt], [%[w1]]    \n"
+                                    "          tbz %[hbt], 33, dne%=   \n"
+                                    "          sevl                    \n"
+                                    "rty%=:    wfe                     \n"
+                                    "          ldxr %[hbt], [%[w1]]    \n"
+                                    "          tbnz %[hbt], 33, rty%=  \n"
+                                    "dne%=:                            \n"
+                                    : [hbt] "=&r"(hbt_state)
+                                    : [w1] "r"((&bkt->w1))
+                                    : "memory");
+#else
+                       do {
+                               hbt_state = __atomic_load_n(&bkt->w1,
+                                                           __ATOMIC_RELAXED);
+                       } while (hbt_state & BIT_ULL(33));
+#endif
+
+                       if (!(hbt_state & BIT_ULL(34))) {
+                               cnxk_tim_bkt_dec_lock(bkt);
+                               goto __retry;
+                       }
+               }
+       }
+       /* Insert the work. */
+       rem = cnxk_tim_bkt_fetch_rem(lock_sema);
+
+       if (!rem) {
+               if (flags & CNXK_TIM_ENA_FB)
+                       chunk = cnxk_tim_refill_chunk(bkt, mirr_bkt, tim_ring);
+               if (flags & CNXK_TIM_ENA_DFB)
+                       chunk = cnxk_tim_insert_chunk(bkt, mirr_bkt, tim_ring);
+
+               if (unlikely(chunk == NULL)) {
+                       bkt->chunk_remainder = 0;
+                       tim->impl_opaque[0] = 0;
+                       tim->impl_opaque[1] = 0;
+                       tim->state = RTE_EVENT_TIMER_ERROR;
+                       cnxk_tim_bkt_dec_lock(bkt);
+                       return -ENOMEM;
+               }
+               mirr_bkt->current_chunk = (uintptr_t)chunk;
+               bkt->chunk_remainder = tim_ring->nb_chunk_slots - 1;
+       } else {
+               chunk = (struct cnxk_tim_ent *)mirr_bkt->current_chunk;
+               chunk += tim_ring->nb_chunk_slots - rem;
+       }
+
+       /* Copy work entry. */
+       *chunk = *pent;
+
+       tim->impl_opaque[0] = (uintptr_t)chunk;
+       tim->impl_opaque[1] = (uintptr_t)bkt;
+       __atomic_store_n(&tim->state, RTE_EVENT_TIMER_ARMED, __ATOMIC_RELEASE);
+       cnxk_tim_bkt_inc_nent(bkt);
+       cnxk_tim_bkt_dec_lock_relaxed(bkt);
+
+       return 0;
+}
+
+static __rte_always_inline int
+cnxk_tim_add_entry_mp(struct cnxk_tim_ring *const tim_ring,
+                     const uint32_t rel_bkt, struct rte_event_timer *const tim,
+                     const struct cnxk_tim_ent *const pent,
+                     const uint8_t flags)
+{
+       struct cnxk_tim_bkt *mirr_bkt;
+       struct cnxk_tim_ent *chunk;
+       struct cnxk_tim_bkt *bkt;
+       uint64_t lock_sema;
+       int16_t rem;
+
+__retry:
+       cnxk_tim_get_target_bucket(tim_ring, rel_bkt, &bkt, &mirr_bkt);
+       /* Get bucket semaphore. */
+       lock_sema = cnxk_tim_bkt_fetch_sema_lock(bkt);
+
+       /* Bucket related checks. */
+       if (unlikely(cnxk_tim_bkt_get_hbt(lock_sema))) {
+               if (cnxk_tim_bkt_get_nent(lock_sema) != 0) {
+                       uint64_t hbt_state;
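+                       /* Hardware is traversing this bucket (bit 33
+                        * of w1); wait for it to finish. If the bucket
+                        * was actually consumed rather than skipped
+                        * (bit 34 clear), the target is stale: retry.
+                        */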
+#ifdef RTE_ARCH_ARM64
+                       asm volatile(PLT_CPU_FEATURE_PREAMBLE
+                                    "          ldxr %[hbt], [%[w1]]    \n"
+                                    "          tbz %[hbt], 33, dne%=   \n"
+                                    "          sevl                    \n"
+                                    "rty%=:    wfe                     \n"
+                                    "          ldxr %[hbt], [%[w1]]    \n"
+                                    "          tbnz %[hbt], 33, rty%=  \n"
+                                    "dne%=:                            \n"
+                                    : [hbt] "=&r"(hbt_state)
+                                    : [w1] "r"((&bkt->w1))
+                                    : "memory");
+#else
+                       do {
+                               hbt_state = __atomic_load_n(&bkt->w1,
+                                                           __ATOMIC_RELAXED);
+                       } while (hbt_state & BIT_ULL(33));
+#endif
+
+                       if (!(hbt_state & BIT_ULL(34))) {
+                               cnxk_tim_bkt_dec_lock(bkt);
+                               goto __retry;
+                       }
+               }
+       }
+
+       rem = cnxk_tim_bkt_fetch_rem(lock_sema);
+       if (rem < 0) {
+               cnxk_tim_bkt_dec_lock(bkt);
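+               /* chunk_remainder went negative: another producer owns
+                * the refill. Wait until the remainder (top 16 bits of
+                * w1) turns non-negative, then retry from the top.
+                */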
+#ifdef RTE_ARCH_ARM64
+               asm volatile(PLT_CPU_FEATURE_PREAMBLE
+                            "          ldxr %[rem], [%[crem]]  \n"
+                            "          tbz %[rem], 63, dne%=           \n"
+                            "          sevl                            \n"
+                            "rty%=:    wfe                             \n"
+                            "          ldxr %[rem], [%[crem]]  \n"
+                            "          tbnz %[rem], 63, rty%=          \n"
+                            "dne%=:                                    \n"
+                            : [rem] "=&r"(rem)
+                            : [crem] "r"(&bkt->w1)
+                            : "memory");
+#else
+               while (__atomic_load_n((int64_t *)&bkt->w1, __ATOMIC_RELAXED) <
+                      0)
+                       ;
+#endif
+               goto __retry;
+       } else if (!rem) {
+               /* Only one thread can be here. */
+               if (flags & CNXK_TIM_ENA_FB)
+                       chunk = cnxk_tim_refill_chunk(bkt, mirr_bkt, tim_ring);
+               if (flags & CNXK_TIM_ENA_DFB)
+                       chunk = cnxk_tim_insert_chunk(bkt, mirr_bkt, tim_ring);
+
+               if (unlikely(chunk == NULL)) {
+                       tim->impl_opaque[0] = 0;
+                       tim->impl_opaque[1] = 0;
+                       tim->state = RTE_EVENT_TIMER_ERROR;
+                       cnxk_tim_bkt_set_rem(bkt, 0);
+                       cnxk_tim_bkt_dec_lock(bkt);
+                       return -ENOMEM;
+               }
+               *chunk = *pent;
+               if (cnxk_tim_bkt_fetch_lock(lock_sema)) {
+                       do {
+                               lock_sema = __atomic_load_n(&bkt->w1,
+                                                           __ATOMIC_RELAXED);
+                       } while (cnxk_tim_bkt_fetch_lock(lock_sema) - 1);
+               }
+               rte_atomic_thread_fence(__ATOMIC_ACQUIRE);
+               mirr_bkt->current_chunk = (uintptr_t)chunk;
+               __atomic_store_n(&bkt->chunk_remainder,
+                                tim_ring->nb_chunk_slots - 1,
+                                __ATOMIC_RELEASE);
+       } else {
+               chunk = (struct cnxk_tim_ent *)mirr_bkt->current_chunk;
+               chunk += tim_ring->nb_chunk_slots - rem;
+               *chunk = *pent;
+       }
+
+       tim->impl_opaque[0] = (uintptr_t)chunk;
+       tim->impl_opaque[1] = (uintptr_t)bkt;
+       __atomic_store_n(&tim->state, RTE_EVENT_TIMER_ARMED, __ATOMIC_RELEASE);
+       cnxk_tim_bkt_inc_nent(bkt);
+       cnxk_tim_bkt_dec_lock_relaxed(bkt);
+
+       return 0;
+}
+
 #endif /* __CNXK_TIM_WORKER_H__ */