common/cnxk: add new PCI IDs to supported devices
[dpdk.git] / drivers / event / cnxk / cnxk_tim_worker.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(C) 2021 Marvell.
3  */
4
5 #include "cnxk_tim_evdev.h"
6 #include "cnxk_tim_worker.h"
7
8 static inline int
9 cnxk_tim_arm_checks(const struct cnxk_tim_ring *const tim_ring,
10                     struct rte_event_timer *const tim)
11 {
12         if (unlikely(tim->state)) {
13                 tim->state = RTE_EVENT_TIMER_ERROR;
14                 rte_errno = EALREADY;
15                 goto fail;
16         }
17
18         if (unlikely(!tim->timeout_ticks ||
19                      tim->timeout_ticks > tim_ring->nb_bkts)) {
20                 tim->state = tim->timeout_ticks ?
21                                            RTE_EVENT_TIMER_ERROR_TOOLATE :
22                                            RTE_EVENT_TIMER_ERROR_TOOEARLY;
23                 rte_errno = EINVAL;
24                 goto fail;
25         }
26
27         return 0;
28
29 fail:
30         return -EINVAL;
31 }
32
33 static inline void
34 cnxk_tim_format_event(const struct rte_event_timer *const tim,
35                       struct cnxk_tim_ent *const entry)
36 {
37         entry->w0 = (tim->ev.event & 0xFFC000000000) >> 6 |
38                     (tim->ev.event & 0xFFFFFFFFF);
39         entry->wqe = tim->ev.u64;
40 }
41
42 static inline void
43 cnxk_tim_sync_start_cyc(struct cnxk_tim_ring *tim_ring)
44 {
45         uint64_t cur_cyc = cnxk_tim_cntvct();
46         uint32_t real_bkt;
47
48         if (cur_cyc - tim_ring->last_updt_cyc > tim_ring->tot_int) {
49                 real_bkt = plt_read64(tim_ring->base + TIM_LF_RING_REL) >> 44;
50                 cur_cyc = cnxk_tim_cntvct();
51
52                 tim_ring->ring_start_cyc =
53                         cur_cyc - (real_bkt * tim_ring->tck_int);
54                 tim_ring->last_updt_cyc = cur_cyc;
55         }
56 }
57
58 static __rte_always_inline uint16_t
59 cnxk_tim_timer_arm_burst(const struct rte_event_timer_adapter *adptr,
60                          struct rte_event_timer **tim, const uint16_t nb_timers,
61                          const uint8_t flags)
62 {
63         struct cnxk_tim_ring *tim_ring = adptr->data->adapter_priv;
64         struct cnxk_tim_ent entry;
65         uint16_t index;
66         int ret;
67
68         cnxk_tim_sync_start_cyc(tim_ring);
69         for (index = 0; index < nb_timers; index++) {
70                 if (cnxk_tim_arm_checks(tim_ring, tim[index]))
71                         break;
72
73                 cnxk_tim_format_event(tim[index], &entry);
74                 if (flags & CNXK_TIM_SP)
75                         ret = cnxk_tim_add_entry_sp(tim_ring,
76                                                     tim[index]->timeout_ticks,
77                                                     tim[index], &entry, flags);
78                 if (flags & CNXK_TIM_MP)
79                         ret = cnxk_tim_add_entry_mp(tim_ring,
80                                                     tim[index]->timeout_ticks,
81                                                     tim[index], &entry, flags);
82
83                 if (unlikely(ret)) {
84                         rte_errno = -ret;
85                         break;
86                 }
87         }
88
89         if (flags & CNXK_TIM_ENA_STATS)
90                 __atomic_fetch_add(&tim_ring->arm_cnt, index, __ATOMIC_RELAXED);
91
92         return index;
93 }
94
/* Stamp out one non-inlined arm-burst entry point per fast-path mode.
 * TIM_ARM_FASTPATH_MODES (from cnxk_tim_worker.h) invokes FP once per
 * mode name/flag combination; each expansion forwards to the always-inline
 * cnxk_tim_timer_arm_burst with its compile-time constant _flags so the
 * flag checks fold away.  _f1.._f3 encode the mode and are unused here.
 */
#define FP(_name, _f3, _f2, _f1, _flags)                                       \
	uint16_t __rte_noinline cnxk_tim_arm_burst_##_name(                    \
		const struct rte_event_timer_adapter *adptr,                   \
		struct rte_event_timer **tim, const uint16_t nb_timers)        \
	{                                                                      \
		return cnxk_tim_timer_arm_burst(adptr, tim, nb_timers,         \
						_flags);                       \
	}
TIM_ARM_FASTPATH_MODES
#undef FP
105
106 static __rte_always_inline uint16_t
107 cnxk_tim_timer_arm_tmo_brst(const struct rte_event_timer_adapter *adptr,
108                             struct rte_event_timer **tim,
109                             const uint64_t timeout_tick,
110                             const uint16_t nb_timers, const uint8_t flags)
111 {
112         struct cnxk_tim_ent entry[CNXK_TIM_MAX_BURST] __rte_cache_aligned;
113         struct cnxk_tim_ring *tim_ring = adptr->data->adapter_priv;
114         uint16_t set_timers = 0;
115         uint16_t arr_idx = 0;
116         uint16_t idx;
117         int ret;
118
119         if (unlikely(!timeout_tick || timeout_tick > tim_ring->nb_bkts)) {
120                 const enum rte_event_timer_state state =
121                         timeout_tick ? RTE_EVENT_TIMER_ERROR_TOOLATE :
122                                              RTE_EVENT_TIMER_ERROR_TOOEARLY;
123                 for (idx = 0; idx < nb_timers; idx++)
124                         tim[idx]->state = state;
125
126                 rte_errno = EINVAL;
127                 return 0;
128         }
129
130         cnxk_tim_sync_start_cyc(tim_ring);
131         while (arr_idx < nb_timers) {
132                 for (idx = 0; idx < CNXK_TIM_MAX_BURST && (arr_idx < nb_timers);
133                      idx++, arr_idx++) {
134                         cnxk_tim_format_event(tim[arr_idx], &entry[idx]);
135                 }
136                 ret = cnxk_tim_add_entry_brst(tim_ring, timeout_tick,
137                                               &tim[set_timers], entry, idx,
138                                               flags);
139                 set_timers += ret;
140                 if (ret != idx)
141                         break;
142         }
143
144         if (flags & CNXK_TIM_ENA_STATS)
145                 __atomic_fetch_add(&tim_ring->arm_cnt, set_timers,
146                                    __ATOMIC_RELAXED);
147
148         return set_timers;
149 }
150
/* Stamp out one non-inlined same-timeout burst entry point per fast-path
 * mode.  TIM_ARM_TMO_FASTPATH_MODES (from cnxk_tim_worker.h) invokes FP
 * once per mode; each expansion forwards to the always-inline
 * cnxk_tim_timer_arm_tmo_brst with its compile-time constant _flags.
 */
#define FP(_name, _f2, _f1, _flags)                                            \
	uint16_t __rte_noinline cnxk_tim_arm_tmo_tick_burst_##_name(           \
		const struct rte_event_timer_adapter *adptr,                   \
		struct rte_event_timer **tim, const uint64_t timeout_tick,     \
		const uint16_t nb_timers)                                      \
	{                                                                      \
		return cnxk_tim_timer_arm_tmo_brst(adptr, tim, timeout_tick,   \
						   nb_timers, _flags);         \
	}
TIM_ARM_TMO_FASTPATH_MODES
#undef FP
162
163 uint16_t
164 cnxk_tim_timer_cancel_burst(const struct rte_event_timer_adapter *adptr,
165                             struct rte_event_timer **tim,
166                             const uint16_t nb_timers)
167 {
168         uint16_t index;
169         int ret;
170
171         RTE_SET_USED(adptr);
172         rte_atomic_thread_fence(__ATOMIC_ACQUIRE);
173         for (index = 0; index < nb_timers; index++) {
174                 if (tim[index]->state == RTE_EVENT_TIMER_CANCELED) {
175                         rte_errno = EALREADY;
176                         break;
177                 }
178
179                 if (tim[index]->state != RTE_EVENT_TIMER_ARMED) {
180                         rte_errno = EINVAL;
181                         break;
182                 }
183                 ret = cnxk_tim_rm_entry(tim[index]);
184                 if (ret) {
185                         rte_errno = -ret;
186                         break;
187                 }
188         }
189
190         return index;
191 }