/* Number of timer-entry slots per chunk: one slot per CNXK_TIM_CHUNK_ALIGNMENT
 * bytes, minus one slot reserved for the next-chunk link pointer.
 */
#define CNXK_TIM_NB_CHUNK_SLOTS(sz) (((sz) / CNXK_TIM_CHUNK_ALIGNMENT) - 1)
#define CNXK_TIM_MIN_CHUNK_SLOTS    (0x1)
#define CNXK_TIM_MAX_CHUNK_SLOTS    (0x1FFE)
#define CNXK_TIM_MAX_POOL_CACHE_SZ  (128)
#define CN9K_TIM_MIN_TMO_TKS	    (256)

/* Devarg key strings accepted by the TIM PMD. */
#define CNXK_TIM_DISABLE_NPA "tim_disable_npa"
#define CNXK_TIM_CHNK_SLOTS  "tim_chnk_slots"
#define CNXK_TIM_STATS_ENA   "tim_stats_ena"
#define CNXK_TIM_RINGS_LMT   "tim_rings_lmt"
#define CNXK_TIM_RING_CTL    "tim_ring_ctl"
#define CNXK_TIM_EXT_CLK     "tim_eclk_freq"

/* Fast-path mode flag bits used to select the arm/tmo burst variants. */
#define CNXK_TIM_SP	   0x1
#define CNXK_TIM_MP	   0x2
#define CNXK_TIM_ENA_FB	   0x10
#define CNXK_TIM_ENA_DFB   0x20
#define CNXK_TIM_ENA_STATS 0x40
#define TIM_BUCKET_W1_S_CHUNK_REMAINDER (48)
#define TIM_BUCKET_W1_M_CHUNK_REMAINDER \
#define TIM_BUCKET_SEMA_WLOCK \
(TIM_BUCKET_CHUNK_REMAIN | (1ull << TIM_BUCKET_W1_S_LOCK))
/* Per-ring overrides parsed from the "tim_ring_ctl" devarg: each entry
 * selects a ring and the chunk-slot count / NPA / stats settings to apply
 * to it, taking precedence over the global devargs.
 */
struct cnxk_tim_ctl {
	uint16_t ring;		/* Ring id the overrides apply to. */
	uint16_t chunk_slots;	/* Per-ring chunk slot count (0 = default). */
	uint16_t disable_npa;	/* Non-zero: bypass NPA buffer pool. */
	uint16_t enable_stats;	/* Non-zero: enable arm/tmo statistics. */
};
struct cnxk_tim_evdev {
struct roc_tim tim;
struct rte_eventdev *event_dev;
uint32_t chunk_sz;
/* Dev args */
uint8_t disable_npa;
- uint16_t chunk_slots;
- uint16_t min_ring_cnt;
-};
-
-enum cnxk_tim_clk_src {
- CNXK_TIM_CLK_SRC_10NS = RTE_EVENT_TIMER_ADAPTER_CPU_CLK,
- CNXK_TIM_CLK_SRC_GPIO = RTE_EVENT_TIMER_ADAPTER_EXT_CLK0,
- CNXK_TIM_CLK_SRC_GTI = RTE_EVENT_TIMER_ADAPTER_EXT_CLK1,
- CNXK_TIM_CLK_SRC_PTP = RTE_EVENT_TIMER_ADAPTER_EXT_CLK2,
+ uint32_t chunk_slots;
+ uint32_t min_ring_cnt;
+ uint8_t enable_stats;
+ uint16_t ring_ctl_cnt;
+ uint64_t ext_clk_freq[ROC_TIM_CLK_SRC_INVALID];
+ struct cnxk_tim_ctl *ring_ctl_data;
};
struct cnxk_tim_bkt {
struct rte_reciprocal_u64 fast_bkt;
uint64_t arm_cnt;
uint8_t prod_type_sp;
+ uint8_t enable_stats;
uint8_t disable_npa;
uint8_t ena_dfb;
+ uint8_t ena_periodic;
uint16_t ring_id;
uint32_t aura;
uint64_t nb_timers;
uint64_t max_tout;
uint64_t nb_chunks;
uint64_t chunk_sz;
- enum cnxk_tim_clk_src clk_src;
+ enum roc_tim_clk_src clk_src;
} __rte_cache_aligned;
struct cnxk_tim_ent {
return mz->addr;
}
-static inline uint64_t
-cnxk_tim_min_tmo_ticks(uint64_t freq)
-{
- if (roc_model_runtime_is_cn9k())
- return CN9K_TIM_MIN_TMO_TKS;
- else /* CN10K min tick is of 1us */
- return freq / USECPERSEC;
-}
-
-static inline uint64_t
-cnxk_tim_min_resolution_ns(uint64_t freq)
+static inline long double
+cnxk_tim_ns_per_tck(uint64_t freq)
{
- return NSECPERSEC / freq;
-}
-
-static inline enum roc_tim_clk_src
-cnxk_tim_convert_clk_src(enum cnxk_tim_clk_src clk_src)
-{
- switch (clk_src) {
- case RTE_EVENT_TIMER_ADAPTER_CPU_CLK:
- return roc_model_runtime_is_cn9k() ? ROC_TIM_CLK_SRC_10NS :
- ROC_TIM_CLK_SRC_GTI;
- default:
- return ROC_TIM_CLK_SRC_INVALID;
- }
+ return (long double)NSECPERSEC / freq;
}
#ifdef RTE_ARCH_ARM64
}
#endif
+static inline enum roc_tim_clk_src
+cnxk_tim_convert_clk_src(enum rte_event_timer_adapter_clk_src clk_src)
+{
+ switch (clk_src) {
+ case RTE_EVENT_TIMER_ADAPTER_CPU_CLK:
+ return ROC_TIM_CLK_SRC_GTI;
+ case RTE_EVENT_TIMER_ADAPTER_EXT_CLK0:
+ return ROC_TIM_CLK_SRC_10NS;
+ case RTE_EVENT_TIMER_ADAPTER_EXT_CLK1:
+ return ROC_TIM_CLK_SRC_GPIO;
+ case RTE_EVENT_TIMER_ADAPTER_EXT_CLK2:
+ return ROC_TIM_CLK_SRC_PTP;
+ case RTE_EVENT_TIMER_ADAPTER_EXT_CLK3:
+ return roc_model_constant_is_cn9k() ? ROC_TIM_CLK_SRC_INVALID :
+ ROC_TIM_CLK_SRC_SYNCE;
+ default:
+ return ROC_TIM_CLK_SRC_INVALID;
+ }
+}
+
+static inline int
+cnxk_tim_get_clk_freq(struct cnxk_tim_evdev *dev, enum roc_tim_clk_src clk_src,
+ uint64_t *freq)
+{
+ if (freq == NULL)
+ return -EINVAL;
+
+ PLT_SET_USED(dev);
+ switch (clk_src) {
+ case ROC_TIM_CLK_SRC_GTI:
+ *freq = cnxk_tim_cntfrq();
+ break;
+ case ROC_TIM_CLK_SRC_10NS:
+ *freq = 1E8;
+ break;
+ case ROC_TIM_CLK_SRC_GPIO:
+ case ROC_TIM_CLK_SRC_PTP:
+ case ROC_TIM_CLK_SRC_SYNCE:
+ *freq = dev->ext_clk_freq[clk_src];
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
/* X-macro table of arm-burst fast-path variants. Each FP() invocation is
 * (name, stats, fb, mp, flags): stats = arm/expiry statistics enabled,
 * fb = NPA free-buffer mode (vs dedicated), mp = multi-producer ring.
 */
#define TIM_ARM_FASTPATH_MODES                                                 \
	FP(sp, 0, 0, 0, CNXK_TIM_ENA_DFB | CNXK_TIM_SP)                        \
	FP(mp, 0, 0, 1, CNXK_TIM_ENA_DFB | CNXK_TIM_MP)                        \
	FP(fb_sp, 0, 1, 0, CNXK_TIM_ENA_FB | CNXK_TIM_SP)                      \
	FP(fb_mp, 0, 1, 1, CNXK_TIM_ENA_FB | CNXK_TIM_MP)                      \
	FP(stats_sp, 1, 0, 0,                                                  \
	   CNXK_TIM_ENA_STATS | CNXK_TIM_ENA_DFB | CNXK_TIM_SP)                \
	FP(stats_mp, 1, 0, 1,                                                  \
	   CNXK_TIM_ENA_STATS | CNXK_TIM_ENA_DFB | CNXK_TIM_MP)                \
	FP(stats_fb_sp, 1, 1, 0,                                               \
	   CNXK_TIM_ENA_STATS | CNXK_TIM_ENA_FB | CNXK_TIM_SP)                 \
	FP(stats_fb_mp, 1, 1, 1,                                               \
	   CNXK_TIM_ENA_STATS | CNXK_TIM_ENA_FB | CNXK_TIM_MP)
/* X-macro table of arm-tmo-tick-burst variants: (name, stats, fb, flags).
 * No sp/mp split here — the common-timeout path is producer-agnostic.
 */
#define TIM_ARM_TMO_FASTPATH_MODES                                             \
	FP(dfb, 0, 0, CNXK_TIM_ENA_DFB)                                        \
	FP(fb, 0, 1, CNXK_TIM_ENA_FB)                                          \
	FP(stats_dfb, 1, 0, CNXK_TIM_ENA_STATS | CNXK_TIM_ENA_DFB)             \
	FP(stats_fb, 1, 1, CNXK_TIM_ENA_STATS | CNXK_TIM_ENA_FB)
/* Declare one arm-burst prototype per fast-path mode. */
#define FP(_name, _f3, _f2, _f1, flags)                                        \
	uint16_t cnxk_tim_arm_burst_##_name(                                   \
		const struct rte_event_timer_adapter *adptr,                   \
		struct rte_event_timer **tim, const uint16_t nb_timers);
TIM_ARM_FASTPATH_MODES
#undef FP
-#define FP(_name, _f1, flags) \
+#define FP(_name, _f2, _f1, flags) \
uint16_t cnxk_tim_arm_tmo_tick_burst_##_name( \
const struct rte_event_timer_adapter *adptr, \
struct rte_event_timer **tim, const uint64_t timeout_tick, \
TIM_ARM_TMO_FASTPATH_MODES
#undef FP
/* Cancel up to nb_timers armed timers; returns the number cancelled. */
uint16_t
cnxk_tim_timer_cancel_burst(const struct rte_event_timer_adapter *adptr,
			    struct rte_event_timer **tim,
			    const uint16_t nb_timers);

/* Report timer adapter capabilities and the internal ops table. */
int cnxk_tim_caps_get(const struct rte_eventdev *dev, uint64_t flags,
		      uint32_t *caps,
		      const struct event_timer_adapter_ops **ops);
/* Probe/teardown of the TIM block against the given SSO instance. */
void cnxk_tim_init(struct roc_sso *sso);
void cnxk_tim_fini(void);