summary |
shortlog |
log |
commit | commitdiff |
tree
raw |
patch |
inline | side by side (from parent 1:
092454d)
Use C11 atomics with RELAXED ordering instead of rte_atomic ops which
enforce unnecessary barriers on arm64.
Signed-off-by: Pavan Nikhilesh <pbhagavatula@marvell.com>
Reviewed-by: Phil Yang <phil.yang@arm.com>
uint64_t bkt_cyc = rte_rdtsc() - tim_ring->ring_start_cyc;
uint64_t bkt_cyc = rte_rdtsc() - tim_ring->ring_start_cyc;
- stats->evtim_exp_count = rte_atomic64_read(&tim_ring->arm_cnt);
+ stats->evtim_exp_count = __atomic_load_n(&tim_ring->arm_cnt,
+ __ATOMIC_RELAXED);
stats->ev_enq_count = stats->evtim_exp_count;
stats->adapter_tick_count = rte_reciprocal_divide_u64(bkt_cyc,
&tim_ring->fast_div);
stats->ev_enq_count = stats->evtim_exp_count;
stats->adapter_tick_count = rte_reciprocal_divide_u64(bkt_cyc,
&tim_ring->fast_div);
{
struct otx2_tim_ring *tim_ring = adapter->data->adapter_priv;
{
struct otx2_tim_ring *tim_ring = adapter->data->adapter_priv;
- rte_atomic64_clear(&tim_ring->arm_cnt);
+ __atomic_store_n(&tim_ring->arm_cnt, 0, __ATOMIC_RELAXED);
struct otx2_tim_bkt *bkt;
struct rte_mempool *chunk_pool;
struct rte_reciprocal_u64 fast_div;
struct otx2_tim_bkt *bkt;
struct rte_mempool *chunk_pool;
struct rte_reciprocal_u64 fast_div;
- rte_atomic64_t arm_cnt;
uint8_t prod_type_sp;
uint8_t enable_stats;
uint8_t disable_npa;
uint8_t prod_type_sp;
uint8_t enable_stats;
uint8_t disable_npa;
}
if (flags & OTX2_TIM_ENA_STATS)
}
if (flags & OTX2_TIM_ENA_STATS)
- rte_atomic64_add(&tim_ring->arm_cnt, index);
+ __atomic_fetch_add(&tim_ring->arm_cnt, index, __ATOMIC_RELAXED);
break;
}
if (flags & OTX2_TIM_ENA_STATS)
break;
}
if (flags & OTX2_TIM_ENA_STATS)
- rte_atomic64_add(&tim_ring->arm_cnt, set_timers);
+ __atomic_fetch_add(&tim_ring->arm_cnt, set_timers,
+ __ATOMIC_RELAXED);