For SA outbound packets, rte_atomic64_add_return is used to generate
the SQN atomically. Use C11 atomics with RELAXED ordering for the outbound
SQN update instead of the rte_atomic ops, which enforce unnecessary
memory barriers on aarch64.
Signed-off-by: Phil Yang <phil.yang@arm.com>
Reviewed-by: Ruifeng Wang <ruifeng.wang@arm.com>
Reviewed-by: Gavin Hu <gavin.hu@arm.com>
Acked-by: Konstantin Ananyev <konstantin.ananyev@intel.com>
n = *num;
if (SQN_ATOMIC(sa))
- sqn = (uint64_t)rte_atomic64_add_return(&sa->sqn.outb.atom, n);
+ sqn = __atomic_add_fetch(&sa->sqn.outb, n, __ATOMIC_RELAXED);
else {
- sqn = sa->sqn.outb.raw + n;
- sa->sqn.outb.raw = sqn;
+ sqn = sa->sqn.outb + n;
+ sa->sqn.outb = sqn;
}
/* overflow */
{
uint8_t algo_type;
- sa->sqn.outb.raw = 1;
+ sa->sqn.outb = 1;
algo_type = sa->algo_type;
 * place from other frequently accessed data.
*/
union {
- union {
- rte_atomic64_t atom;
- uint64_t raw;
- } outb;
+ uint64_t outb;
struct {
uint32_t rdidx; /* read index */
uint32_t wridx; /* write index */