Replace the rte_atomic32 ops on the reference and usage counters with C11 atomic built-ins, using relaxed memory ordering.
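For reference, a minimal standalone sketch (not part of the patch; a hypothetical plain u32 refcnt variable) of how each converted rte_atomic32 helper maps onto the GCC __atomic built-ins used below, with the same relaxed ordering the patch applies to these lock-protected counters:

#include <stdint.h>
#include <stdio.h>

typedef uint32_t u32;

static u32 refcnt;	/* was rte_atomic32_t refcnt */

int main(void)
{
	/* rte_atomic32_set(&refcnt, 1) */
	__atomic_store_n(&refcnt, 1, __ATOMIC_RELAXED);

	/* rte_atomic32_inc(&refcnt) */
	__atomic_add_fetch(&refcnt, 1, __ATOMIC_RELAXED);

	/* rte_atomic32_read(&refcnt) */
	printf("refcnt = %u\n",
	       (unsigned int)__atomic_load_n(&refcnt, __ATOMIC_RELAXED));

	/* rte_atomic32_dec(&refcnt) */
	__atomic_sub_fetch(&refcnt, 1, __ATOMIC_RELAXED);

	/* rte_atomic32_dec_and_test(&refcnt): true once the count hits zero */
	if (__atomic_sub_fetch(&refcnt, 1, __ATOMIC_RELAXED) == 0)
		printf("last reference dropped\n");

	return 0;
}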
Signed-off-by: Rahul Lakkireddy <rahul.lakkireddy@chelsio.com>
#include <rte_log.h>
#include <rte_debug.h>
#include <rte_pci.h>
-#include <rte_atomic.h>
#include <rte_branch_prediction.h>
#include <rte_memory.h>
#include <rte_tailq.h>
int ret;
t4_os_lock(&ce->lock);
- if (rte_atomic32_dec_and_test(&ce->refcnt)) {
+ if (__atomic_sub_fetch(&ce->refcnt, 1, __ATOMIC_RELAXED) == 0) {
ret = clip6_release_mbox(dev, ce->addr);
if (ret)
dev_debug(adap, "CLIP FW DEL CMD failed: %d", ret);
unsigned int clipt_size = c->clipt_size;
for (e = &c->cl_list[0], end = &c->cl_list[clipt_size]; e != end; ++e) {
- if (rte_atomic32_read(&e->refcnt) == 0) {
+ if (__atomic_load_n(&e->refcnt, __ATOMIC_RELAXED) == 0) {
if (!first_free)
first_free = e;
} else {
ce = find_or_alloc_clipe(ctbl, lip);
if (ce) {
t4_os_lock(&ce->lock);
- if (!rte_atomic32_read(&ce->refcnt)) {
+ if (__atomic_load_n(&ce->refcnt, __ATOMIC_RELAXED) == 0) {
rte_memcpy(ce->addr, lip, sizeof(ce->addr));
if (v6) {
ce->type = FILTER_TYPE_IPV6;
- rte_atomic32_set(&ce->refcnt, 1);
+ __atomic_store_n(&ce->refcnt, 1,
+ __ATOMIC_RELAXED);
ret = clip6_get_mbox(dev, lip);
if (ret)
dev_debug(adap,
ce->type = FILTER_TYPE_IPV4;
}
} else {
- rte_atomic32_inc(&ce->refcnt);
+ __atomic_add_fetch(&ce->refcnt, 1, __ATOMIC_RELAXED);
}
t4_os_unlock(&ce->lock);
}
for (i = 0; i < ctbl->clipt_size; i++) {
t4_os_lock_init(&ctbl->cl_list[i].lock);
- rte_atomic32_set(&ctbl->cl_list[i].refcnt, 0);
+ ctbl->cl_list[i].refcnt = 0;
}
return ctbl;
enum filter_type type; /* entry type */
u32 addr[4]; /* IPV4 or IPV6 address */
rte_spinlock_t lock; /* entry lock */
- rte_atomic32_t refcnt; /* entry reference count */
+ u32 refcnt; /* entry reference count */
};
struct clip_tbl {
#include <rte_debug.h>
#include <rte_pci.h>
#include <rte_bus_pci.h>
-#include <rte_atomic.h>
#include <rte_branch_prediction.h>
#include <rte_memory.h>
#include <rte_tailq.h>
#include <rte_log.h>
#include <rte_debug.h>
#include <rte_pci.h>
-#include <rte_atomic.h>
#include <rte_branch_prediction.h>
#include <rte_memory.h>
#include <rte_tailq.h>
if (t->tid_tab[tid]) {
t->tid_tab[tid] = NULL;
- rte_atomic32_dec(&t->conns_in_use);
+ __atomic_sub_fetch(&t->conns_in_use, 1, __ATOMIC_RELAXED);
if (t->hash_base && tid >= t->hash_base) {
if (family == FILTER_TYPE_IPV4)
- rte_atomic32_dec(&t->hash_tids_in_use);
+ __atomic_sub_fetch(&t->hash_tids_in_use, 1,
+ __ATOMIC_RELAXED);
} else {
if (family == FILTER_TYPE_IPV4)
- rte_atomic32_dec(&t->tids_in_use);
+ __atomic_sub_fetch(&t->tids_in_use, 1,
+ __ATOMIC_RELAXED);
}
}
t->tid_tab[tid] = data;
if (t->hash_base && tid >= t->hash_base) {
if (family == FILTER_TYPE_IPV4)
- rte_atomic32_inc(&t->hash_tids_in_use);
+ __atomic_add_fetch(&t->hash_tids_in_use, 1,
+ __ATOMIC_RELAXED);
} else {
if (family == FILTER_TYPE_IPV4)
- rte_atomic32_inc(&t->tids_in_use);
+ __atomic_add_fetch(&t->tids_in_use, 1,
+ __ATOMIC_RELAXED);
}
- rte_atomic32_inc(&t->conns_in_use);
+ __atomic_add_fetch(&t->conns_in_use, 1, __ATOMIC_RELAXED);
}
/**
t->afree = NULL;
t->atids_in_use = 0;
- rte_atomic32_init(&t->tids_in_use);
- rte_atomic32_set(&t->tids_in_use, 0);
- rte_atomic32_init(&t->conns_in_use);
- rte_atomic32_set(&t->conns_in_use, 0);
+ t->tids_in_use = 0;
+ t->conns_in_use = 0;
/* Setup the free list for atid_tab and clear the stid bitmap. */
if (natids) {
unsigned int atids_in_use;
/* TIDs in the TCAM */
- rte_atomic32_t tids_in_use;
+ u32 tids_in_use;
/* TIDs in the HASH */
- rte_atomic32_t hash_tids_in_use;
- rte_atomic32_t conns_in_use;
+ u32 hash_tids_in_use;
+ u32 conns_in_use;
rte_spinlock_t atid_lock __rte_cache_aligned;
rte_spinlock_t ftid_lock;
*/
void cxgbe_l2t_release(struct l2t_entry *e)
{
- if (rte_atomic32_read(&e->refcnt) != 0)
- rte_atomic32_dec(&e->refcnt);
+ if (__atomic_load_n(&e->refcnt, __ATOMIC_RELAXED) != 0)
+ __atomic_sub_fetch(&e->refcnt, 1, __ATOMIC_RELAXED);
}
/**
struct l2t_entry *first_free = NULL;
for (e = &d->l2tab[0], end = &d->l2tab[d->l2t_size]; e != end; ++e) {
- if (rte_atomic32_read(&e->refcnt) == 0) {
+ if (__atomic_load_n(&e->refcnt, __ATOMIC_RELAXED) == 0) {
if (!first_free)
first_free = e;
} else {
e = find_or_alloc_l2e(d, vlan, port, eth_addr);
if (e) {
t4_os_lock(&e->lock);
- if (!rte_atomic32_read(&e->refcnt)) {
+ if (__atomic_load_n(&e->refcnt, __ATOMIC_RELAXED) == 0) {
e->state = L2T_STATE_SWITCHING;
e->vlan = vlan;
e->lport = port;
rte_memcpy(e->dmac, eth_addr, RTE_ETHER_ADDR_LEN);
- rte_atomic32_set(&e->refcnt, 1);
+ __atomic_store_n(&e->refcnt, 1, __ATOMIC_RELAXED);
ret = write_l2e(dev, e, 0, !L2T_LPBK, !L2T_ARPMISS);
if (ret < 0)
dev_debug(adap, "Failed to write L2T entry: %d",
ret);
} else {
- rte_atomic32_inc(&e->refcnt);
+ __atomic_add_fetch(&e->refcnt, 1, __ATOMIC_RELAXED);
}
t4_os_unlock(&e->lock);
}
d->l2tab[i].idx = i;
d->l2tab[i].state = L2T_STATE_UNUSED;
t4_os_lock_init(&d->l2tab[i].lock);
- rte_atomic32_set(&d->l2tab[i].refcnt, 0);
+ d->l2tab[i].refcnt = 0;
}
return d;
u8 lport; /* destination port */
u8 dmac[RTE_ETHER_ADDR_LEN]; /* destination MAC address */
rte_spinlock_t lock; /* entry lock */
- rte_atomic32_t refcnt; /* entry reference count */
+ u32 refcnt; /* entry reference count */
};
struct l2t_data {
t4_os_write_lock(&mpstcam->lock);
entry = cxgbe_mpstcam_lookup(adap->mpstcam, eth_addr, mask);
if (entry) {
- rte_atomic32_add(&entry->refcnt, 1);
+ __atomic_add_fetch(&entry->refcnt, 1, __ATOMIC_RELAXED);
t4_os_write_unlock(&mpstcam->lock);
return entry->idx;
}
entry = &mpstcam->entry[ret];
memcpy(entry->eth_addr, eth_addr, RTE_ETHER_ADDR_LEN);
memcpy(entry->mask, mask, RTE_ETHER_ADDR_LEN);
- rte_atomic32_set(&entry->refcnt, 1);
+ __atomic_store_n(&entry->refcnt, 1, __ATOMIC_RELAXED);
entry->state = MPS_ENTRY_USED;
if (cxgbe_update_free_idx(mpstcam))
* provided value is -1
*/
if (entry->state == MPS_ENTRY_UNUSED) {
- rte_atomic32_set(&entry->refcnt, 1);
+ __atomic_store_n(&entry->refcnt, 1, __ATOMIC_RELAXED);
entry->state = MPS_ENTRY_USED;
}
{
memset(entry->eth_addr, 0, RTE_ETHER_ADDR_LEN);
memset(entry->mask, 0, RTE_ETHER_ADDR_LEN);
- rte_atomic32_clear(&entry->refcnt);
+ __atomic_store_n(&entry->refcnt, 0, __ATOMIC_RELAXED);
entry->state = MPS_ENTRY_UNUSED;
}
return -EINVAL;
}
- if (rte_atomic32_read(&entry->refcnt) == 1)
+ if (__atomic_load_n(&entry->refcnt, __ATOMIC_RELAXED) == 1)
ret = t4_free_raw_mac_filt(adap, pi->viid, entry->eth_addr,
entry->mask, idx, 1, pi->port_id,
false);
else
- ret = rte_atomic32_sub_return(&entry->refcnt, 1);
+ ret = __atomic_sub_fetch(&entry->refcnt, 1, __ATOMIC_RELAXED);
if (ret == 0) {
reset_mpstcam_entry(entry);
u8 mask[RTE_ETHER_ADDR_LEN];
struct mpstcam_table *mpstcam; /* backptr */
- rte_atomic32_t refcnt;
+ u32 refcnt;
};
struct mpstcam_table {
#include <rte_log.h>
#include <rte_debug.h>
#include <rte_pci.h>
-#include <rte_atomic.h>
#include <rte_branch_prediction.h>
#include <rte_memory.h>
#include <rte_memzone.h>
struct smt_entry *e, *end, *first_free = NULL;
for (e = &s->smtab[0], end = &s->smtab[s->smt_size]; e != end; ++e) {
- if (!rte_atomic32_read(&e->refcnt)) {
+ if (__atomic_load_n(&e->refcnt, __ATOMIC_RELAXED) == 0) {
if (!first_free)
first_free = e;
} else {
e = find_or_alloc_smte(s, smac);
if (e) {
t4_os_lock(&e->lock);
- if (!rte_atomic32_read(&e->refcnt)) {
+ if (__atomic_load_n(&e->refcnt, __ATOMIC_RELAXED) == 0) {
e->pfvf = pfvf;
rte_memcpy(e->src_mac, smac, RTE_ETHER_ADDR_LEN);
ret = write_smt_entry(dev, e);
goto out_write_unlock;
}
e->state = SMT_STATE_SWITCHING;
- rte_atomic32_set(&e->refcnt, 1);
+ __atomic_store_n(&e->refcnt, 1, __ATOMIC_RELAXED);
} else {
- rte_atomic32_inc(&e->refcnt);
+ __atomic_add_fetch(&e->refcnt, 1, __ATOMIC_RELAXED);
}
t4_os_unlock(&e->lock);
}
void cxgbe_smt_release(struct smt_entry *e)
{
- if (rte_atomic32_read(&e->refcnt))
- rte_atomic32_dec(&e->refcnt);
+ if (__atomic_load_n(&e->refcnt, __ATOMIC_RELAXED) != 0)
+ __atomic_sub_fetch(&e->refcnt, 1, __ATOMIC_RELAXED);
}
/**
s->smtab[i].state = SMT_STATE_UNUSED;
memset(&s->smtab[i].src_mac, 0, RTE_ETHER_ADDR_LEN);
t4_os_lock_init(&s->smtab[i].lock);
- rte_atomic32_set(&s->smtab[i].refcnt, 0);
+ s->smtab[i].refcnt = 0;
}
return s;
}
u16 pfvf;
u16 hw_idx;
u8 src_mac[RTE_ETHER_ADDR_LEN];
- rte_atomic32_t refcnt;
+ u32 refcnt;
rte_spinlock_t lock;
};