static inline bool
match_entry(struct mps_tcam_entry *entry, const u8 *eth_addr, const u8 *mask)
{
- if (!memcmp(eth_addr, entry->eth_addr, ETHER_ADDR_LEN) &&
- !memcmp(mask, entry->mask, ETHER_ADDR_LEN))
+ if (!memcmp(eth_addr, entry->eth_addr, RTE_ETHER_ADDR_LEN) &&
+ !memcmp(mask, entry->mask, RTE_ETHER_ADDR_LEN))
return true;
return false;
}
return NULL;
for (i = 0; i < t->size; i++) {
- if (entry[i].state == MPS_ENTRY_UNUSED)
+ if (entry[i].state == MPS_ENTRY_UNUSED ||
+ entry[i].state == MPS_ENTRY_RAWF)
continue; /* entry is not being used */
if (match_entry(&entry[i], eth_addr, mask))
return &entry[i];
t4_os_write_lock(&mpstcam->lock);
entry = cxgbe_mpstcam_lookup(adap->mpstcam, eth_addr, mask);
if (entry) {
- rte_atomic32_add(&entry->refcnt, 1);
+ __atomic_add_fetch(&entry->refcnt, 1, __ATOMIC_RELAXED);
t4_os_write_unlock(&mpstcam->lock);
return entry->idx;
}
/* Fill in the new values */
entry = &mpstcam->entry[ret];
- memcpy(entry->eth_addr, eth_addr, ETHER_ADDR_LEN);
- memcpy(entry->mask, mask, ETHER_ADDR_LEN);
- rte_atomic32_set(&entry->refcnt, 1);
+ memcpy(entry->eth_addr, eth_addr, RTE_ETHER_ADDR_LEN);
+ memcpy(entry->mask, mask, RTE_ETHER_ADDR_LEN);
+ __atomic_store_n(&entry->refcnt, 1, __ATOMIC_RELAXED);
entry->state = MPS_ENTRY_USED;
if (cxgbe_update_free_idx(mpstcam))
/* idx can now be different from what user provided */
entry = &mpstcam->entry[idx];
- memcpy(entry->eth_addr, addr, ETHER_ADDR_LEN);
+ memcpy(entry->eth_addr, addr, RTE_ETHER_ADDR_LEN);
+ memset(entry->mask, ~0, RTE_ETHER_ADDR_LEN);
/* NOTE: we have considered the case that idx returned by t4_change_mac
* will be different from the user provided value only if user
* provided value is -1
*/
if (entry->state == MPS_ENTRY_UNUSED) {
- rte_atomic32_set(&entry->refcnt, 1);
+ __atomic_store_n(&entry->refcnt, 1, __ATOMIC_RELAXED);
entry->state = MPS_ENTRY_USED;
}
*/
/**
 * reset_mpstcam_entry - return a TCAM entry to its pristine unused state.
 * @entry: MPS TCAM entry to reset
 *
 * Zeroes the stored address and mask, drops the reference count to 0,
 * and marks the slot MPS_ENTRY_UNUSED so the allocator can reuse it.
 *
 * Fix: removed leftover unified-diff marker lines that kept the old
 * ETHER_ADDR_LEN/rte_atomic32 variants alongside the new code; only
 * the RTE_ETHER_ADDR_LEN/__atomic version is retained.  RELAXED
 * ordering matches the other refcnt accesses in this file (the entry
 * is manipulated under the table's write lock).
 */
static inline void reset_mpstcam_entry(struct mps_tcam_entry *entry)
{
	memset(entry->eth_addr, 0, RTE_ETHER_ADDR_LEN);
	memset(entry->mask, 0, RTE_ETHER_ADDR_LEN);
	__atomic_store_n(&entry->refcnt, 0, __ATOMIC_RELAXED);
	entry->state = MPS_ENTRY_UNUSED;
}
return -EOPNOTSUPP;
t4_os_write_lock(&t->lock);
entry = &t->entry[idx];
- if (entry->state == MPS_ENTRY_UNUSED) {
+ if (entry->state != MPS_ENTRY_USED) {
t4_os_write_unlock(&t->lock);
return -EINVAL;
}
- if (rte_atomic32_read(&entry->refcnt) == 1)
+ if (__atomic_load_n(&entry->refcnt, __ATOMIC_RELAXED) == 1)
ret = t4_free_raw_mac_filt(adap, pi->viid, entry->eth_addr,
entry->mask, idx, 1, pi->port_id,
false);
else
- ret = rte_atomic32_sub_return(&entry->refcnt, 1);
+ ret = __atomic_sub_fetch(&entry->refcnt, 1, __ATOMIC_RELAXED);
if (ret == 0) {
reset_mpstcam_entry(entry);
return ret;
}
+int cxgbe_mpstcam_rawf_enable(struct port_info *pi)
+{
+ struct adapter *adap = pi->adapter;
+ struct mps_tcam_entry *entry;
+ struct mpstcam_table *t;
+ u16 rawf_idx;
+ int ret = 0;
+
+ t = adap->mpstcam;
+ if (adap->params.rawf_size == 0 || t == NULL)
+ return -EOPNOTSUPP;
+
+ t4_os_write_lock(&t->lock);
+ rawf_idx = adap->params.rawf_start + pi->port_id;
+ entry = &t->entry[rawf_idx];
+ if (__atomic_load_n(&entry->refcnt, __ATOMIC_RELAXED) == 1)
+ goto out_unlock;
+
+ ret = t4_alloc_raw_mac_filt(adap, pi->viid, entry->eth_addr,
+ entry->mask, rawf_idx, 0, pi->port_id,
+ false);
+ if (ret < 0)
+ goto out_unlock;
+
+ __atomic_store_n(&entry->refcnt, 1, __ATOMIC_RELAXED);
+
+out_unlock:
+ t4_os_write_unlock(&t->lock);
+ return ret;
+}
+
+int cxgbe_mpstcam_rawf_disable(struct port_info *pi)
+{
+ struct adapter *adap = pi->adapter;
+ struct mps_tcam_entry *entry;
+ struct mpstcam_table *t;
+ u16 rawf_idx;
+ int ret = 0;
+
+ t = adap->mpstcam;
+ if (adap->params.rawf_size == 0 || t == NULL)
+ return -EOPNOTSUPP;
+
+ t4_os_write_lock(&t->lock);
+ rawf_idx = adap->params.rawf_start + pi->port_id;
+ entry = &t->entry[rawf_idx];
+ if (__atomic_load_n(&entry->refcnt, __ATOMIC_RELAXED) != 1)
+ goto out_unlock;
+
+ ret = t4_free_raw_mac_filt(adap, pi->viid, entry->eth_addr,
+ entry->mask, rawf_idx, 0, pi->port_id,
+ false);
+ if (ret < 0)
+ goto out_unlock;
+
+ __atomic_store_n(&entry->refcnt, 0, __ATOMIC_RELAXED);
+
+out_unlock:
+ t4_os_write_unlock(&t->lock);
+ return ret;
+}
+
struct mpstcam_table *t4_init_mpstcam(struct adapter *adap)
{
+ u16 size = adap->params.arch.mps_tcam_size;
struct mpstcam_table *t;
int i;
- u16 size = adap->params.arch.mps_tcam_size;
t = t4_os_alloc(sizeof(*t) + size * sizeof(struct mps_tcam_entry));
if (!t)
t->entry[i].idx = i;
}
+ /* RAW MAC entries are reserved for match-all wildcard to
+ * match all promiscuous traffic. So, mark them special.
+ */
+ for (i = 0; i < adap->params.rawf_size; i++)
+ t->entry[adap->params.rawf_start + i].state = MPS_ENTRY_RAWF;
+
/* first entry is used by chip. this is overwritten only
* in t4_cleanup_mpstcam()
*/