#include "mlx5_flow.h"
#include "mlx5_flow_os.h"
#include "mlx5_rxtx.h"
+#include "rte_pmd_mlx5.h"
#ifdef HAVE_IBV_FLOW_DV_SUPPORT
const struct rte_flow_item_vlan nic_mask = {
.tci = RTE_BE16(UINT16_MAX),
.inner_type = RTE_BE16(UINT16_MAX),
+ .has_more_vlan = 1,
};
const int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
int ret;
	cache_resource = container_of(entry,
				      struct mlx5_flow_dv_encap_decap_resource,
				      entry);
DRV_LOG(DEBUG, "encap/decap resource %p: refcnt %d++",
(void *)cache_resource,
- rte_atomic32_read(&cache_resource->refcnt));
- rte_atomic32_inc(&cache_resource->refcnt);
+ __atomic_load_n(&cache_resource->refcnt,
+ __ATOMIC_RELAXED));
+ __atomic_fetch_add(&cache_resource->refcnt, 1,
+ __ATOMIC_RELAXED);
dev_flow->handle->dvh.rix_encap_decap = cache_resource->idx;
dev_flow->dv.encap_decap = cache_resource;
return 0;
RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
NULL, "cannot create action");
}
- rte_atomic32_init(&cache_resource->refcnt);
- rte_atomic32_inc(&cache_resource->refcnt);
+ __atomic_store_n(&cache_resource->refcnt, 1, __ATOMIC_RELAXED);
if (mlx5_hlist_insert_ex(sh->encaps_decaps, &cache_resource->entry,
flow_dv_encap_decap_resource_match,
(void *)cache_resource)) {
dev_flow->dv.encap_decap = cache_resource;
DRV_LOG(DEBUG, "new encap/decap resource %p: refcnt %d++",
(void *)cache_resource,
- rte_atomic32_read(&cache_resource->refcnt));
+ __atomic_load_n(&cache_resource->refcnt, __ATOMIC_RELAXED));
return 0;
}
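/*
 * The reference counting throughout this patch is converted from the
 * rte_atomic32_* API to GCC __atomic builtins. The mapping, as a rough
 * sketch (the refcnt field type change from rte_atomic32_t to uint32_t
 * is assumed to happen elsewhere in the patch):
 *
 *	rte_atomic32_read(&r)         -> __atomic_load_n(&r, __ATOMIC_RELAXED)
 *	rte_atomic32_inc(&r)          -> __atomic_fetch_add(&r, 1, __ATOMIC_RELAXED)
 *	rte_atomic32_init(&r) + inc   -> __atomic_store_n(&r, 1, __ATOMIC_RELAXED)
 *	rte_atomic32_dec_and_test(&r) -> __atomic_sub_fetch(&r, 1, __ATOMIC_RELAXED) == 0
 */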
int cnt, ret;
MLX5_ASSERT(tbl);
- cnt = rte_atomic32_read(&tbl_data->jump.refcnt);
+ cnt = __atomic_load_n(&tbl_data->jump.refcnt, __ATOMIC_ACQUIRE);
if (!cnt) {
ret = mlx5_flow_os_create_flow_action_dest_flow_tbl
(tbl->obj, &tbl_data->jump.action);
		DRV_LOG(DEBUG, "existing jump table resource %p: refcnt %d++",
(void *)&tbl_data->jump, cnt);
}
- rte_atomic32_inc(&tbl_data->jump.refcnt);
+ __atomic_fetch_add(&tbl_data->jump.refcnt, 1, __ATOMIC_RELEASE);
dev_flow->handle->rix_jump = tbl_data->idx;
dev_flow->dv.jump = &tbl_data->jump;
return 0;
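/*
 * Note on memory ordering (a reading of the intent, not stated in the
 * patch): where the action is created lazily under !cnt, the
 * __ATOMIC_ACQUIRE load pairs with the __ATOMIC_RELEASE increment so
 * that a thread observing a non-zero refcnt also observes the fully
 * created jump action.
 */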
struct mlx5_dev_ctx_shared *sh = priv->sh;
struct mlx5_flow_default_miss_resource *cache_resource =
&sh->default_miss;
- int cnt = rte_atomic32_read(&cache_resource->refcnt);
+ int cnt = __atomic_load_n(&cache_resource->refcnt, __ATOMIC_ACQUIRE);
if (!cnt) {
MLX5_ASSERT(cache_resource->action);
DRV_LOG(DEBUG, "new default miss resource %p: refcnt %d++",
(void *)cache_resource->action, cnt);
}
- rte_atomic32_inc(&cache_resource->refcnt);
+ __atomic_fetch_add(&cache_resource->refcnt, 1, __ATOMIC_RELEASE);
return 0;
}
		DRV_LOG(DEBUG, "port id action resource %p: refcnt %d++",
(void *)cache_resource,
- rte_atomic32_read(&cache_resource->refcnt));
- rte_atomic32_inc(&cache_resource->refcnt);
+ __atomic_load_n(&cache_resource->refcnt,
+ __ATOMIC_RELAXED));
+ __atomic_fetch_add(&cache_resource->refcnt, 1,
+ __ATOMIC_RELAXED);
dev_flow->handle->rix_port_id_action = idx;
dev_flow->dv.port_id_action = cache_resource;
return 0;
RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
NULL, "cannot create action");
}
- rte_atomic32_init(&cache_resource->refcnt);
- rte_atomic32_inc(&cache_resource->refcnt);
+ __atomic_store_n(&cache_resource->refcnt, 1, __ATOMIC_RELAXED);
ILIST_INSERT(sh->ipool[MLX5_IPOOL_PORT_ID], &sh->port_id_action_list,
dev_flow->handle->rix_port_id_action, cache_resource,
next);
dev_flow->dv.port_id_action = cache_resource;
DRV_LOG(DEBUG, "new port id action resource %p: refcnt %d++",
(void *)cache_resource,
- rte_atomic32_read(&cache_resource->refcnt));
+ __atomic_load_n(&cache_resource->refcnt, __ATOMIC_RELAXED));
return 0;
}
		DRV_LOG(DEBUG, "push-VLAN action resource %p: refcnt %d++",
(void *)cache_resource,
- rte_atomic32_read(&cache_resource->refcnt));
- rte_atomic32_inc(&cache_resource->refcnt);
+ __atomic_load_n(&cache_resource->refcnt,
+ __ATOMIC_RELAXED));
+ __atomic_fetch_add(&cache_resource->refcnt, 1,
+ __ATOMIC_RELAXED);
dev_flow->handle->dvh.rix_push_vlan = idx;
dev_flow->dv.push_vlan_res = cache_resource;
return 0;
RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
NULL, "cannot create action");
}
- rte_atomic32_init(&cache_resource->refcnt);
- rte_atomic32_inc(&cache_resource->refcnt);
+ __atomic_store_n(&cache_resource->refcnt, 1, __ATOMIC_RELAXED);
	ILIST_INSERT(sh->ipool[MLX5_IPOOL_PUSH_VLAN],
		     &sh->push_vlan_action_list,
		     dev_flow->handle->dvh.rix_push_vlan,
		     cache_resource, next);
	dev_flow->dv.push_vlan_res = cache_resource;
DRV_LOG(DEBUG, "new push vlan action resource %p: refcnt %d++",
(void *)cache_resource,
- rte_atomic32_read(&cache_resource->refcnt));
+ __atomic_load_n(&cache_resource->refcnt, __ATOMIC_RELAXED));
return 0;
}
/**
	cache_resource = container_of(entry,
				      struct mlx5_flow_dv_modify_hdr_resource,
				      entry);
DRV_LOG(DEBUG, "modify-header resource %p: refcnt %d++",
(void *)cache_resource,
- rte_atomic32_read(&cache_resource->refcnt));
- rte_atomic32_inc(&cache_resource->refcnt);
+ __atomic_load_n(&cache_resource->refcnt,
+ __ATOMIC_RELAXED));
+ __atomic_fetch_add(&cache_resource->refcnt, 1,
+ __ATOMIC_RELAXED);
dev_flow->handle->dvh.modify_hdr = cache_resource;
return 0;
RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
NULL, "cannot create action");
}
- rte_atomic32_init(&cache_resource->refcnt);
- rte_atomic32_inc(&cache_resource->refcnt);
+ __atomic_store_n(&cache_resource->refcnt, 1, __ATOMIC_RELAXED);
if (mlx5_hlist_insert_ex(sh->modify_cmds, &cache_resource->entry,
flow_dv_modify_hdr_resource_match,
(void *)cache_resource)) {
dev_flow->handle->dvh.modify_hdr = cache_resource;
DRV_LOG(DEBUG, "new modify-header resource %p: refcnt %d++",
(void *)cache_resource,
- rte_atomic32_read(&cache_resource->refcnt));
+ __atomic_load_n(&cache_resource->refcnt, __ATOMIC_RELAXED));
return 0;
}
.transfer = !!attr->transfer,
.fdb_def_rule = !!priv->fdb_def_rule,
};
+ const struct rte_eth_hairpin_conf *conf;
if (items == NULL)
return -1;
break;
case RTE_FLOW_ITEM_TYPE_ETH:
ret = mlx5_flow_validate_item_eth(items, item_flags,
- error);
+ true, error);
if (ret < 0)
return ret;
last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L2 :
actions,
"no fate action is found");
}
- /* Continue validation for Xcap and VLAN actions.*/
+ /*
+ * Continue validation for Xcap and VLAN actions.
+	 * If hairpin works in explicit TX rule mode, there is no action
+	 * splitting and the validation of a hairpin ingress flow should be
+	 * the same as for other standard flows.
+ */
if ((action_flags & (MLX5_FLOW_XCAP_ACTIONS |
MLX5_FLOW_VLAN_ACTIONS)) &&
(queue_index == 0xFFFF ||
- mlx5_rxq_get_type(dev, queue_index) != MLX5_RXQ_TYPE_HAIRPIN)) {
+ mlx5_rxq_get_type(dev, queue_index) != MLX5_RXQ_TYPE_HAIRPIN ||
+ ((conf = mlx5_rxq_get_hairpin_conf(dev, queue_index)) != NULL &&
+ conf->tx_explicit != 0))) {
if ((action_flags & MLX5_FLOW_XCAP_ACTIONS) ==
MLX5_FLOW_XCAP_ACTIONS)
return rte_flow_error_set(error, ENOTSUP,
"multiple VLAN actions");
}
}
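/*
 * Usage sketch (application side, not part of this file; needs
 * <rte_ethdev.h>): explicit TX rule mode is requested via struct
 * rte_eth_hairpin_conf when setting up the hairpin RX queue. port_id,
 * queue_id, nb_desc and the peer_* values are placeholders.
 */
struct rte_eth_hairpin_conf hairpin_conf = {
	.peer_count = 1,
	.tx_explicit = 1,	/* the application inserts TX rules itself */
};
hairpin_conf.peers[0].port = peer_port_id;
hairpin_conf.peers[0].queue = peer_queue_id;
ret = rte_eth_rx_hairpin_queue_setup(port_id, queue_id, nb_desc,
				     &hairpin_conf);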
- /* Hairpin flow will add one more TAG action. */
+ /*
+ * Hairpin flow will add one more TAG action in TX implicit mode.
+ * In TX explicit mode, there will be no hairpin flow ID.
+ */
if (hairpin > 0)
rw_act_num += MLX5_ACT_NUM_SET_TAG;
	/* extra metadata enabled: one more TAG action will be added. */
.dst.addr_bytes = "\xff\xff\xff\xff\xff\xff",
.src.addr_bytes = "\xff\xff\xff\xff\xff\xff",
.type = RTE_BE16(0xffff),
+ .has_vlan = 0,
};
- void *headers_m;
- void *headers_v;
+ void *hdrs_m;
+ void *hdrs_v;
char *l24_v;
unsigned int i;
if (!eth_m)
eth_m = &nic_mask;
if (inner) {
- headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
+ hdrs_m = MLX5_ADDR_OF(fte_match_param, matcher,
inner_headers);
- headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
+ hdrs_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
} else {
- headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
+ hdrs_m = MLX5_ADDR_OF(fte_match_param, matcher,
outer_headers);
- headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
+ hdrs_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
}
- memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m, dmac_47_16),
+ memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, hdrs_m, dmac_47_16),
	       &eth_m->dst, sizeof(eth_m->dst));
/* The value must be in the range of the mask. */
- l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v, dmac_47_16);
+ l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, hdrs_v, dmac_47_16);
for (i = 0; i < sizeof(eth_m->dst); ++i)
l24_v[i] = eth_m->dst.addr_bytes[i] & eth_v->dst.addr_bytes[i];
- memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m, smac_47_16),
+ memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, hdrs_m, smac_47_16),
	       &eth_m->src, sizeof(eth_m->src));
- l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v, smac_47_16);
+ l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, hdrs_v, smac_47_16);
/* The value must be in the range of the mask. */
	for (i = 0; i < sizeof(eth_m->src); ++i)
l24_v[i] = eth_m->src.addr_bytes[i] & eth_v->src.addr_bytes[i];
- if (eth_v->type) {
- /* When ethertype is present set mask for tagged VLAN. */
- MLX5_SET(fte_match_set_lyr_2_4, headers_m, cvlan_tag, 1);
- /* Set value for tagged VLAN if ethertype is 802.1Q. */
- if (eth_v->type == RTE_BE16(RTE_ETHER_TYPE_VLAN) ||
- eth_v->type == RTE_BE16(RTE_ETHER_TYPE_QINQ)) {
- MLX5_SET(fte_match_set_lyr_2_4, headers_v, cvlan_tag,
- 1);
- /* Return here to avoid setting match on ethertype. */
- return;
- }
- }
/*
* HW supports match on one Ethertype, the Ethertype following the last
* VLAN tag of the packet (see PRM).
	 * HW is optimized for IPv4/IPv6. In such cases, avoid setting
	 * ethertype, and use ip_version field instead.
* eCPRI over Ether layer will use type value 0xAEFE.
*/
- if (eth_v->type == RTE_BE16(RTE_ETHER_TYPE_IPV4) &&
- eth_m->type == 0xFFFF) {
- flow_dv_set_match_ip_version(group, headers_v, headers_m, 4);
- } else if (eth_v->type == RTE_BE16(RTE_ETHER_TYPE_IPV6) &&
- eth_m->type == 0xFFFF) {
- flow_dv_set_match_ip_version(group, headers_v, headers_m, 6);
- } else {
- MLX5_SET(fte_match_set_lyr_2_4, headers_m, ethertype,
- rte_be_to_cpu_16(eth_m->type));
- l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
- ethertype);
- *(uint16_t *)(l24_v) = eth_m->type & eth_v->type;
+ if (eth_m->type == 0xFFFF) {
+		/* Set cvlan_tag mask for any single/multi/un-tagged case. */
+ MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, cvlan_tag, 1);
+ switch (eth_v->type) {
+ case RTE_BE16(RTE_ETHER_TYPE_VLAN):
+ MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, cvlan_tag, 1);
+ return;
+ case RTE_BE16(RTE_ETHER_TYPE_QINQ):
+ MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, svlan_tag, 1);
+ MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, svlan_tag, 1);
+ return;
+ case RTE_BE16(RTE_ETHER_TYPE_IPV4):
+ flow_dv_set_match_ip_version(group, hdrs_v, hdrs_m, 4);
+ return;
+ case RTE_BE16(RTE_ETHER_TYPE_IPV6):
+ flow_dv_set_match_ip_version(group, hdrs_v, hdrs_m, 6);
+ return;
+ default:
+ break;
+ }
}
+ if (eth_m->has_vlan) {
+ MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, cvlan_tag, 1);
+ if (eth_v->has_vlan) {
+ /*
+			 * Here, when the has_more_vlan field in the VLAN
+			 * item is also not set, only single-tagged packets
+			 * will be matched.
+ */
+ MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, cvlan_tag, 1);
+ return;
+ }
+ }
+ MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, ethertype,
+ rte_be_to_cpu_16(eth_m->type));
+ l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, hdrs_v, ethertype);
+ *(uint16_t *)(l24_v) = eth_m->type & eth_v->type;
}
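/*
 * Usage sketch (application side; all names are placeholders): with the
 * new has_vlan bit, a single-tagged packet can be matched without
 * pinning the ethertype, mirroring the translation above.
 */
struct rte_flow_item_eth eth_spec = { .has_vlan = 1 };
struct rte_flow_item_eth eth_mask = { .has_vlan = 1 };
struct rte_flow_item pattern[] = {
	{ .type = RTE_FLOW_ITEM_TYPE_ETH,
	  .spec = &eth_spec, .mask = &eth_mask },
	{ .type = RTE_FLOW_ITEM_TYPE_END },
};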
/**
{
const struct rte_flow_item_vlan *vlan_m = item->mask;
const struct rte_flow_item_vlan *vlan_v = item->spec;
- void *headers_m;
- void *headers_v;
+ void *hdrs_m;
+ void *hdrs_v;
uint16_t tci_m;
uint16_t tci_v;
if (inner) {
- headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
+ hdrs_m = MLX5_ADDR_OF(fte_match_param, matcher,
inner_headers);
- headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
+ hdrs_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
} else {
- headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
+ hdrs_m = MLX5_ADDR_OF(fte_match_param, matcher,
outer_headers);
- headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
+ hdrs_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
/*
		 * This is a workaround; masks are not supported,
		 * and have been pre-validated.
* When VLAN item exists in flow, mark packet as tagged,
* even if TCI is not specified.
*/
- MLX5_SET(fte_match_set_lyr_2_4, headers_m, cvlan_tag, 1);
- MLX5_SET(fte_match_set_lyr_2_4, headers_v, cvlan_tag, 1);
+ if (!MLX5_GET(fte_match_set_lyr_2_4, hdrs_v, svlan_tag)) {
+ MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, cvlan_tag, 1);
+ MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, cvlan_tag, 1);
+ }
if (!vlan_v)
return;
if (!vlan_m)
vlan_m = &rte_flow_item_vlan_mask;
tci_m = rte_be_to_cpu_16(vlan_m->tci);
tci_v = rte_be_to_cpu_16(vlan_m->tci & vlan_v->tci);
- MLX5_SET(fte_match_set_lyr_2_4, headers_m, first_vid, tci_m);
- MLX5_SET(fte_match_set_lyr_2_4, headers_v, first_vid, tci_v);
- MLX5_SET(fte_match_set_lyr_2_4, headers_m, first_cfi, tci_m >> 12);
- MLX5_SET(fte_match_set_lyr_2_4, headers_v, first_cfi, tci_v >> 12);
- MLX5_SET(fte_match_set_lyr_2_4, headers_m, first_prio, tci_m >> 13);
- MLX5_SET(fte_match_set_lyr_2_4, headers_v, first_prio, tci_v >> 13);
+ MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, first_vid, tci_m);
+ MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, first_vid, tci_v);
+ MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, first_cfi, tci_m >> 12);
+ MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, first_cfi, tci_v >> 12);
+ MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, first_prio, tci_m >> 13);
+ MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, first_prio, tci_v >> 13);
/*
* HW is optimized for IPv4/IPv6. In such cases, avoid setting
* ethertype, and use ip_version field instead.
*/
- if (vlan_v->inner_type == RTE_BE16(RTE_ETHER_TYPE_IPV4) &&
- vlan_m->inner_type == 0xFFFF) {
- flow_dv_set_match_ip_version(group, headers_v, headers_m, 4);
- } else if (vlan_v->inner_type == RTE_BE16(RTE_ETHER_TYPE_IPV6) &&
- vlan_m->inner_type == 0xFFFF) {
- flow_dv_set_match_ip_version(group, headers_v, headers_m, 6);
- } else {
- MLX5_SET(fte_match_set_lyr_2_4, headers_m, ethertype,
- rte_be_to_cpu_16(vlan_m->inner_type));
- MLX5_SET(fte_match_set_lyr_2_4, headers_v, ethertype,
- rte_be_to_cpu_16(vlan_m->inner_type &
- vlan_v->inner_type));
+ if (vlan_m->inner_type == 0xFFFF) {
+ switch (vlan_v->inner_type) {
+ case RTE_BE16(RTE_ETHER_TYPE_VLAN):
+ MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, svlan_tag, 1);
+ MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, svlan_tag, 1);
+ MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, cvlan_tag, 0);
+ return;
+ case RTE_BE16(RTE_ETHER_TYPE_IPV4):
+ flow_dv_set_match_ip_version(group, hdrs_v, hdrs_m, 4);
+ return;
+ case RTE_BE16(RTE_ETHER_TYPE_IPV6):
+ flow_dv_set_match_ip_version(group, hdrs_v, hdrs_m, 6);
+ return;
+ default:
+ break;
+ }
+ }
+ if (vlan_m->has_more_vlan && vlan_v->has_more_vlan) {
+ MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, svlan_tag, 1);
+ MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, svlan_tag, 1);
+ /* Only one vlan_tag bit can be set. */
+ MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, cvlan_tag, 0);
+ return;
}
+ MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, ethertype,
+ rte_be_to_cpu_16(vlan_m->inner_type));
+ MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, ethertype,
+ rte_be_to_cpu_16(vlan_m->inner_type & vlan_v->inner_type));
}
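/*
 * Companion sketch (application side; placeholders): matching packets
 * carrying at least two VLAN tags by also setting has_more_vlan on the
 * outer VLAN item, which the translation above maps to svlan_tag.
 */
struct rte_flow_item_eth qinq_eth_spec = { .has_vlan = 1 };
struct rte_flow_item_eth qinq_eth_mask = { .has_vlan = 1 };
struct rte_flow_item_vlan qinq_vlan_spec = { .has_more_vlan = 1 };
struct rte_flow_item_vlan qinq_vlan_mask = { .has_more_vlan = 1 };
struct rte_flow_item qinq_pattern[] = {
	{ .type = RTE_FLOW_ITEM_TYPE_ETH,
	  .spec = &qinq_eth_spec, .mask = &qinq_eth_mask },
	{ .type = RTE_FLOW_ITEM_TYPE_VLAN,
	  .spec = &qinq_vlan_spec, .mask = &qinq_vlan_mask },
	{ .type = RTE_FLOW_ITEM_TYPE_END },
};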
/**
* Flow matcher value.
* @param[in] item
* Flow pattern to translate.
- * @param[in] item_flags
- * Bit-fields that holds the items detected until now.
* @param[in] inner
* Item is inner pattern.
* @param[in] group
static void
flow_dv_translate_item_ipv4(void *matcher, void *key,
const struct rte_flow_item *item,
- const uint64_t item_flags,
int inner, uint32_t group)
{
const struct rte_flow_item_ipv4 *ipv4_m = item->mask;
headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
}
flow_dv_set_match_ip_version(group, headers_v, headers_m, 4);
- /*
- * On outer header (which must contains L2), or inner header with L2,
- * set cvlan_tag mask bit to mark this packet as untagged.
- * This should be done even if item->spec is empty.
- */
- if (!inner || item_flags & MLX5_FLOW_LAYER_INNER_L2)
- MLX5_SET(fte_match_set_lyr_2_4, headers_m, cvlan_tag, 1);
if (!ipv4_v)
return;
if (!ipv4_m)
* Flow matcher value.
* @param[in] item
* Flow pattern to translate.
- * @param[in] item_flags
- * Bit-fields that holds the items detected until now.
* @param[in] inner
* Item is inner pattern.
* @param[in] group
static void
flow_dv_translate_item_ipv6(void *matcher, void *key,
const struct rte_flow_item *item,
- const uint64_t item_flags,
int inner, uint32_t group)
{
const struct rte_flow_item_ipv6 *ipv6_m = item->mask;
headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
}
flow_dv_set_match_ip_version(group, headers_v, headers_m, 6);
- /*
- * On outer header (which must contains L2), or inner header with L2,
- * set cvlan_tag mask bit to mark this packet as untagged.
- * This should be done even if item->spec is empty.
- */
- if (!inner || item_flags & MLX5_FLOW_LAYER_INNER_L2)
- MLX5_SET(fte_match_set_lyr_2_4, headers_m, cvlan_tag, 1);
if (!ipv6_v)
return;
if (!ipv6_m)
tbl_data = container_of(pos, struct mlx5_flow_tbl_data_entry,
entry);
tbl = &tbl_data->tbl;
- rte_atomic32_inc(&tbl->refcnt);
+ __atomic_fetch_add(&tbl->refcnt, 1, __ATOMIC_RELAXED);
return tbl;
}
tbl_data = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_JUMP], &idx);
	 * No multi-thread support for now, but still better to initialize
	 * the reference count before inserting it into the hash list.
*/
- rte_atomic32_init(&tbl->refcnt);
+ __atomic_store_n(&tbl->refcnt, 0, __ATOMIC_RELAXED);
/* Jump action reference count is initialized here. */
- rte_atomic32_init(&tbl_data->jump.refcnt);
+ __atomic_store_n(&tbl_data->jump.refcnt, 0, __ATOMIC_RELAXED);
pos->key = table_key.v64;
ret = mlx5_hlist_insert(sh->flow_tbls, pos);
if (ret < 0) {
mlx5_flow_os_destroy_flow_tbl(tbl->obj);
mlx5_ipool_free(sh->ipool[MLX5_IPOOL_JUMP], idx);
}
- rte_atomic32_inc(&tbl->refcnt);
+ __atomic_fetch_add(&tbl->refcnt, 1, __ATOMIC_RELAXED);
return tbl;
}
if (!tbl)
return 0;
- if (rte_atomic32_dec_and_test(&tbl->refcnt)) {
+ if (__atomic_sub_fetch(&tbl->refcnt, 1, __ATOMIC_RELAXED) == 0) {
struct mlx5_hlist_entry *pos = &tbl_data->entry;
mlx5_flow_os_destroy_flow_tbl(tbl->obj);
cache_matcher->priority,
key->direction ? "tx" : "rx",
(void *)cache_matcher,
- rte_atomic32_read(&cache_matcher->refcnt));
- rte_atomic32_inc(&cache_matcher->refcnt);
+ __atomic_load_n(&cache_matcher->refcnt,
+ __ATOMIC_RELAXED));
+ __atomic_fetch_add(&cache_matcher->refcnt, 1,
+ __ATOMIC_RELAXED);
dev_flow->handle->dvh.matcher = cache_matcher;
/* old matcher should not make the table ref++. */
flow_dv_tbl_resource_release(dev, tbl);
}
/* Save the table information */
cache_matcher->tbl = tbl;
- rte_atomic32_init(&cache_matcher->refcnt);
/* only matcher ref++, table ref++ already done above in get API. */
- rte_atomic32_inc(&cache_matcher->refcnt);
+ __atomic_store_n(&cache_matcher->refcnt, 1, __ATOMIC_RELAXED);
LIST_INSERT_HEAD(&tbl_data->matchers, cache_matcher, next);
dev_flow->handle->dvh.matcher = cache_matcher;
DRV_LOG(DEBUG, "%s group %u priority %hd new %s matcher %p: refcnt %d",
key->domain ? "FDB" : "NIC", key->table_id,
cache_matcher->priority,
key->direction ? "tx" : "rx", (void *)cache_matcher,
- rte_atomic32_read(&cache_matcher->refcnt));
+ __atomic_load_n(&cache_matcher->refcnt, __ATOMIC_RELAXED));
return 0;
}
if (entry) {
cache_resource = container_of
(entry, struct mlx5_flow_dv_tag_resource, entry);
- rte_atomic32_inc(&cache_resource->refcnt);
+ __atomic_fetch_add(&cache_resource->refcnt, 1,
+ __ATOMIC_RELAXED);
dev_flow->handle->dvh.rix_tag = cache_resource->idx;
dev_flow->dv.tag_resource = cache_resource;
DRV_LOG(DEBUG, "cached tag resource %p: refcnt now %d++",
(void *)cache_resource,
- rte_atomic32_read(&cache_resource->refcnt));
+ __atomic_load_n(&cache_resource->refcnt,
+ __ATOMIC_RELAXED));
return 0;
}
/* Register new resource. */
RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
NULL, "cannot create action");
}
- rte_atomic32_init(&cache_resource->refcnt);
- rte_atomic32_inc(&cache_resource->refcnt);
+ __atomic_store_n(&cache_resource->refcnt, 1, __ATOMIC_RELAXED);
if (mlx5_hlist_insert(sh->tag_table, &cache_resource->entry)) {
mlx5_flow_os_destroy_flow_action(cache_resource->action);
mlx5_free(cache_resource);
dev_flow->dv.tag_resource = cache_resource;
DRV_LOG(DEBUG, "new tag resource %p: refcnt now %d++",
(void *)cache_resource,
- rte_atomic32_read(&cache_resource->refcnt));
+ __atomic_load_n(&cache_resource->refcnt, __ATOMIC_RELAXED));
return 0;
}
return 0;
DRV_LOG(DEBUG, "port %u tag %p: refcnt %d--",
dev->data->port_id, (void *)tag,
- rte_atomic32_read(&tag->refcnt));
- if (rte_atomic32_dec_and_test(&tag->refcnt)) {
+ __atomic_load_n(&tag->refcnt, __ATOMIC_RELAXED));
+ if (__atomic_sub_fetch(&tag->refcnt, 1, __ATOMIC_RELAXED) == 0) {
claim_zero(mlx5_flow_os_destroy_flow_action(tag->action));
mlx5_hlist_remove(sh->tag_table, &tag->entry);
DRV_LOG(DEBUG, "port %u tag %p: removed",
mlx5_flow_tunnel_ip_check(items, next_protocol,
&item_flags, &tunnel);
flow_dv_translate_item_ipv4(match_mask, match_value,
- items, item_flags, tunnel,
+ items, tunnel,
dev_flow->dv.group);
matcher.priority = MLX5_PRIORITY_MAP_L3;
last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV4 :
mlx5_flow_tunnel_ip_check(items, next_protocol,
&item_flags, &tunnel);
flow_dv_translate_item_ipv6(match_mask, match_value,
- items, item_flags, tunnel,
+ items, tunnel,
dev_flow->dv.group);
matcher.priority = MLX5_PRIORITY_MAP_L3;
last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV6 :
if (hrxq_idx) {
*hrxq = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_HRXQ],
hrxq_idx);
- rte_atomic32_inc(&(*hrxq)->refcnt);
+ __atomic_fetch_add(&(*hrxq)->refcnt, 1,
+ __ATOMIC_RELAXED);
}
} else {
struct mlx5_flow_rss_desc *rss_desc =
MLX5_ASSERT(matcher->matcher_object);
DRV_LOG(DEBUG, "port %u matcher %p: refcnt %d--",
dev->data->port_id, (void *)matcher,
- rte_atomic32_read(&matcher->refcnt));
- if (rte_atomic32_dec_and_test(&matcher->refcnt)) {
+ __atomic_load_n(&matcher->refcnt, __ATOMIC_RELAXED));
+ if (__atomic_sub_fetch(&matcher->refcnt, 1, __ATOMIC_RELAXED) == 0) {
claim_zero(mlx5_flow_os_destroy_flow_matcher
(matcher->matcher_object));
LIST_REMOVE(matcher, next);
MLX5_ASSERT(cache_resource->action);
DRV_LOG(DEBUG, "encap/decap resource %p: refcnt %d--",
(void *)cache_resource,
- rte_atomic32_read(&cache_resource->refcnt));
- if (rte_atomic32_dec_and_test(&cache_resource->refcnt)) {
+ __atomic_load_n(&cache_resource->refcnt, __ATOMIC_RELAXED));
+ if (__atomic_sub_fetch(&cache_resource->refcnt, 1,
+ __ATOMIC_RELAXED) == 0) {
claim_zero(mlx5_flow_os_destroy_flow_action
(cache_resource->action));
mlx5_hlist_remove(priv->sh->encaps_decaps,
MLX5_ASSERT(cache_resource->action);
DRV_LOG(DEBUG, "jump table resource %p: refcnt %d--",
(void *)cache_resource,
- rte_atomic32_read(&cache_resource->refcnt));
- if (rte_atomic32_dec_and_test(&cache_resource->refcnt)) {
+ __atomic_load_n(&cache_resource->refcnt, __ATOMIC_RELAXED));
+ if (__atomic_sub_fetch(&cache_resource->refcnt, 1,
+ __ATOMIC_RELAXED) == 0) {
claim_zero(mlx5_flow_os_destroy_flow_action
(cache_resource->action));
/* jump action memory free is inside the table release. */
MLX5_ASSERT(cache_resource->action);
DRV_LOG(DEBUG, "default miss resource %p: refcnt %d--",
(void *)cache_resource->action,
- rte_atomic32_read(&cache_resource->refcnt));
- if (rte_atomic32_dec_and_test(&cache_resource->refcnt)) {
+ __atomic_load_n(&cache_resource->refcnt,
+ __ATOMIC_RELAXED));
+ if (__atomic_sub_fetch(&cache_resource->refcnt, 1,
+ __ATOMIC_RELAXED) == 0) {
claim_zero(mlx5_glue->destroy_flow_action
(cache_resource->action));
DRV_LOG(DEBUG, "default miss resource %p: removed",
MLX5_ASSERT(cache_resource->action);
DRV_LOG(DEBUG, "modify-header resource %p: refcnt %d--",
(void *)cache_resource,
- rte_atomic32_read(&cache_resource->refcnt));
- if (rte_atomic32_dec_and_test(&cache_resource->refcnt)) {
+ __atomic_load_n(&cache_resource->refcnt, __ATOMIC_RELAXED));
+ if (__atomic_sub_fetch(&cache_resource->refcnt, 1,
+ __ATOMIC_RELAXED) == 0) {
claim_zero(mlx5_flow_os_destroy_flow_action
(cache_resource->action));
mlx5_hlist_remove(priv->sh->modify_cmds,
MLX5_ASSERT(cache_resource->action);
DRV_LOG(DEBUG, "port ID action resource %p: refcnt %d--",
(void *)cache_resource,
- rte_atomic32_read(&cache_resource->refcnt));
- if (rte_atomic32_dec_and_test(&cache_resource->refcnt)) {
+ __atomic_load_n(&cache_resource->refcnt, __ATOMIC_RELAXED));
+ if (__atomic_sub_fetch(&cache_resource->refcnt, 1,
+ __ATOMIC_RELAXED) == 0) {
claim_zero(mlx5_flow_os_destroy_flow_action
(cache_resource->action));
ILIST_REMOVE(priv->sh->ipool[MLX5_IPOOL_PORT_ID],
MLX5_ASSERT(cache_resource->action);
DRV_LOG(DEBUG, "push VLAN action resource %p: refcnt %d--",
(void *)cache_resource,
- rte_atomic32_read(&cache_resource->refcnt));
- if (rte_atomic32_dec_and_test(&cache_resource->refcnt)) {
+ __atomic_load_n(&cache_resource->refcnt, __ATOMIC_RELAXED));
+ if (__atomic_sub_fetch(&cache_resource->refcnt, 1,
+ __ATOMIC_RELAXED) == 0) {
claim_zero(mlx5_flow_os_destroy_flow_action
(cache_resource->action));
ILIST_REMOVE(priv->sh->ipool[MLX5_IPOOL_PUSH_VLAN],
return ret;
}
+static int
+flow_dv_sync_domain(struct rte_eth_dev *dev, uint32_t domains, uint32_t flags)
+{
+ struct mlx5_priv *priv = dev->data->dev_private;
+ int ret = 0;
+
+ if ((domains & MLX5_DOMAIN_BIT_NIC_RX) && priv->sh->rx_domain != NULL) {
+		ret = mlx5_glue->dr_sync_domain(priv->sh->rx_domain, flags);
+ if (ret != 0)
+ return ret;
+ }
+ if ((domains & MLX5_DOMAIN_BIT_NIC_TX) && priv->sh->tx_domain != NULL) {
+ ret = mlx5_glue->dr_sync_domain(priv->sh->tx_domain, flags);
+ if (ret != 0)
+ return ret;
+ }
+ if ((domains & MLX5_DOMAIN_BIT_FDB) && priv->sh->fdb_domain != NULL) {
+ ret = mlx5_glue->dr_sync_domain(priv->sh->fdb_domain, flags);
+ if (ret != 0)
+ return ret;
+ }
+ return 0;
+}
+
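/*
 * Usage sketch: this callback backs rte_pmd_mlx5_sync_flow() declared
 * in rte_pmd_mlx5.h (included above); e.g., synchronizing queued NIC
 * RX steering rules to the HW for a given port (port_id is a
 * placeholder):
 *
 *	ret = rte_pmd_mlx5_sync_flow(port_id, MLX5_DOMAIN_BIT_NIC_RX);
 */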
const struct mlx5_flow_driver_ops mlx5_flow_dv_drv_ops = {
.validate = flow_dv_validate,
.prepare = flow_dv_prepare,
.action_create = flow_dv_action_create,
.action_destroy = flow_dv_action_destroy,
.action_update = flow_dv_action_update,
+ .sync_domain = flow_dv_sync_domain,
};
#endif /* HAVE_IBV_FLOW_DV_SUPPORT */