RTE_FLOW_ERROR_TYPE_ACTION, NULL,
"can only have a single decap"
" action in a flow");
- if (action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS)
- return rte_flow_error_set(error, EINVAL,
- RTE_FLOW_ERROR_TYPE_ACTION, NULL,
- "can't have decap action after"
- " modify action");
/* decap action is valid on egress only if it is followed by encap */
if (attr->egress) {
for (; action->type != RTE_FLOW_ACTION_TYPE_END &&
RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
NULL, "decap action not supported"
" for egress");
+ } else if (action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS) {
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION, NULL,
+ "can't have decap action after"
+ " modify action");
}
return 0;
}
RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
"cannot allocate resource memory");
*cache_resource = *resource;
+ /*
+ * Depending on the rdma_core version, the glue routine calls
+ * either mlx5dv_dr_action_create_dest_ib_port(domain, ibv_port)
+ * or mlx5dv_dr_action_create_dest_vport(domain, vport_id).
+ */
cache_resource->action =
- mlx5_glue->dr_create_flow_action_dest_vport
+ mlx5_glue->dr_create_flow_action_dest_port
(priv->sh->fdb_domain, resource->port_id);
if (!cache_resource->action) {
rte_free(cache_resource);
.direction = !!egress,
}
};
- struct mlx5_hlist_entry *pos;
+ struct mlx5_hlist_entry *pos = mlx5_hlist_lookup(sh->flow_tbls,
+ table_key.v64);
struct mlx5_flow_tbl_data_entry *tbl_data;
-
-#ifdef HAVE_MLX5DV_DR
int ret;
void *domain;
- pos = mlx5_hlist_lookup(sh->flow_tbls, table_key.v64);
if (pos) {
tbl_data = container_of(pos, struct mlx5_flow_tbl_data_entry,
entry);
tbl = &tbl_data->tbl;
- if (!tbl->obj) {
- rte_flow_error_set(error, ENOKEY,
- RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
- NULL, "cannot find created table");
- return NULL;
- }
rte_atomic32_inc(&tbl->refcnt);
return tbl;
}
}
rte_atomic32_inc(&tbl->refcnt);
return tbl;
-#else
- /* Just to make the compiling pass when no HAVE_MLX5DV_DR defined. */
- pos = mlx5_hlist_lookup(sh->flow_tbls, table_key.v64);
- if (pos) {
- tbl_data = container_of(pos, struct mlx5_flow_tbl_data_entry,
- entry);
- tbl = &tbl_data->tbl;
- if (!tbl->obj) {
- rte_flow_error_set(error, ENOKEY,
- RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
- NULL, "cannot find created table");
- return NULL;
- }
- rte_atomic32_inc(&tbl->refcnt);
- return tbl;
- }
- return NULL;
-#endif
}
/**
* Pointer to rte_eth_dev structure.
* @param[in, out] matcher
* Pointer to flow matcher.
+ * @param[in, out] key
+ * Pointer to flow table key.
* @parm[in, out] dev_flow
* Pointer to the dev_flow.
* @param[out] error
static int
flow_dv_matcher_register(struct rte_eth_dev *dev,
struct mlx5_flow_dv_matcher *matcher,
+ union mlx5_flow_tbl_key *key,
struct mlx5_flow *dev_flow,
struct rte_flow_error *error)
{
.type = IBV_FLOW_ATTR_NORMAL,
.match_mask = (void *)&matcher->mask,
};
- struct mlx5_flow_tbl_resource *tbl = NULL;
+ struct mlx5_flow_tbl_resource *tbl;
+ struct mlx5_flow_tbl_data_entry *tbl_data;
+ tbl = flow_dv_tbl_resource_get(dev, key->table_id, key->direction,
+ key->domain, error);
+ if (!tbl)
+ return -rte_errno; /* No need to refill the error info */
+ tbl_data = container_of(tbl, struct mlx5_flow_tbl_data_entry, tbl);
/* Lookup from cache. */
- LIST_FOREACH(cache_matcher, &sh->matchers, next) {
+ LIST_FOREACH(cache_matcher, &tbl_data->matchers, next) {
if (matcher->crc == cache_matcher->crc &&
matcher->priority == cache_matcher->priority &&
- matcher->egress == cache_matcher->egress &&
- matcher->group == cache_matcher->group &&
- matcher->transfer == cache_matcher->transfer &&
!memcmp((const void *)matcher->mask.buf,
(const void *)cache_matcher->mask.buf,
cache_matcher->mask.size)) {
DRV_LOG(DEBUG,
- "priority %hd use %s matcher %p: refcnt %d++",
+ "%s group %u priority %hd use %s "
+ "matcher %p: refcnt %d++",
+ key->domain ? "FDB" : "NIC", key->table_id,
cache_matcher->priority,
- cache_matcher->egress ? "tx" : "rx",
+ key->direction ? "tx" : "rx",
(void *)cache_matcher,
rte_atomic32_read(&cache_matcher->refcnt));
rte_atomic32_inc(&cache_matcher->refcnt);
dev_flow->dv.matcher = cache_matcher;
+ /* A reused (cached) matcher must not increment the table refcnt again. */
+ flow_dv_tbl_resource_release(dev, tbl);
return 0;
}
}
/* Register new matcher. */
cache_matcher = rte_calloc(__func__, 1, sizeof(*cache_matcher), 0);
- if (!cache_matcher)
+ if (!cache_matcher) {
+ flow_dv_tbl_resource_release(dev, tbl);
return rte_flow_error_set(error, ENOMEM,
RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
"cannot allocate matcher memory");
- tbl = flow_dv_tbl_resource_get(dev, matcher->group,
- matcher->egress, matcher->transfer,
- error);
- if (!tbl) {
- rte_free(cache_matcher);
- return rte_flow_error_set(error, ENOMEM,
- RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
- NULL, "cannot create table");
}
*cache_matcher = *matcher;
dv_attr.match_criteria_enable =
flow_dv_matcher_enable(cache_matcher->mask.buf);
dv_attr.priority = matcher->priority;
- if (matcher->egress)
+ if (key->direction)
dv_attr.flags |= IBV_FLOW_ATTR_FLAGS_EGRESS;
cache_matcher->matcher_object =
mlx5_glue->dv_create_flow_matcher(sh->ctx, &dv_attr, tbl->obj);
RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
NULL, "cannot create matcher");
}
+ /* Save the table information. */
+ cache_matcher->tbl = tbl;
+ rte_atomic32_init(&cache_matcher->refcnt);
+ /* Only the matcher refcnt++ here; the table refcnt++ was done by the get API. */
rte_atomic32_inc(&cache_matcher->refcnt);
- LIST_INSERT_HEAD(&sh->matchers, cache_matcher, next);
+ LIST_INSERT_HEAD(&tbl_data->matchers, cache_matcher, next);
dev_flow->dv.matcher = cache_matcher;
- DRV_LOG(DEBUG, "priority %hd new %s matcher %p: refcnt %d",
+ DRV_LOG(DEBUG, "%s group %u priority %hd new %s matcher %p: refcnt %d",
+ key->domain ? "FDB" : "NIC", key->table_id,
cache_matcher->priority,
- cache_matcher->egress ? "tx" : "rx", (void *)cache_matcher,
+ key->direction ? "tx" : "rx", (void *)cache_matcher,
rte_atomic32_read(&cache_matcher->refcnt));
- rte_atomic32_inc(&tbl->refcnt);
return 0;
}
*
* @param dev[in, out]
* Pointer to rte_eth_dev structure.
- * @param[in, out] resource
- * Pointer to tag resource.
+ * @param[in] tag_be24
+ *   Tag value in big-endian format, right-shifted by 8 bits.
* @parm[in, out] dev_flow
* Pointer to the dev_flow.
* @param[out] error
static int
flow_dv_tag_resource_register
(struct rte_eth_dev *dev,
- struct mlx5_flow_dv_tag_resource *resource,
+ uint32_t tag_be24,
struct mlx5_flow *dev_flow,
struct rte_flow_error *error)
{
struct mlx5_priv *priv = dev->data->dev_private;
struct mlx5_ibv_shared *sh = priv->sh;
struct mlx5_flow_dv_tag_resource *cache_resource;
+ struct mlx5_hlist_entry *entry;
/* Lookup a matching resource from cache. */
- LIST_FOREACH(cache_resource, &sh->tags, next) {
- if (resource->tag == cache_resource->tag) {
- DRV_LOG(DEBUG, "tag resource %p: refcnt %d++",
- (void *)cache_resource,
- rte_atomic32_read(&cache_resource->refcnt));
- rte_atomic32_inc(&cache_resource->refcnt);
- dev_flow->dv.tag_resource = cache_resource;
- return 0;
- }
+ entry = mlx5_hlist_lookup(sh->tag_table, (uint64_t)tag_be24);
+ if (entry) {
+ cache_resource = container_of
+ (entry, struct mlx5_flow_dv_tag_resource, entry);
+ rte_atomic32_inc(&cache_resource->refcnt);
+ dev_flow->dv.tag_resource = cache_resource;
+ DRV_LOG(DEBUG, "cached tag resource %p: refcnt now %d++",
+ (void *)cache_resource,
+ rte_atomic32_read(&cache_resource->refcnt));
+ return 0;
}
- /* Register new resource. */
+ /* Register new resource. */
cache_resource = rte_calloc(__func__, 1, sizeof(*cache_resource), 0);
if (!cache_resource)
return rte_flow_error_set(error, ENOMEM,
RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
"cannot allocate resource memory");
- *cache_resource = *resource;
- cache_resource->action = mlx5_glue->dv_create_flow_action_tag
- (resource->tag);
+ cache_resource->entry.key = (uint64_t)tag_be24;
+ cache_resource->action = mlx5_glue->dv_create_flow_action_tag(tag_be24);
if (!cache_resource->action) {
rte_free(cache_resource);
return rte_flow_error_set(error, ENOMEM,
}
rte_atomic32_init(&cache_resource->refcnt);
rte_atomic32_inc(&cache_resource->refcnt);
- LIST_INSERT_HEAD(&sh->tags, cache_resource, next);
+ if (mlx5_hlist_insert(sh->tag_table, &cache_resource->entry)) {
+ mlx5_glue->destroy_flow_action(cache_resource->action);
+ rte_free(cache_resource);
+ return rte_flow_error_set(error, EEXIST,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ NULL, "cannot insert tag");
+ }
dev_flow->dv.tag_resource = cache_resource;
- DRV_LOG(DEBUG, "new tag resource %p: refcnt %d++",
+ DRV_LOG(DEBUG, "new tag resource %p: refcnt now %d++",
(void *)cache_resource,
rte_atomic32_read(&cache_resource->refcnt));
return 0;
flow_dv_tag_release(struct rte_eth_dev *dev,
struct mlx5_flow_dv_tag_resource *tag)
{
+ struct mlx5_priv *priv = dev->data->dev_private;
+ struct mlx5_ibv_shared *sh = priv->sh;
+
assert(tag);
DRV_LOG(DEBUG, "port %u tag %p: refcnt %d--",
dev->data->port_id, (void *)tag,
rte_atomic32_read(&tag->refcnt));
if (rte_atomic32_dec_and_test(&tag->refcnt)) {
claim_zero(mlx5_glue->destroy_flow_action(tag->action));
- LIST_REMOVE(tag, next);
+ mlx5_hlist_remove(sh->tag_table, &tag->entry);
DRV_LOG(DEBUG, "port %u tag %p: removed",
dev->data->port_id, (void *)tag);
rte_free(tag);
RTE_FLOW_ERROR_TYPE_ACTION,
NULL,
"No eswitch info was found for port");
- if (priv->vport_meta_mask)
- *dst_port_id = priv->vport_meta_tag;
- else
- *dst_port_id = priv->vport_id;
+#ifdef HAVE_MLX5DV_DR_DEVX_PORT
+ /*
+ * This parameter is transferred to
+ * mlx5dv_dr_action_create_dest_ib_port().
+ */
+ *dst_port_id = priv->ibv_port;
+#else
+ /*
+ * Legacy mode; no LAG configuration is supported.
+ * This parameter is transferred to
+ * mlx5dv_dr_action_create_dest_vport().
+ */
+ *dst_port_id = priv->vport_id;
+#endif
return 0;
}
MLX5DV_FLOW_TABLE_TYPE_NIC_RX
};
union flow_dv_attr flow_attr = { .attr = 0 };
- struct mlx5_flow_dv_tag_resource tag_resource;
+ uint32_t tag_be;
+ union mlx5_flow_tbl_key tbl_key;
uint32_t modify_action_position = UINT32_MAX;
void *match_mask = matcher.mask.buf;
void *match_value = dev_flow->dv.value.buf;
action_flags |= MLX5_FLOW_ACTION_MARK_EXT;
break;
}
- tag_resource.tag =
- mlx5_flow_mark_set(MLX5_FLOW_MARK_DEFAULT);
+ tag_be = mlx5_flow_mark_set(MLX5_FLOW_MARK_DEFAULT);
if (!dev_flow->dv.tag_resource)
if (flow_dv_tag_resource_register
- (dev, &tag_resource, dev_flow, error))
- return errno;
+ (dev, tag_be, dev_flow, error))
+ return -rte_errno;
dev_flow->dv.actions[actions_n++] =
dev_flow->dv.tag_resource->action;
break;
/* Fall-through */
case MLX5_RTE_FLOW_ACTION_TYPE_MARK:
/* Legacy (non-extensive) MARK action. */
- tag_resource.tag = mlx5_flow_mark_set
+ tag_be = mlx5_flow_mark_set
(((const struct rte_flow_action_mark *)
(actions->conf))->id);
if (!dev_flow->dv.tag_resource)
if (flow_dv_tag_resource_register
- (dev, &tag_resource, dev_flow, error))
- return errno;
+ (dev, tag_be, dev_flow, error))
+ return -rte_errno;
dev_flow->dv.actions[actions_n++] =
dev_flow->dv.tag_resource->action;
break;
matcher.mask.size);
matcher.priority = mlx5_flow_adjust_priority(dev, priority,
matcher.priority);
- matcher.egress = attr->egress;
- matcher.group = dev_flow->group;
- matcher.transfer = attr->transfer;
- if (flow_dv_matcher_register(dev, &matcher, dev_flow, error))
+ /* The reserved field does not need to be set to 0 here. */
+ tbl_key.domain = attr->transfer;
+ tbl_key.direction = attr->egress;
+ tbl_key.table_id = dev_flow->group;
+ if (flow_dv_matcher_register(dev, &matcher, &tbl_key, dev_flow, error))
return -rte_errno;
return 0;
}
struct mlx5_flow *flow)
{
struct mlx5_flow_dv_matcher *matcher = flow->dv.matcher;
- struct mlx5_priv *priv = dev->data->dev_private;
- struct mlx5_ibv_shared *sh = priv->sh;
- struct mlx5_flow_tbl_data_entry *tbl_data;
assert(matcher->matcher_object);
DRV_LOG(DEBUG, "port %u matcher %p: refcnt %d--",
dev->data->port_id, (void *)matcher,
rte_atomic32_read(&matcher->refcnt));
if (rte_atomic32_dec_and_test(&matcher->refcnt)) {
- struct mlx5_hlist_entry *pos;
- union mlx5_flow_tbl_key table_key = {
- {
- .table_id = matcher->group,
- .reserved = 0,
- .domain = !!matcher->transfer,
- .direction = !!matcher->egress,
- }
- };
claim_zero(mlx5_glue->dv_destroy_flow_matcher
(matcher->matcher_object));
LIST_REMOVE(matcher, next);
- pos = mlx5_hlist_lookup(sh->flow_tbls, table_key.v64);
- if (pos) {
- tbl_data = container_of(pos,
- struct mlx5_flow_tbl_data_entry, entry);
- flow_dv_tbl_resource_release(dev, &tbl_data->tbl);
- }
+ /* The table refcnt is decremented inside the release function. */
+ flow_dv_tbl_resource_release(dev, matcher->tbl);
rte_free(matcher);
DRV_LOG(DEBUG, "port %u matcher %p: removed",
dev->data->port_id, (void *)matcher);