/**
* Find existing encap/decap resource or create and register a new one.
*
- * @param dev[in, out]
+ * @param[in, out] dev
* Pointer to rte_eth_dev structure.
* @param[in, out] resource
* Pointer to encap/decap resource.
/**
* Find existing table jump resource or create and register a new one.
*
- * @param dev[in, out]
+ * @param[in, out] dev
* Pointer to rte_eth_dev structure.
* @param[in, out] resource
* Pointer to jump table resource.
/**
* Find existing table port ID resource or create and register a new one.
*
- * @param dev[in, out]
+ * @param[in, out] dev
* Pointer to rte_eth_dev structure.
* @param[in, out] resource
* Pointer to port ID action resource.
/**
* Find existing push vlan resource or create and register a new one.
*
- * @param dev[in, out]
+ * @param[in, out] dev
* Pointer to rte_eth_dev structure.
* @param[in, out] resource
* Pointer to push VLAN action resource.
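For reference, doxygen's documented form puts the direction attribute immediately after the @param command and before the parameter name, which is what the four hunks above converge on. A generic sketch of the corrected shape (the function and parameter names are placeholders, not taken from the driver):

/**
 * Find an existing resource or create and register a new one.
 *
 * @param[in, out] dev
 *   Pointer to rte_eth_dev structure.
 * @param[in, out] resource
 *   Pointer to the resource to look up or register.
 */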
const struct rte_flow_attr *attributes,
bool external, struct rte_flow_error *error)
{
- uint32_t max_group = attributes->transfer ? MLX5_MAX_TABLES_FDB :
- MLX5_MAX_TABLES;
uint32_t target_group, table;
int ret = 0;
&table, error);
if (ret)
return ret;
- if (table >= max_group)
- return rte_flow_error_set(error, EINVAL,
- RTE_FLOW_ERROR_TYPE_ATTR_GROUP, NULL,
- "target group index out of range");
if (attributes->group == target_group)
return rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ACTION, NULL,
NULL,
"groups are not supported");
#else
- uint32_t max_group = attributes->transfer ?
- MLX5_MAX_TABLES_FDB :
- external ?
- MLX5_MAX_TABLES_EXTERNAL :
- MLX5_MAX_TABLES;
uint32_t table;
int ret;
&table, error);
if (ret)
return ret;
- if (table >= max_group)
- return rte_flow_error_set(error, EINVAL,
- RTE_FLOW_ERROR_TYPE_ATTR_GROUP, NULL,
- "group index out of range");
#endif
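Both deleted checks enforced the bounds of the old fixed-size table arrays (MLX5_MAX_TABLES, MLX5_MAX_TABLES_FDB, MLX5_MAX_TABLES_EXTERNAL). With tables now created on demand in a hash list, any 32-bit table id is representable and the range check drops out of validation; only the group-to-table translation remains. A standalone sketch of that idea, with a hypothetical helper standing in for the driver's mlx5_flow_group_to_table():

#include <errno.h>
#include <stdbool.h>
#include <stdint.h>

/*
 * Hypothetical stand-in for the group-to-table translation; the real
 * rules live in mlx5_flow_group_to_table().  The point is only that
 * the result no longer has to fit a fixed-size table array.
 */
static int
example_group_to_table(bool transfer, uint32_t group, uint32_t *table)
{
	/* Guard against arithmetic overflow, not an array bound. */
	if (transfer && group == UINT32_MAX)
		return -EINVAL;
	*table = transfer ? group + 1 : group;
	return 0;
}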
if (attributes->priority != MLX5_FLOW_PRIO_RSVD &&
attributes->priority >= priority_max)
/**
* Get a flow table.
*
- * @param dev[in, out]
+ * @param[in, out] dev
* Pointer to rte_eth_dev structure.
* @param[in] table_id
* Table id to use.
struct mlx5_priv *priv = dev->data->dev_private;
struct mlx5_ibv_shared *sh = priv->sh;
struct mlx5_flow_tbl_resource *tbl;
+ union mlx5_flow_tbl_key table_key = {
+ {
+ .table_id = table_id,
+ .reserved = 0,
+ .domain = !!transfer,
+ .direction = !!egress,
+ }
+ };
+ struct mlx5_hlist_entry *pos;
+ struct mlx5_flow_tbl_data_entry *tbl_data;
#ifdef HAVE_MLX5DV_DR
- if (transfer) {
- tbl = &sh->fdb_tbl[table_id];
- if (!tbl->obj)
- tbl->obj = mlx5_glue->dr_create_flow_tbl
- (sh->fdb_domain, table_id);
- } else if (egress) {
- tbl = &sh->tx_tbl[table_id];
- if (!tbl->obj)
- tbl->obj = mlx5_glue->dr_create_flow_tbl
- (sh->tx_domain, table_id);
- } else {
- tbl = &sh->rx_tbl[table_id];
- if (!tbl->obj)
- tbl->obj = mlx5_glue->dr_create_flow_tbl
- (sh->rx_domain, table_id);
+ int ret;
+ void *domain;
+
+ pos = mlx5_hlist_lookup(sh->flow_tbls, table_key.v64);
+ if (pos) {
+ tbl_data = container_of(pos, struct mlx5_flow_tbl_data_entry,
+ entry);
+ tbl = &tbl_data->tbl;
+ if (!tbl->obj) {
+ rte_flow_error_set(error, ENOKEY,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ NULL, "cannot find created table");
+ return NULL;
+ }
+ rte_atomic32_inc(&tbl->refcnt);
+ return tbl;
}
+ tbl_data = rte_zmalloc(NULL, sizeof(*tbl_data), 0);
+ if (!tbl_data) {
+ rte_flow_error_set(error, ENOMEM,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ NULL,
+ "cannot allocate flow table data entry");
+ return NULL;
+ }
+ tbl = &tbl_data->tbl;
+ pos = &tbl_data->entry;
+ if (transfer)
+ domain = sh->fdb_domain;
+ else if (egress)
+ domain = sh->tx_domain;
+ else
+ domain = sh->rx_domain;
+ tbl->obj = mlx5_glue->dr_create_flow_tbl(domain, table_id);
if (!tbl->obj) {
rte_flow_error_set(error, ENOMEM,
RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
- NULL, "cannot create table");
+ NULL, "cannot create flow table object");
+ rte_free(tbl_data);
return NULL;
}
+ /*
+ * No multi-threading for now, but it is still better to initialize
+ * the reference count before inserting the entry into the hash list.
+ */
+ rte_atomic32_init(&tbl->refcnt);
+ pos->key = table_key.v64;
+ ret = mlx5_hlist_insert(sh->flow_tbls, pos);
+ if (ret < 0) {
+ rte_flow_error_set(error, -ret,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+ "cannot insert flow table data entry");
+ mlx5_glue->dr_destroy_flow_tbl(tbl->obj);
+ rte_free(tbl_data);
+ return NULL;
+ }
rte_atomic32_inc(&tbl->refcnt);
return tbl;
#else
- (void)error;
- (void)tbl;
- if (transfer)
- return &sh->fdb_tbl[table_id];
- else if (egress)
- return &sh->tx_tbl[table_id];
- else
- return &sh->rx_tbl[table_id];
+ /* Just to make compilation pass when HAVE_MLX5DV_DR is not defined. */
+ pos = mlx5_hlist_lookup(sh->flow_tbls, table_key.v64);
+ if (pos) {
+ tbl_data = container_of(pos, struct mlx5_flow_tbl_data_entry,
+ entry);
+ tbl = &tbl_data->tbl;
+ if (!tbl->obj) {
+ rte_flow_error_set(error, ENOKEY,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ NULL, "cannot find created table");
+ return NULL;
+ }
+ rte_atomic32_inc(&tbl->refcnt);
+ return tbl;
+ }
+ return NULL;
#endif
}
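A table is now addressed by a single 64-bit key that packs the table id together with the domain (FDB vs. NIC) and direction (egress vs. ingress) bits, so one hash list replaces the three per-domain arrays. A standalone model of how such a key behaves, with the field layout inferred from the initializers in this patch (the union name and the demo itself are illustrative):

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* Standalone model of the 64-bit table hash key. */
union example_tbl_key {
	struct {
		uint32_t table_id;  /* Lowest-addressed field of the key. */
		uint16_t reserved;  /* Kept zero so equal tuples compare equal. */
		uint8_t domain;     /* 1 = FDB (transfer), 0 = NIC. */
		uint8_t direction;  /* 1 = egress, 0 = ingress. */
	};
	uint64_t v64;               /* Whole key, fed to the hash list. */
};

int
main(void)
{
	union example_tbl_key key = {
		{
			.table_id = 5,
			.reserved = 0,
			.domain = 1,    /* transfer */
			.direction = 0, /* ingress */
		}
	};

	/* The same (table, domain, direction) tuple maps to the same key. */
	printf("key = 0x%016" PRIx64 "\n", key.v64);
	return 0;
}

The matcher release path further below rebuilds exactly this key from the group/egress/transfer fields cached in the matcher, so no back pointer to the table entry needs to be stored.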
/**
* Release a flow table.
*
+ * @param[in] dev
+ * Pointer to rte_eth_dev structure.
* @param[in] tbl
* Table resource to be released.
*
* Returns 0 if the table was released, else returns 1.
*/
static int
-flow_dv_tbl_resource_release(struct mlx5_flow_tbl_resource *tbl)
+flow_dv_tbl_resource_release(struct rte_eth_dev *dev,
+ struct mlx5_flow_tbl_resource *tbl)
{
+ struct mlx5_priv *priv = dev->data->dev_private;
+ struct mlx5_ibv_shared *sh = priv->sh;
+ struct mlx5_flow_tbl_data_entry *tbl_data =
+ container_of(tbl, struct mlx5_flow_tbl_data_entry, tbl);
+
if (!tbl)
return 0;
if (rte_atomic32_dec_and_test(&tbl->refcnt)) {
+ struct mlx5_hlist_entry *pos = &tbl_data->entry;
+
mlx5_glue->dr_destroy_flow_tbl(tbl->obj);
tbl->obj = NULL;
+ /* Remove the entry from the hash list and free the memory. */
+ mlx5_hlist_remove(sh->flow_tbls, pos);
+ rte_free(tbl_data);
return 0;
}
return 1;
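The release path no longer consults per-domain arrays: it recovers the enclosing hash-list entry from the embedded table resource with container_of() and frees the whole entry once the reference count drops to zero. A standalone sketch of that embed-and-recover pattern (types, names, and the plain int refcount are illustrative; the driver uses rte_atomic32_t and its own hash list):

#include <stddef.h>
#include <stdint.h>
#include <stdlib.h>

#ifndef container_of
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))
#endif

struct example_tbl {
	int refcnt;             /* rte_atomic32_t in the driver. */
};

struct example_entry {
	uint64_t key;           /* Hash-list key of the entry. */
	struct example_tbl tbl; /* Embedded resource handed out to callers. */
};

/* Returns 0 when the entry was freed, 1 while references remain. */
static int
example_release(struct example_tbl *tbl)
{
	struct example_entry *entry =
		container_of(tbl, struct example_entry, tbl);

	if (--tbl->refcnt == 0) {
		/* The driver also unlinks the entry from the hash list here. */
		free(entry);
		return 0;
	}
	return 1;
}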
/**
* Register the flow matcher.
*
- * @param dev[in, out]
+ * @param[in, out] dev
* Pointer to rte_eth_dev structure.
* @param[in, out] matcher
* Pointer to flow matcher.
if (!cache_matcher->matcher_object) {
rte_free(cache_matcher);
#ifdef HAVE_MLX5DV_DR
- flow_dv_tbl_resource_release(tbl);
+ flow_dv_tbl_resource_release(dev, tbl);
#endif
return rte_flow_error_set(error, ENOMEM,
RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
jump_tbl_resource.tbl = tbl;
if (flow_dv_jump_tbl_resource_register
(dev, &jump_tbl_resource, dev_flow, error)) {
- flow_dv_tbl_resource_release(tbl);
+ flow_dv_tbl_resource_release(dev, tbl);
return rte_flow_error_set
(error, errno,
RTE_FLOW_ERROR_TYPE_ACTION,
struct mlx5_flow_dv_matcher *matcher = flow->dv.matcher;
struct mlx5_priv *priv = dev->data->dev_private;
struct mlx5_ibv_shared *sh = priv->sh;
- struct mlx5_flow_tbl_resource *tbl;
+ struct mlx5_flow_tbl_data_entry *tbl_data;
assert(matcher->matcher_object);
DRV_LOG(DEBUG, "port %u matcher %p: refcnt %d--",
dev->data->port_id, (void *)matcher,
rte_atomic32_read(&matcher->refcnt));
if (rte_atomic32_dec_and_test(&matcher->refcnt)) {
+ struct mlx5_hlist_entry *pos;
+ union mlx5_flow_tbl_key table_key = {
+ {
+ .table_id = matcher->group,
+ .reserved = 0,
+ .domain = !!matcher->transfer,
+ .direction = !!matcher->egress,
+ }
+ };
claim_zero(mlx5_glue->dv_destroy_flow_matcher
(matcher->matcher_object));
LIST_REMOVE(matcher, next);
- if (matcher->egress)
- tbl = &sh->tx_tbl[matcher->group];
- else
- tbl = &sh->rx_tbl[matcher->group];
- flow_dv_tbl_resource_release(tbl);
+ pos = mlx5_hlist_lookup(sh->flow_tbls, table_key.v64);
+ if (pos) {
+ tbl_data = container_of(pos,
+ struct mlx5_flow_tbl_data_entry, entry);
+ flow_dv_tbl_resource_release(dev, &tbl_data->tbl);
+ }
rte_free(matcher);
DRV_LOG(DEBUG, "port %u matcher %p: removed",
dev->data->port_id, (void *)matcher);
/**
* Release a jump to table action resource.
*
+ * @param dev
+ * Pointer to Ethernet device.
* @param flow
* Pointer to mlx5_flow.
*
* 1 while a reference on it exists, 0 when freed.
*/
static int
-flow_dv_jump_tbl_resource_release(struct mlx5_flow *flow)
+flow_dv_jump_tbl_resource_release(struct rte_eth_dev *dev,
+ struct mlx5_flow *flow)
{
struct mlx5_flow_dv_jump_tbl_resource *cache_resource =
flow->dv.jump;
claim_zero(mlx5_glue->destroy_flow_action
(cache_resource->action));
LIST_REMOVE(cache_resource, next);
- flow_dv_tbl_resource_release(cache_resource->tbl);
+ flow_dv_tbl_resource_release(dev, cache_resource->tbl);
rte_free(cache_resource);
DRV_LOG(DEBUG, "jump table resource %p: removed",
(void *)cache_resource);
if (dev_flow->dv.modify_hdr)
flow_dv_modify_hdr_resource_release(dev_flow);
if (dev_flow->dv.jump)
- flow_dv_jump_tbl_resource_release(dev_flow);
+ flow_dv_jump_tbl_resource_release(dev, dev_flow);
if (dev_flow->dv.port_id_action)
flow_dv_port_id_action_resource_release(dev_flow);
if (dev_flow->dv.push_vlan_res)
claim_zero(mlx5_glue->dv_destroy_flow_matcher
(mtd->egress.any_matcher));
if (mtd->egress.tbl)
- claim_zero(flow_dv_tbl_resource_release(mtd->egress.tbl));
+ claim_zero(flow_dv_tbl_resource_release(dev,
+ mtd->egress.tbl));
if (mtd->ingress.color_matcher)
claim_zero(mlx5_glue->dv_destroy_flow_matcher
(mtd->ingress.color_matcher));
claim_zero(mlx5_glue->dv_destroy_flow_matcher
(mtd->ingress.any_matcher));
if (mtd->ingress.tbl)
- claim_zero(flow_dv_tbl_resource_release(mtd->ingress.tbl));
+ claim_zero(flow_dv_tbl_resource_release(dev,
+ mtd->ingress.tbl));
if (mtd->transfer.color_matcher)
claim_zero(mlx5_glue->dv_destroy_flow_matcher
(mtd->transfer.color_matcher));
claim_zero(mlx5_glue->dv_destroy_flow_matcher
(mtd->transfer.any_matcher));
if (mtd->transfer.tbl)
- claim_zero(flow_dv_tbl_resource_release(mtd->transfer.tbl));
+ claim_zero(flow_dv_tbl_resource_release(dev,
+ mtd->transfer.tbl));
if (mtd->drop_actn)
claim_zero(mlx5_glue->destroy_flow_action(mtd->drop_actn));
rte_free(mtd);
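For readers unfamiliar with the mlx5_hlist helpers this patch leans on, here is a standalone miniature with the same lookup/insert/remove surface. Everything below (names, the bucket scheme, the create helper) is illustrative; the real implementation lives in mlx5_utils.[ch] and differs in detail:

#include <errno.h>
#include <stdint.h>
#include <stdlib.h>

struct mini_entry {
	struct mini_entry *next;      /* Chain link within one bucket. */
	uint64_t key;                 /* 64-bit key, e.g. mlx5_flow_tbl_key.v64. */
};

struct mini_hlist {
	uint32_t mask;                /* Bucket count minus one (power of two). */
	struct mini_entry *buckets[]; /* Singly linked per-bucket chains. */
};

static struct mini_hlist *
mini_hlist_create(uint32_t size) /* size must be a power of two */
{
	struct mini_hlist *h;

	h = calloc(1, sizeof(*h) + size * sizeof(h->buckets[0]));
	if (h != NULL)
		h->mask = size - 1;
	return h;
}

static struct mini_entry *
mini_hlist_lookup(struct mini_hlist *h, uint64_t key)
{
	struct mini_entry *e;

	for (e = h->buckets[key & h->mask]; e != NULL; e = e->next)
		if (e->key == key)
			return e;
	return NULL;
}

static int
mini_hlist_insert(struct mini_hlist *h, struct mini_entry *e)
{
	/* Keys must be unique; the table code relies on that. */
	if (mini_hlist_lookup(h, e->key) != NULL)
		return -EEXIST;
	e->next = h->buckets[e->key & h->mask];
	h->buckets[e->key & h->mask] = e;
	return 0;
}

static void
mini_hlist_remove(struct mini_hlist *h, struct mini_entry *e)
{
	struct mini_entry **p = &h->buckets[e->key & h->mask];

	for (; *p != NULL; p = &(*p)->next)
		if (*p == e) {
			*p = e->next;
			return;
		}
}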