Currently, one flow has only one fate action, so the fate action
members in the flow struct can be reorganized as a union to shrink
the flow struct and save memory.

This commit reorganizes the fate actions as a union; the act_flags
bit-fields identify the fate action type when the flow is destroyed.
Signed-off-by: Suanming Mou <suanmingm@mellanox.com>
Acked-by: Viacheslav Ovsiienko <viacheslavo@mellanox.com>
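
As an illustration of the layout change, a minimal sketch of the idea
follows. The struct, macro, and helper names here (example_flow_handle,
EX_FLOW_ACTION_*, example_handle_destroy) are simplified assumptions for
this note, not the actual mlx5 definitions: the mutually exclusive fate
members overlap in one union slot, and the act_flags bits recorded at
flow creation tell the destroy path which member is valid.

    /* Minimal sketch only; names and values here are assumptions. */
    #include <stdint.h>

    #define EX_FLOW_ACTION_DROP    (1ULL << 0)
    #define EX_FLOW_ACTION_QUEUE   (1ULL << 1)
    #define EX_FLOW_ACTION_RSS     (1ULL << 2)
    #define EX_FLOW_ACTION_JUMP    (1ULL << 3)
    #define EX_FLOW_ACTION_PORT_ID (1ULL << 4)

    struct example_flow_handle {
    	uint64_t act_flags; /* Detected action bits, set at flow creation. */
    	union {             /* One fate action per flow, so members overlap. */
    		uint32_t hrxq;           /* Hash Rx queue object index. */
    		uint32_t jump;           /* Jump action resource index. */
    		uint32_t port_id_action; /* Port ID action resource index. */
    	};
    };

    /* On destroy, act_flags identifies which union member is valid. */
    static void
    example_handle_destroy(struct example_flow_handle *h)
    {
    	if (h->act_flags & EX_FLOW_ACTION_DROP) {
    		/* Drop queue is released by its own helper; clear the mark. */
    		h->hrxq = 0;
    	} else if (h->act_flags &
    		   (EX_FLOW_ACTION_QUEUE | EX_FLOW_ACTION_RSS)) {
    		/* release_hrxq(h->hrxq); -- hypothetical release helper. */
    		h->hrxq = 0;
    	} else if (h->act_flags & EX_FLOW_ACTION_JUMP) {
    		/* release_jump(h->jump); -- hypothetical release helper. */
    		h->jump = 0;
    	} else if (h->act_flags & EX_FLOW_ACTION_PORT_ID) {
    		/* release_port_id(h->port_id_action); -- hypothetical. */
    		h->port_id_action = 0;
    	}
    }

The saving comes from the overlap: previously hrxq, jump and
port_id_action each occupied a separate field in every handle, although
at most one of them is ever used per flow.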
/**< Index to encap/decap resource in cache. */
struct mlx5_flow_dv_modify_hdr_resource *modify_hdr;
/**< Pointer to modify header resource in cache. */
- uint32_t jump;
- /**< Index to the jump action resource. */
- uint32_t port_id_action;
- /**< Index to port ID action resource. */
struct mlx5_vf_vlan vf_vlan;
/**< Structure for VF VLAN workaround. */
uint32_t push_vlan_res;
uint64_t act_flags;
/**< Bit-fields of detected actions, see MLX5_FLOW_ACTION_*. */
void *ib_flow; /**< Verbs flow pointer. */
- uint32_t hrxq; /**< Hash Rx queue object index. */
struct mlx5_vf_vlan vf_vlan; /**< Structure for VF VLAN workaround. */
union {
uint32_t qrss_id; /**< Unique Q/RSS suffix subflow tag. */
uint32_t mtr_flow_id; /**< Unique meter match flow id. */
};
+ union {
+ uint32_t hrxq; /**< Hash Rx queue object index. */
+ uint32_t jump; /**< Index to the jump action resource. */
+ uint32_t port_id_action;
+ /**< Index to port ID action resource. */
+ };
#ifdef HAVE_IBV_FLOW_DV_SUPPORT
struct mlx5_flow_handle_dv dvh;
#endif
(void *)&tbl_data->jump, cnt);
}
rte_atomic32_inc(&tbl_data->jump.refcnt);
- dev_flow->handle->dvh.jump = tbl_data->idx;
+ dev_flow->handle->jump = tbl_data->idx;
dev_flow->dv.jump = &tbl_data->jump;
return 0;
}
(void *)cache_resource,
rte_atomic32_read(&cache_resource->refcnt));
rte_atomic32_inc(&cache_resource->refcnt);
- dev_flow->handle->dvh.port_id_action = idx;
+ dev_flow->handle->port_id_action = idx;
dev_flow->dv.port_id_action = cache_resource;
return 0;
}
}
/* Register new port id action resource. */
cache_resource = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_PORT_ID],
- &dev_flow->handle->dvh.port_id_action);
+ &dev_flow->handle->port_id_action);
if (!cache_resource)
return rte_flow_error_set(error, ENOMEM,
RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
rte_atomic32_init(&cache_resource->refcnt);
rte_atomic32_inc(&cache_resource->refcnt);
ILIST_INSERT(sh->ipool[MLX5_IPOOL_PORT_ID], &sh->port_id_action_list,
- dev_flow->handle->dvh.port_id_action, cache_resource,
- next);
+ dev_flow->handle->port_id_action, cache_resource, next);
dev_flow->dv.port_id_action = cache_resource;
DRV_LOG(DEBUG, "new port id action resource %p: refcnt %d++",
(void *)cache_resource,
if (flow_dv_port_id_action_resource_register
(dev, &port_id_resource, dev_flow, error))
return -rte_errno;
- MLX5_ASSERT(!handle->dvh.port_id_action);
+ MLX5_ASSERT(!handle->port_id_action);
dev_flow->dv.actions[actions_n++] =
dev_flow->dv.port_id_action->action;
action_flags |= MLX5_FLOW_ACTION_PORT_ID;
"cannot get drop hash queue");
goto error;
}
"cannot get drop hash queue");
goto error;
}
+ /*
+ * Drop queues will be released by the dedicated
+ * mlx5_hrxq_drop_release() function. Assign the
+ * special index to hrxq to mark that the queue
+ * has been allocated.
+ */
+ dh->hrxq = UINT32_MAX;
dv->actions[n++] = drop_hrxq->action;
}
} else if (dh->act_flags &
err = rte_errno; /* Save rte_errno before cleanup. */
SILIST_FOREACH(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW], flow->dev_handles,
handle_idx, dh, next) {
+ /* hrxq is a union; don't clear it if the flag is not set. */
- if (dh->act_flags & MLX5_FLOW_ACTION_DROP)
+ if (dh->act_flags & MLX5_FLOW_ACTION_DROP) {
mlx5_hrxq_drop_release(dev);
+ dh->hrxq = 0;
+ } else if (dh->act_flags &
+ (MLX5_FLOW_ACTION_QUEUE |
+ MLX5_FLOW_ACTION_RSS)) {
mlx5_hrxq_release(dev, dh->hrxq);
}
if (dh->vf_vlan.tag && dh->vf_vlan.created)
mlx5_vlan_vmwa_release(dev, &dh->vf_vlan);
}
struct mlx5_flow_tbl_data_entry *tbl_data;
tbl_data = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_JUMP],
if (!tbl_data)
return 0;
cache_resource = &tbl_data->jump;
{
struct mlx5_priv *priv = dev->data->dev_private;
struct mlx5_flow_dv_port_id_action_resource *cache_resource;
- uint32_t idx = handle->dvh.port_id_action;
+ uint32_t idx = handle->port_id_action;
cache_resource = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_PORT_ID],
idx);
claim_zero(mlx5_glue->dv_destroy_flow(dh->ib_flow));
dh->ib_flow = NULL;
}
+ /* hrxq is a union; touch it only when the corresponding flag is set. */
- if (dh->act_flags & MLX5_FLOW_ACTION_DROP)
+ if (dh->act_flags & MLX5_FLOW_ACTION_DROP) {
mlx5_hrxq_drop_release(dev);
+ dh->hrxq = 0;
+ } else if (dh->act_flags &
+ (MLX5_FLOW_ACTION_QUEUE |
+ MLX5_FLOW_ACTION_RSS)) {
mlx5_hrxq_release(dev, dh->hrxq);
}
if (dh->vf_vlan.tag && dh->vf_vlan.created)
mlx5_vlan_vmwa_release(dev, &dh->vf_vlan);
}
flow_dv_encap_decap_resource_release(dev, dev_handle);
if (dev_handle->dvh.modify_hdr)
flow_dv_modify_hdr_resource_release(dev_handle);
- if (dev_handle->dvh.jump)
+ if (dev_handle->act_flags & MLX5_FLOW_ACTION_JUMP)
flow_dv_jump_tbl_resource_release(dev, dev_handle);
- if (dev_handle->dvh.port_id_action)
+ if (dev_handle->act_flags & MLX5_FLOW_ACTION_PORT_ID)
flow_dv_port_id_action_resource_release(dev,
dev_handle);
if (dev_handle->dvh.push_vlan_res)
claim_zero(mlx5_glue->destroy_flow(handle->ib_flow));
handle->ib_flow = NULL;
}
+ /* hrxq is a union; touch it only when the corresponding flag is set. */
- if (handle->act_flags & MLX5_FLOW_ACTION_DROP)
+ if (handle->act_flags & MLX5_FLOW_ACTION_DROP) {
mlx5_hrxq_drop_release(dev);
+ handle->hrxq = 0;
+ } else if (handle->act_flags &
+ (MLX5_FLOW_ACTION_QUEUE |
+ MLX5_FLOW_ACTION_RSS)) {
mlx5_hrxq_release(dev, handle->hrxq);
}
if (handle->vf_vlan.tag && handle->vf_vlan.created)
mlx5_vlan_vmwa_release(dev, &handle->vf_vlan);
}
err = rte_errno; /* Save rte_errno before cleanup. */
SILIST_FOREACH(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW], flow->dev_handles,
dev_handles, handle, next) {
+ /* hrxq is a union; touch it only when the corresponding flag is set. */
- if (handle->act_flags & MLX5_FLOW_ACTION_DROP)
+ if (handle->act_flags & MLX5_FLOW_ACTION_DROP) {
mlx5_hrxq_drop_release(dev);
+ handle->hrxq = 0;
+ } else if (handle->act_flags &
+ (MLX5_FLOW_ACTION_QUEUE |
+ MLX5_FLOW_ACTION_RSS)) {
mlx5_hrxq_release(dev, handle->hrxq);
}
if (handle->vf_vlan.tag && handle->vf_vlan.created)
mlx5_vlan_vmwa_release(dev, &handle->vf_vlan);
}