struct mlx5_flow_handle *dev_handle)
{
struct mlx5_priv *priv = dev->data->dev_private;
- const int mark = dev_handle->mark;
const int tunnel = !!(dev_handle->layers & MLX5_FLOW_LAYER_TUNNEL);
struct mlx5_ind_table_obj *ind_tbl = NULL;
unsigned int i;
* this must always be enabled (metadata may arrive
* from another port, not from local flows only).
*/
- if (priv->config.dv_flow_en &&
- priv->config.dv_xmeta_en != MLX5_XMETA_MODE_LEGACY &&
- mlx5_flow_ext_mreg_supported(dev)) {
- rxq_ctrl->rxq.mark = 1;
- rxq_ctrl->flow_mark_n = 1;
- } else if (mark) {
- rxq_ctrl->rxq.mark = 1;
- rxq_ctrl->flow_mark_n++;
- }
if (tunnel) {
unsigned int j;
}
}
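+/**
+ * Set the Rx queue mark flag for all Rx queues of a port.
+ *
+ * The flag is set only once per port: the priv->mark_enabled state
+ * makes subsequent calls return immediately.
+ *
+ * @param dev
+ *   Pointer to the Ethernet device structure.
+ */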
+static void
+flow_rxq_mark_flag_set(struct rte_eth_dev *dev)
+{
+ struct mlx5_priv *priv = dev->data->dev_private;
+ struct mlx5_rxq_ctrl *rxq_ctrl;
+
+ if (priv->mark_enabled)
+ return;
+ LIST_FOREACH(rxq_ctrl, &priv->rxqsctrl, next) {
+ rxq_ctrl->rxq.mark = 1;
+ }
+ priv->mark_enabled = 1;
+}
+
/**
* Set the Rx queue flags (Mark/Flag and Tunnel Ptypes) for a flow
*
struct mlx5_priv *priv = dev->data->dev_private;
uint32_t handle_idx;
struct mlx5_flow_handle *dev_handle;
+ struct mlx5_flow_workspace *wks = mlx5_flow_get_thread_workspace();
+ MLX5_ASSERT(wks);
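+ /* The translation layer sets wks->mark when the flow uses MARK/FLAG
+  * actions, enable the mark flag on all Rx queues in that case.
+  */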
+ if (wks->mark)
+ flow_rxq_mark_flag_set(dev);
SILIST_FOREACH(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW], flow->dev_handles,
handle_idx, dev_handle, next)
flow_drv_rxq_flags_set(dev, dev_handle);
struct mlx5_flow_handle *dev_handle)
{
struct mlx5_priv *priv = dev->data->dev_private;
- const int mark = dev_handle->mark;
const int tunnel = !!(dev_handle->layers & MLX5_FLOW_LAYER_TUNNEL);
struct mlx5_ind_table_obj *ind_tbl = NULL;
unsigned int i;
MLX5_ASSERT(rxq_ctrl != NULL);
if (rxq_ctrl == NULL)
continue;
- if (priv->config.dv_flow_en &&
- priv->config.dv_xmeta_en != MLX5_XMETA_MODE_LEGACY &&
- mlx5_flow_ext_mreg_supported(dev)) {
- rxq_ctrl->rxq.mark = 1;
- rxq_ctrl->flow_mark_n = 1;
- } else if (mark) {
- rxq_ctrl->flow_mark_n--;
- rxq_ctrl->rxq.mark = !!rxq_ctrl->flow_mark_n;
- }
if (tunnel) {
unsigned int j;
if (rxq == NULL || rxq->ctrl == NULL)
continue;
- rxq->ctrl->flow_mark_n = 0;
rxq->ctrl->rxq.mark = 0;
for (j = 0; j != MLX5_FLOW_TUNNEL; ++j)
rxq->ctrl->flow_tunnels_n[j] = 0;
rxq->ctrl->rxq.tunnel = 0;
}
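+ /* Clear the per-port state so the mark flag can be enabled again. */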
+ priv->mark_enabled = 0;
}
/**
struct rte_flow_error *error)
{
struct mlx5_flow *dev_flow;
+ struct mlx5_flow_workspace *wks = mlx5_flow_get_thread_workspace();
dev_flow = flow_drv_prepare(dev, flow, attr, items, actions,
flow_split_info->flow_idx, error);
*/
if (flow_split_info->prefix_layers)
dev_flow->handle->layers = flow_split_info->prefix_layers;
- if (flow_split_info->prefix_mark)
- dev_flow->handle->mark = 1;
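+ /* A prefix subflow requested MARK, record it in the per-thread workspace. */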
+ if (flow_split_info->prefix_mark) {
+ MLX5_ASSERT(wks);
+ wks->mark = 1;
+ }
if (sub_flow)
*sub_flow = dev_flow;
#ifdef HAVE_IBV_FLOW_DV_SUPPORT
MLX5_FLOW_TABLE_LEVEL_METER;
flow_split_info->prefix_layers =
flow_get_prefix_layer_flags(dev_flow);
- flow_split_info->prefix_mark |= dev_flow->handle->mark;
+ flow_split_info->prefix_mark |= wks->mark;
flow_split_info->table_id = MLX5_MTR_TABLE_ID_SUFFIX;
}
/* Add the prefix subflow. */
struct mlx5_flow_dv_sample_resource *sample_res;
struct mlx5_flow_tbl_data_entry *sfx_tbl_data;
struct mlx5_flow_tbl_resource *sfx_tbl;
+ struct mlx5_flow_workspace *wks = mlx5_flow_get_thread_workspace();
#endif
size_t act_size;
size_t item_size;
}
flow_split_info->prefix_layers =
flow_get_prefix_layer_flags(dev_flow);
- flow_split_info->prefix_mark |= dev_flow->handle->mark;
+ MLX5_ASSERT(wks);
+ flow_split_info->prefix_mark |= wks->mark;
/* Suffix group level has already been scaled with the factor, set
* MLX5_SCALE_FLOW_GROUP_BIT of skip_scale to 1 to avoid scaling
* again in translation.
(((const struct rte_flow_action_mark *)
(sub_actions->conf))->id);
- dev_flow->handle->mark = 1;
+ wks->mark = 1;
pre_rix = dev_flow->handle->dvh.rix_tag;
/* Save the mark resource before sample */
pre_r = dev_flow->dv.tag_resource;
break;
case RTE_FLOW_ACTION_TYPE_FLAG:
action_flags |= MLX5_FLOW_ACTION_FLAG;
- dev_flow->handle->mark = 1;
+ wks->mark = 1;
if (dev_conf->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY) {
struct rte_flow_action_mark mark = {
.id = MLX5_FLOW_MARK_DEFAULT,
break;
case RTE_FLOW_ACTION_TYPE_MARK:
action_flags |= MLX5_FLOW_ACTION_MARK;
- dev_flow->handle->mark = 1;
+ wks->mark = 1;
if (dev_conf->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY) {
const struct rte_flow_action_mark *mark =
(const struct rte_flow_action_mark *)
(MLX5_MAX_MODIFY_NUM + 1)];
} mhdr_dummy;
struct mlx5_flow_dv_modify_hdr_resource *mhdr_res = &mhdr_dummy.res;
+ struct mlx5_flow_workspace *wks = mlx5_flow_get_thread_workspace();
+ MLX5_ASSERT(wks);
egress = (domain == MLX5_MTR_DOMAIN_EGRESS) ? 1 : 0;
transfer = (domain == MLX5_MTR_DOMAIN_TRANSFER) ? 1 : 0;
memset(&dh, 0, sizeof(struct mlx5_flow_handle));
NULL,
"cannot create policy "
"mark action for this color");
- dev_flow.handle->mark = 1;
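+ /* Record the mark action in the per-thread flow workspace. */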
+ wks->mark = 1;
if (flow_dv_tag_resource_register(dev, tag_be,
&dev_flow, &flow_err))
return -rte_mtr_error_set(error,
struct mlx5_meter_policy_action_container *act_cnt;
uint32_t domain = MLX5_MTR_DOMAIN_INGRESS;
uint16_t sub_policy_num;
+ struct mlx5_flow_workspace *wks = mlx5_flow_get_thread_workspace();
+ MLX5_ASSERT(wks);
rte_spinlock_lock(&mtr_policy->sl);
for (i = 0; i < MLX5_MTR_RTE_COLORS; i++) {
if (!rss_desc[i])
if (act_cnt->rix_mark || act_cnt->modify_hdr) {
memset(&dh, 0, sizeof(struct mlx5_flow_handle));
if (act_cnt->rix_mark)
- dh.mark = 1;
+ wks->mark = 1;
dh.fate_action = MLX5_FLOW_FATE_QUEUE;
dh.rix_hrxq = hrxq_idx[i];
flow_drv_rxq_flags_set(dev, &dh);