* DR supports drop action placeholder when it is supported;
* otherwise, use the queue drop action.
*/
- if (mlx5_flow_discover_dr_action_support(dev))
- priv->root_drop_action = priv->drop_queue.hrxq->action;
- else
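+ /* Probe DR drop action support only once per shared device context. */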
+ if (!priv->sh->drop_action_check_flag) {
+ if (!mlx5_flow_discover_dr_action_support(dev))
+ priv->sh->dr_drop_action_en = 1;
+ priv->sh->drop_action_check_flag = 1;
+ }
+ if (priv->sh->dr_drop_action_en)
priv->root_drop_action = priv->sh->dr_drop_action;
+ else
+ priv->root_drop_action = priv->drop_queue.hrxq->action;
#endif
}
priv->drop_queue.hrxq = mlx5_drop_action_create(eth_dev);
if (!priv->drop_queue.hrxq)
goto error;
- /* Supported Verbs flow priority number detection. */
- err = mlx5_flow_discover_priorities(eth_dev);
+ /* Port representor shares the same maximum priority with the PF port. */
+ if (!priv->sh->flow_priority_check_flag) {
+ /* Supported Verbs flow priority number detection. */
+ err = mlx5_flow_discover_priorities(eth_dev);
+ priv->sh->flow_max_priority = err;
+ priv->sh->flow_priority_check_flag = 1;
+ } else {
+ err = priv->sh->flow_max_priority;
+ }
if (err < 0) {
err = -err;
goto error;
}
- priv->config.flow_prio = err;
if (!priv->config.dv_esw_en &&
priv->config.dv_xmeta_en != MLX5_XMETA_MODE_LEGACY) {
DRV_LOG(WARNING, "metadata mode %u is not supported "
goto error;
rte_rwlock_init(&priv->ind_tbls_lock);
/* Query availability of metadata reg_c's. */
- err = mlx5_flow_discover_mreg_c(eth_dev);
- if (err < 0) {
- err = -err;
- goto error;
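+ /* reg_c availability is per device, not per port; discover it only once. */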
+ if (!priv->sh->metadata_regc_check_flag) {
+ err = mlx5_flow_discover_mreg_c(eth_dev);
+ if (err < 0) {
+ err = -err;
+ goto error;
+ }
}
if (!mlx5_flow_ext_mreg_supported(eth_dev)) {
DRV_LOG(DEBUG,
/* Rx queue count threshold to enable MPRQ. */
} mprq; /* Configurations for Multi-Packet RQ. */
int mps; /* Multi-packet send supported mode. */
- unsigned int flow_prio; /* Number of flow priorities. */
- enum modify_reg flow_mreg_c[MLX5_MREG_C_NUM];
- /* Availibility of mreg_c's. */
unsigned int tso_max_payload_sz; /* Maximum TCP payload for TSO. */
unsigned int ind_table_max_size; /* Maximum indirection table size. */
unsigned int max_dump_files_num; /* Maximum dump files per queue. */
uint32_t tunnel_header_0_1:1; /* tunnel_header_0_1 is supported. */
uint32_t misc5_cap:1; /* misc5 matcher parameter is supported. */
uint32_t reclaim_mode:1; /* Reclaim memory. */
+ uint32_t dr_drop_action_en:1; /* Use DR drop action. */
+ uint32_t drop_action_check_flag:1; /* Check flag for drop action. */
+ uint32_t flow_priority_check_flag:1; /* Check flag for flow priority. */
+ uint32_t metadata_regc_check_flag:1; /* Check flag for metadata reg_c's. */
uint32_t max_port; /* Maximal IB device port index. */
struct mlx5_bond_info bond; /* Bonding information. */
struct mlx5_common_device *cdev; /* Backend mlx5 device. */
struct mlx5_aso_ct_pools_mng *ct_mng;
/* Management data for ASO connection tracking. */
struct mlx5_lb_ctx self_lb; /* QP to enable self loopback for Devx. */
+ unsigned int flow_max_priority; /* Number of supported flow priorities. */
+ enum modify_reg flow_mreg_c[MLX5_MREG_C_NUM];
+ /* Availability of mreg_c's. */
struct mlx5_dev_shared_port port[]; /* per device port data array. */
};
return rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ITEM,
NULL, "invalid tag id");
- if (config->flow_mreg_c[id + start_reg - REG_C_0] == REG_NON)
+ if (priv->sh->flow_mreg_c[id + start_reg - REG_C_0] == REG_NON)
return rte_flow_error_set(error, ENOTSUP,
RTE_FLOW_ERROR_TYPE_ITEM,
NULL, "unsupported tag id");
* If the available index REG_C_y >= REG_C_x, skip the
* color register.
*/
- if (skip_mtr_reg && config->flow_mreg_c
+ if (skip_mtr_reg && priv->sh->flow_mreg_c
[id + start_reg - REG_C_0] >= priv->mtr_color_reg) {
if (id >= (uint32_t)(REG_C_7 - start_reg))
return rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ITEM,
NULL, "invalid tag id");
- if (config->flow_mreg_c
+ if (priv->sh->flow_mreg_c
[id + 1 + start_reg - REG_C_0] != REG_NON)
- return config->flow_mreg_c
+ return priv->sh->flow_mreg_c
[id + 1 + start_reg - REG_C_0];
return rte_flow_error_set(error, ENOTSUP,
RTE_FLOW_ERROR_TYPE_ITEM,
NULL, "unsupported tag id");
}
- return config->flow_mreg_c[id + start_reg - REG_C_0];
+ return priv->sh->flow_mreg_c[id + start_reg - REG_C_0];
}
MLX5_ASSERT(false);
return rte_flow_error_set(error, EINVAL,
mlx5_flow_ext_mreg_supported(struct rte_eth_dev *dev)
{
struct mlx5_priv *priv = dev->data->dev_private;
- struct mlx5_dev_config *config = &priv->config;
/*
* Having available reg_c can be regarded inclusively as supporting
* - reg_c's are preserved across different domain (FDB and NIC) on
* packet loopback by flow lookup miss.
*/
- return config->flow_mreg_c[2] != REG_NON;
+ return priv->sh->flow_mreg_c[2] != REG_NON;
}
/**
struct mlx5_priv *priv = dev->data->dev_private;
if (!attr->group && !attr->transfer)
- return priv->config.flow_prio - 2;
+ return priv->sh->flow_max_priority - 2;
return MLX5_NON_ROOT_FLOW_MAX_PRIO - 1;
}
if (!attr->group && !attr->transfer) {
if (attr->priority == MLX5_FLOW_LOWEST_PRIO_INDICATOR)
- priority = priv->config.flow_prio - 1;
+ priority = priv->sh->flow_max_priority - 1;
return mlx5_os_flow_adjust_priority(dev, priority, subpriority);
} else if (!external && attr->transfer && attr->group == 0 &&
attr->priority == MLX5_FLOW_LOWEST_PRIO_INDICATOR) {
- return (priv->config.flow_prio - 1) * 3;
+ return (priv->sh->flow_max_priority - 1) * 3;
}
if (attr->priority == MLX5_FLOW_LOWEST_PRIO_INDICATOR)
priority = MLX5_NON_ROOT_FLOW_MAX_PRIO;
struct rte_flow_error *error)
{
struct mlx5_priv *priv = dev->data->dev_private;
- uint32_t priority_max = priv->config.flow_prio - 1;
+ uint32_t priority_max = priv->sh->flow_max_priority - 1;
if (attributes->group)
return rte_flow_error_set(error, ENOTSUP,
mlx5_flow_discover_mreg_c(struct rte_eth_dev *dev)
{
struct mlx5_priv *priv = dev->data->dev_private;
- struct mlx5_dev_config *config = &priv->config;
enum modify_reg idx;
int n = 0;
/* reg_c[0] and reg_c[1] are reserved. */
- config->flow_mreg_c[n++] = REG_C_0;
- config->flow_mreg_c[n++] = REG_C_1;
+ priv->sh->flow_mreg_c[n++] = REG_C_0;
+ priv->sh->flow_mreg_c[n++] = REG_C_1;
/* Discover availability of other reg_c's. */
for (idx = REG_C_2; idx <= REG_C_7; ++idx) {
struct rte_flow_attr attr = {
struct rte_flow *flow;
struct rte_flow_error error;
- if (!config->dv_flow_en)
+ if (!priv->config.dv_flow_en)
break;
/* Create internal flow, validation skips copy action. */
flow_idx = flow_list_create(dev, MLX5_FLOW_TYPE_GEN, &attr,
flow_idx);
if (!flow)
continue;
- config->flow_mreg_c[n++] = idx;
+ priv->sh->flow_mreg_c[n++] = idx;
flow_list_destroy(dev, MLX5_FLOW_TYPE_GEN, flow_idx);
}
for (; n < MLX5_MREG_C_NUM; ++n)
- config->flow_mreg_c[n] = REG_NON;
+ priv->sh->flow_mreg_c[n] = REG_NON;
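+ /* Mark reg_c discovery done for the shared device context. */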
+ priv->sh->metadata_regc_check_flag = 1;
return 0;
}