fallback = true;
#else
fallback = false;
- if (!priv->config.devx || !priv->config.dv_flow_en ||
+ if (!sh->devx || !priv->config.dv_flow_en ||
!priv->config.hca_attr.flow_counters_dump ||
!(priv->config.hca_attr.flow_counter_bulk_alloc_bitmap & 0x4) ||
(mlx5_flow_dv_discover_counter_offset_support(dev) == -ENOTSUP))
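
The test above is the whole DevX-versus-fallback decision for flow counters: the patch swaps the per-port priv->config.devx copy for the device-wide sh->devx flag, while the remaining conditions still come from the port config. A compact standalone sketch of that decision, with stand-in types (dev_ctx_shared and port_config are illustrative, not the driver's real layouts):

    #include <stdbool.h>
    #include <stdint.h>

    /* Stand-ins for the driver's struct mlx5_dev_ctx_shared and
     * struct mlx5_dev_config; layouts here are illustrative only. */
    struct dev_ctx_shared {
        uint32_t devx:1; /* DevX context was opened for this device. */
    };

    struct port_config {
        unsigned int dv_flow_en:1; /* DV flow engine enabled. */
        unsigned int flow_counters_dump:1;
        uint8_t flow_counter_bulk_alloc_bitmap;
    };

    /* Use the slow per-counter fallback unless every DevX prerequisite
     * holds; the 0x4 bit advertises a minimal bulk allocation size. */
    bool
    counter_fallback(const struct dev_ctx_shared *sh,
                     const struct port_config *cfg)
    {
        return !sh->devx || !cfg->dv_flow_en ||
               !cfg->flow_counters_dump ||
               !(cfg->flow_counter_bulk_alloc_bitmap & 0x4);
    }
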
sh = mlx5_alloc_shared_dev_ctx(spawn, config);
if (!sh)
return NULL;
- config->devx = sh->devx;
#ifdef HAVE_MLX5DV_DR_ACTION_DEST_DEVX_TIR
config->dest_tir = 1;
#endif
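
Deleting config->devx = sh->devx here is the point of the patch: once every consumer reads sh->devx directly, the per-port copy taken right after mlx5_alloc_shared_dev_ctx() is dead. A sketch of the intended ownership, assuming the shared-context allocator is the one writer of the flag; open_devx_ctx() is a hypothetical stand-in:

    #include <stdint.h>
    #include <stdlib.h>

    struct dev_ctx_shared {
        uint32_t devx:1; /* Set once at allocation, read-only afterwards. */
    };

    /* Hypothetical stand-in for probing/opening a DevX context. */
    static int
    open_devx_ctx(void)
    {
        return 0; /* Pretend the DevX context opened fine. */
    }

    /* Single writer: the shared-context allocator decides DevX
     * availability once per device; ports only ever read sh->devx. */
    struct dev_ctx_shared *
    alloc_shared_dev_ctx(void)
    {
        struct dev_ctx_shared *sh = calloc(1, sizeof(*sh));

        if (sh == NULL)
            return NULL;
        sh->devx = (open_devx_ctx() == 0);
        return sh;
    }
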
DRV_LOG(INFO, "%sMPS is %s",
config->mps == MLX5_MPW_ENHANCED ? "enhanced " :
config->mps == MLX5_MPW ? "legacy " : "",
config->mps != MLX5_MPW_DISABLED ? "enabled" : "disabled");
- if (config->devx) {
+ if (sh->devx) {
err = mlx5_devx_cmd_query_hca_attr(sh->ctx, &config->hca_attr);
if (err) {
err = -err;
config->cqe_comp = 0;
}
if (config->cqe_comp_fmt == MLX5_CQE_RESP_FORMAT_FTAG_STRIDX &&
- (!config->devx || !config->hca_attr.mini_cqe_resp_flow_tag)) {
+ (!sh->devx || !config->hca_attr.mini_cqe_resp_flow_tag)) {
DRV_LOG(WARNING, "Flow Tag CQE compression"
" format isn't supported.");
config->cqe_comp = 0;
}
if (config->cqe_comp_fmt == MLX5_CQE_RESP_FORMAT_L34H_STRIDX &&
- (!config->devx || !config->hca_attr.mini_cqe_resp_l3_l4_tag)) {
+ (!sh->devx || !config->hca_attr.mini_cqe_resp_l3_l4_tag)) {
DRV_LOG(WARNING, "L3/L4 Header CQE compression"
" format isn't supported.");
config->cqe_comp = 0;
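
This check and the Flow Tag one just above share a pattern: a requested mini-CQE response format stays enabled only if DevX is up and the matching HCA capability bit is set, and an unsupported format downgrades to no CQE compression instead of failing the probe. A condensed sketch of the gating (enum and field names are abridged stand-ins):

    #include <stdbool.h>
    #include <stdio.h>

    enum cqe_resp_format {              /* Abridged stand-ins for the  */
        CQE_RESP_FORMAT_HASH,           /* MLX5_CQE_RESP_FORMAT_* set. */
        CQE_RESP_FORMAT_FTAG_STRIDX,
        CQE_RESP_FORMAT_L34H_STRIDX,
    };

    struct hca_caps {
        bool mini_cqe_resp_flow_tag;    /* Flow-tag mini-CQE format. */
        bool mini_cqe_resp_l3_l4_tag;   /* L3/L4 header mini-CQE format. */
    };

    /* An unsupported format means "run without CQE compression",
     * with a warning, rather than a failed device probe. */
    bool
    cqe_comp_supported(bool devx, const struct hca_caps *caps,
                       enum cqe_resp_format fmt)
    {
        if (fmt == CQE_RESP_FORMAT_FTAG_STRIDX &&
            (!devx || !caps->mini_cqe_resp_flow_tag)) {
            fprintf(stderr, "Flow Tag CQE compression format isn't supported.\n");
            return false;
        }
        if (fmt == CQE_RESP_FORMAT_L34H_STRIDX &&
            (!devx || !caps->mini_cqe_resp_l3_l4_tag)) {
            fprintf(stderr, "L3/L4 Header CQE compression format isn't supported.\n");
            return false;
        }
        return true;
    }
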
DRV_LOG(DEBUG, "Static WQE SQ feature is %ssupported (%d)",
config->hca_attr.static_sq_wq ? "" : "not ",
config->hca_attr.log_max_static_sq_wq);
DRV_LOG(DEBUG, "WQE rate PP mode is %ssupported",
config->hca_attr.qos.wqe_rate_pp ? "" : "not ");
- if (!config->devx) {
+ if (!sh->devx) {
DRV_LOG(ERR, "DevX is required for packet pacing");
err = ENODEV;
goto error;
goto error;
#endif
}
- if (config->devx) {
+ if (sh->devx) {
uint32_t reg[MLX5_ST_SZ_DW(register_mtutc)];
err = config->hca_attr.access_register_user ?
mlx5_devx_cmd_register_read
(sh->ctx, MLX5_REGISTER_ID_MTUTC, 0,
reg, MLX5_ST_SZ_DW(register_mtutc)) : ENOTSUP;
if (err)
goto error;
}
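
The MTUTC register read above is how the driver decides whether the device clock runs in real-time mode, and it is gated on sh->devx because it goes through a DevX register-read command that also needs the access_register_user capability. A rough standalone sketch; read_mtutc_time_stamp_mode() and the 1 GHz fallback heuristic are assumptions, not shown in this hunk:

    #include <stdbool.h>
    #include <stdint.h>

    #define MTUTC_TIMESTAMP_MODE_REAL_TIME 1 /* Illustrative value. */

    /* Hypothetical stand-in for the privileged MTUTC register read. */
    static int
    read_mtutc_time_stamp_mode(uint8_t *mode)
    {
        *mode = MTUTC_TIMESTAMP_MODE_REAL_TIME;
        return 0;
    }

    /* Prefer the MTUTC register; when the kernel forbids user register
     * reads (access_register_user unset), fall back to inferring the
     * real-time clock from the device frequency. */
    bool
    detect_rt_timestamp(bool can_read_register, uint32_t dev_freq_khz)
    {
        uint8_t mode;

        if (can_read_register && read_mtutc_time_stamp_mode(&mode) == 0)
            return mode == MTUTC_TIMESTAMP_MODE_REAL_TIME;
        return dev_freq_khz == 1000000; /* In kHz, i.e. a 1 GHz clock. */
    }
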
- if (config->devx && config->dv_flow_en && config->dest_tir) {
+ if (sh->devx && config->dv_flow_en && config->dest_tir) {
priv->obj_ops = devx_obj_ops;
priv->obj_ops.drop_action_create =
ibv_obj_ops.drop_action_create;
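
Selecting devx_obj_ops while keeping the Verbs drop_action_create is a one-member override of the vtable. A self-contained sketch of that selection (the stub ops and their return values are illustrative):

    /* Stand-in for the driver's Rx/Tx object ops vtable. */
    struct obj_ops {
        int (*drop_action_create)(void);
        int (*rq_create)(void);
    };

    static int verbs_drop_action_create(void) { return 0; }
    static int verbs_rq_create(void) { return 0; }
    static int devx_drop_action_create(void) { return -1; }
    static int devx_rq_create(void) { return 0; }

    static const struct obj_ops ibv_obj_ops = {
        .drop_action_create = verbs_drop_action_create,
        .rq_create = verbs_rq_create,
    };

    static const struct obj_ops devx_obj_ops = {
        .drop_action_create = devx_drop_action_create,
        .rq_create = devx_rq_create,
    };

    /* DevX vtable only when the device and flow engine allow it, and
     * even then the drop action is borrowed from the Verbs ops, as in
     * the hunk above. */
    struct obj_ops
    select_obj_ops(int devx, int dv_flow_en, int dest_tir)
    {
        struct obj_ops ops;

        if (devx && dv_flow_en && dest_tir) {
            ops = devx_obj_ops;
            ops.drop_action_create = ibv_obj_ops.drop_action_create;
        } else {
            ops = ibv_obj_ops;
        }
        return ops;
    }
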
unsigned int lacp_by_user:1;
/* Enable user to manage LACP traffic. */
unsigned int swp:3; /* Tx generic tunnel checksum and TSO offload. */
- unsigned int devx:1; /* Whether devx interface is available or not. */
unsigned int dest_tir:1; /* Whether advanced DR API is available. */
unsigned int reclaim_mode:2; /* Memory reclaim mode. */
unsigned int rt_timestamp:1; /* realtime timestamp format. */
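
With the bitfield removed here, the single source of truth is the flag in the shared device context, matching every sh->devx read in the hunks above. A condensed sketch of the resulting split, assuming the shared-context field carries the comment the per-port bit used to have:

    #include <stdint.h>

    /* After this patch: one flag per device, in the shared context. */
    struct dev_ctx_shared {
        uint32_t refcnt;
        uint32_t devx:1;             /* Whether devx interface is available. */
    };

    /* The per-port config keeps only genuinely per-port knobs. */
    struct dev_config {
        unsigned int lacp_by_user:1; /* Enable user to manage LACP traffic. */
        unsigned int swp:3;          /* Tx generic tunnel checksum and TSO offload. */
        unsigned int dest_tir:1;     /* Whether advanced DR API is available. */
        unsigned int reclaim_mode:2; /* Memory reclaim mode. */
        unsigned int rt_timestamp:1; /* Realtime timestamp format. */
    };
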
{
struct mlx5_priv *priv = dev->data->dev_private;
- if (!priv->config.devx)
+ if (!priv->sh->devx)
goto notsup_err;
if (action_flags & MLX5_FLOW_ACTION_COUNT)
return rte_flow_error_set(error, EINVAL,
struct mlx5_priv *priv = dev->data->dev_private;
const struct rte_flow_action_age *age = action->conf;
- if (!priv->config.devx || (priv->sh->cmng.counter_fallback &&
+ if (!priv->sh->devx || (priv->sh->cmng.counter_fallback &&
!priv->sh->aso_age_mng))
return rte_flow_error_set(error, ENOTSUP,
RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
return rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ACTION, action,
"ratio value starts from 1");
- if (!priv->config.devx || (sample->ratio > 0 && !priv->sampler_en))
+ if (!priv->sh->devx || (sample->ratio > 0 && !priv->sampler_en))
return rte_flow_error_set(error, ENOTSUP,
RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
NULL,
age ? MLX5_COUNTER_TYPE_AGE : MLX5_COUNTER_TYPE_ORIGIN;
uint32_t cnt_idx;
- if (!priv->config.devx) {
+ if (!priv->sh->devx) {
rte_errno = ENOTSUP;
return 0;
}
struct mlx5_aso_mtr_pool *pool;
uint32_t mtr_idx = 0;
- if (!priv->config.devx) {
+ if (!priv->sh->devx) {
rte_errno = ENOTSUP;
return 0;
}
uint32_t ct_idx;
MLX5_ASSERT(mng);
- if (!priv->config.devx) {
+ if (!priv->sh->devx) {
rte_errno = ENOTSUP;
return 0;
}
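
That makes three allocators in a row (flow counter, ASO meter, conntrack object) opening with the identical guard: without DevX, set rte_errno to ENOTSUP and return index 0, the driver's null handle. A sketch of the common shape; the devx_required() helper is hypothetical, not something this patch adds:

    #include <errno.h>
    #include <stdbool.h>
    #include <stdint.h>

    /* Stand-in for rte_errno. */
    static int sketch_errno;

    struct dev_ctx_shared {
        uint32_t devx:1;
    };

    /* Hypothetical helper naming the guard each allocator repeats:
     * DevX-backed objects cannot exist without a DevX context. */
    static bool
    devx_required(const struct dev_ctx_shared *sh)
    {
        if (!sh->devx) {
            sketch_errno = ENOTSUP;
            return false;
        }
        return true;
    }

    /* Allocators return an index; 0 doubles as the "none" value. */
    uint32_t
    counter_alloc(const struct dev_ctx_shared *sh)
    {
        if (!devx_required(sh))
            return 0;
        /* ... allocate from a DevX-backed object pool ... */
        return 1;
    }
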
}
break;
case RTE_FLOW_ACTION_TYPE_COUNT:
- if (!dev_conf->devx) {
+ if (!priv->sh->devx) {
return rte_flow_error_set
(error, ENOTSUP,
RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
struct mlx5_priv *priv = dev->data->dev_private;
struct rte_flow_query_count *qc = data;
- if (!priv->config.devx)
+ if (!priv->sh->devx)
return rte_flow_error_set(error, ENOTSUP,
RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
NULL,
uint64_t inn_pkts, inn_bytes;
int ret;
- if (!priv->config.devx)
+ if (!priv->sh->devx)
return -1;
ret = _flow_dv_query_count(dev, counter, &inn_pkts, &inn_bytes);
dev->data->port_id, strerror(rte_errno));
goto error;
}
- if ((priv->config.devx && priv->config.dv_flow_en &&
+ if ((priv->sh->devx && priv->config.dv_flow_en &&
priv->config.dest_tir) && priv->obj_ops.lb_dummy_queue_create) {
ret = priv->obj_ops.lb_dummy_queue_create(dev);
if (ret)
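
Device start creates the dummy loopback queue only when the port runs the DevX Rx path (DevX plus DV flow plus DevX TIR destinations) and the callback is actually wired; the Verbs path never needs it. A compact sketch of that conditional setup (struct and callback names are stand-ins):

    #include <stddef.h>

    struct port {
        int devx;
        int dv_flow_en;
        int dest_tir;
        int (*lb_dummy_queue_create)(struct port *);
    };

    /* Create the loopback dummy queue only on the DevX Rx path, and
     * only if the op is provided; otherwise start just proceeds. */
    int
    start_lb_dummy_queue(struct port *p)
    {
        if (p->devx && p->dv_flow_en && p->dest_tir &&
            p->lb_dummy_queue_create != NULL)
            return p->lb_dummy_queue_create(p);
        return 0;
    }
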
sh = mlx5_alloc_shared_dev_ctx(spawn, config);
if (!sh)
return NULL;
- config->devx = sh->devx;
/* Initialize the shutdown event in mlx5_dev_spawn to
* support mlx5_is_removed for Windows.
*/
DRV_LOG(WARNING, "Rx CQE compression isn't supported.");
config->cqe_comp = 0;
}
- if (config->devx) {
+ if (sh->devx) {
err = mlx5_devx_cmd_query_hca_attr(sh->ctx, &config->hca_attr);
if (err) {
err = -err;
(config->hw_vlan_strip ? "" : "not "));
config->hw_fcs_strip = config->hca_attr.scatter_fcs;
}
- if (config->devx) {
+ if (sh->devx) {
uint32_t reg[MLX5_ST_SZ_DW(register_mtutc)];
err = config->hca_attr.access_register_user ?
mlx5_devx_cmd_register_read
(sh->ctx, MLX5_REGISTER_ID_MTUTC, 0,
reg, MLX5_ST_SZ_DW(register_mtutc)) : ENOTSUP;
goto error;
}
}
- if (config->devx && config->dv_flow_en) {
+ if (sh->devx && config->dv_flow_en) {
priv->obj_ops = devx_obj_ops;
} else {
DRV_LOG(ERR, "Flow mode %u is not supported "