#define MLX5_ETH_DRIVER_NAME mlx5_eth
-/* Driver type key for new device global syntax. */
-#define MLX5_DRIVER_KEY "driver"
-
/* Device parameter to enable RX completion queue compression. */
#define MLX5_RXQ_CQE_COMP_EN "rxq_cqe_comp_en"
/* Device parameter to enable multi-packet send WQEs. */
#define MLX5_TXQ_MPW_EN "txq_mpw_en"
-/*
- * Device parameter to force doorbell register mapping
- * to non-cahed region eliminating the extra write memory barrier.
- */
-#define MLX5_TX_DB_NC "tx_db_nc"
-
/*
* Device parameter to include 2 dsegs in the title WQEBB.
* Deprecated, ignored.
/* Activate Netlink support in VF mode. */
#define MLX5_VF_NL_EN "vf_nl_en"
-/* Enable extending memsegs when creating a MR. */
-#define MLX5_MR_EXT_MEMSEG_EN "mr_ext_memseg_en"
-
/* Select port representors to instantiate. */
#define MLX5_REPRESENTOR "representor"
/* Flow memory reclaim mode. */
#define MLX5_RECLAIM_MEM "reclaim_mem_mode"
-/* The default memory allocator used in PMD. */
-#define MLX5_SYS_MEM_EN "sys_mem_en"
/* Decap will be used or not. */
#define MLX5_DECAP_EN "decap_en"
/* Device parameter to configure allow or prevent duplicate rules pattern. */
#define MLX5_ALLOW_DUPLICATE_PATTERN "allow_duplicate_pattern"
-/* Device parameter to configure implicit registration of mempool memory. */
-#define MLX5_MR_MEMPOOL_REG_EN "mr_mempool_reg_en"
-
/* Device parameter to configure the delay drop when creating Rxqs. */
#define MLX5_DELAY_DROP "delay_drop"
} else if (strcmp(MLX5_DV_ESW_EN, key) == 0) {
config->dv_esw_en = !!tmp;
} else if (strcmp(MLX5_DV_FLOW_EN, key) == 0) {
- config->dv_flow_en = !!tmp;
+ if (tmp > 2) {
+ DRV_LOG(ERR, "Invalid %s parameter.", key);
+ rte_errno = EINVAL;
+ return -rte_errno;
+ }
+ config->dv_flow_en = tmp;
} else if (strcmp(MLX5_DV_XMETA_EN, key) == 0) {
if (tmp != MLX5_XMETA_MODE_LEGACY &&
tmp != MLX5_XMETA_MODE_META16 &&
*
* @param sh
* Pointer to shared device context.
- * @param devargs
- * Device arguments structure.
+ * @param mkvlist
+ * Pointer to mlx5 kvargs control, can be NULL if there is no devargs.
* @param config
* Pointer to shared device configuration structure.
*
*/
static int
mlx5_shared_dev_ctx_args_config(struct mlx5_dev_ctx_shared *sh,
- struct rte_devargs *devargs,
+ struct mlx5_kvargs_ctrl *mkvlist,
struct mlx5_sh_config *config)
{
- struct rte_kvargs *kvlist;
+ const char **params = (const char *[]){
+ MLX5_TX_PP,
+ MLX5_TX_SKEW,
+ MLX5_L3_VXLAN_EN,
+ MLX5_VF_NL_EN,
+ MLX5_DV_ESW_EN,
+ MLX5_DV_FLOW_EN,
+ MLX5_DV_XMETA_EN,
+ MLX5_LACP_BY_USER,
+ MLX5_RECLAIM_MEM,
+ MLX5_DECAP_EN,
+ MLX5_ALLOW_DUPLICATE_PATTERN,
+ NULL,
+ };
int ret = 0;
/* Default configuration. */
config->dv_flow_en = 1;
config->decap_en = 1;
config->allow_duplicate_pattern = 1;
- /* Parse device parameters. */
- if (devargs != NULL) {
- kvlist = rte_kvargs_parse(devargs->args, NULL);
- if (kvlist == NULL) {
- DRV_LOG(ERR,
- "Failed to parse shared device arguments.");
- rte_errno = EINVAL;
- return -rte_errno;
- }
+ if (mkvlist != NULL) {
/* Process parameters. */
- ret = rte_kvargs_process(kvlist, NULL,
- mlx5_dev_args_check_handler, config);
- rte_kvargs_free(kvlist);
+ ret = mlx5_kvargs_process(mkvlist, params,
+ mlx5_dev_args_check_handler, config);
if (ret) {
DRV_LOG(ERR, "Failed to process device arguments: %s",
strerror(rte_errno));
DRV_LOG(DEBUG, "E-Switch DV flow is not supported.");
config->dv_esw_en = 0;
}
+ if (config->dv_esw_en && !config->dv_flow_en) {
+ DRV_LOG(DEBUG,
+ "E-Switch DV flow is supported only when DV flow is enabled.");
+ config->dv_esw_en = 0;
+ }
if (config->dv_miss_info && config->dv_esw_en)
config->dv_xmeta_en = MLX5_XMETA_MODE_META16;
if (!config->dv_esw_en &&
*
* @param[in] spawn
* Pointer to the device attributes (name, port, etc).
+ * @param mkvlist
+ * Pointer to mlx5 kvargs control, can be NULL if there is no devargs.
*
* @return
* Pointer to mlx5_dev_ctx_shared object on success,
* otherwise NULL and rte_errno is set.
*/
struct mlx5_dev_ctx_shared *
-mlx5_alloc_shared_dev_ctx(const struct mlx5_dev_spawn_data *spawn)
+mlx5_alloc_shared_dev_ctx(const struct mlx5_dev_spawn_data *spawn,
+ struct mlx5_kvargs_ctrl *mkvlist)
{
struct mlx5_dev_ctx_shared *sh;
int err = 0;
DRV_LOG(ERR, "Fail to configure device capabilities.");
goto error;
}
- err = mlx5_shared_dev_ctx_args_config(sh, sh->cdev->dev->devargs,
- &sh->config);
+ err = mlx5_shared_dev_ctx_args_config(sh, mkvlist, &sh->config);
if (err) {
DRV_LOG(ERR, "Failed to process device configure: %s",
strerror(rte_errno));
for (i = 0; i < sh->max_port; i++) {
sh->port[i].ih_port_id = RTE_MAX_ETHPORTS;
sh->port[i].devx_ih_port_id = RTE_MAX_ETHPORTS;
+ sh->port[i].nl_ih_port_id = RTE_MAX_ETHPORTS;
}
if (sh->cdev->config.devx) {
sh->td = mlx5_devx_cmd_create_td(sh->cdev->ctx);
mlx5_free_table_hash_list(struct mlx5_priv *priv)
{
struct mlx5_dev_ctx_shared *sh = priv->sh;
-
- if (!sh->flow_tbls)
+ struct mlx5_hlist **tbls = (priv->sh->config.dv_flow_en == 2) ?
+ &sh->groups : &sh->flow_tbls;
+ if (*tbls == NULL)
return;
- mlx5_hlist_destroy(sh->flow_tbls);
- sh->flow_tbls = NULL;
+ mlx5_hlist_destroy(*tbls);
+ *tbls = NULL;
}
+#if defined(HAVE_IBV_FLOW_DV_SUPPORT) || !defined(HAVE_INFINIBAND_VERBS_H)
+/**
+ * Allocate HW steering group hash list.
+ *
+ * @param[in] priv
+ * Pointer to the private device data structure.
+ *
+ * @return
+ * 0 on success, a positive errno value (ENOMEM) otherwise.
+ */
+static int
+mlx5_alloc_hw_group_hash_list(struct mlx5_priv *priv)
+{
+ int err = 0;
+ struct mlx5_dev_ctx_shared *sh = priv->sh;
+ char s[MLX5_NAME_SIZE];
+
+ MLX5_ASSERT(sh);
+ snprintf(s, sizeof(s), "%s_flow_groups", priv->sh->ibdev_name);
+ /* List name is "<ibdev>_flow_groups"; entries managed by flow_hw_grp_* callbacks. */
+ sh->groups = mlx5_hlist_create
+ (s, MLX5_FLOW_TABLE_HLIST_ARRAY_SIZE,
+ false, true, sh,
+ flow_hw_grp_create_cb,
+ flow_hw_grp_match_cb,
+ flow_hw_grp_remove_cb,
+ flow_hw_grp_clone_cb,
+ flow_hw_grp_clone_free_cb);
+ if (!sh->groups) {
+ DRV_LOG(ERR, "flow groups with hash creation failed.");
+ err = ENOMEM;
+ }
+ return err;
+}
+#endif
+
+
/**
* Initialize flow table hash list and create the root tables entry
* for each domain.
mlx5_alloc_table_hash_list(struct mlx5_priv *priv __rte_unused)
{
int err = 0;
+
/* Tables are only used in DV and DR modes. */
#if defined(HAVE_IBV_FLOW_DV_SUPPORT) || !defined(HAVE_INFINIBAND_VERBS_H)
struct mlx5_dev_ctx_shared *sh = priv->sh;
char s[MLX5_NAME_SIZE];
+ if (priv->sh->config.dv_flow_en == 2)
+ return mlx5_alloc_hw_group_hash_list(priv);
MLX5_ASSERT(sh);
snprintf(s, sizeof(s), "%s_flow_table", priv->sh->ibdev_name);
sh->flow_tbls = mlx5_hlist_create(s, MLX5_FLOW_TABLE_HLIST_ARRAY_SIZE,
/* Free the eCPRI flex parser resource. */
mlx5_flex_parser_ecpri_release(dev);
mlx5_flex_item_port_cleanup(dev);
+#if defined(HAVE_IBV_FLOW_DV_SUPPORT) || !defined(HAVE_INFINIBAND_VERBS_H)
+ flow_hw_resource_release(dev);
+#endif
if (priv->rxq_privs != NULL) {
/* XXX race condition if mlx5_rx_burst() is still running. */
rte_delay_us_sleep(1000);
if (ret)
DRV_LOG(WARNING, "port %u some Rx queue objects still remain",
dev->data->port_id);
+ ret = mlx5_ext_rxq_verify(dev);
+ if (ret)
+ DRV_LOG(WARNING, "Port %u some external RxQ still remain.",
+ dev->data->port_id);
ret = mlx5_rxq_verify(dev);
if (ret)
DRV_LOG(WARNING, "port %u some Rx queues still remain",
dev->data->port_id);
if (priv->hrxqs)
mlx5_list_destroy(priv->hrxqs);
+ mlx5_free(priv->ext_rxqs);
/*
* Free the shared context in last turn, because the cleanup
* routines above may use some shared fields, like
* 0 on success, a negative errno value otherwise and rte_errno is set.
*/
static int
-mlx5_args_check(const char *key, const char *val, void *opaque)
+mlx5_port_args_check_handler(const char *key, const char *val, void *opaque)
{
- struct mlx5_dev_config *config = opaque;
+ struct mlx5_port_config *config = opaque;
signed long tmp;
/* No-op, port representors are processed in mlx5_dev_spawn(). */
- if (!strcmp(MLX5_DRIVER_KEY, key) || !strcmp(MLX5_REPRESENTOR, key) ||
- !strcmp(MLX5_SYS_MEM_EN, key) || !strcmp(MLX5_TX_DB_NC, key) ||
- !strcmp(MLX5_MR_MEMPOOL_REG_EN, key) || !strcmp(MLX5_TX_PP, key) ||
- !strcmp(MLX5_MR_EXT_MEMSEG_EN, key) || !strcmp(MLX5_TX_SKEW, key) ||
- !strcmp(MLX5_RECLAIM_MEM, key) || !strcmp(MLX5_DECAP_EN, key) ||
- !strcmp(MLX5_ALLOW_DUPLICATE_PATTERN, key) ||
- !strcmp(MLX5_L3_VXLAN_EN, key) || !strcmp(MLX5_VF_NL_EN, key) ||
- !strcmp(MLX5_DV_ESW_EN, key) || !strcmp(MLX5_DV_FLOW_EN, key) ||
- !strcmp(MLX5_DV_XMETA_EN, key) || !strcmp(MLX5_LACP_BY_USER, key))
+ if (!strcmp(MLX5_REPRESENTOR, key))
return 0;
errno = 0;
tmp = strtol(val, NULL, 0);
config->max_dump_files_num = tmp;
} else if (strcmp(MLX5_LRO_TIMEOUT_USEC, key) == 0) {
config->lro_timeout = tmp;
- } else if (strcmp(RTE_DEVARGS_KEY_CLASS, key) == 0) {
- DRV_LOG(DEBUG, "class argument is %s.", val);
} else if (strcmp(MLX5_HP_BUF_SIZE, key) == 0) {
config->log_hp_size = tmp;
} else if (strcmp(MLX5_DELAY_DROP, key) == 0) {
config->std_delay_drop = !!(tmp & MLX5_DELAY_DROP_STANDARD);
config->hp_delay_drop = !!(tmp & MLX5_DELAY_DROP_HAIRPIN);
- } else {
- DRV_LOG(WARNING,
- "%s: unknown parameter, maybe it's for another class.",
- key);
}
return 0;
}
/**
- * Parse device parameters.
+ * Parse user port parameters and adjust them according to device capabilities.
*
+ * @param priv
+ * Pointer to shared device context.
+ * @param mkvlist
+ * Pointer to mlx5 kvargs control, can be NULL if there is no devargs.
* @param config
- * Pointer to device configuration structure.
- * @param devargs
- * Device arguments structure.
+ * Pointer to port configuration structure.
*
* @return
* 0 on success, a negative errno value otherwise and rte_errno is set.
*/
int
-mlx5_args(struct mlx5_dev_config *config, struct rte_devargs *devargs)
+mlx5_port_args_config(struct mlx5_priv *priv, struct mlx5_kvargs_ctrl *mkvlist,
+ struct mlx5_port_config *config)
{
- struct rte_kvargs *kvlist;
+ struct mlx5_hca_attr *hca_attr = &priv->sh->cdev->config.hca_attr;
+ struct mlx5_dev_cap *dev_cap = &priv->sh->dev_cap;
+ bool devx = priv->sh->cdev->config.devx;
+ const char **params = (const char *[]){
+ MLX5_RXQ_CQE_COMP_EN,
+ MLX5_RXQ_PKT_PAD_EN,
+ MLX5_RX_MPRQ_EN,
+ MLX5_RX_MPRQ_LOG_STRIDE_NUM,
+ MLX5_RX_MPRQ_LOG_STRIDE_SIZE,
+ MLX5_RX_MPRQ_MAX_MEMCPY_LEN,
+ MLX5_RXQS_MIN_MPRQ,
+ MLX5_TXQ_INLINE,
+ MLX5_TXQ_INLINE_MIN,
+ MLX5_TXQ_INLINE_MAX,
+ MLX5_TXQ_INLINE_MPW,
+ MLX5_TXQS_MIN_INLINE,
+ MLX5_TXQS_MAX_VEC,
+ MLX5_TXQ_MPW_EN,
+ MLX5_TXQ_MPW_HDR_DSEG_EN,
+ MLX5_TXQ_MAX_INLINE_LEN,
+ MLX5_TX_VEC_EN,
+ MLX5_RX_VEC_EN,
+ MLX5_REPRESENTOR,
+ MLX5_MAX_DUMP_FILES_NUM,
+ MLX5_LRO_TIMEOUT_USEC,
+ MLX5_HP_BUF_SIZE,
+ MLX5_DELAY_DROP,
+ NULL,
+ };
int ret = 0;
- if (devargs == NULL)
- return 0;
- /* Following UGLY cast is done to pass checkpatch. */
- kvlist = rte_kvargs_parse(devargs->args, NULL);
- if (kvlist == NULL) {
- rte_errno = EINVAL;
- return -rte_errno;
+ /* Default configuration. */
+ memset(config, 0, sizeof(*config));
+ config->mps = MLX5_ARG_UNSET;
+ config->cqe_comp = 1;
+ config->rx_vec_en = 1;
+ config->txq_inline_max = MLX5_ARG_UNSET;
+ config->txq_inline_min = MLX5_ARG_UNSET;
+ config->txq_inline_mpw = MLX5_ARG_UNSET;
+ config->txqs_inline = MLX5_ARG_UNSET;
+ config->mprq.max_memcpy_len = MLX5_MPRQ_MEMCPY_DEFAULT_LEN;
+ config->mprq.min_rxqs_num = MLX5_MPRQ_MIN_RXQS;
+ config->mprq.log_stride_num = MLX5_MPRQ_DEFAULT_LOG_STRIDE_NUM;
+ config->log_hp_size = MLX5_ARG_UNSET;
+ config->std_delay_drop = 0;
+ config->hp_delay_drop = 0;
+ if (mkvlist != NULL) {
+ /* Process parameters. */
+ ret = mlx5_kvargs_process(mkvlist, params,
+ mlx5_port_args_check_handler, config);
+ if (ret) {
+ DRV_LOG(ERR, "Failed to process port arguments: %s",
+ strerror(rte_errno));
+ return -rte_errno;
+ }
}
- /* Process parameters. */
- ret = rte_kvargs_process(kvlist, NULL, mlx5_args_check, config);
- if (ret) {
- rte_errno = EINVAL;
- ret = -rte_errno;
+ /* Adjust parameters according to device capabilities. */
+ if (config->hw_padding && !dev_cap->hw_padding) {
+ DRV_LOG(DEBUG, "Rx end alignment padding isn't supported.");
+ config->hw_padding = 0;
+ } else if (config->hw_padding) {
+ DRV_LOG(DEBUG, "Rx end alignment padding is enabled.");
+ }
+ /*
+ * MPW is disabled by default, while the Enhanced MPW is enabled
+ * by default.
+ */
+ if (config->mps == MLX5_ARG_UNSET)
+ config->mps = (dev_cap->mps == MLX5_MPW_ENHANCED) ?
+ MLX5_MPW_ENHANCED : MLX5_MPW_DISABLED;
+ else
+ config->mps = config->mps ? dev_cap->mps : MLX5_MPW_DISABLED;
+ DRV_LOG(INFO, "%sMPS is %s",
+ config->mps == MLX5_MPW_ENHANCED ? "enhanced " :
+ config->mps == MLX5_MPW ? "legacy " : "",
+ config->mps != MLX5_MPW_DISABLED ? "enabled" : "disabled");
+ /* LRO is supported only when DV flow enabled. */
+ if (dev_cap->lro_supported && !priv->sh->config.dv_flow_en)
+ dev_cap->lro_supported = 0;
+ if (dev_cap->lro_supported) {
+ /*
+ * If LRO timeout is not configured by application,
+ * use the minimal supported value.
+ */
+ if (!config->lro_timeout)
+ config->lro_timeout =
+ hca_attr->lro_timer_supported_periods[0];
+ DRV_LOG(DEBUG, "LRO session timeout set to %d usec.",
+ config->lro_timeout);
+ }
+ if (config->cqe_comp && !dev_cap->cqe_comp) {
+ DRV_LOG(WARNING, "Rx CQE 128B compression is not supported.");
+ config->cqe_comp = 0;
+ }
+ if (config->cqe_comp_fmt == MLX5_CQE_RESP_FORMAT_FTAG_STRIDX &&
+ (!devx || !hca_attr->mini_cqe_resp_flow_tag)) {
+ DRV_LOG(WARNING,
+ "Flow Tag CQE compression format isn't supported.");
+ config->cqe_comp = 0;
}
- rte_kvargs_free(kvlist);
- return ret;
+ if (config->cqe_comp_fmt == MLX5_CQE_RESP_FORMAT_L34H_STRIDX &&
+ (!devx || !hca_attr->mini_cqe_resp_l3_l4_tag)) {
+ DRV_LOG(WARNING,
+ "L3/L4 Header CQE compression format isn't supported.");
+ config->cqe_comp = 0;
+ }
+ DRV_LOG(DEBUG, "Rx CQE compression is %ssupported.",
+ config->cqe_comp ? "" : "not ");
+ if ((config->std_delay_drop || config->hp_delay_drop) &&
+ !dev_cap->rq_delay_drop_en) {
+ config->std_delay_drop = 0;
+ config->hp_delay_drop = 0;
+ DRV_LOG(WARNING, "dev_port-%u: Rxq delay drop isn't supported.",
+ priv->dev_port);
+ }
+ if (config->mprq.enabled && !priv->sh->dev_cap.mprq.enabled) {
+ DRV_LOG(WARNING, "Multi-Packet RQ isn't supported.");
+ config->mprq.enabled = 0;
+ }
+ if (config->max_dump_files_num == 0)
+ config->max_dump_files_num = 128;
+ /* Detect minimal data bytes to inline. */
+ mlx5_set_min_inline(priv);
+ DRV_LOG(DEBUG, "VLAN insertion in WQE is %ssupported.",
+ config->hw_vlan_insert ? "" : "not ");
+ DRV_LOG(DEBUG, "\"rxq_pkt_pad_en\" is %u.", config->hw_padding);
+ DRV_LOG(DEBUG, "\"rxq_cqe_comp_en\" is %u.", config->cqe_comp);
+ DRV_LOG(DEBUG, "\"cqe_comp_fmt\" is %u.", config->cqe_comp_fmt);
+ DRV_LOG(DEBUG, "\"rx_vec_en\" is %u.", config->rx_vec_en);
+ DRV_LOG(DEBUG, "Standard \"delay_drop\" is %u.",
+ config->std_delay_drop);
+ DRV_LOG(DEBUG, "Hairpin \"delay_drop\" is %u.", config->hp_delay_drop);
+ DRV_LOG(DEBUG, "\"max_dump_files_num\" is %u.",
+ config->max_dump_files_num);
+ DRV_LOG(DEBUG, "\"log_hp_size\" is %u.", config->log_hp_size);
+ DRV_LOG(DEBUG, "\"mprq_en\" is %u.", config->mprq.enabled);
+ DRV_LOG(DEBUG, "\"mprq_log_stride_num\" is %u.",
+ config->mprq.log_stride_num);
+ DRV_LOG(DEBUG, "\"mprq_log_stride_size\" is %u.",
+ config->mprq.log_stride_size);
+ DRV_LOG(DEBUG, "\"mprq_max_memcpy_len\" is %u.",
+ config->mprq.max_memcpy_len);
+ DRV_LOG(DEBUG, "\"rxqs_min_mprq\" is %u.", config->mprq.min_rxqs_num);
+ DRV_LOG(DEBUG, "\"lro_timeout_usec\" is %u.", config->lro_timeout);
+ DRV_LOG(DEBUG, "\"txq_mpw_en\" is %d.", config->mps);
+ DRV_LOG(DEBUG, "\"txqs_min_inline\" is %d.", config->txqs_inline);
+ DRV_LOG(DEBUG, "\"txq_inline_min\" is %d.", config->txq_inline_min);
+ DRV_LOG(DEBUG, "\"txq_inline_max\" is %d.", config->txq_inline_max);
+ DRV_LOG(DEBUG, "\"txq_inline_mpw\" is %d.", config->txq_inline_mpw);
+ return 0;
+}
+
+/**
+ * Print the key for device argument.
+ *
+ * It is a "dummy" handler whose whole purpose is to enable using
+ * mlx5_kvargs_process() function which set devargs as used.
+ *
+ * @param key
+ * Key argument.
+ * @param val
+ * Value associated with key, unused.
+ * @param opaque
+ * Unused, can be NULL.
+ *
+ * @return
+ * 0 on success, function cannot fail.
+ */
+static int
+mlx5_dummy_handler(const char *key, const char *val, void *opaque)
+{
+ DRV_LOG(DEBUG, "\tKey: \"%s\" is set as used.", key);
+ /* Only the key is logged; silence unused-parameter warnings. */
+ RTE_SET_USED(opaque);
+ RTE_SET_USED(val);
+ return 0;
+}
+
+/**
+ * Set requested devargs as used when device is already spawned.
+ *
+ * It is necessary since it is valid to ask probe again for existing device,
+ * if its devargs are not marked as used, mlx5_kvargs_validate() will fail.
+ *
+ * @param name
+ * Name of the existing device.
+ * @param port_id
+ * Port identifier of the device.
+ * @param mkvlist
+ * Pointer to mlx5 kvargs control to sign as used.
+ */
+void
+mlx5_port_args_set_used(const char *name, uint16_t port_id,
+ struct mlx5_kvargs_ctrl *mkvlist)
+{
+ /*
+ * NOTE(review): this list duplicates the params array in
+ * mlx5_port_args_config() and must stay in sync with it;
+ * consider sharing a single static array.
+ */
+ const char **params = (const char *[]){
+ MLX5_RXQ_CQE_COMP_EN,
+ MLX5_RXQ_PKT_PAD_EN,
+ MLX5_RX_MPRQ_EN,
+ MLX5_RX_MPRQ_LOG_STRIDE_NUM,
+ MLX5_RX_MPRQ_LOG_STRIDE_SIZE,
+ MLX5_RX_MPRQ_MAX_MEMCPY_LEN,
+ MLX5_RXQS_MIN_MPRQ,
+ MLX5_TXQ_INLINE,
+ MLX5_TXQ_INLINE_MIN,
+ MLX5_TXQ_INLINE_MAX,
+ MLX5_TXQ_INLINE_MPW,
+ MLX5_TXQS_MIN_INLINE,
+ MLX5_TXQS_MAX_VEC,
+ MLX5_TXQ_MPW_EN,
+ MLX5_TXQ_MPW_HDR_DSEG_EN,
+ MLX5_TXQ_MAX_INLINE_LEN,
+ MLX5_TX_VEC_EN,
+ MLX5_RX_VEC_EN,
+ MLX5_REPRESENTOR,
+ MLX5_MAX_DUMP_FILES_NUM,
+ MLX5_LRO_TIMEOUT_USEC,
+ MLX5_HP_BUF_SIZE,
+ MLX5_DELAY_DROP,
+ NULL,
+ };
+
+ /* Secondary process should not handle devargs. */
+ if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+ return;
+ MLX5_ASSERT(mkvlist != NULL);
+ DRV_LOG(DEBUG, "Ethernet device \"%s\" for port %u "
+ "already exists, set devargs as used:", name, port_id);
+ /* This function cannot fail with this handler. */
+ mlx5_kvargs_process(mkvlist, params, mlx5_dummy_handler, NULL);
}
/**
*
* @param cdev
* Pointer to mlx5 device structure.
+ * @param mkvlist
+ * Pointer to mlx5 kvargs control, can be NULL if there is no devargs.
*
* @return
* 0 on success, a negative errno value otherwise and rte_errno is set.
*/
int
-mlx5_probe_again_args_validate(struct mlx5_common_device *cdev)
+mlx5_probe_again_args_validate(struct mlx5_common_device *cdev,
+ struct mlx5_kvargs_ctrl *mkvlist)
{
struct mlx5_dev_ctx_shared *sh = NULL;
struct mlx5_sh_config *config;
* Creates a temporary IB context configure structure according to new
* devargs attached in probing again.
*/
- ret = mlx5_shared_dev_ctx_args_config(sh, sh->cdev->dev->devargs,
- config);
+ ret = mlx5_shared_dev_ctx_args_config(sh, mkvlist, config);
if (ret) {
DRV_LOG(ERR, "Failed to process device configure: %s",
strerror(rte_errno));
* - otherwise L2 mode (18 bytes) is assumed for ConnectX-4/4 Lx
* and none (0 bytes) for other NICs
*
- * @param spawn
- * Verbs device parameters (name, port, switch_info) to spawn.
- * @param config
- * Device configuration parameters.
+ * @param priv
+ * Pointer to the private device data structure.
*/
void
-mlx5_set_min_inline(struct mlx5_dev_spawn_data *spawn,
- struct mlx5_dev_config *config)
+mlx5_set_min_inline(struct mlx5_priv *priv)
{
- struct mlx5_hca_attr *hca_attr = &spawn->cdev->config.hca_attr;
+ struct mlx5_hca_attr *hca_attr = &priv->sh->cdev->config.hca_attr;
+ struct mlx5_port_config *config = &priv->config;
if (config->txq_inline_min != MLX5_ARG_UNSET) {
/* Application defines size of inlined data explicitly. */
- if (spawn->pci_dev != NULL) {
- switch (spawn->pci_dev->id.device_id) {
+ if (priv->pci_dev != NULL) {
+ switch (priv->pci_dev->id.device_id) {
case PCI_DEVICE_ID_MELLANOX_CONNECTX4:
case PCI_DEVICE_ID_MELLANOX_CONNECTX4VF:
if (config->txq_inline_min <
}
}
}
- if (spawn->pci_dev == NULL) {
+ if (priv->pci_dev == NULL) {
config->txq_inline_min = MLX5_INLINE_HSIZE_NONE;
goto exit;
}
* inline data size with DevX. Try PCI ID
* to determine old NICs.
*/
- switch (spawn->pci_dev->id.device_id) {
+ switch (priv->pci_dev->id.device_id) {
case PCI_DEVICE_ID_MELLANOX_CONNECTX4:
case PCI_DEVICE_ID_MELLANOX_CONNECTX4VF:
case PCI_DEVICE_ID_MELLANOX_CONNECTX4LX: