+ /* Default configuration. */
+ memset(config, 0, sizeof(*config));
+ config->mps = MLX5_ARG_UNSET;
+ config->cqe_comp = 1;
+ config->rx_vec_en = 1;
+ config->txq_inline_max = MLX5_ARG_UNSET;
+ config->txq_inline_min = MLX5_ARG_UNSET;
+ config->txq_inline_mpw = MLX5_ARG_UNSET;
+ config->txqs_inline = MLX5_ARG_UNSET;
+ config->mprq.max_memcpy_len = MLX5_MPRQ_MEMCPY_DEFAULT_LEN;
+ config->mprq.min_rxqs_num = MLX5_MPRQ_MIN_RXQS;
+ config->mprq.log_stride_num = MLX5_MPRQ_DEFAULT_LOG_STRIDE_NUM;
+ config->log_hp_size = MLX5_ARG_UNSET;
+ config->std_delay_drop = 0;
+ config->hp_delay_drop = 0;
+ if (mkvlist != NULL) {
+ /* Process parameters. */
+ ret = mlx5_kvargs_process(mkvlist, params,
+ mlx5_port_args_check_handler, config);
+ if (ret) {
+ DRV_LOG(ERR, "Failed to process port arguments: %s",
+ strerror(rte_errno));
+ return -rte_errno;
+ }
+ }
+ /* Adjust parameters according to device capabilities. */
+ if (config->hw_padding && !dev_cap->hw_padding) {
+ DRV_LOG(DEBUG, "Rx end alignment padding isn't supported.");
+ config->hw_padding = 0;
+ } else if (config->hw_padding) {
+ DRV_LOG(DEBUG, "Rx end alignment padding is enabled.");
+ }
+ /*
+ * MPW is disabled by default, while the Enhanced MPW is enabled
+ * by default.
+ */
+ if (config->mps == MLX5_ARG_UNSET)
+ config->mps = (dev_cap->mps == MLX5_MPW_ENHANCED) ?
+ MLX5_MPW_ENHANCED : MLX5_MPW_DISABLED;
+ else
+ /* User asked explicitly; cap the request by what the device supports. */
+ config->mps = config->mps ? dev_cap->mps : MLX5_MPW_DISABLED;
+ DRV_LOG(INFO, "%sMPS is %s",
+ config->mps == MLX5_MPW_ENHANCED ? "enhanced " :
+ config->mps == MLX5_MPW ? "legacy " : "",
+ config->mps != MLX5_MPW_DISABLED ? "enabled" : "disabled");
+ /* LRO is supported only when DV flow enabled. */
+ if (dev_cap->lro_supported && !priv->sh->config.dv_flow_en)
+ dev_cap->lro_supported = 0;
+ if (dev_cap->lro_supported) {
+ /*
+ * If LRO timeout is not configured by application,
+ * use the minimal supported value.
+ */
+ if (!config->lro_timeout)
+ config->lro_timeout =
+ hca_attr->lro_timer_supported_periods[0];
+ DRV_LOG(DEBUG, "LRO session timeout set to %d usec.",
+ config->lro_timeout);
+ }
+ if (config->cqe_comp && !dev_cap->cqe_comp) {
+ DRV_LOG(WARNING, "Rx CQE 128B compression is not supported.");
+ config->cqe_comp = 0;
+ }
+ /* Mini-CQE formats need DevX and the matching HCA capability bit. */
+ if (config->cqe_comp_fmt == MLX5_CQE_RESP_FORMAT_FTAG_STRIDX &&
+ (!devx || !hca_attr->mini_cqe_resp_flow_tag)) {
+ DRV_LOG(WARNING,
+ "Flow Tag CQE compression format isn't supported.");
+ config->cqe_comp = 0;
+ }
+ if (config->cqe_comp_fmt == MLX5_CQE_RESP_FORMAT_L34H_STRIDX &&
+ (!devx || !hca_attr->mini_cqe_resp_l3_l4_tag)) {
+ DRV_LOG(WARNING,
+ "L3/L4 Header CQE compression format isn't supported.");
+ config->cqe_comp = 0;
+ }
+ DRV_LOG(DEBUG, "Rx CQE compression is %ssupported.",
+ config->cqe_comp ? "" : "not ");
+ /* Delay drop needs RQ support; disable both standard and hairpin flavors. */
+ if ((config->std_delay_drop || config->hp_delay_drop) &&
+ !dev_cap->rq_delay_drop_en) {
+ config->std_delay_drop = 0;
+ config->hp_delay_drop = 0;
+ DRV_LOG(WARNING, "dev_port-%u: Rxq delay drop isn't supported.",
+ priv->dev_port);
+ }
+ if (config->mprq.enabled && !priv->sh->dev_cap.mprq.enabled) {
+ DRV_LOG(WARNING, "Multi-Packet RQ isn't supported.");
+ config->mprq.enabled = 0;
+ }
+ /* 0 means unset - fall back to the default of 128 dump files. */
+ if (config->max_dump_files_num == 0)
+ config->max_dump_files_num = 128;
+ /* Detect minimal data bytes to inline. */
+ mlx5_set_min_inline(priv);
+ DRV_LOG(DEBUG, "VLAN insertion in WQE is %ssupported.",
+ config->hw_vlan_insert ? "" : "not ");
+ DRV_LOG(DEBUG, "\"rxq_pkt_pad_en\" is %u.", config->hw_padding);
+ DRV_LOG(DEBUG, "\"rxq_cqe_comp_en\" is %u.", config->cqe_comp);
+ DRV_LOG(DEBUG, "\"cqe_comp_fmt\" is %u.", config->cqe_comp_fmt);
+ DRV_LOG(DEBUG, "\"rx_vec_en\" is %u.", config->rx_vec_en);
+ DRV_LOG(DEBUG, "Standard \"delay_drop\" is %u.",
+ config->std_delay_drop);
+ DRV_LOG(DEBUG, "Hairpin \"delay_drop\" is %u.", config->hp_delay_drop);
+ DRV_LOG(DEBUG, "\"max_dump_files_num\" is %u.",
+ config->max_dump_files_num);
+ DRV_LOG(DEBUG, "\"log_hp_size\" is %u.", config->log_hp_size);
+ DRV_LOG(DEBUG, "\"mprq_en\" is %u.", config->mprq.enabled);
+ DRV_LOG(DEBUG, "\"mprq_log_stride_num\" is %u.",
+ config->mprq.log_stride_num);
+ DRV_LOG(DEBUG, "\"mprq_log_stride_size\" is %u.",
+ config->mprq.log_stride_size);
+ DRV_LOG(DEBUG, "\"mprq_max_memcpy_len\" is %u.",
+ config->mprq.max_memcpy_len);
+ DRV_LOG(DEBUG, "\"rxqs_min_mprq\" is %u.", config->mprq.min_rxqs_num);
+ DRV_LOG(DEBUG, "\"lro_timeout_usec\" is %u.", config->lro_timeout);
+ DRV_LOG(DEBUG, "\"txq_mpw_en\" is %d.", config->mps);
+ DRV_LOG(DEBUG, "\"txqs_min_inline\" is %d.", config->txqs_inline);
+ DRV_LOG(DEBUG, "\"txq_inline_min\" is %d.", config->txq_inline_min);
+ DRV_LOG(DEBUG, "\"txq_inline_max\" is %d.", config->txq_inline_max);
+ DRV_LOG(DEBUG, "\"txq_inline_mpw\" is %d.", config->txq_inline_mpw);
+ return 0;
+}
+
+/**
+ * Print the key for device argument.
+ *
+ * It is a "dummy" handler whose whole purpose is to enable using the
+ * mlx5_kvargs_process() function, which marks each devarg it visits as
+ * used.
+ *
+ * @param key
+ * Key argument.
+ * @param val
+ * Value associated with key, unused.
+ * @param opaque
+ * Unused, can be NULL.
+ *
+ * @return
+ * 0 on success, function cannot fail.
+ */
+static int
+mlx5_dummy_handler(const char *key, const char *val, void *opaque)
+{
+ DRV_LOG(DEBUG, "\tKey: \"%s\" is set as used.", key);
+ RTE_SET_USED(opaque);
+ RTE_SET_USED(val);
+ return 0;
+}
+
+/**
+ * Set requested devargs as used when device is already spawned.
+ *
+ * It is necessary since it is valid to probe again an existing device;
+ * if its devargs are not marked as used, mlx5_kvargs_validate() will fail.
+ *
+ * @param name
+ * Name of the existing device.
+ * @param port_id
+ * Port identifier of the device.
+ * @param mkvlist
+ * Pointer to mlx5 kvargs control to sign as used.
+ */
+void
+mlx5_port_args_set_used(const char *name, uint16_t port_id,
+ struct mlx5_kvargs_ctrl *mkvlist)
+{
+ /*
+ * All per-port devargs keys, NULL-terminated.
+ * NOTE(review): presumably this list must stay in sync with the keys
+ * accepted by the port-args check handler - verify when adding keys.
+ */
+ const char **params = (const char *[]){
+ MLX5_RXQ_CQE_COMP_EN,
+ MLX5_RXQ_PKT_PAD_EN,
+ MLX5_RX_MPRQ_EN,
+ MLX5_RX_MPRQ_LOG_STRIDE_NUM,
+ MLX5_RX_MPRQ_LOG_STRIDE_SIZE,
+ MLX5_RX_MPRQ_MAX_MEMCPY_LEN,
+ MLX5_RXQS_MIN_MPRQ,
+ MLX5_TXQ_INLINE,
+ MLX5_TXQ_INLINE_MIN,
+ MLX5_TXQ_INLINE_MAX,
+ MLX5_TXQ_INLINE_MPW,
+ MLX5_TXQS_MIN_INLINE,
+ MLX5_TXQS_MAX_VEC,
+ MLX5_TXQ_MPW_EN,
+ MLX5_TXQ_MPW_HDR_DSEG_EN,
+ MLX5_TXQ_MAX_INLINE_LEN,
+ MLX5_TX_VEC_EN,
+ MLX5_RX_VEC_EN,
+ MLX5_REPRESENTOR,
+ MLX5_MAX_DUMP_FILES_NUM,
+ MLX5_LRO_TIMEOUT_USEC,
+ MLX5_HP_BUF_SIZE,
+ MLX5_DELAY_DROP,
+ NULL,
+ };
+
+ /* Secondary process should not handle devargs. */
+ if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+ return;
+ MLX5_ASSERT(mkvlist != NULL);
+ DRV_LOG(DEBUG, "Ethernet device \"%s\" for port %u "
+ "already exists, set devargs as used:", name, port_id);
+ /* This function cannot fail with this handler. */
+ mlx5_kvargs_process(mkvlist, params, mlx5_dummy_handler, NULL);
+}
+
+/**
+ * Check sibling device configurations when probing again.
+ *
+ * Sibling devices sharing an infiniband device context should have
+ * compatible configurations. This applies to representors and bonding
+ * devices.
+ *
+ * @param cdev
+ * Pointer to mlx5 device structure.
+ * @param mkvlist
+ * Pointer to mlx5 kvargs control, can be NULL if there are no devargs.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+int
+mlx5_probe_again_args_validate(struct mlx5_common_device *cdev,
+ struct mlx5_kvargs_ctrl *mkvlist)
+{
+ struct mlx5_dev_ctx_shared *sh = NULL;
+ struct mlx5_sh_config *config;
+ int ret;
+
+ /* Secondary process should not handle devargs. */
+ if (rte_eal_process_type() != RTE_PROC_PRIMARY)