X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=drivers%2Fnet%2Fmlx5%2Flinux%2Fmlx5_os.c;h=2e1606a7332f5e84c2ec51619bf8a1ad8c5fb075;hb=3a2f674b6aa867cb44a4a3ca93a30dafda97f700;hp=5d2c9b9c8bb100703ce8b748a737946213c2940f;hpb=87af0d1e1bcc15ca414060263091a0f880ad3a86;p=dpdk.git

diff --git a/drivers/net/mlx5/linux/mlx5_os.c b/drivers/net/mlx5/linux/mlx5_os.c
index 5d2c9b9c8b..2e1606a733 100644
--- a/drivers/net/mlx5/linux/mlx5_os.c
+++ b/drivers/net/mlx5/linux/mlx5_os.c
@@ -370,8 +370,7 @@ mlx5_os_capabilities_prepare(struct mlx5_dev_ctx_shared *sh)
         sh->dev_cap.txpp_en = 0;
 #endif
         /* Check for LRO support. */
-        if (sh->dev_cap.dest_tir && sh->dev_cap.dv_flow_en &&
-            hca_attr->lro_cap) {
+        if (mlx5_devx_obj_ops_en(sh) && hca_attr->lro_cap) {
                 /* TBD check tunnel lro caps. */
                 sh->dev_cap.lro_supported = 1;
                 DRV_LOG(DEBUG, "Device supports LRO.");
@@ -436,7 +435,7 @@ __mlx5_discovery_misc5_cap(struct mlx5_priv *priv)
         dv_attr.priority = 3;
 #ifdef HAVE_MLX5DV_DR_ESWITCH
         void *misc2_m;
-        if (priv->config.dv_esw_en) {
+        if (priv->sh->config.dv_esw_en) {
                 /* FDB enabled reg_c_0 */
                 dv_attr.match_criteria_enable |=
                                 (1 << MLX5_MATCH_CRITERIA_ENABLE_MISC2_BIT);
@@ -483,6 +482,8 @@ mlx5_alloc_shared_dr(struct mlx5_priv *priv)
         err = mlx5_alloc_table_hash_list(priv);
         if (err)
                 goto error;
+        if (priv->sh->config.dv_flow_en == 2)
+                return 0;
         /* The resources below are only valid with DV support. */
 #ifdef HAVE_IBV_FLOW_DV_SUPPORT
         /* Init port id action list. */
@@ -557,7 +558,7 @@ mlx5_alloc_shared_dr(struct mlx5_priv *priv)
         }
         sh->tx_domain = domain;
 #ifdef HAVE_MLX5DV_DR_ESWITCH
-        if (priv->config.dv_esw_en) {
+        if (sh->config.dv_esw_en) {
                 domain = mlx5_glue->dr_create_domain(sh->cdev->ctx,
                                                      MLX5DV_DR_DOMAIN_TYPE_FDB);
                 if (!domain) {
@@ -579,20 +580,20 @@ mlx5_alloc_shared_dr(struct mlx5_priv *priv)
                 goto error;
         }
 #endif
-        if (!sh->tunnel_hub && priv->config.dv_miss_info)
+        if (!sh->tunnel_hub && sh->config.dv_miss_info)
                 err = mlx5_alloc_tunnel_hub(sh);
         if (err) {
                 DRV_LOG(ERR, "mlx5_alloc_tunnel_hub failed err=%d", err);
                 goto error;
         }
-        if (priv->config.reclaim_mode == MLX5_RCM_AGGR) {
+        if (sh->config.reclaim_mode == MLX5_RCM_AGGR) {
                 mlx5_glue->dr_reclaim_domain_memory(sh->rx_domain, 1);
                 mlx5_glue->dr_reclaim_domain_memory(sh->tx_domain, 1);
                 if (sh->fdb_domain)
                         mlx5_glue->dr_reclaim_domain_memory(sh->fdb_domain, 1);
         }
         sh->pop_vlan_action = mlx5_glue->dr_create_flow_action_pop_vlan();
-        if (!priv->config.allow_duplicate_pattern) {
+        if (!sh->config.allow_duplicate_pattern) {
 #ifndef HAVE_MLX5_DR_ALLOW_DUPLICATE
                 DRV_LOG(WARNING, "Disallow duplicate pattern is not supported - maybe old rdma-core version?");
 #endif
@@ -859,7 +860,7 @@ mlx5_flow_drop_action_config(struct rte_eth_dev *dev __rte_unused)
 #ifdef HAVE_MLX5DV_DR
         struct mlx5_priv *priv = dev->data->dev_private;
 
-        if (!priv->config.dv_flow_en || !priv->sh->dr_drop_action)
+        if (!priv->sh->config.dv_flow_en || !priv->sh->dr_drop_action)
                 return;
         /**
          * DR supports drop action placeholder when it is supported;
@@ -1000,10 +1001,10 @@ mlx5_representor_match(struct mlx5_dev_spawn_data *spawn,
  *   Backing DPDK device.
  * @param spawn
  *   Verbs device parameters (name, port, switch_info) to spawn.
- * @param config
- *   Device configuration parameters.
  * @param eth_da
  *   Device arguments.
+ * @param mkvlist
+ *   Pointer to mlx5 kvargs control, can be NULL if there is no devargs.
  *
  * @return
  *   A valid Ethernet device object on success, NULL otherwise and rte_errno
@@ -1015,12 +1016,11 @@ mlx5_representor_match(struct mlx5_dev_spawn_data *spawn,
 static struct rte_eth_dev *
 mlx5_dev_spawn(struct rte_device *dpdk_dev,
                struct mlx5_dev_spawn_data *spawn,
-               struct mlx5_dev_config *config,
-               struct rte_eth_devargs *eth_da)
+               struct rte_eth_devargs *eth_da,
+               struct mlx5_kvargs_ctrl *mkvlist)
 {
         const struct mlx5_switch_info *switch_info = &spawn->info;
         struct mlx5_dev_ctx_shared *sh = NULL;
-        struct mlx5_hca_attr *hca_attr = &spawn->cdev->config.hca_attr;
         struct ibv_port_attr port_attr = { .state = IBV_PORT_NOP };
         struct rte_eth_dev *eth_dev = NULL;
         struct mlx5_priv *priv = NULL;
@@ -1030,7 +1030,7 @@ mlx5_dev_spawn(struct rte_device *dpdk_dev,
         int own_domain_id = 0;
         uint16_t port_id;
         struct mlx5_port_info vport_info = { .query_flags = 0 };
-        int nl_rdma = -1;
+        int nl_rdma;
         int i;
 
         /* Determine if this port representor is supposed to be spawned. */
@@ -1067,6 +1067,12 @@ mlx5_dev_spawn(struct rte_device *dpdk_dev,
                 DRV_LOG(WARNING, "device name overflow %s", name);
         /* check if the device is already spawned */
         if (rte_eth_dev_get_port_by_name(name, &port_id) == 0) {
+                /*
+                 * When device is already spawned, its devargs should be set
+                 * as used. otherwise, mlx5_kvargs_validate() will fail.
+                 */
+                if (mkvlist)
+                        mlx5_port_args_set_used(name, port_id, mkvlist);
                 rte_errno = EEXIST;
                 return NULL;
         }
@@ -1108,38 +1114,9 @@ err_secondary:
                 mlx5_dev_close(eth_dev);
                 return NULL;
         }
-        /* Process parameters. */
-        err = mlx5_args(config, dpdk_dev->devargs);
-        if (err) {
-                DRV_LOG(ERR, "failed to process device arguments: %s",
-                        strerror(rte_errno));
-                return NULL;
-        }
-        sh = mlx5_alloc_shared_dev_ctx(spawn, config);
+        sh = mlx5_alloc_shared_dev_ctx(spawn, mkvlist);
         if (!sh)
                 return NULL;
-        /* Update final values for devargs before check sibling config. */
-        if (config->dv_flow_en && !sh->dev_cap.dv_flow_en) {
-                DRV_LOG(WARNING, "DV flow is not supported.");
-                config->dv_flow_en = 0;
-        }
-        if (config->dv_esw_en && !sh->dev_cap.dv_esw_en) {
-                DRV_LOG(WARNING, "E-Switch DV flow is not supported.");
-                config->dv_esw_en = 0;
-        }
-        if (config->dv_miss_info && config->dv_esw_en)
-                config->dv_xmeta_en = MLX5_XMETA_MODE_META16;
-        if (!config->dv_esw_en &&
-            config->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY) {
-                DRV_LOG(WARNING,
-                        "Metadata mode %u is not supported (no E-Switch).",
-                        config->dv_xmeta_en);
-                config->dv_xmeta_en = MLX5_XMETA_MODE_LEGACY;
-        }
-        /* Check sibling device configurations. */
-        err = mlx5_dev_check_sibling_config(sh, config, dpdk_dev);
-        if (err)
-                goto error;
         nl_rdma = mlx5_nl_init(NETLINK_RDMA);
         /* Check port status. */
         if (spawn->phys_port <= UINT8_MAX) {
@@ -1292,46 +1269,15 @@ err_secondary:
                 DRV_LOG(DEBUG, "dev_port-%u new domain_id=%u\n",
                         priv->dev_port, priv->domain_id);
         }
-        if (config->hw_padding && !sh->dev_cap.hw_padding) {
-                DRV_LOG(DEBUG, "Rx end alignment padding isn't supported");
-                config->hw_padding = 0;
-        } else if (config->hw_padding) {
-                DRV_LOG(DEBUG, "Rx end alignment padding is enabled");
-        }
-        /*
-         * MPW is disabled by default, while the Enhanced MPW is enabled
-         * by default.
-         */
-        if (config->mps == MLX5_ARG_UNSET)
-                config->mps = (sh->dev_cap.mps == MLX5_MPW_ENHANCED) ?
-                              MLX5_MPW_ENHANCED : MLX5_MPW_DISABLED;
-        else
-                config->mps = config->mps ? sh->dev_cap.mps : MLX5_MPW_DISABLED;
-        DRV_LOG(INFO, "%sMPS is %s",
-                config->mps == MLX5_MPW_ENHANCED ? "enhanced " :
-                config->mps == MLX5_MPW ? "legacy " : "",
-                config->mps != MLX5_MPW_DISABLED ? "enabled" : "disabled");
         if (sh->cdev->config.devx) {
+                struct mlx5_hca_attr *hca_attr = &sh->cdev->config.hca_attr;
+
                 sh->steering_format_version = hca_attr->steering_format_version;
-                /* LRO is supported only when DV flow enabled. */
-                if (sh->dev_cap.lro_supported && config->dv_flow_en)
-                        sh->dev_cap.lro_supported = 0;
-                if (sh->dev_cap.lro_supported) {
-                        /*
-                         * If LRO timeout is not configured by application,
-                         * use the minimal supported value.
-                         */
-                        if (!config->lro_timeout)
-                                config->lro_timeout =
-                                       hca_attr->lro_timer_supported_periods[0];
-                        DRV_LOG(DEBUG, "LRO session timeout set to %d usec",
-                                config->lro_timeout);
-                }
 #if defined(HAVE_MLX5DV_DR) && \
         (defined(HAVE_MLX5_DR_CREATE_ACTION_FLOW_METER) || \
          defined(HAVE_MLX5_DR_CREATE_ACTION_ASO))
                 if (hca_attr->qos.sup && hca_attr->qos.flow_meter_old &&
-                    config->dv_flow_en) {
+                    sh->config.dv_flow_en) {
                         uint8_t reg_c_mask = hca_attr->qos.flow_meter_reg_c_ids;
                         /*
                          * Meter needs two REG_C's for color match and pre-sfx
@@ -1405,7 +1351,7 @@ err_secondary:
 #endif /* HAVE_MLX5_DR_CREATE_ACTION_ASO && HAVE_MLX5_DR_ACTION_ASO_CT */
 #if defined(HAVE_MLX5DV_DR) && defined(HAVE_MLX5_DR_CREATE_ACTION_FLOW_SAMPLE)
                 if (hca_attr->log_max_ft_sampler_num > 0 &&
-                    config->dv_flow_en) {
+                    sh->config.dv_flow_en) {
                         priv->sampler_en = 1;
                         DRV_LOG(DEBUG, "Sampler enabled!");
                 } else {
@@ -1418,55 +1364,14 @@ err_secondary:
                 }
 #endif
         }
-        if (config->cqe_comp && !sh->dev_cap.cqe_comp) {
-                DRV_LOG(WARNING, "Rx CQE 128B compression is not supported.");
-                config->cqe_comp = 0;
-        }
-        if (config->cqe_comp_fmt == MLX5_CQE_RESP_FORMAT_FTAG_STRIDX &&
-            (!sh->cdev->config.devx || !hca_attr->mini_cqe_resp_flow_tag)) {
-                DRV_LOG(WARNING, "Flow Tag CQE compression"
-                                 " format isn't supported.");
-                config->cqe_comp = 0;
-        }
-        if (config->cqe_comp_fmt == MLX5_CQE_RESP_FORMAT_L34H_STRIDX &&
-            (!sh->cdev->config.devx || !hca_attr->mini_cqe_resp_l3_l4_tag)) {
-                DRV_LOG(WARNING, "L3/L4 Header CQE compression"
-                                 " format isn't supported.");
-                config->cqe_comp = 0;
-        }
-        DRV_LOG(DEBUG, "Rx CQE compression is %ssupported",
-                config->cqe_comp ? "" : "not ");
-        if (config->tx_pp && !sh->dev_cap.txpp_en) {
-                DRV_LOG(ERR, "Packet pacing is not supported.");
-                err = ENODEV;
+        /* Process parameters and store port configuration on priv structure. */
+        err = mlx5_port_args_config(priv, mkvlist, &priv->config);
+        if (err) {
+                err = rte_errno;
+                DRV_LOG(ERR, "Failed to process port configure: %s",
+                        strerror(rte_errno));
                 goto error;
         }
-        if (config->std_delay_drop || config->hp_delay_drop) {
-                if (!hca_attr->rq_delay_drop) {
-                        config->std_delay_drop = 0;
-                        config->hp_delay_drop = 0;
-                        DRV_LOG(WARNING,
-                                "dev_port-%u: Rxq delay drop is not supported",
-                                priv->dev_port);
-                }
-        }
-        /*
-         * If HW has bug working with tunnel packet decapsulation and
-         * scatter FCS, and decapsulation is needed, clear the hw_fcs_strip
-         * bit. Then RTE_ETH_RX_OFFLOAD_KEEP_CRC bit will not be set anymore.
-         */
-        if (sh->dev_cap.scatter_fcs_w_decap_disable && config->decap_en)
-                config->hw_fcs_strip = 0;
-        else
-                config->hw_fcs_strip = sh->dev_cap.hw_fcs_strip;
-        DRV_LOG(DEBUG, "FCS stripping configuration is %ssupported",
-                (config->hw_fcs_strip ? "" : "not "));
-        if (config->mprq.enabled && !sh->dev_cap.mprq.enabled) {
-                DRV_LOG(WARNING, "Multi-Packet RQ isn't supported.");
-                config->mprq.enabled = 0;
-        }
-        if (config->max_dump_files_num == 0)
-                config->max_dump_files_num = 128;
         eth_dev = rte_eth_dev_allocate(name);
         if (eth_dev == NULL) {
                 DRV_LOG(ERR, "can not allocate rte ethdev");
@@ -1546,7 +1451,7 @@ err_secondary:
         eth_dev->rx_queue_count = mlx5_rx_queue_count;
         /* Register MAC address. */
         claim_zero(mlx5_mac_addr_add(eth_dev, &mac, 0, 0));
-        if (sh->dev_cap.vf && config->vf_nl_en)
+        if (sh->dev_cap.vf && sh->config.vf_nl_en)
                 mlx5_nl_mac_addr_sync(priv->nl_socket_route,
                                       mlx5_ifindex(eth_dev),
                                       eth_dev->data->mac_addrs,
@@ -1567,13 +1472,9 @@ err_secondary:
          * Verbs context returned by ibv_open_device().
          */
         mlx5_link_update(eth_dev, 0);
-        /* Detect minimal data bytes to inline. */
-        mlx5_set_min_inline(spawn, config);
-        /* Store device configuration on private structure. */
-        priv->config = *config;
         for (i = 0; i < MLX5_FLOW_TYPE_MAXI; i++) {
-                icfg[i].release_mem_en = !!config->reclaim_mode;
-                if (config->reclaim_mode)
+                icfg[i].release_mem_en = !!sh->config.reclaim_mode;
+                if (sh->config.reclaim_mode)
                         icfg[i].per_core_cache = 0;
                 priv->flows[i] = mlx5_ipool_create(&icfg[i]);
                 if (!priv->flows[i])
@@ -1581,15 +1482,14 @@ err_secondary:
         }
         /* Create context for virtual machine VLAN workaround. */
         priv->vmwa_context = mlx5_vlan_vmwa_init(eth_dev, spawn->ifindex);
-        if (config->dv_flow_en) {
+        if (sh->config.dv_flow_en) {
                 err = mlx5_alloc_shared_dr(priv);
                 if (err)
                         goto error;
                 if (mlx5_flex_item_port_init(eth_dev) < 0)
                         goto error;
         }
-        if (sh->cdev->config.devx && config->dv_flow_en &&
-            sh->dev_cap.dest_tir) {
+        if (mlx5_devx_obj_ops_en(sh)) {
                 priv->obj_ops = devx_obj_ops;
                 mlx5_queue_counter_id_prepare(eth_dev);
                 priv->obj_ops.lb_dummy_queue_create =
@@ -1604,7 +1504,7 @@ err_secondary:
         } else {
                 priv->obj_ops = ibv_obj_ops;
         }
-        if (config->tx_pp &&
+        if (sh->config.tx_pp &&
             priv->obj_ops.txq_obj_new != mlx5_txq_devx_obj_new) {
                 /*
                  * HAVE_MLX5DV_DEVX_UAR_OFFSET is required to support
@@ -1621,6 +1521,17 @@ err_secondary:
         priv->drop_queue.hrxq = mlx5_drop_action_create(eth_dev);
         if (!priv->drop_queue.hrxq)
                 goto error;
+        priv->hrxqs = mlx5_list_create("hrxq", eth_dev, true,
+                                       mlx5_hrxq_create_cb,
+                                       mlx5_hrxq_match_cb,
+                                       mlx5_hrxq_remove_cb,
+                                       mlx5_hrxq_clone_cb,
+                                       mlx5_hrxq_clone_free_cb);
+        if (!priv->hrxqs)
+                goto error;
+        rte_rwlock_init(&priv->ind_tbls_lock);
+        if (priv->sh->config.dv_flow_en == 2)
+                return eth_dev;
         /* Port representor shares the same max priority with pf port. */
         if (!priv->sh->flow_priority_check_flag) {
                 /* Supported Verbs flow priority number detection. */
@@ -1635,23 +1546,14 @@ err_secondary:
                         goto error;
         }
         mlx5_set_metadata_mask(eth_dev);
-        if (priv->config.dv_xmeta_en != MLX5_XMETA_MODE_LEGACY &&
+        if (sh->config.dv_xmeta_en != MLX5_XMETA_MODE_LEGACY &&
             !priv->sh->dv_regc0_mask) {
                 DRV_LOG(ERR, "metadata mode %u is not supported "
                              "(no metadata reg_c[0] is available)",
-                             priv->config.dv_xmeta_en);
+                             sh->config.dv_xmeta_en);
                 err = ENOTSUP;
                 goto error;
         }
-        priv->hrxqs = mlx5_list_create("hrxq", eth_dev, true,
-                                       mlx5_hrxq_create_cb,
-                                       mlx5_hrxq_match_cb,
-                                       mlx5_hrxq_remove_cb,
-                                       mlx5_hrxq_clone_cb,
-                                       mlx5_hrxq_clone_free_cb);
-        if (!priv->hrxqs)
-                goto error;
-        rte_rwlock_init(&priv->ind_tbls_lock);
         /* Query availability of metadata reg_c's. */
         if (!priv->sh->metadata_regc_check_flag) {
                 err = mlx5_flow_discover_mreg_c(eth_dev);
                 if (err < 0)
                         goto error;
@@ -1664,16 +1566,16 @@ err_secondary:
                         DRV_LOG(DEBUG,
                                 "port %u extensive metadata register is not supported",
                                 eth_dev->data->port_id);
-                if (priv->config.dv_xmeta_en != MLX5_XMETA_MODE_LEGACY) {
+                if (sh->config.dv_xmeta_en != MLX5_XMETA_MODE_LEGACY) {
                         DRV_LOG(ERR, "metadata mode %u is not supported "
                                      "(no metadata registers available)",
-                                     priv->config.dv_xmeta_en);
+                                     sh->config.dv_xmeta_en);
                         err = ENOTSUP;
                         goto error;
                 }
         }
-        if (priv->config.dv_flow_en &&
-            priv->config.dv_xmeta_en != MLX5_XMETA_MODE_LEGACY &&
+        if (sh->config.dv_flow_en &&
+            sh->config.dv_xmeta_en != MLX5_XMETA_MODE_LEGACY &&
             mlx5_flow_ext_mreg_supported(eth_dev) &&
             priv->sh->dv_regc0_mask) {
                 priv->mreg_cp_tbl = mlx5_hlist_create(MLX5_FLOW_MREG_HNAME,
@@ -1692,7 +1594,7 @@ err_secondary:
         rte_spinlock_init(&priv->shared_act_sl);
         mlx5_flow_counter_mode_config(eth_dev);
         mlx5_flow_drop_action_config(eth_dev);
-        if (priv->config.dv_flow_en)
+        if (sh->config.dv_flow_en)
                 eth_dev->data->dev_flags |= RTE_ETH_DEV_FLOW_OPS_THREAD_SAFE;
         return eth_dev;
 error:
@@ -1939,30 +1841,6 @@ mlx5_device_bond_pci_match(const char *ibdev_name,
         return pf;
 }
 
-static void
-mlx5_os_config_default(struct mlx5_dev_config *config)
-{
-        memset(config, 0, sizeof(*config));
-        config->mps = MLX5_ARG_UNSET;
-        config->cqe_comp = 1;
-        config->rx_vec_en = 1;
-        config->txq_inline_max = MLX5_ARG_UNSET;
-        config->txq_inline_min = MLX5_ARG_UNSET;
-        config->txq_inline_mpw = MLX5_ARG_UNSET;
-        config->txqs_inline = MLX5_ARG_UNSET;
-        config->vf_nl_en = 1;
-        config->mprq.max_memcpy_len = MLX5_MPRQ_MEMCPY_DEFAULT_LEN;
-        config->mprq.min_rxqs_num = MLX5_MPRQ_MIN_RXQS;
-        config->mprq.log_stride_num = MLX5_MPRQ_DEFAULT_LOG_STRIDE_NUM;
-        config->dv_esw_en = 1;
-        config->dv_flow_en = 1;
-        config->decap_en = 1;
-        config->log_hp_size = MLX5_ARG_UNSET;
-        config->allow_duplicate_pattern = 1;
-        config->std_delay_drop = 0;
-        config->hp_delay_drop = 0;
-}
-
 /**
  * Register a PCI device within bonding.
  *
@@ -1975,6 +1853,8 @@ mlx5_os_config_default(struct mlx5_dev_config *config)
  *   Requested ethdev device argument.
  * @param[in] owner_id
  *   Requested owner PF port ID within bonding device, default to 0.
+ * @param[in, out] mkvlist
+ *   Pointer to mlx5 kvargs control, can be NULL if there is no devargs.
  *
  * @return
  *   0 on success, a negative errno value otherwise and rte_errno is set.
@@ -1982,7 +1862,7 @@ mlx5_os_config_default(struct mlx5_dev_config *config)
 static int
 mlx5_os_pci_probe_pf(struct mlx5_common_device *cdev,
                      struct rte_eth_devargs *req_eth_da,
-                     uint16_t owner_id)
+                     uint16_t owner_id, struct mlx5_kvargs_ctrl *mkvlist)
 {
         struct ibv_device **ibv_list;
         /*
@@ -2011,7 +1891,6 @@ mlx5_os_pci_probe_pf(struct mlx5_common_device *cdev,
         int bd = -1;
         struct rte_pci_device *pci_dev = RTE_DEV_TO_PCI(cdev->dev);
         struct mlx5_dev_spawn_data *list = NULL;
-        struct mlx5_dev_config dev_config;
         struct rte_eth_devargs eth_da = *req_eth_da;
         struct rte_pci_addr owner_pci = pci_dev->addr; /* Owner PF. */
         struct mlx5_bond_info bond_info;
@@ -2353,10 +2232,8 @@ mlx5_os_pci_probe_pf(struct mlx5_common_device *cdev,
         for (i = 0; i != ns; ++i) {
                 uint32_t restore;
 
-                /* Default configuration. */
-                mlx5_os_config_default(&dev_config);
-                list[i].eth_dev = mlx5_dev_spawn(cdev->dev, &list[i],
-                                                 &dev_config, &eth_da);
+                list[i].eth_dev = mlx5_dev_spawn(cdev->dev, &list[i], &eth_da,
+                                                 mkvlist);
                 if (!list[i].eth_dev) {
                         if (rte_errno != EBUSY && rte_errno != EEXIST)
                                 break;
@@ -2468,12 +2345,15 @@ mlx5_os_parse_eth_devargs(struct rte_device *dev,
  *
  * @param[in] cdev
  *   Pointer to common mlx5 device structure.
+ * @param[in, out] mkvlist
+ *   Pointer to mlx5 kvargs control, can be NULL if there is no devargs.
  *
  * @return
  *   0 on success, a negative errno value otherwise and rte_errno is set.
  */
 static int
-mlx5_os_pci_probe(struct mlx5_common_device *cdev)
+mlx5_os_pci_probe(struct mlx5_common_device *cdev,
+                  struct mlx5_kvargs_ctrl *mkvlist)
 {
         struct rte_pci_device *pci_dev = RTE_DEV_TO_PCI(cdev->dev);
         struct rte_eth_devargs eth_da = { .nb_ports = 0 };
@@ -2488,7 +2368,7 @@ mlx5_os_pci_probe(struct mlx5_common_device *cdev)
                 /* Iterate all port if devargs pf is range: "pf[0-1]vf[...]". */
                 for (p = 0; p < eth_da.nb_ports; p++) {
                         ret = mlx5_os_pci_probe_pf(cdev, &eth_da,
-                                                   eth_da.ports[p]);
+                                                   eth_da.ports[p], mkvlist);
                         if (ret)
                                 break;
                 }
@@ -2501,17 +2381,17 @@ mlx5_os_pci_probe(struct mlx5_common_device *cdev)
                         mlx5_net_remove(cdev);
                 }
         } else {
-                ret = mlx5_os_pci_probe_pf(cdev, &eth_da, 0);
+                ret = mlx5_os_pci_probe_pf(cdev, &eth_da, 0, mkvlist);
         }
         return ret;
 }
 
 /* Probe a single SF device on auxiliary bus, no representor support. */
 static int
-mlx5_os_auxiliary_probe(struct mlx5_common_device *cdev)
+mlx5_os_auxiliary_probe(struct mlx5_common_device *cdev,
+                        struct mlx5_kvargs_ctrl *mkvlist)
 {
         struct rte_eth_devargs eth_da = { .nb_ports = 0 };
-        struct mlx5_dev_config config;
         struct mlx5_dev_spawn_data spawn = { .pf_bond = -1 };
         struct rte_device *dev = cdev->dev;
         struct rte_auxiliary_device *adev = RTE_DEV_TO_AUXILIARY(dev);
@@ -2522,8 +2402,6 @@ mlx5_os_auxiliary_probe(struct mlx5_common_device *cdev)
         ret = mlx5_os_parse_eth_devargs(dev, &eth_da);
         if (ret != 0)
                 return ret;
-        /* Set default config data. */
-        mlx5_os_config_default(&config);
         /* Init spawn data. */
         spawn.max_port = 1;
         spawn.phys_port = 1;
@@ -2536,7 +2414,7 @@ mlx5_os_auxiliary_probe(struct mlx5_common_device *cdev)
         spawn.ifindex = ret;
         spawn.cdev = cdev;
         /* Spawn device. */
-        eth_dev = mlx5_dev_spawn(dev, &spawn, &config, &eth_da);
+        eth_dev = mlx5_dev_spawn(dev, &spawn, &eth_da, mkvlist);
         if (eth_dev == NULL)
                 return -rte_errno;
         /* Post create. */
@@ -2557,12 +2435,15 @@ mlx5_os_auxiliary_probe(struct mlx5_common_device *cdev)
  *
  * @param[in] cdev
  *   Pointer to the common mlx5 device.
+ * @param[in, out] mkvlist
+ *   Pointer to mlx5 kvargs control, can be NULL if there is no devargs.
  *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
 int
-mlx5_os_net_probe(struct mlx5_common_device *cdev)
+mlx5_os_net_probe(struct mlx5_common_device *cdev,
+                  struct mlx5_kvargs_ctrl *mkvlist)
 {
         int ret;
 
@@ -2574,10 +2455,16 @@ mlx5_os_net_probe(struct mlx5_common_device *cdev)
                         strerror(rte_errno));
                 return -rte_errno;
         }
+        ret = mlx5_probe_again_args_validate(cdev, mkvlist);
+        if (ret) {
+                DRV_LOG(ERR, "Probe again parameters are not compatible : %s",
+                        strerror(rte_errno));
+                return -rte_errno;
+        }
         if (mlx5_dev_is_pci(cdev->dev))
-                return mlx5_os_pci_probe(cdev);
+                return mlx5_os_pci_probe(cdev, mkvlist);
         else
-                return mlx5_os_auxiliary_probe(cdev);
+                return mlx5_os_auxiliary_probe(cdev, mkvlist);
 }
 
 /**