#include <rte_rwlock.h>
#include <rte_spinlock.h>
#include <rte_string_fns.h>
+#include <rte_eal_paging.h>
#include <rte_alarm.h>
#include <rte_cycles.h>
}
}
+/**
+ * Detect and configure the DV flow counter mode.
+ *
+ * @param dev
+ * Pointer to rte_eth_dev structure.
+ */
+void
+mlx5_flow_counter_mode_config(struct rte_eth_dev *dev __rte_unused)
+{
+#ifdef HAVE_IBV_FLOW_DV_SUPPORT
+ struct mlx5_priv *priv = dev->data->dev_private;
+ struct mlx5_dev_ctx_shared *sh = priv->sh;
+ struct mlx5_hca_attr *hca_attr = &sh->cdev->config.hca_attr;
+ bool fallback;
+
+#ifndef HAVE_IBV_DEVX_ASYNC
+ fallback = true;
+#else
+ fallback = false;
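+ /*
+ * The regular counter service requires DevX with async queries,
+ * DV flow enabled, the counter dump capability, the bulk
+ * allocation capability 0x4 and a discoverable counter offset;
+ * otherwise fall back to the legacy counter management.
+ */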
+ if (!sh->cdev->config.devx || !priv->config.dv_flow_en ||
+ !hca_attr->flow_counters_dump ||
+ !(hca_attr->flow_counter_bulk_alloc_bitmap & 0x4) ||
+ (mlx5_flow_dv_discover_counter_offset_support(dev) == -ENOTSUP))
+ fallback = true;
+#endif
+ if (fallback)
+ DRV_LOG(INFO, "Use fall-back DV counter management. Flow "
+ "counter dump:%d, bulk_alloc_bitmap:0x%hhx.",
+ hca_attr->flow_counters_dump,
+ hca_attr->flow_counter_bulk_alloc_bitmap);
+ /* Initialize the fallback mode only on the port that initializes sh. */
+ if (sh->refcnt == 1)
+ sh->cmng.counter_fallback = fallback;
+ else if (fallback != sh->cmng.counter_fallback)
+ DRV_LOG(WARNING, "Port %d in sh has different fallback mode "
+ "with others:%d.", PORT_ID(priv), fallback);
+#endif
+}
+
/**
* Initialize the counters management structure.
*
static void
mlx5_flow_counters_mng_init(struct mlx5_dev_ctx_shared *sh)
{
- struct mlx5_hca_attr *attr = &sh->cdev->config.hca_attr;
int i;
memset(&sh->cmng, 0, sizeof(sh->cmng));
TAILQ_INIT(&sh->cmng.counters[i]);
rte_spinlock_init(&sh->cmng.csl[i]);
}
- if (sh->devx && !haswell_broadwell_cpu) {
- sh->cmng.relaxed_ordering_write = attr->relaxed_ordering_write;
- sh->cmng.relaxed_ordering_read = attr->relaxed_ordering_read;
- }
}
/**
uint8_t *mem = (uint8_t *)(uintptr_t)mng->raws[0].data;
LIST_REMOVE(mng, next);
- claim_zero(mlx5_devx_cmd_destroy(mng->dm));
- claim_zero(mlx5_os_umem_dereg(mng->umem));
+ mlx5_os_wrapped_mkey_destroy(&mng->wm);
mlx5_free(mem);
}
uint32_t ids[8];
int ret;
- if (!priv->config.hca_attr.parse_graph_flex_node) {
+ if (!priv->sh->cdev->config.hca_attr.parse_graph_flex_node) {
DRV_LOG(ERR, "Dynamic flex parser is not supported "
"for device %s.", priv->dev_data->name);
return -ENOTSUP;
return tn_offloads;
}
-/*
- * Allocate Rx and Tx UARs in robust fashion.
- * This routine handles the following UAR allocation issues:
- *
- * - tries to allocate the UAR with the most appropriate memory
- * mapping type from the ones supported by the host
- *
- * - tries to allocate the UAR with non-NULL base address
- * OFED 5.0.x and Upstream rdma_core before v29 returned the NULL as
- * UAR base address if UAR was not the first object in the UAR page.
- * It caused the PMD failure and we should try to get another UAR
- * till we get the first one with non-NULL base address returned.
- */
+/* Prepare Tx and Rx UARs and fill all fields of the UAR structures. */
static int
-mlx5_alloc_rxtx_uars(struct mlx5_dev_ctx_shared *sh,
- const struct mlx5_common_dev_config *config)
+mlx5_rxtx_uars_prepare(struct mlx5_dev_ctx_shared *sh)
{
- uint32_t uar_mapping, retry;
- int err = 0;
- void *base_addr;
-
- for (retry = 0; retry < MLX5_ALLOC_UAR_RETRY; ++retry) {
-#ifdef MLX5DV_UAR_ALLOC_TYPE_NC
- /* Control the mapping type according to the settings. */
- uar_mapping = (config->dbnc == MLX5_TXDB_NCACHED) ?
- MLX5DV_UAR_ALLOC_TYPE_NC :
- MLX5DV_UAR_ALLOC_TYPE_BF;
-#else
- RTE_SET_USED(config);
- /*
- * It seems we have no way to control the memory mapping type
- * for the UAR, the default "Write-Combining" type is supposed.
- * The UAR initialization on queue creation queries the
- * actual mapping type done by Verbs/kernel and setups the
- * PMD datapath accordingly.
- */
- uar_mapping = 0;
-#endif
- sh->tx_uar = mlx5_glue->devx_alloc_uar(sh->cdev->ctx,
- uar_mapping);
-#ifdef MLX5DV_UAR_ALLOC_TYPE_NC
- if (!sh->tx_uar &&
- uar_mapping == MLX5DV_UAR_ALLOC_TYPE_BF) {
- if (config->dbnc == MLX5_TXDB_CACHED ||
- config->dbnc == MLX5_TXDB_HEURISTIC)
- DRV_LOG(WARNING, "Devarg tx_db_nc setting "
- "is not supported by DevX");
- /*
- * In some environments like virtual machine
- * the Write Combining mapped might be not supported
- * and UAR allocation fails. We try "Non-Cached"
- * mapping for the case. The tx_burst routines take
- * the UAR mapping type into account on UAR setup
- * on queue creation.
- */
- DRV_LOG(DEBUG, "Failed to allocate Tx DevX UAR (BF)");
- uar_mapping = MLX5DV_UAR_ALLOC_TYPE_NC;
- sh->tx_uar = mlx5_glue->devx_alloc_uar(sh->cdev->ctx,
- uar_mapping);
- } else if (!sh->tx_uar &&
- uar_mapping == MLX5DV_UAR_ALLOC_TYPE_NC) {
- if (config->dbnc == MLX5_TXDB_NCACHED)
- DRV_LOG(WARNING, "Devarg tx_db_nc settings "
- "is not supported by DevX");
- /*
- * If Verbs/kernel does not support "Non-Cached"
- * try the "Write-Combining".
- */
- DRV_LOG(DEBUG, "Failed to allocate Tx DevX UAR (NC)");
- uar_mapping = MLX5DV_UAR_ALLOC_TYPE_BF;
- sh->tx_uar = mlx5_glue->devx_alloc_uar(sh->cdev->ctx,
- uar_mapping);
- }
-#endif
- if (!sh->tx_uar) {
- DRV_LOG(ERR, "Failed to allocate Tx DevX UAR (BF/NC)");
- err = ENOMEM;
- goto exit;
- }
- base_addr = mlx5_os_get_devx_uar_base_addr(sh->tx_uar);
- if (base_addr)
- break;
- /*
- * The UARs are allocated by rdma_core within the
- * IB device context, on context closure all UARs
- * will be freed, should be no memory/object leakage.
- */
- DRV_LOG(DEBUG, "Retrying to allocate Tx DevX UAR");
- sh->tx_uar = NULL;
- }
- /* Check whether we finally succeeded with valid UAR allocation. */
- if (!sh->tx_uar) {
- DRV_LOG(ERR, "Failed to allocate Tx DevX UAR (NULL base)");
- err = ENOMEM;
- goto exit;
- }
- for (retry = 0; retry < MLX5_ALLOC_UAR_RETRY; ++retry) {
- uar_mapping = 0;
- sh->devx_rx_uar = mlx5_glue->devx_alloc_uar(sh->cdev->ctx,
- uar_mapping);
-#ifdef MLX5DV_UAR_ALLOC_TYPE_NC
- if (!sh->devx_rx_uar &&
- uar_mapping == MLX5DV_UAR_ALLOC_TYPE_BF) {
- /*
- * Rx UAR is used to control interrupts only,
- * should be no datapath noticeable impact,
- * can try "Non-Cached" mapping safely.
- */
- DRV_LOG(DEBUG, "Failed to allocate Rx DevX UAR (BF)");
- uar_mapping = MLX5DV_UAR_ALLOC_TYPE_NC;
- sh->devx_rx_uar = mlx5_glue->devx_alloc_uar
- (sh->cdev->ctx, uar_mapping);
- }
-#endif
- if (!sh->devx_rx_uar) {
- DRV_LOG(ERR, "Failed to allocate Rx DevX UAR (BF/NC)");
- err = ENOMEM;
- goto exit;
- }
- base_addr = mlx5_os_get_devx_uar_base_addr(sh->devx_rx_uar);
- if (base_addr)
- break;
- /*
- * The UARs are allocated by rdma_core within the
- * IB device context, on context closure all UARs
- * will be freed, should be no memory/object leakage.
- */
- DRV_LOG(DEBUG, "Retrying to allocate Rx DevX UAR");
- sh->devx_rx_uar = NULL;
+ int ret;
+
+ ret = mlx5_devx_uar_prepare(sh->cdev, &sh->tx_uar);
+ if (ret) {
+ DRV_LOG(ERR, "Failed to prepare Tx DevX UAR.");
+ return -rte_errno;
}
- /* Check whether we finally succeeded with valid UAR allocation. */
- if (!sh->devx_rx_uar) {
- DRV_LOG(ERR, "Failed to allocate Rx DevX UAR (NULL base)");
- err = ENOMEM;
+ MLX5_ASSERT(sh->tx_uar.obj);
+ MLX5_ASSERT(mlx5_os_get_devx_uar_base_addr(sh->tx_uar.obj));
+ ret = mlx5_devx_uar_prepare(sh->cdev, &sh->rx_uar);
+ if (ret) {
+ DRV_LOG(ERR, "Failed to prepare Rx DevX UAR.");
+ mlx5_devx_uar_release(&sh->tx_uar);
+ return -rte_errno;
}
-exit:
- return err;
+ MLX5_ASSERT(sh->rx_uar.obj);
+ MLX5_ASSERT(mlx5_os_get_devx_uar_base_addr(sh->rx_uar.obj));
+ return 0;
+}
+
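+/* Release the Rx and Tx UARs prepared by mlx5_rxtx_uars_prepare(). */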
+static void
+mlx5_rxtx_uars_release(struct mlx5_dev_ctx_shared *sh)
+{
+ mlx5_devx_uar_release(&sh->rx_uar);
+ mlx5_devx_uar_release(&sh->tx_uar);
}
/**
return 0;
}
+/**
+ * Configure realtime timestamp format.
+ *
+ * @param sh
+ * Pointer to mlx5_dev_ctx_shared object.
+ * @param config
+ * Device configuration parameters.
+ * @param hca_attr
+ * Pointer to DevX HCA capabilities structure.
+ */
+void
+mlx5_rt_timestamp_config(struct mlx5_dev_ctx_shared *sh,
+ struct mlx5_dev_config *config,
+ struct mlx5_hca_attr *hca_attr)
+{
+ uint32_t dw_cnt = MLX5_ST_SZ_DW(register_mtutc);
+ uint32_t reg[dw_cnt];
+ int ret = ENOTSUP;
+
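+ /* Reading the MTUTC register requires user access granted by the kernel. */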
+ if (hca_attr->access_register_user)
+ ret = mlx5_devx_cmd_register_read(sh->cdev->ctx,
+ MLX5_REGISTER_ID_MTUTC, 0,
+ reg, dw_cnt);
+ if (!ret) {
+ uint32_t ts_mode;
+
+ /* MTUTC register is read successfully. */
+ ts_mode = MLX5_GET(register_mtutc, reg, time_stamp_mode);
+ if (ts_mode == MLX5_MTUTC_TIMESTAMP_MODE_REAL_TIME)
+ config->rt_timestamp = 1;
+ } else {
+ /* Kernel does not support register reading. */
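+ /*
+ * A 1 GHz free-running clock (NS_PER_S / MS_PER_S kHz) ticks in
+ * nanoseconds and is treated as the real-time format.
+ */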
+ if (hca_attr->dev_freq_khz == (NS_PER_S / MS_PER_S))
+ config->rt_timestamp = 1;
+ }
+}
+
/**
* Allocate shared device context. If there is multiport device the
* master and representors will share this context, if there is single
MLX5_ASSERT(spawn->max_port);
sh = mlx5_malloc(MLX5_MEM_ZERO | MLX5_MEM_RTE,
sizeof(struct mlx5_dev_ctx_shared) +
- spawn->max_port *
- sizeof(struct mlx5_dev_shared_port),
+ spawn->max_port * sizeof(struct mlx5_dev_shared_port),
RTE_CACHE_LINE_SIZE, SOCKET_ID_ANY);
if (!sh) {
- DRV_LOG(ERR, "shared context allocation failure");
- rte_errno = ENOMEM;
+ DRV_LOG(ERR, "Shared context allocation failure.");
+ rte_errno = ENOMEM;
goto exit;
}
pthread_mutex_init(&sh->txpp.mutex, NULL);
sh->numa_node = spawn->cdev->dev->numa_node;
sh->cdev = spawn->cdev;
- sh->devx = sh->cdev->config.devx;
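+ /* E-Switch mode is on when the device is an E-Switch master or a representor. */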
+ sh->esw_mode = !!(spawn->info.master || spawn->info.representor);
if (spawn->bond_info)
sh->bond = *spawn->bond_info;
- err = mlx5_os_get_dev_attr(sh->cdev, &sh->device_attr);
+ err = mlx5_os_capabilities_prepare(sh);
if (err) {
- DRV_LOG(DEBUG, "mlx5_os_get_dev_attr() failed");
+ DRV_LOG(ERR, "Fail to configure device capabilities.");
goto error;
}
sh->refcnt = 1;
strncpy(sh->ibdev_path, mlx5_os_get_ctx_device_path(sh->cdev->ctx),
sizeof(sh->ibdev_path) - 1);
/*
- * Setting port_id to max unallowed value means
- * there is no interrupt subhandler installed for
- * the given port index i.
+ * Setting port_id to max unallowed value means there is no interrupt
+ * subhandler installed for the given port index i.
*/
for (i = 0; i < sh->max_port; i++) {
sh->port[i].ih_port_id = RTE_MAX_ETHPORTS;
sh->port[i].devx_ih_port_id = RTE_MAX_ETHPORTS;
}
- if (sh->devx) {
+ if (sh->cdev->config.devx) {
sh->td = mlx5_devx_cmd_create_td(sh->cdev->ctx);
if (!sh->td) {
DRV_LOG(ERR, "TD allocation failure");
- err = ENOMEM;
+ rte_errno = ENOMEM;
goto error;
}
if (mlx5_setup_tis(sh)) {
DRV_LOG(ERR, "TIS allocation failure");
- err = ENOMEM;
+ rte_errno = ENOMEM;
goto error;
}
- err = mlx5_alloc_rxtx_uars(sh, &sh->cdev->config);
+ err = mlx5_rxtx_uars_prepare(sh);
if (err)
goto error;
- MLX5_ASSERT(sh->tx_uar);
- MLX5_ASSERT(mlx5_os_get_devx_uar_base_addr(sh->tx_uar));
-
- MLX5_ASSERT(sh->devx_rx_uar);
- MLX5_ASSERT(mlx5_os_get_devx_uar_base_addr(sh->devx_rx_uar));
- }
#ifndef RTE_ARCH_64
- /* Initialize UAR access locks for 32bit implementations. */
- rte_spinlock_init(&sh->uar_lock_cq);
- for (i = 0; i < MLX5_UAR_PAGE_NUM_MAX; i++)
- rte_spinlock_init(&sh->uar_lock[i]);
+ } else {
+ /* Initialize UAR access locks for 32bit implementations. */
+ rte_spinlock_init(&sh->uar_lock_cq);
+ for (i = 0; i < MLX5_UAR_PAGE_NUM_MAX; i++)
+ rte_spinlock_init(&sh->uar_lock[i]);
#endif
+ }
mlx5_os_dev_shared_handler_install(sh);
if (LIST_EMPTY(&mlx5_dev_ctx_list)) {
err = mlx5_flow_os_init_workspace_once();
pthread_mutex_unlock(&mlx5_dev_ctx_list_mutex);
return sh;
error:
+ err = rte_errno;
pthread_mutex_destroy(&sh->txpp.mutex);
pthread_mutex_unlock(&mlx5_dev_ctx_list_mutex);
MLX5_ASSERT(sh);
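+ /* Release in reverse order of creation: UARs, then TISes, then the TD. */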
- if (sh->td)
- claim_zero(mlx5_devx_cmd_destroy(sh->td));
+ mlx5_rxtx_uars_release(sh);
i = 0;
do {
if (sh->tis[i])
claim_zero(mlx5_devx_cmd_destroy(sh->tis[i]));
} while (++i < (uint32_t)sh->bond.n_port);
- if (sh->devx_rx_uar)
- mlx5_glue->devx_free_uar(sh->devx_rx_uar);
- if (sh->tx_uar)
- mlx5_glue->devx_free_uar(sh->tx_uar);
+ if (sh->td)
+ claim_zero(mlx5_devx_cmd_destroy(sh->td));
mlx5_free(sh);
- MLX5_ASSERT(err > 0);
rte_errno = err;
return NULL;
}
* Only primary process handles async device events.
- **/
+ */
mlx5_flow_counters_mng_close(sh);
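+ /* Close the ASO connection tracking management, if it was created. */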
+ if (sh->ct_mng)
+ mlx5_flow_aso_ct_mng_close(sh);
if (sh->aso_age_mng) {
mlx5_flow_aso_age_mng_close(sh);
sh->aso_age_mng = NULL;
mlx5_aso_flow_mtrs_mng_close(sh);
mlx5_flow_ipool_destroy(sh);
mlx5_os_dev_shared_handler_uninstall(sh);
- if (sh->tx_uar) {
- mlx5_glue->devx_free_uar(sh->tx_uar);
- sh->tx_uar = NULL;
- }
+ mlx5_rxtx_uars_release(sh);
do {
if (sh->tis[i])
claim_zero(mlx5_devx_cmd_destroy(sh->tis[i]));
} while (++i < sh->bond.n_port);
if (sh->td)
claim_zero(mlx5_devx_cmd_destroy(sh->td));
- if (sh->devx_rx_uar)
- mlx5_glue->devx_free_uar(sh->devx_rx_uar);
MLX5_ASSERT(sh->geneve_tlv_option_resource == NULL);
pthread_mutex_destroy(&sh->txpp.mutex);
mlx5_free(sh);
* UAR register table follows the process private structure. BlueFlame
* registers for Tx queues are stored in the table.
*/
- ppriv_size =
- sizeof(struct mlx5_proc_priv) + priv->txqs_n * sizeof(void *);
+ ppriv_size = sizeof(struct mlx5_proc_priv) +
+ priv->txqs_n * sizeof(struct mlx5_uar_data);
ppriv = mlx5_malloc(MLX5_MEM_RTE | MLX5_MEM_ZERO, ppriv_size,
RTE_CACHE_LINE_SIZE, dev->device->numa_node);
if (!ppriv) {
}
ppriv->uar_table_sz = priv->txqs_n;
dev->process_private = ppriv;
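+ /* The primary process keeps a back-reference to its private data in the shared context. */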
+ if (rte_eal_process_type() == RTE_PROC_PRIMARY)
+ priv->sh->pppriv = ppriv;
return 0;
}
mlx5_action_handle_flush(dev);
mlx5_flow_meter_flush(dev, NULL);
/* Prevent crashes when queues are still in use. */
- dev->rx_pkt_burst = removed_rx_burst;
- dev->tx_pkt_burst = removed_tx_burst;
+ dev->rx_pkt_burst = rte_eth_pkt_burst_dummy;
+ dev->tx_pkt_burst = rte_eth_pkt_burst_dummy;
rte_wmb();
/* Disable datapath on secondary process. */
mlx5_mp_os_req_stop_rxtx(dev);
if (priv->mreg_cp_tbl)
mlx5_hlist_destroy(priv->mreg_cp_tbl);
mlx5_mprq_free_mp(dev);
- if (priv->sh->ct_mng)
- mlx5_flow_aso_ct_mng_close(priv->sh);
mlx5_os_free_shared_dr(priv);
if (priv->rss_conf.rss_key != NULL)
mlx5_free(priv->rss_conf.rss_key);
/*
* Free the shared context in last turn, because the cleanup
* routines above may use some shared fields, like
- * mlx5_os_mac_addr_flush() uses ibdev_path for retrieveing
+ * mlx5_os_mac_addr_flush() uses ibdev_path for retrieving
* ifindex if Netlink fails.
*/
mlx5_free_shared_dev_ctx(priv->sh);
} else if (strcmp(MLX5_RX_MPRQ_EN, key) == 0) {
config->mprq.enabled = !!tmp;
} else if (strcmp(MLX5_RX_MPRQ_LOG_STRIDE_NUM, key) == 0) {
- config->mprq.stride_num_n = tmp;
+ config->mprq.log_stride_num = tmp;
} else if (strcmp(MLX5_RX_MPRQ_LOG_STRIDE_SIZE, key) == 0) {
- config->mprq.stride_size_n = tmp;
+ config->mprq.log_stride_size = tmp;
} else if (strcmp(MLX5_RX_MPRQ_MAX_MEMCPY_LEN, key) == 0) {
config->mprq.max_memcpy_len = tmp;
} else if (strcmp(MLX5_RXQS_MIN_MPRQ, key) == 0) {
if (tmp != MLX5_RCM_NONE &&
tmp != MLX5_RCM_LIGHT &&
tmp != MLX5_RCM_AGGR) {
- DRV_LOG(ERR, "Unrecognize %s: \"%s\"", key, val);
+ DRV_LOG(ERR, "Unrecognized %s: \"%s\"", key, val);
rte_errno = EINVAL;
return -rte_errno;
}
} else if (strcmp(MLX5_ALLOW_DUPLICATE_PATTERN, key) == 0) {
config->allow_duplicate_pattern = !!tmp;
} else if (strcmp(MLX5_DELAY_DROP, key) == 0) {
- config->std_delay_drop = tmp & MLX5_DELAY_DROP_STANDARD;
- config->hp_delay_drop = tmp & MLX5_DELAY_DROP_HAIRPIN;
+ config->std_delay_drop = !!(tmp & MLX5_DELAY_DROP_STANDARD);
+ config->hp_delay_drop = !!(tmp & MLX5_DELAY_DROP_HAIRPIN);
} else {
- DRV_LOG(WARNING, "%s: unknown parameter", key);
- rte_errno = EINVAL;
- return -rte_errno;
+ DRV_LOG(WARNING,
+ "%s: unknown parameter, maybe it's for another class.",
+ key);
}
return 0;
}
int
mlx5_args(struct mlx5_dev_config *config, struct rte_devargs *devargs)
{
- const char **params = (const char *[]){
- MLX5_DRIVER_KEY,
- MLX5_RXQ_CQE_COMP_EN,
- MLX5_RXQ_PKT_PAD_EN,
- MLX5_RX_MPRQ_EN,
- MLX5_RX_MPRQ_LOG_STRIDE_NUM,
- MLX5_RX_MPRQ_LOG_STRIDE_SIZE,
- MLX5_RX_MPRQ_MAX_MEMCPY_LEN,
- MLX5_RXQS_MIN_MPRQ,
- MLX5_TXQ_INLINE,
- MLX5_TXQ_INLINE_MIN,
- MLX5_TXQ_INLINE_MAX,
- MLX5_TXQ_INLINE_MPW,
- MLX5_TXQS_MIN_INLINE,
- MLX5_TXQS_MAX_VEC,
- MLX5_TXQ_MPW_EN,
- MLX5_TXQ_MPW_HDR_DSEG_EN,
- MLX5_TXQ_MAX_INLINE_LEN,
- MLX5_TX_DB_NC,
- MLX5_TX_PP,
- MLX5_TX_SKEW,
- MLX5_TX_VEC_EN,
- MLX5_RX_VEC_EN,
- MLX5_L3_VXLAN_EN,
- MLX5_VF_NL_EN,
- MLX5_DV_ESW_EN,
- MLX5_DV_FLOW_EN,
- MLX5_DV_XMETA_EN,
- MLX5_LACP_BY_USER,
- MLX5_MR_EXT_MEMSEG_EN,
- MLX5_REPRESENTOR,
- MLX5_MAX_DUMP_FILES_NUM,
- MLX5_LRO_TIMEOUT_USEC,
- RTE_DEVARGS_KEY_CLASS,
- MLX5_HP_BUF_SIZE,
- MLX5_RECLAIM_MEM,
- MLX5_SYS_MEM_EN,
- MLX5_DECAP_EN,
- MLX5_ALLOW_DUPLICATE_PATTERN,
- MLX5_MR_MEMPOOL_REG_EN,
- MLX5_DELAY_DROP,
- NULL,
- };
struct rte_kvargs *kvlist;
int ret = 0;
- int i;
if (devargs == NULL)
return 0;
- /* Following UGLY cast is done to pass checkpatch. */
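+ /* A NULL key list accepts any key; unknown keys may belong to other classes. */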
- kvlist = rte_kvargs_parse(devargs->args, params);
+ kvlist = rte_kvargs_parse(devargs->args, NULL);
if (kvlist == NULL) {
rte_errno = EINVAL;
return -rte_errno;
}
/* Process parameters. */
- for (i = 0; (params[i] != NULL); ++i) {
- if (rte_kvargs_count(kvlist, params[i])) {
- ret = rte_kvargs_process(kvlist, params[i],
- mlx5_args_check, config);
- if (ret) {
- rte_errno = EINVAL;
- rte_kvargs_free(kvlist);
- return -rte_errno;
- }
- }
+ ret = rte_kvargs_process(kvlist, NULL, mlx5_args_check, config);
+ if (ret) {
+ rte_errno = EINVAL;
+ ret = -rte_errno;
}
rte_kvargs_free(kvlist);
- return 0;
+ return ret;
}
/**
mlx5_set_min_inline(struct mlx5_dev_spawn_data *spawn,
struct mlx5_dev_config *config)
{
+ struct mlx5_hca_attr *hca_attr = &spawn->cdev->config.hca_attr;
+
if (config->txq_inline_min != MLX5_ARG_UNSET) {
/* Application defines size of inlined data explicitly. */
if (spawn->pci_dev != NULL) {
}
goto exit;
}
- if (config->hca_attr.eth_net_offloads) {
+ if (hca_attr->eth_net_offloads) {
/* We have DevX enabled, inline mode queried successfully. */
- switch (config->hca_attr.wqe_inline_mode) {
+ switch (hca_attr->wqe_inline_mode) {
case MLX5_CAP_INLINE_MODE_L2:
/* outer L2 header must be inlined. */
config->txq_inline_min = MLX5_INLINE_HSIZE_L2;
/* No inline data are required by NIC. */
config->txq_inline_min = MLX5_INLINE_HSIZE_NONE;
config->hw_vlan_insert =
- config->hca_attr.wqe_vlan_insert;
+ hca_attr->wqe_vlan_insert;
DRV_LOG(DEBUG, "Tx VLAN insertion is supported");
goto exit;
case MLX5_CAP_INLINE_MODE_VPORT_CONTEXT:
/* inline mode is defined by NIC vport context. */
- if (!config->hca_attr.eth_virt)
+ if (!hca_attr->eth_virt)
break;
- switch (config->hca_attr.vport_inline_mode) {
+ switch (hca_attr->vport_inline_mode) {
case MLX5_INLINE_MODE_NONE:
config->txq_inline_min =
MLX5_INLINE_HSIZE_NONE;
break;
}
if (sh->dv_mark_mask && sh->dv_mark_mask != mark)
- DRV_LOG(WARNING, "metadata MARK mask mismatche %08X:%08X",
+ DRV_LOG(WARNING, "metadata MARK mask mismatch %08X:%08X",
sh->dv_mark_mask, mark);
else
sh->dv_mark_mask = mark;
if (sh->dv_meta_mask && sh->dv_meta_mask != meta)
- DRV_LOG(WARNING, "metadata META mask mismatche %08X:%08X",
+ DRV_LOG(WARNING, "metadata META mask mismatch %08X:%08X",
sh->dv_meta_mask, meta);
else
sh->dv_meta_mask = meta;
if (sh->dv_regc0_mask && sh->dv_regc0_mask != reg_c0)
- DRV_LOG(WARNING, "metadata reg_c0 mask mismatche %08X:%08X",
+ DRV_LOG(WARNING, "metadata reg_c0 mask mismatch %08X:%08X",
- sh->dv_meta_mask, reg_c0);
+ sh->dv_regc0_mask, reg_c0);
else
sh->dv_regc0_mask = reg_c0;
}
/**
- * Comparison callback to sort device data.
+ * Check sibling device configurations.
*
- * This is meant to be used with qsort().
+ * Sibling devices sharing the Infiniband device context should have compatible
+ * configurations. This applies to representors and bonding devices.
*
- * @param a[in]
- * Pointer to pointer to first data object.
- * @param b[in]
- * Pointer to pointer to second data object.
+ * @param sh
+ * Shared device context.
+ * @param config
+ * Configuration of the device that is going to be created.
+ * @param dpdk_dev
+ * Backing DPDK device.
*
* @return
- * 0 if both objects are equal, less than 0 if the first argument is less
- * than the second, greater than 0 otherwise.
+ * 0 on success, EINVAL otherwise
*/
int
-mlx5_dev_check_sibling_config(struct mlx5_priv *priv,
+mlx5_dev_check_sibling_config(struct mlx5_dev_ctx_shared *sh,
struct mlx5_dev_config *config,
struct rte_device *dpdk_dev)
{
- struct mlx5_dev_ctx_shared *sh = priv->sh;
struct mlx5_dev_config *sh_conf = NULL;
uint16_t port_id;
struct mlx5_priv *opriv =
rte_eth_devices[port_id].data->dev_private;
- if (opriv && opriv != priv && opriv->sh == sh) {
+ if (opriv && opriv->sh == sh) {
sh_conf = &opriv->config;
break;
}