ibv_match = ibv_list[n];
break;
}
- if (ibv_match == NULL)
+ if (ibv_match == NULL) {
+ DRV_LOG(WARNING,
+ "No Verbs device matches PCI device " PCI_PRI_FMT ","
+ " are kernel drivers loaded?",
+ addr->domain, addr->bus, addr->devid, addr->function);
rte_errno = ENOENT;
+ }
mlx5_glue->free_device_list(ibv_list);
return ibv_match;
}
*
* @param cdev
* Pointer to the mlx5 device.
- * @param ctx_ptr
- * Pointer to fill inside pointer to device context.
+ * @param classes
+ * Chosen classes come from device arguments.
*
* @return
* 0 on success, a negative errno value otherwise and rte_errno is set.
*/
int
-mlx5_os_open_device(struct mlx5_common_device *cdev, void **ctx_ptr)
+mlx5_os_open_device(struct mlx5_common_device *cdev, uint32_t classes)
{
struct ibv_device *ibv;
struct ibv_context *ctx = NULL;
if (ctx) {
cdev->config.devx = 1;
DRV_LOG(DEBUG, "DevX is supported.");
- } else {
+ } else if (classes == MLX5_CLASS_ETH) {
/* The environment variable is still configured. */
ctx = mlx5_glue->open_device(ibv);
if (ctx == NULL)
goto error;
DRV_LOG(DEBUG, "DevX is NOT supported.");
+ } else {
+ goto error;
}
/* The device is created, no need for environment. */
mlx5_restore_doorbell_mapping_env(dbmap_env);
/* Hint libmlx5 to use PMD allocator for data plane resources */
mlx5_set_context_attr(cdev->dev, ctx);
- *ctx_ptr = (void *)ctx;
+ cdev->ctx = ctx;
return 0;
error:
rte_errno = errno ? errno : ENODEV;
#endif
}
+/**
+ * Uninitialize all HW global resources of the device context.
+ *
+ * Closes the Verbs/DevX context if one was opened and clears the
+ * pointer, so calling this a second time is a harmless no-op.
+ *
+ * @param cdev
+ * Pointer to mlx5 device structure.
+ */
+static void
+mlx5_dev_hw_global_release(struct mlx5_common_device *cdev)
+{
+ if (cdev->ctx != NULL) {
+ claim_zero(mlx5_glue->close_device(cdev->ctx));
+ cdev->ctx = NULL;
+ }
+}
+
+/**
+ * Initialize all HW global resources of the device context.
+ *
+ * Currently this only opens the device context (Verbs/DevX) and stores
+ * it in cdev->ctx; further HW global initialization steps hook in here.
+ *
+ * @param cdev
+ * Pointer to mlx5 device structure.
+ * @param classes
+ * Chosen classes come from user device arguments.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+static int
+mlx5_dev_hw_global_prepare(struct mlx5_common_device *cdev, uint32_t classes)
+{
+ int ret;
+
+ /* Open the device context (Verbs/DevX), filling cdev->ctx. */
+ ret = mlx5_os_open_device(cdev, classes);
+ if (ret < 0)
+ return ret;
+ return 0;
+}
+
static void
mlx5_common_dev_release(struct mlx5_common_device *cdev)
{
pthread_mutex_lock(&devices_list_lock);
TAILQ_REMOVE(&devices_list, cdev, next);
pthread_mutex_unlock(&devices_list_lock);
+ if (rte_eal_process_type() == RTE_PROC_PRIMARY)
+ mlx5_dev_hw_global_release(cdev);
rte_free(cdev);
}
static struct mlx5_common_device *
-mlx5_common_dev_create(struct rte_device *eal_dev)
+mlx5_common_dev_create(struct rte_device *eal_dev, uint32_t classes)
{
struct mlx5_common_device *cdev;
int ret;
return NULL;
}
mlx5_malloc_mem_select(cdev->config.sys_mem_en);
+ /* Initialize all HW global of device context. */
+ ret = mlx5_dev_hw_global_prepare(cdev, classes);
+ if (ret) {
+ DRV_LOG(ERR, "Failed to initialize device context.");
+ rte_free(cdev);
+ return NULL;
+ }
exit:
pthread_mutex_lock(&devices_list_lock);
TAILQ_INSERT_HEAD(&devices_list, cdev, next);
classes = MLX5_CLASS_ETH;
cdev = to_mlx5_device(eal_dev);
if (!cdev) {
- cdev = mlx5_common_dev_create(eal_dev);
+ cdev = mlx5_common_dev_create(eal_dev, classes);
if (!cdev)
return -ENOMEM;
new_device = true;
struct rte_device *dev;
TAILQ_ENTRY(mlx5_common_device) next;
uint32_t classes_loaded;
+ void *ctx; /* Verbs/DV/DevX context. */
struct mlx5_common_dev_config config; /* Device configuration. */
};
/* mlx5_common_os.c */
-__rte_internal
-int mlx5_os_open_device(struct mlx5_common_device *cdev, void **ctx);
+int mlx5_os_open_device(struct mlx5_common_device *cdev, uint32_t classes);
#endif /* RTE_PMD_MLX5_COMMON_H_ */
mlx5_os_dealloc_pd;
mlx5_os_dereg_mr;
mlx5_os_get_ibv_dev; # WINDOWS_NO_EXPORT
- mlx5_os_open_device;
mlx5_os_reg_mr;
mlx5_os_umem_dereg;
mlx5_os_umem_reg;
#include <stdio.h>
#include <rte_mempool.h>
+#include <rte_bus_pci.h>
#include <rte_malloc.h>
#include <rte_errno.h>
#include "mlx5_malloc.h"
/**
- * Initialization routine for run-time dependency on external lib
+ * Initialization routine for run-time dependency on external lib.
*/
void
mlx5_glue_constructor(void)
}
/**
- * Allocate PD. Given a devx context object
+ * Allocate PD. Given a DevX context object
* return an mlx5-pd object.
*
* @param[in] ctx
void *
mlx5_os_alloc_pd(void *ctx)
{
- struct mlx5_pd *ppd = mlx5_malloc(MLX5_MEM_ZERO,
- sizeof(struct mlx5_pd), 0, SOCKET_ID_ANY);
+ struct mlx5_pd *ppd = mlx5_malloc(MLX5_MEM_ZERO, sizeof(struct mlx5_pd),
+ 0, SOCKET_ID_ANY);
if (!ppd)
return NULL;
* Pointer to mlx5_pd.
*
* @return
- * Zero if pd is released successfully, negative number otherwise.
+ * Zero if pd is released successfully, negative number otherwise.
*/
int
mlx5_os_dealloc_pd(void *pd)
*
* This function calls the Windows glue APIs to open a device.
*
- * @param dev
+ * @param cdev
* Pointer to mlx5 device structure.
- * @param ctx
- * Pointer to fill inside pointer to device context.
+ * @param classes
+ * Chosen classes come from user device arguments.
*
* @return
* 0 on success, a negative errno value otherwise and rte_errno is set.
*/
int
-mlx5_os_open_device(struct mlx5_common_device *cdev, void **ctx)
+mlx5_os_open_device(struct mlx5_common_device *cdev, uint32_t classes)
{
struct devx_device_bdf *devx_bdf_dev = NULL;
struct devx_device_bdf *devx_list;
struct mlx5_context *mlx5_ctx = NULL;
int n;
+ if (classes != MLX5_CLASS_ETH) {
+ DRV_LOG(ERR,
+ "The chosen classes are not supported on Windows.");
+ rte_errno = ENOTSUP;
+ return -rte_errno;
+ }
errno = 0;
devx_list = mlx5_glue->get_device_list(&n);
if (devx_list == NULL) {
goto error;
}
cdev->config.devx = 1;
- *ctx = (void *)mlx5_ctx;
+ cdev->ctx = mlx5_ctx;
mlx5_glue->free_device_list(devx_list);
return 0;
error:
struct mlx5_compress_priv {
TAILQ_ENTRY(mlx5_compress_priv) next;
- struct ibv_context *ctx; /* Device context. */
struct rte_compressdev *compressdev;
+ struct mlx5_common_device *cdev; /* Backend mlx5 device. */
void *uar;
uint32_t pdn; /* Protection Domain number. */
uint8_t min_block_size;
rte_errno = ENOMEM;
goto err;
}
- ret = mlx5_devx_cq_create(priv->ctx, &qp->cq, log_ops_n, &cq_attr,
+ ret = mlx5_devx_cq_create(priv->cdev->ctx, &qp->cq, log_ops_n, &cq_attr,
socket_id);
if (ret != 0) {
DRV_LOG(ERR, "Failed to create CQ.");
qp_attr.sq_size = RTE_BIT32(log_ops_n);
qp_attr.mmo = priv->mmo_decomp_qp && priv->mmo_comp_qp
&& priv->mmo_dma_qp;
- ret = mlx5_devx_qp_create(priv->ctx, &qp->qp, log_ops_n, &qp_attr,
+ ret = mlx5_devx_qp_create(priv->cdev->ctx, &qp->qp, log_ops_n, &qp_attr,
socket_id);
if (ret != 0) {
DRV_LOG(ERR, "Failed to create QP.");
struct mlx5dv_pd pd_info;
int ret;
- priv->pd = mlx5_glue->alloc_pd(priv->ctx);
+ priv->pd = mlx5_glue->alloc_pd(priv->cdev->ctx);
if (priv->pd == NULL) {
DRV_LOG(ERR, "Failed to allocate PD.");
return errno ? -errno : -ENOMEM;
{
if (mlx5_compress_pd_create(priv) != 0)
return -1;
- priv->uar = mlx5_devx_alloc_uar(priv->ctx, -1);
+ priv->uar = mlx5_devx_alloc_uar(priv->cdev->ctx, -1);
if (priv->uar == NULL || mlx5_os_get_devx_uar_reg_addr(priv->uar) ==
NULL) {
rte_errno = errno;
/* Iterate all the existing mlx5 devices. */
TAILQ_FOREACH(priv, &mlx5_compress_priv_list, next)
mlx5_free_mr_by_addr(&priv->mr_scache,
- priv->ctx->device->name,
+ mlx5_os_get_ctx_device_name
+ (priv->cdev->ctx),
addr, len);
pthread_mutex_unlock(&priv_list_lock);
break;
static int
mlx5_compress_dev_probe(struct mlx5_common_device *cdev)
{
- struct ibv_device *ibv;
struct rte_compressdev *compressdev;
- struct ibv_context *ctx;
struct mlx5_compress_priv *priv;
struct mlx5_hca_attr att = { 0 };
struct rte_compressdev_pmd_init_params init_params = {
.name = "",
.socket_id = cdev->dev->numa_node,
};
+ const char *ibdev_name = mlx5_os_get_ctx_device_name(cdev->ctx);
if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
DRV_LOG(ERR, "Non-primary process type is not supported.");
rte_errno = ENOTSUP;
return -rte_errno;
}
- ibv = mlx5_os_get_ibv_dev(cdev->dev);
- if (ibv == NULL)
- return -rte_errno;
- ctx = mlx5_glue->dv_open_device(ibv);
- if (ctx == NULL) {
- DRV_LOG(ERR, "Failed to open IB device \"%s\".", ibv->name);
- rte_errno = ENODEV;
- return -rte_errno;
- }
- if (mlx5_devx_cmd_query_hca_attr(ctx, &att) != 0 ||
+ if (mlx5_devx_cmd_query_hca_attr(cdev->ctx, &att) != 0 ||
((att.mmo_compress_sq_en == 0 || att.mmo_decompress_sq_en == 0 ||
att.mmo_dma_sq_en == 0) && (att.mmo_compress_qp_en == 0 ||
att.mmo_decompress_qp_en == 0 || att.mmo_dma_qp_en == 0))) {
DRV_LOG(ERR, "Not enough capabilities to support compress "
"operations, maybe old FW/OFED version?");
- claim_zero(mlx5_glue->close_device(ctx));
rte_errno = ENOTSUP;
return -ENOTSUP;
}
- compressdev = rte_compressdev_pmd_create(ibv->name, cdev->dev,
+ compressdev = rte_compressdev_pmd_create(ibdev_name, cdev->dev,
sizeof(*priv), &init_params);
if (compressdev == NULL) {
- DRV_LOG(ERR, "Failed to create device \"%s\".", ibv->name);
- claim_zero(mlx5_glue->close_device(ctx));
+ DRV_LOG(ERR, "Failed to create device \"%s\".", ibdev_name);
return -ENODEV;
}
DRV_LOG(INFO,
- "Compress device %s was created successfully.", ibv->name);
+ "Compress device %s was created successfully.", ibdev_name);
compressdev->dev_ops = &mlx5_compress_ops;
compressdev->dequeue_burst = mlx5_compress_dequeue_burst;
compressdev->enqueue_burst = mlx5_compress_enqueue_burst;
priv->mmo_comp_qp = att.mmo_compress_qp_en;
priv->mmo_dma_sq = att.mmo_dma_sq_en;
priv->mmo_dma_qp = att.mmo_dma_qp_en;
- priv->ctx = ctx;
+ priv->cdev = cdev;
priv->compressdev = compressdev;
priv->min_block_size = att.compress_min_block_size;
priv->qp_ts_format = att.qp_ts_format;
if (mlx5_compress_hw_global_prepare(priv) != 0) {
rte_compressdev_pmd_destroy(priv->compressdev);
- claim_zero(mlx5_glue->close_device(priv->ctx));
return -1;
}
if (mlx5_mr_btree_init(&priv->mr_scache.cache,
DRV_LOG(ERR, "Failed to allocate shared cache MR memory.");
mlx5_compress_hw_global_release(priv);
rte_compressdev_pmd_destroy(priv->compressdev);
- claim_zero(mlx5_glue->close_device(priv->ctx));
rte_errno = ENOMEM;
return -rte_errno;
}
mlx5_mr_release_cache(&priv->mr_scache);
mlx5_compress_hw_global_release(priv);
rte_compressdev_pmd_destroy(priv->compressdev);
- claim_zero(mlx5_glue->close_device(priv->ctx));
}
return 0;
}
for (umr = (struct mlx5_umr_wqe *)qp->qp_obj.umem_buf, i = 0;
i < qp->entries_n; i++, umr = RTE_PTR_ADD(umr, priv->wqe_set_size)) {
attr.klm_array = (struct mlx5_klm *)&umr->kseg[0];
- qp->mkey[i] = mlx5_devx_cmd_mkey_create(priv->ctx, &attr);
+ qp->mkey[i] = mlx5_devx_cmd_mkey_create(priv->cdev->ctx, &attr);
if (!qp->mkey[i])
goto error;
}
rte_errno = ENOMEM;
return -rte_errno;
}
- if (mlx5_devx_cq_create(priv->ctx, &qp->cq_obj, log_nb_desc,
+ if (mlx5_devx_cq_create(priv->cdev->ctx, &qp->cq_obj, log_nb_desc,
&cq_attr, socket_id) != 0) {
DRV_LOG(ERR, "Failed to create CQ.");
goto error;
attr.rq_size = 0;
attr.sq_size = RTE_BIT32(log_nb_desc);
attr.ts_format = mlx5_ts_format_conv(priv->qp_ts_format);
- ret = mlx5_devx_qp_create(priv->ctx, &qp->qp_obj, log_nb_desc, &attr,
- socket_id);
+ ret = mlx5_devx_qp_create(priv->cdev->ctx, &qp->qp_obj, log_nb_desc,
+ &attr, socket_id);
if (ret) {
DRV_LOG(ERR, "Failed to create QP.");
goto error;
struct mlx5dv_pd pd_info;
int ret;
- priv->pd = mlx5_glue->alloc_pd(priv->ctx);
+ priv->pd = mlx5_glue->alloc_pd(priv->cdev->ctx);
if (priv->pd == NULL) {
DRV_LOG(ERR, "Failed to allocate PD.");
return errno ? -errno : -ENOMEM;
{
if (mlx5_crypto_pd_create(priv) != 0)
return -1;
- priv->uar = mlx5_devx_alloc_uar(priv->ctx, -1);
+ priv->uar = mlx5_devx_alloc_uar(priv->cdev->ctx, -1);
if (priv->uar)
priv->uar_addr = mlx5_os_get_devx_uar_reg_addr(priv->uar);
if (priv->uar == NULL || priv->uar_addr == NULL) {
/* Iterate all the existing mlx5 devices. */
TAILQ_FOREACH(priv, &mlx5_crypto_priv_list, next)
mlx5_free_mr_by_addr(&priv->mr_scache,
- priv->ctx->device->name,
+ mlx5_os_get_ctx_device_name
+ (priv->cdev->ctx),
addr, len);
pthread_mutex_unlock(&priv_list_lock);
break;
static int
mlx5_crypto_dev_probe(struct mlx5_common_device *cdev)
{
- struct ibv_device *ibv;
struct rte_cryptodev *crypto_dev;
- struct ibv_context *ctx;
struct mlx5_devx_obj *login;
struct mlx5_crypto_priv *priv;
struct mlx5_crypto_devarg_params devarg_prms = { 0 };
.max_nb_queue_pairs =
RTE_CRYPTODEV_PMD_DEFAULT_MAX_NB_QUEUE_PAIRS,
};
+ const char *ibdev_name = mlx5_os_get_ctx_device_name(cdev->ctx);
uint16_t rdmw_wqe_size;
int ret;
rte_errno = ENOTSUP;
return -rte_errno;
}
- ibv = mlx5_os_get_ibv_dev(cdev->dev);
- if (ibv == NULL)
- return -rte_errno;
- ctx = mlx5_glue->dv_open_device(ibv);
- if (ctx == NULL) {
- DRV_LOG(ERR, "Failed to open IB device \"%s\".", ibv->name);
- rte_errno = ENODEV;
- return -rte_errno;
- }
- if (mlx5_devx_cmd_query_hca_attr(ctx, &attr) != 0 ||
+ if (mlx5_devx_cmd_query_hca_attr(cdev->ctx, &attr) != 0 ||
attr.crypto == 0 || attr.aes_xts == 0) {
DRV_LOG(ERR, "Not enough capabilities to support crypto "
"operations, maybe old FW/OFED version?");
- claim_zero(mlx5_glue->close_device(ctx));
rte_errno = ENOTSUP;
return -ENOTSUP;
}
ret = mlx5_crypto_parse_devargs(cdev->dev->devargs, &devarg_prms);
if (ret) {
DRV_LOG(ERR, "Failed to parse devargs.");
- claim_zero(mlx5_glue->close_device(ctx));
return -rte_errno;
}
- login = mlx5_devx_cmd_create_crypto_login_obj(ctx,
+ login = mlx5_devx_cmd_create_crypto_login_obj(cdev->ctx,
&devarg_prms.login_attr);
if (login == NULL) {
DRV_LOG(ERR, "Failed to configure login.");
- claim_zero(mlx5_glue->close_device(ctx));
return -rte_errno;
}
- crypto_dev = rte_cryptodev_pmd_create(ibv->name, cdev->dev,
+ crypto_dev = rte_cryptodev_pmd_create(ibdev_name, cdev->dev,
&init_params);
if (crypto_dev == NULL) {
- DRV_LOG(ERR, "Failed to create device \"%s\".", ibv->name);
- claim_zero(mlx5_glue->close_device(ctx));
+ DRV_LOG(ERR, "Failed to create device \"%s\".", ibdev_name);
return -ENODEV;
}
DRV_LOG(INFO,
- "Crypto device %s was created successfully.", ibv->name);
+ "Crypto device %s was created successfully.", ibdev_name);
crypto_dev->dev_ops = &mlx5_crypto_ops;
crypto_dev->dequeue_burst = mlx5_crypto_dequeue_burst;
crypto_dev->enqueue_burst = mlx5_crypto_enqueue_burst;
crypto_dev->feature_flags = MLX5_CRYPTO_FEATURE_FLAGS;
crypto_dev->driver_id = mlx5_crypto_driver_id;
priv = crypto_dev->data->dev_private;
- priv->ctx = ctx;
+ priv->cdev = cdev;
priv->login_obj = login;
priv->crypto_dev = crypto_dev;
priv->qp_ts_format = attr.qp_ts_format;
if (mlx5_crypto_hw_global_prepare(priv) != 0) {
rte_cryptodev_pmd_destroy(priv->crypto_dev);
- claim_zero(mlx5_glue->close_device(priv->ctx));
return -1;
}
if (mlx5_mr_btree_init(&priv->mr_scache.cache,
DRV_LOG(ERR, "Failed to allocate shared cache MR memory.");
mlx5_crypto_hw_global_release(priv);
rte_cryptodev_pmd_destroy(priv->crypto_dev);
- claim_zero(mlx5_glue->close_device(priv->ctx));
rte_errno = ENOMEM;
return -rte_errno;
}
mlx5_crypto_hw_global_release(priv);
rte_cryptodev_pmd_destroy(priv->crypto_dev);
claim_zero(mlx5_devx_cmd_destroy(priv->login_obj));
- claim_zero(mlx5_glue->close_device(priv->ctx));
}
return 0;
}
struct mlx5_crypto_priv {
TAILQ_ENTRY(mlx5_crypto_priv) next;
- struct ibv_context *ctx; /* Device context. */
+ struct mlx5_common_device *cdev; /* Backend mlx5 device. */
struct rte_cryptodev *crypto_dev;
void *uar; /* User Access Region. */
volatile uint64_t *uar_addr;
return NULL;
}
memcpy(&dek_attr.key, cipher_ctx->key.data, cipher_ctx->key.length);
- dek->obj = mlx5_devx_cmd_create_dek_obj(ctx->priv->ctx, &dek_attr);
+ dek->obj = mlx5_devx_cmd_create_dek_obj(ctx->priv->cdev->ctx,
+ &dek_attr);
if (dek->obj == NULL) {
rte_free(dek);
return NULL;
mlx5_read_clock(struct rte_eth_dev *dev, uint64_t *clock)
{
struct mlx5_priv *priv = dev->data->dev_private;
- struct ibv_context *ctx = priv->sh->ctx;
+ struct ibv_context *ctx = priv->sh->cdev->ctx;
struct ibv_values_ex values;
int err = 0;
struct rte_eth_dev *dev;
uint32_t tmp;
- if (mlx5_glue->get_async_event(sh->ctx, &event))
+ if (mlx5_glue->get_async_event(sh->cdev->ctx, &event))
break;
/* Retrieve and check IB port index. */
tmp = (uint32_t)event.element.port_num;
struct ibv_device_attr device_attr;
struct mlx5_priv *priv = dev->data->dev_private;
- if (mlx5_glue->query_device(priv->sh->ctx, &device_attr) == EIO)
+ if (mlx5_glue->query_device(priv->sh->cdev->ctx, &device_attr) == EIO)
return 1;
return 0;
}
case MLX5_MP_REQ_VERBS_CMD_FD:
mp_init_msg(&priv->mp_id, &mp_res, param->type);
mp_res.num_fds = 1;
- mp_res.fds[0] = ((struct ibv_context *)priv->sh->ctx)->cmd_fd;
+ mp_res.fds[0] = ((struct ibv_context *)cdev->ctx)->cmd_fd;
res->result = 0;
ret = rte_mp_reply(&mp_res, peer);
break;
mp_init_msg(&priv->mp_id, &mp_req, type);
if (type == MLX5_MP_REQ_START_RXTX) {
mp_req.num_fds = 1;
- mp_req.fds[0] = ((struct ibv_context *)priv->sh->ctx)->cmd_fd;
+ mp_req.fds[0] =
+ ((struct ibv_context *)priv->sh->cdev->ctx)->cmd_fd;
}
ret = rte_mp_request_sync(&mp_req, &mp_rep, &ts);
if (ret) {
metadata_reg_c_0, 0xffff);
}
#endif
- matcher = mlx5_glue->dv_create_flow_matcher(priv->sh->ctx,
+ matcher = mlx5_glue->dv_create_flow_matcher(priv->sh->cdev->ctx,
&dv_attr, tbl);
if (matcher) {
priv->sh->misc5_cap = 1;
void *domain;
/* Reference counter is zero, we should initialize structures. */
- domain = mlx5_glue->dr_create_domain(sh->ctx,
+ domain = mlx5_glue->dr_create_domain(sh->cdev->ctx,
MLX5DV_DR_DOMAIN_TYPE_NIC_RX);
if (!domain) {
DRV_LOG(ERR, "ingress mlx5dv_dr_create_domain failed");
goto error;
}
sh->rx_domain = domain;
- domain = mlx5_glue->dr_create_domain(sh->ctx,
+ domain = mlx5_glue->dr_create_domain(sh->cdev->ctx,
MLX5DV_DR_DOMAIN_TYPE_NIC_TX);
if (!domain) {
DRV_LOG(ERR, "egress mlx5dv_dr_create_domain failed");
sh->tx_domain = domain;
#ifdef HAVE_MLX5DV_DR_ESWITCH
if (priv->config.dv_esw_en) {
- domain = mlx5_glue->dr_create_domain
- (sh->ctx, MLX5DV_DR_DOMAIN_TYPE_FDB);
+ domain = mlx5_glue->dr_create_domain(sh->cdev->ctx,
+ MLX5DV_DR_DOMAIN_TYPE_FDB);
if (!domain) {
DRV_LOG(ERR, "FDB mlx5dv_dr_create_domain failed");
err = errno;
mlx5_queue_counter_id_prepare(struct rte_eth_dev *dev)
{
struct mlx5_priv *priv = dev->data->dev_private;
- void *ctx = priv->sh->ctx;
+ void *ctx = priv->sh->cdev->ctx;
priv->q_counters = mlx5_devx_cmd_queue_counter_alloc(ctx);
if (!priv->q_counters) {
#ifdef HAVE_IBV_DEVICE_STRIDING_RQ_SUPPORT
dv_attr.comp_mask |= MLX5DV_CONTEXT_MASK_STRIDING_RQ;
#endif
- mlx5_glue->dv_query_device(sh->ctx, &dv_attr);
+ mlx5_glue->dv_query_device(sh->cdev->ctx, &dv_attr);
if (dv_attr.flags & MLX5DV_CONTEXT_FLAGS_MPW_ALLOWED) {
if (dv_attr.flags & MLX5DV_CONTEXT_FLAGS_ENHANCED_MPW) {
DRV_LOG(DEBUG, "enhanced MPW is supported");
#endif
config->mpls_en = mpls_en;
/* Check port status. */
- err = mlx5_glue->query_port(sh->ctx, spawn->phys_port, &port_attr);
+ err = mlx5_glue->query_port(sh->cdev->ctx, spawn->phys_port,
+ &port_attr);
if (err) {
DRV_LOG(ERR, "port query failed: %s", strerror(err));
goto error;
* register is defined by mask.
*/
if (switch_info->representor || switch_info->master) {
- err = mlx5_glue->devx_port_query(sh->ctx,
+ err = mlx5_glue->devx_port_query(sh->cdev->ctx,
spawn->phys_port,
&vport_info);
if (err) {
config->mps == MLX5_MPW ? "legacy " : "",
config->mps != MLX5_MPW_DISABLED ? "enabled" : "disabled");
if (sh->devx) {
- err = mlx5_devx_cmd_query_hca_attr(sh->ctx, &config->hca_attr);
+ err = mlx5_devx_cmd_query_hca_attr(sh->cdev->ctx,
+ &config->hca_attr);
if (err) {
err = -err;
goto error;
err = config->hca_attr.access_register_user ?
mlx5_devx_cmd_register_read
- (sh->ctx, MLX5_REGISTER_ID_MTUTC, 0,
+ (sh->cdev->ctx, MLX5_REGISTER_ID_MTUTC, 0,
reg, MLX5_ST_SZ_DW(register_mtutc)) : ENOTSUP;
if (!err) {
uint32_t ts_mode;
/**
* Match PCI information for possible slaves of bonding device.
*
- * @param[in] ibv_dev
- * Pointer to Infiniband device structure.
+ * @param[in] ibdev_name
+ * Name of Infiniband device.
* @param[in] pci_dev
* Pointer to primary PCI address structure to match.
* @param[in] nl_rdma
* Netlink RDMA group socket handle.
* @param[in] owner
- * Rerepsentor owner PF index.
+ * Representor owner PF index.
* @param[out] bond_info
* Pointer to bonding information.
*
* positive index of slave PF in bonding.
*/
static int
-mlx5_device_bond_pci_match(const struct ibv_device *ibv_dev,
+mlx5_device_bond_pci_match(const char *ibdev_name,
const struct rte_pci_addr *pci_dev,
int nl_rdma, uint16_t owner,
struct mlx5_bond_info *bond_info)
int ret;
/*
- * Try to get master device name. If something goes
- * wrong suppose the lack of kernel support and no
- * bonding devices.
+ * Try to get master device name. If something goes wrong suppose
+ * the lack of kernel support and no bonding devices.
*/
memset(bond_info, 0, sizeof(*bond_info));
if (nl_rdma < 0)
return -1;
- if (!strstr(ibv_dev->name, "bond"))
+ if (!strstr(ibdev_name, "bond"))
return -1;
- np = mlx5_nl_portnum(nl_rdma, ibv_dev->name);
+ np = mlx5_nl_portnum(nl_rdma, ibdev_name);
if (!np)
return -1;
/*
- * The Master device might not be on the predefined
- * port (not on port index 1, it is not garanted),
- * we have to scan all Infiniband device port and
- * find master.
+ * The master device might not be on the predefined port(not on port
+ * index 1, it is not guaranteed), we have to scan all Infiniband
+ * device ports and find master.
*/
for (i = 1; i <= np; ++i) {
/* Check whether Infiniband port is populated. */
- ifindex = mlx5_nl_ifindex(nl_rdma, ibv_dev->name, i);
+ ifindex = mlx5_nl_ifindex(nl_rdma, ibdev_name, i);
if (!ifindex)
continue;
if (!if_indextoname(ifindex, ifname))
snprintf(tmp_str, sizeof(tmp_str),
"/sys/class/net/%s", ifname);
if (mlx5_get_pci_addr(tmp_str, &pci_addr)) {
- DRV_LOG(WARNING, "can not get PCI address"
- " for netdev \"%s\"", ifname);
+ DRV_LOG(WARNING,
+ "Cannot get PCI address for netdev \"%s\".",
+ ifname);
continue;
}
/* Slave interface PCI address match found. */
* 0 on success, a negative errno value otherwise and rte_errno is set.
*/
static int
-mlx5_os_pci_probe_pf(struct mlx5_common_device *cdev, void *ctx,
+mlx5_os_pci_probe_pf(struct mlx5_common_device *cdev,
struct rte_eth_devargs *req_eth_da,
uint16_t owner_id)
{
struct rte_pci_addr pci_addr;
DRV_LOG(DEBUG, "Checking device \"%s\"", ibv_list[ret]->name);
- bd = mlx5_device_bond_pci_match
- (ibv_list[ret], &owner_pci, nl_rdma, owner_id,
- &bond_info);
+ bd = mlx5_device_bond_pci_match(ibv_list[ret]->name, &owner_pci,
+ nl_rdma, owner_id, &bond_info);
if (bd >= 0) {
/*
* Bonding device detected. Only one match is allowed,
/* Amend owner pci address if owner PF ID specified. */
if (eth_da.nb_representor_ports)
owner_pci.function += owner_id;
- DRV_LOG(INFO, "PCI information matches for"
- " slave %d bonding device \"%s\"",
- bd, ibv_list[ret]->name);
+ DRV_LOG(INFO,
+ "PCI information matches for slave %d bonding device \"%s\"",
+ bd, ibv_list[ret]->name);
ibv_match[nd++] = ibv_list[ret];
break;
} else {
list[ns].max_port = np;
list[ns].phys_port = i;
list[ns].phys_dev_name = ibv_match[0]->name;
- list[ns].ctx = ctx;
list[ns].eth_dev = NULL;
list[ns].pci_dev = pci_dev;
list[ns].cdev = cdev;
list[ns].max_port = 1;
list[ns].phys_port = 1;
list[ns].phys_dev_name = ibv_match[i]->name;
- list[ns].ctx = ctx;
list[ns].eth_dev = NULL;
list[ns].pci_dev = pci_dev;
list[ns].cdev = cdev;
}
ret = -1;
if (nl_route >= 0)
- ret = mlx5_nl_switch_info
- (nl_route,
- list[ns].ifindex,
- &list[ns].info);
+ ret = mlx5_nl_switch_info(nl_route,
+ list[ns].ifindex,
+ &list[ns].info);
if (ret || (!list[ns].info.representor &&
!list[ns].info.master)) {
/*
}
/*
* New kernels may add the switch_id attribute for the case
- * there is no E-Switch and we wrongly recognized the
- * only device as master. Override this if there is the
- * single device with single port and new device name
- * format present.
+ * there is no E-Switch and we wrongly recognized the only
+ * device as master. Override this if there is the single
+ * device with single port and new device name format present.
*/
if (nd == 1 &&
list[0].info.name_type == MLX5_PHYS_PORT_NAME_TYPE_UPLINK) {
* 0 on success, a negative errno value otherwise and rte_errno is set.
*/
static int
-mlx5_os_pci_probe(struct mlx5_common_device *cdev, void *ctx)
+mlx5_os_pci_probe(struct mlx5_common_device *cdev)
{
struct rte_pci_device *pci_dev = RTE_DEV_TO_PCI(cdev->dev);
struct rte_eth_devargs eth_da = { .nb_ports = 0 };
if (eth_da.nb_ports > 0) {
/* Iterate all port if devargs pf is range: "pf[0-1]vf[...]". */
for (p = 0; p < eth_da.nb_ports; p++) {
- ret = mlx5_os_pci_probe_pf(cdev, ctx, ð_da,
+ ret = mlx5_os_pci_probe_pf(cdev, ð_da,
eth_da.ports[p]);
if (ret)
break;
mlx5_net_remove(cdev);
}
} else {
- ret = mlx5_os_pci_probe_pf(cdev, ctx, ð_da, 0);
+ ret = mlx5_os_pci_probe_pf(cdev, ð_da, 0);
}
return ret;
}
/* Probe a single SF device on auxiliary bus, no representor support. */
static int
-mlx5_os_auxiliary_probe(struct mlx5_common_device *cdev, void *ctx)
+mlx5_os_auxiliary_probe(struct mlx5_common_device *cdev)
{
struct rte_eth_devargs eth_da = { .nb_ports = 0 };
struct mlx5_dev_config config;
/* Init spawn data. */
spawn.max_port = 1;
spawn.phys_port = 1;
- spawn.ctx = ctx;
- spawn.phys_dev_name = mlx5_os_get_ctx_device_name(ctx);
+ spawn.phys_dev_name = mlx5_os_get_ctx_device_name(cdev->ctx);
ret = mlx5_auxiliary_get_ifindex(dev->name);
if (ret < 0) {
DRV_LOG(ERR, "failed to get ethdev ifindex: %s", dev->name);
mlx5_os_net_probe(struct mlx5_common_device *cdev)
{
int ret;
- void *ctx = NULL;
- if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
- ret = mlx5_os_open_device(cdev, &ctx);
- if (ret) {
- DRV_LOG(ERR, "Fail to open device %s", cdev->dev->name);
- return -rte_errno;
- }
+ if (rte_eal_process_type() == RTE_PROC_PRIMARY)
mlx5_pmd_socket_init();
- }
ret = mlx5_init_once();
if (ret) {
DRV_LOG(ERR, "Unable to init PMD global data: %s",
strerror(rte_errno));
- if (ctx != NULL)
- claim_zero(mlx5_glue->close_device(ctx));
return -rte_errno;
}
if (mlx5_dev_is_pci(cdev->dev))
- return mlx5_os_pci_probe(cdev, ctx);
+ return mlx5_os_pci_probe(cdev);
else
- return mlx5_os_auxiliary_probe(cdev, ctx);
+ return mlx5_os_auxiliary_probe(cdev);
}
/**
{
int ret;
int flags;
+ struct ibv_context *ctx = sh->cdev->ctx;
sh->intr_handle.fd = -1;
- flags = fcntl(((struct ibv_context *)sh->ctx)->async_fd, F_GETFL);
- ret = fcntl(((struct ibv_context *)sh->ctx)->async_fd,
- F_SETFL, flags | O_NONBLOCK);
+ flags = fcntl(ctx->async_fd, F_GETFL);
+ ret = fcntl(ctx->async_fd, F_SETFL, flags | O_NONBLOCK);
if (ret) {
DRV_LOG(INFO, "failed to change file descriptor async event"
" queue");
} else {
- sh->intr_handle.fd = ((struct ibv_context *)sh->ctx)->async_fd;
+ sh->intr_handle.fd = ctx->async_fd;
sh->intr_handle.type = RTE_INTR_HANDLE_EXT;
if (rte_intr_callback_register(&sh->intr_handle,
mlx5_dev_interrupt_handler, sh)) {
if (sh->devx) {
#ifdef HAVE_IBV_DEVX_ASYNC
sh->intr_handle_devx.fd = -1;
- sh->devx_comp =
- (void *)mlx5_glue->devx_create_cmd_comp(sh->ctx);
+ sh->devx_comp = (void *)mlx5_glue->devx_create_cmd_comp(ctx);
struct mlx5dv_devx_cmd_comp *devx_comp = sh->devx_comp;
if (!devx_comp) {
DRV_LOG(INFO, "failed to allocate devx_comp.");
cq_attr.mlx5.flags |= MLX5DV_CQ_INIT_ATTR_FLAGS_CQE_PAD;
}
#endif
- return mlx5_glue->cq_ex_to_cq(mlx5_glue->dv_create_cq(priv->sh->ctx,
- &cq_attr.ibv,
- &cq_attr.mlx5));
+ return mlx5_glue->cq_ex_to_cq(mlx5_glue->dv_create_cq
+ (priv->sh->cdev->ctx,
+ &cq_attr.ibv,
+ &cq_attr.mlx5));
}
/**
.two_byte_shift_en = MLX5_MPRQ_TWO_BYTE_SHIFT,
};
}
- rxq_obj->wq = mlx5_glue->dv_create_wq(priv->sh->ctx, &wq_attr.ibv,
+ rxq_obj->wq = mlx5_glue->dv_create_wq(priv->sh->cdev->ctx, &wq_attr.ibv,
&wq_attr.mlx5);
#else
- rxq_obj->wq = mlx5_glue->create_wq(priv->sh->ctx, &wq_attr.ibv);
+ rxq_obj->wq = mlx5_glue->create_wq(priv->sh->cdev->ctx, &wq_attr.ibv);
#endif
if (rxq_obj->wq) {
/*
tmpl->rxq_ctrl = rxq_ctrl;
if (rxq_ctrl->irq) {
tmpl->ibv_channel =
- mlx5_glue->create_comp_channel(priv->sh->ctx);
+ mlx5_glue->create_comp_channel(priv->sh->cdev->ctx);
if (!tmpl->ibv_channel) {
DRV_LOG(ERR, "Port %u: comp channel creation failure.",
dev->data->port_id);
/* Finalise indirection table. */
for (j = 0; i != (unsigned int)(1 << log_n); ++j, ++i)
wq[i] = wq[j];
- ind_tbl->ind_table = mlx5_glue->create_rwq_ind_table(priv->sh->ctx,
- &(struct ibv_rwq_ind_table_init_attr){
- .log_ind_tbl_size = log_n,
- .ind_tbl = wq,
- .comp_mask = 0,
- });
+ ind_tbl->ind_table = mlx5_glue->create_rwq_ind_table
+ (priv->sh->cdev->ctx,
+ &(struct ibv_rwq_ind_table_init_attr){
+ .log_ind_tbl_size = log_n,
+ .ind_tbl = wq,
+ .comp_mask = 0,
+ });
if (!ind_tbl->ind_table) {
rte_errno = errno;
return -rte_errno;
}
#endif
qp = mlx5_glue->dv_create_qp
- (priv->sh->ctx,
+ (priv->sh->cdev->ctx,
&(struct ibv_qp_init_attr_ex){
.qp_type = IBV_QPT_RAW_PACKET,
.comp_mask =
&qp_init_attr);
#else
qp = mlx5_glue->create_qp_ex
- (priv->sh->ctx,
+ (priv->sh->cdev->ctx,
&(struct ibv_qp_init_attr_ex){
.qp_type = IBV_QPT_RAW_PACKET,
.comp_mask =
mlx5_rxq_ibv_obj_drop_create(struct rte_eth_dev *dev)
{
struct mlx5_priv *priv = dev->data->dev_private;
- struct ibv_context *ctx = priv->sh->ctx;
+ struct ibv_context *ctx = priv->sh->cdev->ctx;
struct mlx5_rxq_obj *rxq = priv->drop_queue.rxq;
if (rxq)
goto error;
rxq = priv->drop_queue.rxq;
ind_tbl = mlx5_glue->create_rwq_ind_table
- (priv->sh->ctx,
+ (priv->sh->cdev->ctx,
&(struct ibv_rwq_ind_table_init_attr){
.log_ind_tbl_size = 0,
.ind_tbl = (struct ibv_wq **)&rxq->wq,
rte_errno = errno;
goto error;
}
- hrxq->qp = mlx5_glue->create_qp_ex(priv->sh->ctx,
+ hrxq->qp = mlx5_glue->create_qp_ex(priv->sh->cdev->ctx,
&(struct ibv_qp_init_attr_ex){
.qp_type = IBV_QPT_RAW_PACKET,
.comp_mask = IBV_QP_INIT_ATTR_PD |
qp_attr.max_tso_header = txq_ctrl->max_tso_header;
qp_attr.comp_mask |= IBV_QP_INIT_ATTR_MAX_TSO_HEADER;
}
- qp_obj = mlx5_glue->create_qp_ex(priv->sh->ctx, &qp_attr);
+ qp_obj = mlx5_glue->create_qp_ex(priv->sh->cdev->ctx, &qp_attr);
if (qp_obj == NULL) {
DRV_LOG(ERR, "Port %u Tx queue %u QP creation failure.",
dev->data->port_id, idx);
}
cqe_n = desc / MLX5_TX_COMP_THRESH +
1 + MLX5_TX_COMP_THRESH_INLINE_DIV;
- txq_obj->cq = mlx5_glue->create_cq(priv->sh->ctx, cqe_n, NULL, NULL, 0);
+ txq_obj->cq = mlx5_glue->create_cq(priv->sh->cdev->ctx, cqe_n,
+ NULL, NULL, 0);
if (txq_obj->cq == NULL) {
DRV_LOG(ERR, "Port %u Tx queue %u CQ creation failure.",
dev->data->port_id, idx);
#if defined(HAVE_IBV_DEVICE_TUNNEL_SUPPORT) && defined(HAVE_IBV_FLOW_DV_SUPPORT)
struct mlx5_priv *priv = dev->data->dev_private;
struct mlx5_dev_ctx_shared *sh = priv->sh;
- struct ibv_context *ctx = sh->ctx;
+ struct ibv_context *ctx = sh->cdev->ctx;
struct mlx5dv_qp_init_attr qp_init_attr = {0};
struct {
struct ibv_cq_init_attr_ex ibv;
* start after the common header that with the length of a DW(u32).
*/
node.sample[1].flow_match_sample_field_base_offset = sizeof(uint32_t);
- prf->obj = mlx5_devx_cmd_create_flex_parser(priv->sh->ctx, &node);
+ prf->obj = mlx5_devx_cmd_create_flex_parser(priv->sh->cdev->ctx, &node);
if (!prf->obj) {
DRV_LOG(ERR, "Failed to create flex parser node object.");
return (rte_errno == 0) ? -ENODEV : -rte_errno;
*/
uar_mapping = 0;
#endif
- sh->tx_uar = mlx5_glue->devx_alloc_uar(sh->ctx, uar_mapping);
+ sh->tx_uar = mlx5_glue->devx_alloc_uar(sh->cdev->ctx,
+ uar_mapping);
#ifdef MLX5DV_UAR_ALLOC_TYPE_NC
if (!sh->tx_uar &&
uar_mapping == MLX5DV_UAR_ALLOC_TYPE_BF) {
*/
DRV_LOG(DEBUG, "Failed to allocate Tx DevX UAR (BF)");
uar_mapping = MLX5DV_UAR_ALLOC_TYPE_NC;
- sh->tx_uar = mlx5_glue->devx_alloc_uar
- (sh->ctx, uar_mapping);
+ sh->tx_uar = mlx5_glue->devx_alloc_uar(sh->cdev->ctx,
+ uar_mapping);
} else if (!sh->tx_uar &&
uar_mapping == MLX5DV_UAR_ALLOC_TYPE_NC) {
if (config->dbnc == MLX5_TXDB_NCACHED)
*/
DRV_LOG(DEBUG, "Failed to allocate Tx DevX UAR (NC)");
uar_mapping = MLX5DV_UAR_ALLOC_TYPE_BF;
- sh->tx_uar = mlx5_glue->devx_alloc_uar
- (sh->ctx, uar_mapping);
+ sh->tx_uar = mlx5_glue->devx_alloc_uar(sh->cdev->ctx,
+ uar_mapping);
}
#endif
if (!sh->tx_uar) {
}
for (retry = 0; retry < MLX5_ALLOC_UAR_RETRY; ++retry) {
uar_mapping = 0;
- sh->devx_rx_uar = mlx5_glue->devx_alloc_uar
- (sh->ctx, uar_mapping);
+ sh->devx_rx_uar = mlx5_glue->devx_alloc_uar(sh->cdev->ctx,
+ uar_mapping);
#ifdef MLX5DV_UAR_ALLOC_TYPE_NC
if (!sh->devx_rx_uar &&
uar_mapping == MLX5DV_UAR_ALLOC_TYPE_BF) {
DRV_LOG(DEBUG, "Failed to allocate Rx DevX UAR (BF)");
uar_mapping = MLX5DV_UAR_ALLOC_TYPE_NC;
sh->devx_rx_uar = mlx5_glue->devx_alloc_uar
- (sh->ctx, uar_mapping);
+ (sh->cdev->ctx, uar_mapping);
}
#endif
if (!sh->devx_rx_uar) {
*/
struct mlx5_dev_ctx_shared *
mlx5_alloc_shared_dev_ctx(const struct mlx5_dev_spawn_data *spawn,
- const struct mlx5_dev_config *config)
+ const struct mlx5_dev_config *config)
{
struct mlx5_dev_ctx_shared *sh;
int err = 0;
pthread_mutex_lock(&mlx5_dev_ctx_list_mutex);
/* Search for IB context by device name. */
LIST_FOREACH(sh, &mlx5_dev_ctx_list, next) {
- if (!strcmp(sh->ibdev_name,
- mlx5_os_get_ctx_device_name(spawn->ctx))) {
+ if (!strcmp(sh->ibdev_name, spawn->phys_dev_name)) {
sh->refcnt++;
goto exit;
}
sh->numa_node = spawn->cdev->dev->numa_node;
sh->cdev = spawn->cdev;
sh->devx = sh->cdev->config.devx;
- sh->ctx = spawn->ctx;
if (spawn->bond_info)
sh->bond = *spawn->bond_info;
- err = mlx5_os_get_dev_attr(sh->ctx, &sh->device_attr);
+ err = mlx5_os_get_dev_attr(sh->cdev->ctx, &sh->device_attr);
if (err) {
DRV_LOG(DEBUG, "mlx5_os_get_dev_attr() failed");
goto error;
sh->refcnt = 1;
sh->max_port = spawn->max_port;
sh->reclaim_mode = config->reclaim_mode;
- strncpy(sh->ibdev_name, mlx5_os_get_ctx_device_name(sh->ctx),
+ strncpy(sh->ibdev_name, mlx5_os_get_ctx_device_name(sh->cdev->ctx),
sizeof(sh->ibdev_name) - 1);
- strncpy(sh->ibdev_path, mlx5_os_get_ctx_device_path(sh->ctx),
+ strncpy(sh->ibdev_path, mlx5_os_get_ctx_device_path(sh->cdev->ctx),
sizeof(sh->ibdev_path) - 1);
/*
* Setting port_id to max unallowed value means
sh->port[i].ih_port_id = RTE_MAX_ETHPORTS;
sh->port[i].devx_ih_port_id = RTE_MAX_ETHPORTS;
}
- sh->pd = mlx5_os_alloc_pd(sh->ctx);
+ sh->pd = mlx5_os_alloc_pd(sh->cdev->ctx);
if (sh->pd == NULL) {
DRV_LOG(ERR, "PD allocation failure");
err = ENOMEM;
DRV_LOG(ERR, "Fail to extract pdn from PD");
goto error;
}
- sh->td = mlx5_devx_cmd_create_td(sh->ctx);
+ sh->td = mlx5_devx_cmd_create_td(sh->cdev->ctx);
if (!sh->td) {
DRV_LOG(ERR, "TD allocation failure");
err = ENOMEM;
goto error;
}
tis_attr.transport_domain = sh->td->id;
- sh->tis = mlx5_devx_cmd_create_tis(sh->ctx, &tis_attr);
+ sh->tis = mlx5_devx_cmd_create_tis(sh->cdev->ctx, &tis_attr);
if (!sh->tis) {
DRV_LOG(ERR, "TIS allocation failure");
err = ENOMEM;
mlx5_glue->devx_free_uar(sh->tx_uar);
if (sh->pd)
claim_zero(mlx5_os_dealloc_pd(sh->pd));
- if (sh->ctx)
- claim_zero(mlx5_glue->close_device(sh->ctx));
mlx5_free(sh);
MLX5_ASSERT(err > 0);
rte_errno = err;
claim_zero(mlx5_devx_cmd_destroy(sh->td));
if (sh->devx_rx_uar)
mlx5_glue->devx_free_uar(sh->devx_rx_uar);
- if (sh->ctx)
- claim_zero(mlx5_glue->close_device(sh->ctx));
MLX5_ASSERT(sh->geneve_tlv_option_resource == NULL);
pthread_mutex_destroy(&sh->txpp.mutex);
mlx5_free(sh);
return 0;
DRV_LOG(DEBUG, "port %u closing device \"%s\"",
dev->data->port_id,
- ((priv->sh->ctx != NULL) ?
- mlx5_os_get_ctx_device_name(priv->sh->ctx) : ""));
+ ((priv->sh->cdev->ctx != NULL) ?
+ mlx5_os_get_ctx_device_name(priv->sh->cdev->ctx) : ""));
/*
* If default mreg copy action is removed at the stop stage,
* the search will return none and nothing will be done anymore.
int pf_bond; /**< bonding device PF index. < 0 - no bonding */
struct mlx5_switch_info info; /**< Switch information. */
const char *phys_dev_name; /**< Name of physical device. */
- void *ctx; /**< Associated physical device context. */
struct rte_eth_dev *eth_dev; /**< Associated Ethernet device. */
struct rte_pci_device *pci_dev; /**< Backend PCI device. */
struct mlx5_common_device *cdev; /**< Backend common device. */
uint32_t max_port; /* Maximal IB device port index. */
struct mlx5_bond_info bond; /* Bonding information. */
struct mlx5_common_device *cdev; /* Backend mlx5 device. */
- void *ctx; /* Verbs/DV/DevX context. */
void *pd; /* Protection Domain. */
uint32_t pdn; /* Protection Domain number. */
uint32_t tdn; /* Transport Domain number. */
rq_attr.wq_attr.pd = priv->sh->pdn;
rq_attr.counter_set_id = priv->counter_set_id;
/* Create RQ using DevX API. */
- return mlx5_devx_rq_create(priv->sh->ctx, &rxq_ctrl->obj->rq_obj,
+ return mlx5_devx_rq_create(priv->sh->cdev->ctx, &rxq_ctrl->obj->rq_obj,
wqe_size, log_desc_n, &rq_attr,
rxq_ctrl->socket);
}
cq_attr.uar_page_id = mlx5_os_get_devx_uar_page_id(sh->devx_rx_uar);
log_cqe_n = log2above(cqe_n);
/* Create CQ using DevX API. */
- ret = mlx5_devx_cq_create(sh->ctx, &rxq_ctrl->obj->cq_obj, log_cqe_n,
- &cq_attr, sh->numa_node);
+ ret = mlx5_devx_cq_create(sh->cdev->ctx, &rxq_ctrl->obj->cq_obj,
+ log_cqe_n, &cq_attr, sh->numa_node);
if (ret)
return ret;
cq_obj = &rxq_ctrl->obj->cq_obj;
attr.wq_attr.log_hairpin_data_sz -
MLX5_HAIRPIN_QUEUE_STRIDE;
attr.counter_set_id = priv->counter_set_id;
- tmpl->rq = mlx5_devx_cmd_create_rq(priv->sh->ctx, &attr,
+ tmpl->rq = mlx5_devx_cmd_create_rq(priv->sh->cdev->ctx, &attr,
rxq_ctrl->socket);
if (!tmpl->rq) {
DRV_LOG(ERR,
MLX5DV_DEVX_CREATE_EVENT_CHANNEL_FLAGS_OMIT_EV_DATA;
tmpl->devx_channel = mlx5_os_devx_create_event_channel
- (priv->sh->ctx,
- devx_ev_flag);
+ (priv->sh->cdev->ctx,
+ devx_ev_flag);
if (!tmpl->devx_channel) {
rte_errno = errno;
DRV_LOG(ERR, "Failed to create event channel %d.",
ind_tbl->queues_n);
if (!rqt_attr)
return -rte_errno;
- ind_tbl->rqt = mlx5_devx_cmd_create_rqt(priv->sh->ctx, rqt_attr);
+ ind_tbl->rqt = mlx5_devx_cmd_create_rqt(priv->sh->cdev->ctx, rqt_attr);
mlx5_free(rqt_attr);
if (!ind_tbl->rqt) {
DRV_LOG(ERR, "Port %u cannot create DevX RQT.",
mlx5_devx_tir_attr_set(dev, hrxq->rss_key, hrxq->hash_fields,
hrxq->ind_table, tunnel, &tir_attr);
- hrxq->tir = mlx5_devx_cmd_create_tir(priv->sh->ctx, &tir_attr);
+ hrxq->tir = mlx5_devx_cmd_create_tir(priv->sh->cdev->ctx, &tir_attr);
if (!hrxq->tir) {
DRV_LOG(ERR, "Port %u cannot create DevX TIR.",
dev->data->port_id);
attr.wq_attr.log_hairpin_data_sz -
MLX5_HAIRPIN_QUEUE_STRIDE;
attr.tis_num = priv->sh->tis->id;
- tmpl->sq = mlx5_devx_cmd_create_sq(priv->sh->ctx, &attr);
+ tmpl->sq = mlx5_devx_cmd_create_sq(priv->sh->cdev->ctx, &attr);
if (!tmpl->sq) {
DRV_LOG(ERR,
"Port %u tx hairpin queue %u can't create SQ object.",
.ts_format = mlx5_ts_format_conv(priv->sh->sq_ts_format),
};
/* Create Send Queue object with DevX. */
- return mlx5_devx_sq_create(priv->sh->ctx, &txq_obj->sq_obj, log_desc_n,
- &sq_attr, priv->sh->numa_node);
+ return mlx5_devx_sq_create(priv->sh->cdev->ctx, &txq_obj->sq_obj,
+ log_desc_n, &sq_attr, priv->sh->numa_node);
}
#endif
return 0;
}
/* Create completion queue object with DevX. */
- ret = mlx5_devx_cq_create(sh->ctx, &txq_obj->cq_obj, log_desc_n,
+ ret = mlx5_devx_cq_create(sh->cdev->ctx, &txq_obj->cq_obj, log_desc_n,
&cq_attr, priv->sh->numa_node);
if (ret) {
DRV_LOG(ERR, "Port %u Tx queue %u CQ creation failure.",
}
mem_mng = (struct mlx5_counter_stats_mem_mng *)(mem + size) - 1;
size = sizeof(*raw_data) * MLX5_COUNTERS_PER_POOL * raws_n;
- mem_mng->umem = mlx5_os_umem_reg(sh->ctx, mem, size,
+ mem_mng->umem = mlx5_os_umem_reg(sh->cdev->ctx, mem, size,
IBV_ACCESS_LOCAL_WRITE);
if (!mem_mng->umem) {
rte_errno = errno;
mkey_attr.pd = sh->pdn;
mkey_attr.relaxed_ordering_write = sh->cmng.relaxed_ordering_write;
mkey_attr.relaxed_ordering_read = sh->cmng.relaxed_ordering_read;
- mem_mng->dm = mlx5_devx_cmd_mkey_create(sh->ctx, &mkey_attr);
+ mem_mng->dm = mlx5_devx_cmd_mkey_create(sh->cdev->ctx, &mkey_attr);
if (!mem_mng->dm) {
mlx5_os_umem_dereg(mem_mng->umem);
rte_errno = errno;
enum mlx5_access_aso_opc_mod aso_opc_mod)
{
uint32_t sq_desc_n = 1 << MLX5_ASO_QUEUE_LOG_DESC;
+ struct mlx5_common_device *cdev = sh->cdev;
switch (aso_opc_mod) {
case ASO_OPC_MOD_FLOW_HIT:
if (mlx5_aso_reg_mr(sh, (MLX5_ASO_AGE_ACTIONS_PER_POOL / 8) *
sq_desc_n, &sh->aso_age_mng->aso_sq.mr, 0))
return -1;
- if (mlx5_aso_sq_create(sh->ctx, &sh->aso_age_mng->aso_sq, 0,
+ if (mlx5_aso_sq_create(cdev->ctx, &sh->aso_age_mng->aso_sq, 0,
sh->tx_uar, sh->pdn, MLX5_ASO_QUEUE_LOG_DESC,
sh->sq_ts_format)) {
mlx5_aso_dereg_mr(sh, &sh->aso_age_mng->aso_sq.mr);
mlx5_aso_age_init_sq(&sh->aso_age_mng->aso_sq);
break;
case ASO_OPC_MOD_POLICER:
- if (mlx5_aso_sq_create(sh->ctx, &sh->mtrmng->pools_mng.sq, 0,
+ if (mlx5_aso_sq_create(cdev->ctx, &sh->mtrmng->pools_mng.sq, 0,
sh->tx_uar, sh->pdn, MLX5_ASO_QUEUE_LOG_DESC,
sh->sq_ts_format))
return -1;
if (mlx5_aso_reg_mr(sh, 64 * sq_desc_n,
&sh->ct_mng->aso_sq.mr, 0))
return -1;
- if (mlx5_aso_sq_create(sh->ctx, &sh->ct_mng->aso_sq, 0,
+ if (mlx5_aso_sq_create(cdev->ctx, &sh->ct_mng->aso_sq, 0,
sh->tx_uar, sh->pdn, MLX5_ASO_QUEUE_LOG_DESC,
sh->sq_ts_format)) {
mlx5_aso_dereg_mr(sh, &sh->ct_mng->aso_sq.mr);
}
*resource = *ctx_resource;
resource->idx = idx;
- ret = mlx5_flow_os_create_flow_action_packet_reformat(sh->ctx, domain,
- resource,
+ ret = mlx5_flow_os_create_flow_action_packet_reformat(sh->cdev->ctx,
+ domain, resource,
&resource->action);
if (ret) {
mlx5_ipool_free(sh->ipool[MLX5_IPOOL_DECAP_ENCAP], idx);
else
ns = sh->rx_domain;
ret = mlx5_flow_os_create_flow_action_modify_header
- (sh->ctx, ns, entry,
+ (sh->cdev->ctx, ns, entry,
data_len, &entry->action);
if (ret) {
mlx5_ipool_free(sh->mdh_ipools[ref->actions_num - 1], idx);
if (fallback) {
/* bulk_bitmap must be 0 for single counter allocation. */
- dcs = mlx5_devx_cmd_flow_counter_alloc(priv->sh->ctx, 0);
+ dcs = mlx5_devx_cmd_flow_counter_alloc(priv->sh->cdev->ctx, 0);
if (!dcs)
return NULL;
pool = flow_dv_find_pool_by_id(cmng, dcs->id);
*cnt_free = cnt;
return pool;
}
- dcs = mlx5_devx_cmd_flow_counter_alloc(priv->sh->ctx, 0x4);
+ dcs = mlx5_devx_cmd_flow_counter_alloc(priv->sh->cdev->ctx, 0x4);
if (!dcs) {
rte_errno = ENODATA;
return NULL;
uint32_t log_obj_size;
log_obj_size = rte_log2_u32(MLX5_ASO_MTRS_PER_POOL >> 1);
- dcs = mlx5_devx_cmd_create_flow_meter_aso_obj(priv->sh->ctx,
+ dcs = mlx5_devx_cmd_create_flow_meter_aso_obj(priv->sh->cdev->ctx,
priv->sh->pdn, log_obj_size);
if (!dcs) {
rte_errno = ENODATA;
}
} else {
/* Create a GENEVE TLV object and resource. */
- obj = mlx5_devx_cmd_create_geneve_tlv_option(sh->ctx,
+ obj = mlx5_devx_cmd_create_geneve_tlv_option(sh->cdev->ctx,
geneve_opt_v->option_class,
geneve_opt_v->option_type,
geneve_opt_v->option_len);
dv_attr.priority = ref->priority;
if (tbl->is_egress)
dv_attr.flags |= IBV_FLOW_ATTR_FLAGS_EGRESS;
- ret = mlx5_flow_os_create_flow_matcher(sh->ctx, &dv_attr, tbl->tbl.obj,
+ ret = mlx5_flow_os_create_flow_matcher(sh->cdev->ctx, &dv_attr,
+ tbl->tbl.obj,
&resource->matcher_object);
if (ret) {
mlx5_free(resource);
struct mlx5_devx_obj *obj = NULL;
uint32_t i;
- obj = mlx5_devx_cmd_create_flow_hit_aso_obj(priv->sh->ctx,
+ obj = mlx5_devx_cmd_create_flow_hit_aso_obj(priv->sh->cdev->ctx,
priv->sh->pdn);
if (!obj) {
rte_errno = ENODATA;
uint32_t i;
uint32_t log_obj_size = rte_log2_u32(MLX5_ASO_CT_ACTIONS_PER_POOL);
- obj = mlx5_devx_cmd_create_conn_track_offload_obj(priv->sh->ctx,
+ obj = mlx5_devx_cmd_create_conn_track_offload_obj(priv->sh->cdev->ctx,
priv->sh->pdn, log_obj_size);
if (!obj) {
rte_errno = ENODATA;
goto err;
dv_attr.match_criteria_enable = flow_dv_matcher_enable(mask.buf);
__flow_dv_adjust_buf_size(&mask.size, dv_attr.match_criteria_enable);
- ret = mlx5_flow_os_create_flow_matcher(sh->ctx, &dv_attr, tbl->obj,
- &matcher);
+ ret = mlx5_flow_os_create_flow_matcher(sh->cdev->ctx, &dv_attr,
+ tbl->obj, &matcher);
if (ret)
goto err;
__flow_dv_adjust_buf_size(&value.size, dv_attr.match_criteria_enable);
0, 0, 0, NULL);
if (!tbl)
goto err;
- dcs = mlx5_devx_cmd_flow_counter_alloc(priv->sh->ctx, 0x4);
+ dcs = mlx5_devx_cmd_flow_counter_alloc(priv->sh->cdev->ctx, 0x4);
if (!dcs)
goto err;
ret = mlx5_flow_os_create_flow_action_count(dcs->obj, UINT16_MAX,
goto err;
dv_attr.match_criteria_enable = flow_dv_matcher_enable(mask.buf);
__flow_dv_adjust_buf_size(&mask.size, dv_attr.match_criteria_enable);
- ret = mlx5_flow_os_create_flow_matcher(sh->ctx, &dv_attr, tbl->obj,
- &matcher);
+ ret = mlx5_flow_os_create_flow_matcher(sh->cdev->ctx, &dv_attr,
+ tbl->obj, &matcher);
if (ret)
goto err;
__flow_dv_adjust_buf_size(&value.size, dv_attr.match_criteria_enable);
{
#if defined(HAVE_IBV_DEVICE_COUNTERS_SET_V42)
struct mlx5_priv *priv = dev->data->dev_private;
- struct ibv_context *ctx = priv->sh->ctx;
+ struct ibv_context *ctx = priv->sh->cdev->ctx;
struct ibv_counter_set_init_attr init = {
.counter_set_id = counter->shared_info.id};
return 0;
#elif defined(HAVE_IBV_DEVICE_COUNTERS_SET_V45)
struct mlx5_priv *priv = dev->data->dev_private;
- struct ibv_context *ctx = priv->sh->ctx;
+ struct ibv_context *ctx = priv->sh->cdev->ctx;
struct ibv_counters_init_attr init = {0};
struct ibv_counter_attach_attr attach;
int ret;
mlx5_txpp_create_event_channel(struct mlx5_dev_ctx_shared *sh)
{
MLX5_ASSERT(!sh->txpp.echan);
- sh->txpp.echan = mlx5_os_devx_create_event_channel(sh->ctx,
+ sh->txpp.echan = mlx5_os_devx_create_event_channel(sh->cdev->ctx,
MLX5DV_DEVX_CREATE_EVENT_CHANNEL_FLAGS_OMIT_EV_DATA);
if (!sh->txpp.echan) {
rte_errno = errno;
MLX5_SET(set_pp_rate_limit_context, &pp, rate_mode,
sh->txpp.test ? MLX5_DATA_RATE : MLX5_WQE_RATE);
sh->txpp.pp = mlx5_glue->dv_alloc_pp
- (sh->ctx, sizeof(pp), &pp,
+ (sh->cdev->ctx, sizeof(pp), &pp,
MLX5DV_PP_ALLOC_FLAGS_DEDICATED_INDEX);
if (sh->txpp.pp == NULL) {
DRV_LOG(ERR, "Failed to allocate packet pacing index.");
int ret;
/* Create completion queue object for Rearm Queue. */
- ret = mlx5_devx_cq_create(sh->ctx, &wq->cq_obj,
+ ret = mlx5_devx_cq_create(sh->cdev->ctx, &wq->cq_obj,
log2above(MLX5_TXPP_REARM_CQ_SIZE), &cq_attr,
sh->numa_node);
if (ret) {
/* Create send queue object for Rearm Queue. */
sq_attr.cqn = wq->cq_obj.cq->id;
/* There should be no WQE leftovers in the cyclic queue. */
- ret = mlx5_devx_sq_create(sh->ctx, &wq->sq_obj,
+ ret = mlx5_devx_sq_create(sh->cdev->ctx, &wq->sq_obj,
log2above(MLX5_TXPP_REARM_SQ_SIZE), &sq_attr,
sh->numa_node);
if (ret) {
sh->txpp.ts_p = 0;
sh->txpp.ts_n = 0;
/* Create completion queue object for Clock Queue. */
- ret = mlx5_devx_cq_create(sh->ctx, &wq->cq_obj,
+ ret = mlx5_devx_cq_create(sh->cdev->ctx, &wq->cq_obj,
log2above(MLX5_TXPP_CLKQ_SIZE), &cq_attr,
sh->numa_node);
if (ret) {
sq_attr.wq_attr.uar_page = mlx5_os_get_devx_uar_page_id(sh->tx_uar);
sq_attr.wq_attr.pd = sh->pdn;
sq_attr.ts_format = mlx5_ts_format_conv(sh->sq_ts_format);
- ret = mlx5_devx_sq_create(sh->ctx, &wq->sq_obj, log2above(wq->sq_size),
+ ret = mlx5_devx_sq_create(sh->cdev->ctx, &wq->sq_obj,
+ log2above(wq->sq_size),
&sq_attr, sh->numa_node);
if (ret) {
rte_errno = errno;
return -rte_errno;
}
priv = dev->data->dev_private;
- context_obj = (mlx5_context_st *)priv->sh->ctx;
+ context_obj = (mlx5_context_st *)priv->sh->cdev->ctx;
memcpy(mac, context_obj->mlx5_dev.eth_mac, RTE_ETHER_ADDR_LEN);
return 0;
}
return -rte_errno;
}
priv = dev->data->dev_private;
- context_obj = (mlx5_context_st *)priv->sh->ctx;
+ context_obj = (mlx5_context_st *)priv->sh->cdev->ctx;
strncpy(*ifname, context_obj->mlx5_dev.name, MLX5_NAMESIZE);
return 0;
}
return -rte_errno;
}
priv = dev->data->dev_private;
- context_obj = (mlx5_context_st *)priv->sh->ctx;
+ context_obj = (mlx5_context_st *)priv->sh->cdev->ctx;
*mtu = context_obj->mlx5_dev.mtu_bytes;
return 0;
}
return -rte_errno;
}
priv = dev->data->dev_private;
- context_obj = (mlx5_context_st *)priv->sh->ctx;
+ context_obj = (mlx5_context_st *)priv->sh->cdev->ctx;
dev_link.link_speed = context_obj->mlx5_dev.link_speed / (1000 * 1000);
dev_link.link_status =
(context_obj->mlx5_dev.link_state == 1 && !mlx5_is_removed(dev))
int err;
struct mlx5_devx_clock mlx5_clock;
struct mlx5_priv *priv = dev->data->dev_private;
- mlx5_context_st *context_obj = (mlx5_context_st *)priv->sh->ctx;
+ mlx5_context_st *context_obj = (mlx5_context_st *)priv->sh->cdev->ctx;
err = mlx5_glue->query_rt_values(context_obj, &mlx5_clock);
if (err != 0) {
mlx5_is_removed(struct rte_eth_dev *dev)
{
struct mlx5_priv *priv = dev->data->dev_private;
- mlx5_context_st *context_obj = (mlx5_context_st *)priv->sh->ctx;
+ mlx5_context_st *context_obj = (mlx5_context_st *)priv->sh->cdev->ctx;
if (*context_obj->shutdown_event_obj.p_flag)
return 1;
/* Initialize the shutdown event in mlx5_dev_spawn to
* support mlx5_is_removed for Windows.
*/
- err = mlx5_glue->devx_init_showdown_event(sh->ctx);
+ err = mlx5_glue->devx_init_showdown_event(sh->cdev->ctx);
if (err) {
DRV_LOG(ERR, "failed to init showdown event: %s",
strerror(errno));
goto error;
}
DRV_LOG(DEBUG, "MPW isn't supported");
- mlx5_os_get_dev_attr(sh->ctx, &device_attr);
+ mlx5_os_get_dev_attr(sh->cdev->ctx, &device_attr);
config->swp = device_attr.sw_parsing_offloads &
(MLX5_SW_PARSING_CAP | MLX5_SW_PARSING_CSUM_CAP |
MLX5_SW_PARSING_TSO_CAP);
config->cqe_comp = 0;
}
if (sh->devx) {
- err = mlx5_devx_cmd_query_hca_attr(sh->ctx, &config->hca_attr);
+ err = mlx5_devx_cmd_query_hca_attr(sh->cdev->ctx,
+ &config->hca_attr);
if (err) {
err = -err;
goto error;
err = config->hca_attr.access_register_user ?
mlx5_devx_cmd_register_read
- (sh->ctx, MLX5_REGISTER_ID_MTUTC, 0,
+ (sh->cdev->ctx, MLX5_REGISTER_ID_MTUTC, 0,
reg, MLX5_ST_SZ_DW(register_mtutc)) : ENOTSUP;
if (!err) {
uint32_t ts_mode;
.pf_bond = -1,
.max_port = 1,
.phys_port = 1,
+ .phys_dev_name = mlx5_os_get_ctx_device_name(cdev->ctx),
.pci_dev = pci_dev,
.cdev = cdev,
.ifindex = -1, /* Spawn will assign */
.dv_flow_en = 1,
.log_hp_size = MLX5_ARG_UNSET,
};
- void *ctx;
int ret;
uint32_t restore;
DRV_LOG(ERR, "Secondary process is not supported on Windows.");
return -ENOTSUP;
}
- ret = mlx5_os_open_device(cdev, &ctx);
- if (ret) {
- DRV_LOG(ERR, "Fail to open DevX device %s", cdev->dev->name);
- return -rte_errno;
- }
ret = mlx5_init_once();
if (ret) {
DRV_LOG(ERR, "unable to init PMD global data: %s",
strerror(rte_errno));
- claim_zero(mlx5_glue->close_device(ctx));
return -rte_errno;
}
- spawn.ctx = ctx;
- spawn.phys_dev_name = mlx5_os_get_ctx_device_name(ctx);
/* Device specific configuration. */
switch (pci_dev->id.device_id) {
case PCI_DEVICE_ID_MELLANOX_CONNECTX4VF:
break;
}
spawn.eth_dev = mlx5_dev_spawn(cdev->dev, &spawn, &dev_config);
- if (!spawn.eth_dev) {
- claim_zero(mlx5_glue->close_device(ctx));
+ if (!spawn.eth_dev)
return -rte_errno;
- }
restore = spawn.eth_dev->data->dev_flags;
rte_eth_copy_pci_info(spawn.eth_dev, pci_dev);
/* Restore non-PCI flags cleared by the above call. */
/* Iterate all the existing mlx5 devices. */
TAILQ_FOREACH(priv, &mlx5_mem_event_list, mem_event_cb)
mlx5_free_mr_by_addr(&priv->mr_scache,
- priv->ctx->device->name,
+ mlx5_os_get_ctx_device_name
+ (priv->cdev->ctx),
addr, len);
pthread_mutex_unlock(&mem_event_list_lock);
break;
static int
mlx5_regex_dev_probe(struct mlx5_common_device *cdev)
{
- struct ibv_device *ibv;
struct mlx5_regex_priv *priv = NULL;
- struct ibv_context *ctx = NULL;
struct mlx5_hca_attr attr;
char name[RTE_REGEXDEV_NAME_MAX_LEN];
int ret;
uint32_t val;
- ibv = mlx5_os_get_ibv_dev(cdev->dev);
- if (ibv == NULL)
- return -rte_errno;
- DRV_LOG(INFO, "Probe device \"%s\".", ibv->name);
- ctx = mlx5_glue->dv_open_device(ibv);
- if (!ctx) {
- DRV_LOG(ERR, "Failed to open IB device \"%s\".", ibv->name);
- rte_errno = ENODEV;
- return -rte_errno;
- }
- ret = mlx5_devx_cmd_query_hca_attr(ctx, &attr);
+ ret = mlx5_devx_cmd_query_hca_attr(cdev->ctx, &attr);
if (ret) {
DRV_LOG(ERR, "Unable to read HCA capabilities.");
rte_errno = ENOTSUP;
- goto dev_error;
+ return -rte_errno;
} else if (((!attr.regex) && (!attr.mmo_regex_sq_en) &&
(!attr.mmo_regex_qp_en)) || attr.regexp_num_of_engines == 0) {
DRV_LOG(ERR, "Not enough capabilities to support RegEx, maybe "
"old FW/OFED version?");
rte_errno = ENOTSUP;
- goto dev_error;
+ return -rte_errno;
}
- if (mlx5_regex_engines_status(ctx, 2)) {
+ if (mlx5_regex_engines_status(cdev->ctx, 2)) {
DRV_LOG(ERR, "RegEx engine error.");
rte_errno = ENOMEM;
- goto dev_error;
+ return -rte_errno;
}
priv = rte_zmalloc("mlx5 regex device private", sizeof(*priv),
RTE_CACHE_LINE_SIZE);
if (!priv) {
DRV_LOG(ERR, "Failed to allocate private memory.");
rte_errno = ENOMEM;
- goto dev_error;
+ return -rte_errno;
}
priv->mmo_regex_qp_cap = attr.mmo_regex_qp_en;
priv->mmo_regex_sq_cap = attr.mmo_regex_sq_en;
priv->qp_ts_format = attr.qp_ts_format;
- priv->ctx = ctx;
+ priv->cdev = cdev;
priv->nb_engines = 2; /* attr.regexp_num_of_engines */
- ret = mlx5_devx_regex_register_read(priv->ctx, 0,
+ ret = mlx5_devx_regex_register_read(priv->cdev->ctx, 0,
MLX5_RXP_CSR_IDENTIFIER, &val);
if (ret) {
DRV_LOG(ERR, "CSR read failed!");
if (priv->regexdev == NULL) {
DRV_LOG(ERR, "Failed to register RegEx device.");
rte_errno = rte_errno ? rte_errno : EINVAL;
- goto error;
+ goto dev_error;
}
/*
* This PMD always claims the write memory barrier on UAR
* registers writings, it is safe to allocate UAR with any
* memory mapping type.
*/
- priv->uar = mlx5_devx_alloc_uar(ctx, -1);
+ priv->uar = mlx5_devx_alloc_uar(priv->cdev->ctx, -1);
if (!priv->uar) {
DRV_LOG(ERR, "can't allocate uar.");
rte_errno = ENOMEM;
goto error;
}
- priv->pd = mlx5_glue->alloc_pd(ctx);
+ priv->pd = mlx5_glue->alloc_pd(priv->cdev->ctx);
if (!priv->pd) {
DRV_LOG(ERR, "can't allocate pd.");
rte_errno = ENOMEM;
if (priv->regexdev)
rte_regexdev_unregister(priv->regexdev);
dev_error:
- if (ctx)
- mlx5_glue->close_device(ctx);
if (priv)
rte_free(priv);
return -rte_errno;
mlx5_glue->devx_free_uar(priv->uar);
if (priv->regexdev)
rte_regexdev_unregister(priv->regexdev);
- if (priv->ctx)
- mlx5_glue->close_device(priv->ctx);
rte_free(priv);
}
return 0;
struct mlx5_regex_priv {
TAILQ_ENTRY(mlx5_regex_priv) next;
- struct ibv_context *ctx; /* Device context. */
+ struct mlx5_common_device *cdev; /* Backend mlx5 device. */
struct rte_regexdev *regexdev; /* Pointer to the RegEx dev. */
uint16_t nb_queues; /* Number of queues. */
struct mlx5_regex_qp *qps; /* Pointer to the qp array. */
int ret;
cq->ci = 0;
- ret = mlx5_devx_cq_create(priv->ctx, &cq->cq_obj, cq->log_nb_desc,
+ ret = mlx5_devx_cq_create(priv->cdev->ctx, &cq->cq_obj, cq->log_nb_desc,
&attr, SOCKET_ID_ANY);
if (ret) {
DRV_LOG(ERR, "Can't create CQ object.");
attr.sq_size = RTE_BIT32(MLX5_REGEX_WQE_LOG_NUM(priv->has_umr,
log_nb_desc));
attr.mmo = priv->mmo_regex_qp_cap;
- ret = mlx5_devx_qp_create(priv->ctx, &qp_obj->qp_obj,
+ ret = mlx5_devx_qp_create(priv->cdev->ctx, &qp_obj->qp_obj,
MLX5_REGEX_WQE_LOG_NUM(priv->has_umr, log_nb_desc),
&attr, SOCKET_ID_ANY);
if (ret) {
for (i = 0; i < qp->nb_desc; i++) {
attr.klm_num = MLX5_REGEX_MAX_KLM_NUM;
attr.klm_array = qp->jobs[i].imkey_array;
- qp->jobs[i].imkey = mlx5_devx_cmd_mkey_create(priv->ctx,
- &attr);
+ qp->jobs[i].imkey = mlx5_devx_cmd_mkey_create
+ (priv->cdev->ctx, &attr);
if (!qp->jobs[i].imkey) {
err = -rte_errno;
DRV_LOG(ERR, "Failed to allocate imkey.");
uint32_t poll_value;
uint32_t expected_value;
uint32_t expected_mask;
- struct ibv_context *ctx = priv->ctx;
+ struct ibv_context *ctx = priv->cdev->ctx;
int ret = 0;
/* Read the rtru ctrl CSR. */
tmp_addr = rxp_get_reg_address(address);
if (tmp_addr == UINT32_MAX)
goto parse_error;
- ret = mlx5_devx_regex_register_read(priv->ctx, id,
+ ret = mlx5_devx_regex_register_read(priv->cdev->ctx, id,
tmp_addr, ®_val);
if (ret)
goto parse_error;
tmp_addr = rxp_get_reg_address(address);
if (tmp_addr == UINT32_MAX)
goto parse_error;
- ret = mlx5_devx_regex_register_read(priv->ctx, id,
+ ret = mlx5_devx_regex_register_read(priv->cdev->ctx, id,
tmp_addr, ®_val);
if (ret)
goto parse_error;
tmp_addr = rxp_get_reg_address(address);
if (tmp_addr == UINT32_MAX)
goto parse_error;
- ret = mlx5_devx_regex_register_read(priv->ctx, id,
+ ret = mlx5_devx_regex_register_read(priv->cdev->ctx, id,
tmp_addr, ®_val);
if (ret)
goto parse_error;
if (tmp_addr == UINT32_MAX)
goto parse_error;
- ret = mlx5_devx_regex_register_read(priv->ctx, id,
+ ret = mlx5_devx_regex_register_read(priv->cdev->ctx, id,
tmp_addr, ®_val);
if (ret) {
DRV_LOG(ERR, "RXP CSR read failed!");
*/
temp = val;
ret |= mlx5_devx_regex_register_write
- (priv->ctx, id,
+ (priv->cdev->ctx, id,
MLX5_RXP_RTRU_CSR_DATA_0, temp);
temp = (uint32_t)(val >> 32);
ret |= mlx5_devx_regex_register_write
- (priv->ctx, id,
+ (priv->cdev->ctx, id,
MLX5_RXP_RTRU_CSR_DATA_0 +
MLX5_RXP_CSR_WIDTH, temp);
temp = address;
ret |= mlx5_devx_regex_register_write
- (priv->ctx, id, MLX5_RXP_RTRU_CSR_ADDR,
- temp);
+ (priv->cdev->ctx, id,
+ MLX5_RXP_RTRU_CSR_ADDR, temp);
if (ret) {
DRV_LOG(ERR,
"Failed to copy instructions to RXP.");
int ret;
uint32_t umem_id;
- ret = mlx5_devx_regex_database_stop(priv->ctx, id);
+ ret = mlx5_devx_regex_database_stop(priv->cdev->ctx, id);
if (ret < 0) {
DRV_LOG(ERR, "stop engine failed!");
return ret;
}
umem_id = mlx5_os_get_umem_id(priv->db[db_to_use].umem.umem);
- ret = mlx5_devx_regex_database_program(priv->ctx, id, umem_id, 0);
+ ret = mlx5_devx_regex_database_program(priv->cdev->ctx, id, umem_id, 0);
if (ret < 0) {
DRV_LOG(ERR, "program db failed!");
return ret;
static int
mlnx_resume_database(struct mlx5_regex_priv *priv, uint8_t id)
{
- mlx5_devx_regex_database_resume(priv->ctx, id);
+ mlx5_devx_regex_database_resume(priv->cdev->ctx, id);
return 0;
}
{
int ret;
uint32_t val;
+ struct ibv_context *ctx = priv->cdev->ctx;
ret = rxp_init_eng(priv, id);
if (ret < 0)
return ret;
/* Confirm the RXP is initialised. */
- if (mlx5_devx_regex_register_read(priv->ctx, id,
- MLX5_RXP_CSR_STATUS, &val)) {
+ if (mlx5_devx_regex_register_read(ctx, id, MLX5_RXP_CSR_STATUS, &val)) {
DRV_LOG(ERR, "Failed to read from RXP!");
return -ENODEV;
}
DRV_LOG(ERR, "RXP not initialised...");
return -EBUSY;
}
- ret = mlx5_devx_regex_register_read(priv->ctx, id,
+ ret = mlx5_devx_regex_register_read(ctx, id,
MLX5_RXP_RTRU_CSR_CTRL, &val);
if (ret) {
DRV_LOG(ERR, "CSR read failed!");
return -1;
}
val |= MLX5_RXP_RTRU_CSR_CTRL_GO;
- ret = mlx5_devx_regex_register_write(priv->ctx, id,
+ ret = mlx5_devx_regex_register_write(ctx, id,
MLX5_RXP_RTRU_CSR_CTRL, val);
if (ret) {
DRV_LOG(ERR, "Can't program rof file!");
}
if (priv->is_bf2) {
ret = rxp_poll_csr_for_value
- (priv->ctx, &val, MLX5_RXP_RTRU_CSR_STATUS,
+ (ctx, &val, MLX5_RXP_RTRU_CSR_STATUS,
MLX5_RXP_RTRU_CSR_STATUS_UPDATE_DONE,
MLX5_RXP_RTRU_CSR_STATUS_UPDATE_DONE,
MLX5_RXP_POLL_CSR_FOR_VALUE_TIMEOUT, id);
}
DRV_LOG(DEBUG, "Rules update took %d cycles", ret);
}
- if (mlx5_devx_regex_register_read(priv->ctx, id, MLX5_RXP_RTRU_CSR_CTRL,
+ if (mlx5_devx_regex_register_read(ctx, id, MLX5_RXP_RTRU_CSR_CTRL,
&val)) {
DRV_LOG(ERR, "CSR read failed!");
return -1;
}
val &= ~(MLX5_RXP_RTRU_CSR_CTRL_GO);
- if (mlx5_devx_regex_register_write(priv->ctx, id,
+ if (mlx5_devx_regex_register_write(ctx, id,
MLX5_RXP_RTRU_CSR_CTRL, val)) {
DRV_LOG(ERR, "CSR write failed!");
return -1;
}
- ret = mlx5_devx_regex_register_read(priv->ctx, id, MLX5_RXP_CSR_CTRL,
- &val);
+ ret = mlx5_devx_regex_register_read(ctx, id, MLX5_RXP_CSR_CTRL, &val);
if (ret)
return ret;
val &= ~MLX5_RXP_CSR_CTRL_INIT;
- ret = mlx5_devx_regex_register_write(priv->ctx, id, MLX5_RXP_CSR_CTRL,
- val);
+ ret = mlx5_devx_regex_register_write(ctx, id, MLX5_RXP_CSR_CTRL, val);
if (ret)
return ret;
rxp_init_rtru(priv, id, MLX5_RXP_RTRU_CSR_CTRL_INIT_MODE_L1_L2);
if (priv->is_bf2) {
- ret = rxp_poll_csr_for_value(priv->ctx, &val,
- MLX5_RXP_CSR_STATUS,
+ ret = rxp_poll_csr_for_value(ctx, &val, MLX5_RXP_CSR_STATUS,
MLX5_RXP_CSR_STATUS_INIT_DONE,
MLX5_RXP_CSR_STATUS_INIT_DONE,
MLX5_RXP_CSR_STATUS_TRIAL_TIMEOUT,
{
uint32_t ctrl;
uint32_t reg;
- struct ibv_context *ctx = priv->ctx;
+ struct ibv_context *ctx = priv->cdev->ctx;
int ret;
ret = mlx5_devx_regex_register_read(ctx, id, MLX5_RXP_CSR_CTRL, &ctrl);
goto tidyup_error;
}
/* Register the memory. */
- priv->db[i].umem.umem = mlx5_glue->devx_umem_reg(priv->ctx,
- priv->db[i].ptr,
- MLX5_MAX_DB_SIZE, 7);
+ priv->db[i].umem.umem = mlx5_glue->devx_umem_reg
+ (priv->cdev->ctx,
+ priv->db[i].ptr,
+ MLX5_MAX_DB_SIZE, 7);
if (!priv->db[i].umem.umem) {
DRV_LOG(ERR, "Failed to register memory!");
ret = ENODEV;
}
if (rule_db_len == 0)
return -EINVAL;
- if (mlx5_devx_regex_register_read(priv->ctx, 0,
+ if (mlx5_devx_regex_register_read(priv->cdev->ctx, 0,
MLX5_RXP_CSR_BASE_ADDRESS, &ver)) {
DRV_LOG(ERR, "Failed to read Main CSRs Engine 0!");
return -1;
}
/* Need to ensure RXP not busy before stop! */
for (id = 0; id < priv->nb_engines; id++) {
- ret = rxp_stop_engine(priv->ctx, id);
+ ret = rxp_stop_engine(priv->cdev->ctx, id);
if (ret) {
DRV_LOG(ERR, "Can't stop engine.");
ret = -ENODEV;
ret = -ENODEV;
goto tidyup_error;
}
- ret = rxp_start_engine(priv->ctx, id);
+ ret = rxp_start_engine(priv->cdev->ctx, id);
if (ret) {
DRV_LOG(ERR, "Can't start engine.");
ret = -ENODEV;