#include <rte_errno.h>
#include <rte_string_fns.h>
+#include <rte_bus_pci.h>
+#include <rte_bus_auxiliary.h>
#include "mlx5_common.h"
+#include "mlx5_nl.h"
#include "mlx5_common_log.h"
+#include "mlx5_common_private.h"
#include "mlx5_common_defs.h"
#include "mlx5_common_os.h"
#include "mlx5_glue.h"
mlx5_glue = NULL;
}
-struct ibv_device *
+static struct ibv_device *
mlx5_os_get_ibv_device(const struct rte_pci_addr *addr)
{
int n;
return ibv_match;
}
+/* Try to disable ROCE by Netlink/Devlink. */
+static int
+mlx5_nl_roce_disable(const char *addr)
+{
+ int nlsk_fd = mlx5_nl_init(NETLINK_GENERIC);
+ int devlink_id;
+ int enable;
+ int ret;
+
+ if (nlsk_fd < 0)
+ return nlsk_fd;
+ devlink_id = mlx5_nl_devlink_family_id_get(nlsk_fd);
+ if (devlink_id < 0) {
+ ret = devlink_id;
+ DRV_LOG(DEBUG,
+ "Failed to get devlink id for ROCE operations by Netlink.");
+ goto close;
+ }
+ ret = mlx5_nl_enable_roce_get(nlsk_fd, devlink_id, addr, &enable);
+ if (ret) {
+ DRV_LOG(DEBUG, "Failed to get ROCE enable by Netlink: %d.",
+ ret);
+ goto close;
+ } else if (!enable) {
+ DRV_LOG(INFO, "ROCE has already been disabled (Netlink).");
+ goto close;
+ }
+ ret = mlx5_nl_enable_roce_set(nlsk_fd, devlink_id, addr, 0);
+ if (ret)
+ DRV_LOG(DEBUG, "Failed to disable ROCE by Netlink: %d.", ret);
+ else
+ DRV_LOG(INFO, "ROCE is disabled by Netlink successfully.");
+close:
+ close(nlsk_fd);
+ return ret;
+}
+
+/* Try to disable ROCE by sysfs. */
+static int
+mlx5_sys_roce_disable(const char *addr)
+{
+ FILE *file_o;
+ int enable;
+ int ret;
+
+ MKSTR(file_p, "/sys/bus/pci/devices/%s/roce_enable", addr);
+ file_o = fopen(file_p, "rb");
+ if (!file_o) {
+ rte_errno = ENOTSUP;
+ return -ENOTSUP;
+ }
+ ret = fscanf(file_o, "%d", &enable);
+ if (ret != 1) {
+ rte_errno = EINVAL;
+ ret = EINVAL;
+ goto close;
+ } else if (!enable) {
+ ret = 0;
+ DRV_LOG(INFO, "ROCE has already been disabled (sysfs).");
+ goto close;
+ }
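+ /* Close the read handle and reopen the file to write the disable value. */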
+ fclose(file_o);
+ file_o = fopen(file_p, "wb");
+ if (!file_o) {
+ rte_errno = ENOTSUP;
+ return -ENOTSUP;
+ }
+ fprintf(file_o, "0\n");
+ ret = 0;
+close:
+ if (ret)
+ DRV_LOG(DEBUG, "Failed to disable ROCE by sysfs: %d.", ret);
+ else
+ DRV_LOG(INFO, "ROCE is disabled by sysfs successfully.");
+ fclose(file_o);
+ return ret;
+}
+
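+/**
+ * Disable RoCE on the given device. Netlink/devlink is tried first and
+ * sysfs is used as a fallback.
+ *
+ * @param dev
+ *   Pointer to the generic device.
+ *
+ * @return
+ *   0 on success, a negative errno value otherwise and rte_errno is set.
+ */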
+static int
+mlx5_roce_disable(const struct rte_device *dev)
+{
+ char pci_addr[PCI_PRI_STR_SIZE] = { 0 };
+
+ if (mlx5_dev_to_pci_str(dev, pci_addr, sizeof(pci_addr)) < 0)
+ return -rte_errno;
+ /* First try to disable ROCE by Netlink, then fall back to sysfs. */
+ if (mlx5_nl_roce_disable(pci_addr) != 0 &&
+ mlx5_sys_roce_disable(pci_addr) != 0)
+ return -rte_errno;
+ return 0;
+}
+
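+/**
+ * Look up the Verbs device matching a PCI or auxiliary rte_device.
+ *
+ * @param dev
+ *   Pointer to the generic device.
+ *
+ * @return
+ *   Pointer to the ibv_device on success, NULL otherwise and rte_errno
+ *   is set to ENODEV.
+ */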
+static struct ibv_device *
+mlx5_os_get_ibv_dev(const struct rte_device *dev)
+{
+ struct ibv_device *ibv;
+
+ if (mlx5_dev_is_pci(dev))
+ ibv = mlx5_os_get_ibv_device(&RTE_DEV_TO_PCI_CONST(dev)->addr);
+ else
+ ibv = mlx5_get_aux_ibv_device(RTE_DEV_TO_AUXILIARY_CONST(dev));
+ if (ibv == NULL) {
+ rte_errno = ENODEV;
+ DRV_LOG(ERR, "Verbs device not found: %s", dev->name);
+ }
+ return ibv;
+}
+
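+/**
+ * Look up the Verbs device for a vDPA device: RoCE is disabled first,
+ * which reloads the IB device, and the lookup is retried until the
+ * device reappears.
+ *
+ * @param dev
+ *   Pointer to the generic device.
+ *
+ * @return
+ *   Pointer to the ibv_device on success, NULL otherwise and rte_errno
+ *   is set.
+ */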
+static struct ibv_device *
+mlx5_vdpa_get_ibv_dev(const struct rte_device *dev)
+{
+ struct ibv_device *ibv;
+ int retry;
+
+ if (mlx5_roce_disable(dev) != 0) {
+ DRV_LOG(WARNING, "Failed to disable ROCE for \"%s\".",
+ dev->name);
+ return NULL;
+ }
+ /* Wait for the IB device to appear again after reload. */
+ for (retry = MLX5_VDPA_MAX_RETRIES; retry > 0; --retry) {
+ ibv = mlx5_os_get_ibv_dev(dev);
+ if (ibv != NULL)
+ return ibv;
+ usleep(MLX5_VDPA_USEC);
+ }
+ DRV_LOG(ERR,
+ "Cannot get IB device after disabling RoCE for \"%s\", exceeded %d retries.",
+ dev->name, MLX5_VDPA_MAX_RETRIES);
+ rte_errno = EAGAIN;
+ return NULL;
+}
+
static int
mlx5_config_doorbell_mapping_env(int dbnc)
{
struct ibv_context *ctx = NULL;
int dbmap_env;
- ibv = mlx5_os_get_ibv_dev(cdev->dev);
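+ /* For the vDPA class, RoCE must be disabled before getting the device. */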
+ if (classes & MLX5_CLASS_VDPA)
+ ibv = mlx5_vdpa_get_ibv_dev(cdev->dev);
+ else
+ ibv = mlx5_os_get_ibv_dev(cdev->dev);
if (!ibv)
return -rte_errno;
DRV_LOG(INFO, "Dev information matches for device \"%s\".", ibv->name);
free(addr);
}
-struct ibv_device *
-mlx5_os_get_ibv_device(const struct rte_pci_addr *addr);
-
-__rte_internal
-struct ibv_device *
-mlx5_os_get_ibv_dev(const struct rte_device *dev);
-
void
mlx5_set_context_attr(struct rte_device *dev, struct ibv_context *ctx);
#include <inttypes.h>
#include <rte_errno.h>
-#include <rte_bus_pci.h>
#include <rte_eal_paging.h>
-#include <rte_bus_auxiliary.h>
#include "mlx5_common_utils.h"
#include "mlx5_common_log.h"
-#include "mlx5_common_private.h"
#include "mlx5_autoconf.h"
#include <mlx5_glue.h>
#include <mlx5_malloc.h>
#include <mlx5_common.h>
#include <mlx5_common_mr.h>
-struct ibv_device *
-mlx5_os_get_ibv_dev(const struct rte_device *dev)
-{
- struct ibv_device *ibv;
-
- if (mlx5_dev_is_pci(dev))
- ibv = mlx5_os_get_ibv_device(&RTE_DEV_TO_PCI_CONST(dev)->addr);
- else
- ibv = mlx5_get_aux_ibv_device(RTE_DEV_TO_AUXILIARY_CONST(dev));
- if (ibv == NULL) {
- rte_errno = ENODEV;
- DRV_LOG(ERR, "Verbs device not found: %s", dev->name);
- }
- return ibv;
-}
-
/**
* Verbs callback to allocate a memory. This function should allocate the space
* according to the size provided residing inside a huge page.
* @return
* 0 on success, a negative errno value otherwise and rte_errno is set.
*/
-int
+static int
mlx5_nl_driver_reload(int nlsk_fd, int family_id, const char *pci_addr)
{
struct nlmsghdr *nlh;
__rte_internal
uint32_t mlx5_nl_vlan_vmwa_create(struct mlx5_nl_vlan_vmwa_context *vmwa,
uint32_t ifindex, uint16_t tag);
-__rte_internal
+
int mlx5_nl_devlink_family_id_get(int nlsk_fd);
-__rte_internal
int mlx5_nl_enable_roce_get(int nlsk_fd, int family_id, const char *pci_addr,
int *enable);
-__rte_internal
-int mlx5_nl_driver_reload(int nlsk_fd, int family_id, const char *pci_addr);
-__rte_internal
int mlx5_nl_enable_roce_set(int nlsk_fd, int family_id, const char *pci_addr,
int enable);
* - 0 on success.
* - Negative value and rte_errno is set otherwise.
*/
-__rte_internal
int mlx5_dev_to_pci_str(const struct rte_device *dev, char *addr, size_t size);
/*
#define MLX5_TXDB_NCACHED 1
#define MLX5_TXDB_HEURISTIC 2
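+/* Retries and wait interval (usec) for the IB device after RoCE reload. */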
+#define MLX5_VDPA_MAX_RETRIES 20
+#define MLX5_VDPA_USEC 1000
+
#endif /* RTE_PMD_MLX5_COMMON_DEFS_H_ */
mlx5_create_mr_ext;
mlx5_dev_is_pci;
- mlx5_dev_to_pci_str;
mlx5_devx_alloc_uar; # WINDOWS_NO_EXPORT
mlx5_mr_release_cache;
mlx5_nl_allmulti; # WINDOWS_NO_EXPORT
- mlx5_nl_devlink_family_id_get; # WINDOWS_NO_EXPORT
- mlx5_nl_driver_reload; # WINDOWS_NO_EXPORT
- mlx5_nl_enable_roce_get; # WINDOWS_NO_EXPORT
- mlx5_nl_enable_roce_set; # WINDOWS_NO_EXPORT
mlx5_nl_ifindex; # WINDOWS_NO_EXPORT
mlx5_nl_init; # WINDOWS_NO_EXPORT
mlx5_nl_mac_addr_add; # WINDOWS_NO_EXPORT
mlx5_os_alloc_pd;
mlx5_os_dealloc_pd;
mlx5_os_dereg_mr;
- mlx5_os_get_ibv_dev; # WINDOWS_NO_EXPORT
mlx5_os_reg_mr;
mlx5_os_umem_dereg;
mlx5_os_umem_reg;
#include <mlx5_glue.h>
#include <mlx5_common.h>
+#include <mlx5_common_defs.h>
#include <mlx5_devx_cmds.h>
#include <mlx5_prm.h>
#include <mlx5_nl.h>
(1ULL << VHOST_USER_PROTOCOL_F_NET_MTU) | \
(1ULL << VHOST_USER_PROTOCOL_F_STATUS))
-#define MLX5_VDPA_MAX_RETRIES 20
-#define MLX5_VDPA_USEC 1000
#define MLX5_VDPA_DEFAULT_NO_TRAFFIC_MAX 16LLU
TAILQ_HEAD(mlx5_vdpa_privs, mlx5_vdpa_priv) priv_list =
mlx5_vdpa_pd_create(struct mlx5_vdpa_priv *priv)
{
#ifdef HAVE_IBV_FLOW_DV_SUPPORT
- priv->pd = mlx5_glue->alloc_pd(priv->ctx);
+ priv->pd = mlx5_glue->alloc_pd(priv->cdev->ctx);
if (priv->pd == NULL) {
DRV_LOG(ERR, "Failed to allocate PD.");
return errno ? -errno : -ENOMEM;
DRV_LOG(DEBUG, "Vhost MTU is 0.");
return ret;
}
- ret = mlx5_get_ifname_sysfs(priv->ctx->device->ibdev_path,
- request.ifr_name);
+ ret = mlx5_get_ifname_sysfs
+ (mlx5_os_get_ctx_device_name(priv->cdev->ctx),
+ request.ifr_name);
if (ret) {
DRV_LOG(DEBUG, "Cannot get kernel IF name - %d.", ret);
return ret;
DRV_LOG(ERR, "Invalid vDPA device: %s.", vdev->device->name);
return -EINVAL;
}
- return priv->ctx->cmd_fd;
+ return ((struct ibv_context *)priv->cdev->ctx)->cmd_fd;
}
static int
.reset_stats = mlx5_vdpa_reset_stats,
};
-/* Try to disable ROCE by Netlink\Devlink. */
-static int
-mlx5_vdpa_nl_roce_disable(const char *addr)
-{
- int nlsk_fd = mlx5_nl_init(NETLINK_GENERIC);
- int devlink_id;
- int enable;
- int ret;
-
- if (nlsk_fd < 0)
- return nlsk_fd;
- devlink_id = mlx5_nl_devlink_family_id_get(nlsk_fd);
- if (devlink_id < 0) {
- ret = devlink_id;
- DRV_LOG(DEBUG, "Failed to get devlink id for ROCE operations by"
- " Netlink.");
- goto close;
- }
- ret = mlx5_nl_enable_roce_get(nlsk_fd, devlink_id, addr, &enable);
- if (ret) {
- DRV_LOG(DEBUG, "Failed to get ROCE enable by Netlink: %d.",
- ret);
- goto close;
- } else if (!enable) {
- DRV_LOG(INFO, "ROCE has already disabled(Netlink).");
- goto close;
- }
- ret = mlx5_nl_enable_roce_set(nlsk_fd, devlink_id, addr, 0);
- if (ret)
- DRV_LOG(DEBUG, "Failed to disable ROCE by Netlink: %d.", ret);
- else
- DRV_LOG(INFO, "ROCE is disabled by Netlink successfully.");
-close:
- close(nlsk_fd);
- return ret;
-}
-
-/* Try to disable ROCE by sysfs. */
-static int
-mlx5_vdpa_sys_roce_disable(const char *addr)
-{
- FILE *file_o;
- int enable;
- int ret;
-
- MKSTR(file_p, "/sys/bus/pci/devices/%s/roce_enable", addr);
- file_o = fopen(file_p, "rb");
- if (!file_o) {
- rte_errno = ENOTSUP;
- return -ENOTSUP;
- }
- ret = fscanf(file_o, "%d", &enable);
- if (ret != 1) {
- rte_errno = EINVAL;
- ret = EINVAL;
- goto close;
- } else if (!enable) {
- ret = 0;
- DRV_LOG(INFO, "ROCE has already disabled(sysfs).");
- goto close;
- }
- fclose(file_o);
- file_o = fopen(file_p, "wb");
- if (!file_o) {
- rte_errno = ENOTSUP;
- return -ENOTSUP;
- }
- fprintf(file_o, "0\n");
- ret = 0;
-close:
- if (ret)
- DRV_LOG(DEBUG, "Failed to disable ROCE by sysfs: %d.", ret);
- else
- DRV_LOG(INFO, "ROCE is disabled by sysfs successfully.");
- fclose(file_o);
- return ret;
-}
-
-static int
-mlx5_vdpa_roce_disable(struct rte_device *dev)
-{
- char pci_addr[PCI_PRI_STR_SIZE] = { 0 };
-
- if (mlx5_dev_to_pci_str(dev, pci_addr, sizeof(pci_addr)) < 0)
- return -rte_errno;
- /* Firstly try to disable ROCE by Netlink and fallback to sysfs. */
- if (mlx5_vdpa_nl_roce_disable(pci_addr) != 0 &&
- mlx5_vdpa_sys_roce_disable(pci_addr) != 0)
- return -rte_errno;
- return 0;
-}
-
static int
mlx5_vdpa_args_check_handler(const char *key, const char *val, void *opaque)
{
static int
mlx5_vdpa_dev_probe(struct mlx5_common_device *cdev)
{
- struct ibv_device *ibv;
struct mlx5_vdpa_priv *priv = NULL;
- struct ibv_context *ctx = NULL;
struct mlx5_hca_attr attr;
- int retry;
int ret;
- if (mlx5_vdpa_roce_disable(cdev->dev) != 0) {
- DRV_LOG(WARNING, "Failed to disable ROCE for \"%s\".",
- cdev->dev->name);
- return -rte_errno;
- }
- /* Wait for the IB device to appear again after reload. */
- for (retry = MLX5_VDPA_MAX_RETRIES; retry > 0; --retry) {
- ibv = mlx5_os_get_ibv_dev(cdev->dev);
- if (ibv != NULL)
- break;
- usleep(MLX5_VDPA_USEC);
- }
- if (ibv == NULL) {
- DRV_LOG(ERR, "Cannot get IB device after disabling RoCE for "
- "\"%s\", retries exceed %d.",
- cdev->dev->name, MLX5_VDPA_MAX_RETRIES);
- rte_errno = EAGAIN;
- return -rte_errno;
- }
- ctx = mlx5_glue->dv_open_device(ibv);
- if (!ctx) {
- DRV_LOG(ERR, "Failed to open IB device \"%s\".", ibv->name);
- rte_errno = ENODEV;
- return -rte_errno;
- }
- ret = mlx5_devx_cmd_query_hca_attr(ctx, &attr);
+ ret = mlx5_devx_cmd_query_hca_attr(cdev->ctx, &attr);
if (ret) {
DRV_LOG(ERR, "Unable to read HCA capabilities.");
rte_errno = ENOTSUP;
- goto error;
+ return -rte_errno;
} else if (!attr.vdpa.valid || !attr.vdpa.max_num_virtio_queues) {
DRV_LOG(ERR, "Not enough capabilities to support vdpa, maybe "
"old FW/OFED version?");
rte_errno = ENOTSUP;
- goto error;
+ return -rte_errno;
}
if (!attr.vdpa.queue_counters_valid)
DRV_LOG(DEBUG, "No capability to support virtq statistics.");
if (!priv) {
DRV_LOG(ERR, "Failed to allocate private memory.");
rte_errno = ENOMEM;
- goto error;
+ return -rte_errno;
}
priv->caps = attr.vdpa;
priv->log_max_rqt_size = attr.log_max_rqt_size;
priv->qp_ts_format = attr.qp_ts_format;
if (attr.num_lag_ports == 0)
priv->num_lag_ports = 1;
- priv->ctx = ctx;
- priv->var = mlx5_glue->dv_alloc_var(ctx, 0);
+ priv->cdev = cdev;
+ priv->var = mlx5_glue->dv_alloc_var(priv->cdev->ctx, 0);
if (!priv->var) {
DRV_LOG(ERR, "Failed to allocate VAR %u.", errno);
goto error;
mlx5_glue->dv_free_var(priv->var);
rte_free(priv);
}
- if (ctx)
- mlx5_glue->close_device(ctx);
return -rte_errno;
}
}
if (priv->vdev)
rte_vdpa_unregister_device(priv->vdev);
- mlx5_glue->close_device(priv->ctx);
pthread_mutex_destroy(&priv->vq_config_lock);
rte_free(priv);
}
uint16_t hw_max_latency_us; /* Hardware CQ moderation period in usec. */
uint16_t hw_max_pending_comp; /* Hardware CQ moderation counter. */
struct rte_vdpa_device *vdev; /* vDPA device. */
+ struct mlx5_common_device *cdev; /* Backend mlx5 device. */
int vid; /* vhost device id. */
- struct ibv_context *ctx; /* Device context. */
struct mlx5_hca_vdpa_attr caps;
uint32_t pdn; /* Protection Domain number. */
struct ibv_pd *pd;
{
if (priv->eventc)
return 0;
- priv->eventc = mlx5_os_devx_create_event_channel(priv->ctx,
+ priv->eventc = mlx5_os_devx_create_event_channel(priv->cdev->ctx,
MLX5DV_DEVX_CREATE_EVENT_CHANNEL_FLAGS_OMIT_EV_DATA);
if (!priv->eventc) {
rte_errno = errno;
* registers writings, it is safe to allocate UAR with any
* memory mapping type.
*/
- priv->uar = mlx5_devx_alloc_uar(priv->ctx, -1);
+ priv->uar = mlx5_devx_alloc_uar(priv->cdev->ctx, -1);
if (!priv->uar) {
rte_errno = errno;
DRV_LOG(ERR, "Failed to allocate UAR.");
uint16_t event_nums[1] = {0};
int ret;
- ret = mlx5_devx_cq_create(priv->ctx, &cq->cq_obj, log_desc_n, &attr,
- SOCKET_ID_ANY);
+ ret = mlx5_devx_cq_create(priv->cdev->ctx, &cq->cq_obj, log_desc_n,
+ &attr, SOCKET_ID_ANY);
if (ret)
goto error;
cq->cq_ci = 0;
int flags;
/* Setup device event channel. */
- priv->err_chnl = mlx5_glue->devx_create_event_channel(priv->ctx, 0);
+ priv->err_chnl = mlx5_glue->devx_create_event_channel(priv->cdev->ctx,
+ 0);
if (!priv->err_chnl) {
rte_errno = errno;
DRV_LOG(ERR, "Failed to create device event channel %d.",
return -1;
attr.pd = priv->pdn;
attr.ts_format = mlx5_ts_format_conv(priv->qp_ts_format);
- eqp->fw_qp = mlx5_devx_cmd_create_qp(priv->ctx, &attr);
+ eqp->fw_qp = mlx5_devx_cmd_create_qp(priv->cdev->ctx, &attr);
if (!eqp->fw_qp) {
DRV_LOG(ERR, "Failed to create FW QP(%u).", rte_errno);
goto error;
attr.log_rq_stride = rte_log2_u32(MLX5_WSEG_SIZE);
attr.sq_size = 0; /* No need SQ. */
attr.ts_format = mlx5_ts_format_conv(priv->qp_ts_format);
- ret = mlx5_devx_qp_create(priv->ctx, &(eqp->sw_qp), log_desc_n, &attr,
- SOCKET_ID_ANY);
+ ret = mlx5_devx_qp_create(priv->cdev->ctx, &(eqp->sw_qp), log_desc_n,
+ &attr, SOCKET_ID_ANY);
if (ret) {
DRV_LOG(ERR, "Failed to create SW QP(%u).", rte_errno);
goto error;
DRV_LOG(ERR, "Failed to allocate mem for lm mr.");
return -1;
}
- mr->umem = mlx5_glue->devx_umem_reg(priv->ctx,
+ mr->umem = mlx5_glue->devx_umem_reg(priv->cdev->ctx,
(void *)(uintptr_t)log_base,
log_size, IBV_ACCESS_LOCAL_WRITE);
if (!mr->umem) {
goto err;
}
mkey_attr.umem_id = mr->umem->umem_id;
- mr->mkey = mlx5_devx_cmd_mkey_create(priv->ctx, &mkey_attr);
+ mr->mkey = mlx5_devx_cmd_mkey_create(priv->cdev->ctx, &mkey_attr);
if (!mr->mkey) {
DRV_LOG(ERR, "Failed to create Mkey for lm.");
goto err;
DRV_LOG(ERR, "Failed to allocate mem entry memory.");
goto error;
}
- entry->umem = mlx5_glue->devx_umem_reg(priv->ctx,
+ entry->umem = mlx5_glue->devx_umem_reg(priv->cdev->ctx,
(void *)(uintptr_t)reg->host_user_addr,
reg->size, IBV_ACCESS_LOCAL_WRITE);
if (!entry->umem) {
mkey_attr.umem_id = entry->umem->umem_id;
mkey_attr.pd = priv->pdn;
mkey_attr.pg_access = 1;
- entry->mkey = mlx5_devx_cmd_mkey_create(priv->ctx, &mkey_attr);
+ entry->mkey = mlx5_devx_cmd_mkey_create(priv->cdev->ctx,
+ &mkey_attr);
if (!entry->mkey) {
DRV_LOG(ERR, "Failed to create direct Mkey.");
ret = -rte_errno;
ret = -ENOMEM;
goto error;
}
- entry->mkey = mlx5_devx_cmd_mkey_create(priv->ctx, &mkey_attr);
+ entry->mkey = mlx5_devx_cmd_mkey_create(priv->cdev->ctx, &mkey_attr);
if (!entry->mkey) {
DRV_LOG(ERR, "Failed to create indirect Mkey.");
ret = -rte_errno;
attr->rqt_max_size = rqt_n;
attr->rqt_actual_size = rqt_n;
if (!priv->steer.rqt) {
- priv->steer.rqt = mlx5_devx_cmd_create_rqt(priv->ctx, attr);
+ priv->steer.rqt = mlx5_devx_cmd_create_rqt(priv->cdev->ctx,
+ attr);
if (!priv->steer.rqt) {
DRV_LOG(ERR, "Failed to create RQT.");
ret = -rte_errno;
tir_att.rx_hash_field_selector_outer.selected_fields =
vars[i][HASH];
priv->steer.rss[i].matcher = mlx5_glue->dv_create_flow_matcher
- (priv->ctx, &dv_attr, priv->steer.tbl);
+ (priv->cdev->ctx, &dv_attr, priv->steer.tbl);
if (!priv->steer.rss[i].matcher) {
DRV_LOG(ERR, "Failed to create matcher %d.", i);
goto error;
}
- priv->steer.rss[i].tir = mlx5_devx_cmd_create_tir(priv->ctx,
- &tir_att);
+ priv->steer.rss[i].tir = mlx5_devx_cmd_create_tir
+ (priv->cdev->ctx, &tir_att);
if (!priv->steer.rss[i].tir) {
DRV_LOG(ERR, "Failed to create TIR %d.", i);
goto error;
mlx5_vdpa_steer_setup(struct mlx5_vdpa_priv *priv)
{
#ifdef HAVE_MLX5DV_DR
- priv->steer.domain = mlx5_glue->dr_create_domain(priv->ctx,
+ priv->steer.domain = mlx5_glue->dr_create_domain(priv->cdev->ctx,
MLX5DV_DR_DOMAIN_TYPE_NIC_RX);
if (!priv->steer.domain) {
DRV_LOG(ERR, "Failed to create Rx domain.");
if (priv->caps.queue_counters_valid) {
if (!virtq->counters)
virtq->counters = mlx5_devx_cmd_create_virtio_q_counters
- (priv->ctx);
+ (priv->cdev->ctx);
if (!virtq->counters) {
DRV_LOG(ERR, "Failed to create virtq couners for virtq"
" %d.", index);
" %u.", i, index);
goto error;
}
- virtq->umems[i].obj = mlx5_glue->devx_umem_reg(priv->ctx,
+ virtq->umems[i].obj = mlx5_glue->devx_umem_reg(priv->cdev->ctx,
virtq->umems[i].buf,
virtq->umems[i].size,
IBV_ACCESS_LOCAL_WRITE);
attr.hw_latency_mode = priv->hw_latency_mode;
attr.hw_max_latency_us = priv->hw_max_latency_us;
attr.hw_max_pending_comp = priv->hw_max_pending_comp;
- virtq->virtq = mlx5_devx_cmd_create_virtq(priv->ctx, &attr);
+ virtq->virtq = mlx5_devx_cmd_create_virtq(priv->cdev->ctx, &attr);
virtq->priv = priv;
if (!virtq->virtq)
goto error;
mlx5_vdpa_virtqs_prepare(struct mlx5_vdpa_priv *priv)
{
struct mlx5_devx_tis_attr tis_attr = {0};
+ struct ibv_context *ctx = priv->cdev->ctx;
uint32_t i;
uint16_t nr_vring = rte_vhost_get_vring_num(priv->vid);
int ret = rte_vhost_get_negotiated_features(priv->vid, &priv->features);
}
/* Always map the entire page. */
priv->virtq_db_addr = mmap(NULL, priv->var->length, PROT_READ |
- PROT_WRITE, MAP_SHARED, priv->ctx->cmd_fd,
+ PROT_WRITE, MAP_SHARED, ctx->cmd_fd,
priv->var->mmap_off);
if (priv->virtq_db_addr == MAP_FAILED) {
DRV_LOG(ERR, "Failed to map doorbell page %u.", errno);
DRV_LOG(DEBUG, "VAR address of doorbell mapping is %p.",
priv->virtq_db_addr);
}
- priv->td = mlx5_devx_cmd_create_td(priv->ctx);
+ priv->td = mlx5_devx_cmd_create_td(ctx);
if (!priv->td) {
DRV_LOG(ERR, "Failed to create transport domain.");
return -rte_errno;
for (i = 0; i < priv->num_lag_ports; i++) {
/* 0 is auto affinity, non-zero value to propose port. */
tis_attr.lag_tx_port_affinity = i + 1;
- priv->tiss[i] = mlx5_devx_cmd_create_tis(priv->ctx, &tis_attr);
+ priv->tiss[i] = mlx5_devx_cmd_create_tis(ctx, &tis_attr);
if (!priv->tiss[i]) {
DRV_LOG(ERR, "Failed to create TIS %u.", i);
goto error;