strerror(rte_errno));
goto error;
}
- sh = mlx5_alloc_shared_ibctx(spawn, &config);
+ sh = mlx5_alloc_shared_dev_ctx(spawn, &config);
if (!sh)
return NULL;
config.devx = sh->devx;
goto error;
}
priv->sh = sh;
- priv->ibv_port = spawn->phys_port;
+ priv->dev_port = spawn->phys_port;
priv->pci_dev = spawn->pci_dev;
priv->mtu = RTE_ETHER_MTU;
priv->mp_id.port_id = port_id;
rte_eth_dev_release_port(eth_dev);
}
if (sh)
- mlx5_free_shared_ibctx(sh);
+ mlx5_free_shared_dev_ctx(sh);
MLX5_ASSERT(err > 0);
rte_errno = err;
return NULL;
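For orientation, the probe error path above follows the PMD's usual convention: release the shared-context reference if one was taken, store a positive errno-style code in rte_errno, and return NULL. A minimal sketch of that convention, using a hypothetical helper name, not the literal probe code:

	static struct rte_eth_dev *
	mlx5_probe_fail(struct mlx5_dev_ctx_shared *sh, int err)
	{
		if (sh)
			mlx5_free_shared_dev_ctx(sh); /* drop our reference */
		MLX5_ASSERT(err > 0);                 /* positive errno code */
		rte_errno = err;
		return NULL;                          /* caller reads rte_errno */
	}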
/** Driver-specific log messages type. */
int mlx5_logtype;
-static LIST_HEAD(, mlx5_dev_ctx_shared) mlx5_ibv_list = LIST_HEAD_INITIALIZER();
-static pthread_mutex_t mlx5_ibv_list_mutex = PTHREAD_MUTEX_INITIALIZER;
+static LIST_HEAD(, mlx5_dev_ctx_shared) mlx5_dev_ctx_list =
+ LIST_HEAD_INITIALIZER();
+static pthread_mutex_t mlx5_dev_ctx_list_mutex = PTHREAD_MUTEX_INITIALIZER;
static const struct mlx5_indexed_pool_config mlx5_ipool_cfg[] = {
#ifdef HAVE_IBV_FLOW_DV_SUPPORT
}
/**
- * Allocate shared IB device context. If there is multiport device the
- * master and representors will share this context, if there is single
- * port dedicated IB device, the context will be used by only given
- * port due to unification.
+ * Allocate shared device context. If there is a multiport device, the
+ * master and representors will share this context; if there is a
+ * single-port dedicated device, the context will be used only by the
+ * given port due to unification.
*
- * Routine first searches the context for the specified IB device name,
- * if found the shared context assumed and reference counter is incremented.
- * If no context found the new one is created and initialized with specified
- * IB device context and parameters.
+ * The routine first searches the context list for the specified device
+ * name; if found, the shared context is assumed and its reference counter
+ * is incremented. If no context is found, a new one is created and
+ * initialized with the specified device context and parameters.
*
* @param[in] spawn
- * Pointer to the IB device attributes (name, port, etc).
+ * Pointer to the device attributes (name, port, etc.).
* @param[in] config
* Pointer to device configuration structure.
*
* otherwise NULL and rte_errno is set.
*/
struct mlx5_dev_ctx_shared *
-mlx5_alloc_shared_ibctx(const struct mlx5_dev_spawn_data *spawn,
- const struct mlx5_dev_config *config)
+mlx5_alloc_shared_dev_ctx(const struct mlx5_dev_spawn_data *spawn,
+ const struct mlx5_dev_config *config)
{
struct mlx5_dev_ctx_shared *sh;
int err = 0;
MLX5_ASSERT(spawn);
/* Secondary process should not create the shared context. */
MLX5_ASSERT(rte_eal_process_type() == RTE_PROC_PRIMARY);
- pthread_mutex_lock(&mlx5_ibv_list_mutex);
+ pthread_mutex_lock(&mlx5_dev_ctx_list_mutex);
-	/* Search for IB context by device name. */
-	LIST_FOREACH(sh, &mlx5_ibv_list, next) {
+	/* Search for device context by device name. */
+	LIST_FOREACH(sh, &mlx5_dev_ctx_list, next) {
if (!strcmp(sh->ibdev_name,
mlx5_os_get_dev_device_name(spawn->phys_dev))) {
sh->refcnt++;
sh = rte_zmalloc("ethdev shared ib context",
sizeof(struct mlx5_dev_ctx_shared) +
spawn->max_port *
- sizeof(struct mlx5_ibv_shared_port),
+ sizeof(struct mlx5_dev_shared_port),
RTE_CACHE_LINE_SIZE);
if (!sh) {
DRV_LOG(ERR, "shared context allocation failure");
sh, mem_event_cb);
rte_rwlock_write_unlock(&mlx5_shared_data->mem_event_rwlock);
/* Add context to the global device list. */
- LIST_INSERT_HEAD(&mlx5_ibv_list, sh, next);
+ LIST_INSERT_HEAD(&mlx5_dev_ctx_list, sh, next);
exit:
- pthread_mutex_unlock(&mlx5_ibv_list_mutex);
+ pthread_mutex_unlock(&mlx5_dev_ctx_list_mutex);
return sh;
error:
- pthread_mutex_unlock(&mlx5_ibv_list_mutex);
+ pthread_mutex_unlock(&mlx5_dev_ctx_list_mutex);
MLX5_ASSERT(sh);
if (sh->tis)
claim_zero(mlx5_devx_cmd_destroy(sh->tis));
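The lookup-or-create flow described in the comment above reduces to a small, mutex-protected pattern. A self-contained sketch with hypothetical shared_ctx names (plain libc, not the PMD code):

	#include <pthread.h>
	#include <stdlib.h>
	#include <string.h>
	#include <sys/queue.h>

	struct shared_ctx {
		LIST_ENTRY(shared_ctx) next;
		char name[64];
		unsigned int refcnt;
	};

	static LIST_HEAD(, shared_ctx) ctx_list = LIST_HEAD_INITIALIZER(ctx_list);
	static pthread_mutex_t ctx_list_mutex = PTHREAD_MUTEX_INITIALIZER;

	static struct shared_ctx *
	shared_ctx_get(const char *name)
	{
		struct shared_ctx *sh;

		pthread_mutex_lock(&ctx_list_mutex);
		/* Reuse an existing context registered under the same name. */
		LIST_FOREACH(sh, &ctx_list, next) {
			if (!strcmp(sh->name, name)) {
				sh->refcnt++;
				goto exit;
			}
		}
		/* Not found: create, initialize and publish a new one. */
		sh = calloc(1, sizeof(*sh));
		if (sh != NULL) {
			strncpy(sh->name, name, sizeof(sh->name) - 1);
			sh->refcnt = 1;
			LIST_INSERT_HEAD(&ctx_list, sh, next);
		}
	exit:
		pthread_mutex_unlock(&ctx_list_mutex);
		return sh;
	}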
* Pointer to mlx5_dev_ctx_shared object to free
*/
void
-mlx5_free_shared_ibctx(struct mlx5_dev_ctx_shared *sh)
+mlx5_free_shared_dev_ctx(struct mlx5_dev_ctx_shared *sh)
{
- pthread_mutex_lock(&mlx5_ibv_list_mutex);
+ pthread_mutex_lock(&mlx5_dev_ctx_list_mutex);
#ifdef RTE_LIBRTE_MLX5_DEBUG
/* Check the object presence in the list. */
struct mlx5_dev_ctx_shared *lctx;
- LIST_FOREACH(lctx, &mlx5_ibv_list, next)
+ LIST_FOREACH(lctx, &mlx5_dev_ctx_list, next)
if (lctx == sh)
break;
MLX5_ASSERT(lctx);
mlx5_flow_id_pool_release(sh->flow_id_pool);
rte_free(sh);
exit:
- pthread_mutex_unlock(&mlx5_ibv_list_mutex);
+ pthread_mutex_unlock(&mlx5_dev_ctx_list_mutex);
}
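The release side mirrors the same discipline under the same mutex; continuing the hypothetical sketch above:

	/* Drop one reference; unlink and free on the last one. */
	static void
	shared_ctx_put(struct shared_ctx *sh)
	{
		pthread_mutex_lock(&ctx_list_mutex);
		if (--sh->refcnt == 0) {
			LIST_REMOVE(sh, next);
			free(sh);
		}
		pthread_mutex_unlock(&ctx_list_mutex);
	}

In the driver the calls pair up the same way: the spawn path takes a reference with mlx5_alloc_shared_dev_ctx() and every teardown path (device close, probe failure) drops it with mlx5_free_shared_dev_ctx().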
/**
 * mlx5_nl_mac_addr_flush() uses ibdev_path for retrieving
* ifindex if Netlink fails.
*/
- mlx5_free_shared_ibctx(priv->sh);
+ mlx5_free_shared_dev_ctx(priv->sh);
if (priv->domain_id != RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID) {
unsigned int c = 0;
uint16_t port_id;
#define MLX5_AGE_GET(age_info, BIT) \
((age_info)->flags & (1 << (BIT)))
#define GET_PORT_AGE_INFO(priv) \
- (&((priv)->sh->port[(priv)->ibv_port - 1].age_info))
+ (&((priv)->sh->port[(priv)->dev_port - 1].age_info))
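The "- 1" exists because dev_port keeps the 1-based IB port numbering while the shared port[] array is 0-based. A toy, self-contained illustration of the convention:

	#include <assert.h>

	struct port_data { int age; };

	int main(void)
	{
		struct port_data port[2] = { {0}, {0} };
		unsigned int dev_port = 1; /* first port, IB numbering */

		/* Port 1 maps to array slot 0, port 2 to slot 1. */
		assert(&port[dev_port - 1] == &port[0]);
		return 0;
	}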
/* Per-port aging information. */
struct mlx5_age_info {
};
-/* Per port data of shared IB device. */
-struct mlx5_ibv_shared_port {
+/* Per-port data of the shared device context. */
+struct mlx5_dev_shared_port {
uint32_t ih_port_id;
uint32_t devx_ih_port_id;
/*
struct mlx5_devx_obj *tis; /* TIS object. */
struct mlx5_devx_obj *td; /* Transport domain. */
struct mlx5_flow_id_pool *flow_id_pool; /* Flow ID pool. */
- struct mlx5_ibv_shared_port port[]; /* per device port data array. */
+ struct mlx5_dev_shared_port port[]; /* per device port data array. */
};
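The trailing port[] flexible array member is what the earlier allocation sizes for: one rte_zmalloc() covers the shared context plus max_port per-port slots. A reduced, hypothetical sketch of that layout with plain libc:

	#include <stdlib.h>

	struct dev_shared_port { unsigned int ih_port_id; };

	struct dev_ctx_shared {
		unsigned int max_port;
		struct dev_shared_port port[]; /* flexible array member */
	};

	/* One allocation holds the context and max_port trailing entries. */
	static struct dev_ctx_shared *
	dev_ctx_alloc(unsigned int max_port)
	{
		struct dev_ctx_shared *sh;

		sh = calloc(1, sizeof(*sh) + max_port * sizeof(sh->port[0]));
		if (sh != NULL)
			sh->max_port = max_port;
		return sh;
	}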
/* Per-process private structure. */
struct mlx5_priv {
struct rte_eth_dev_data *dev_data; /* Pointer to device data. */
struct mlx5_dev_ctx_shared *sh; /* Shared device context. */
- uint32_t ibv_port; /* IB device port number. */
+ uint32_t dev_port; /* Device port number. */
struct rte_pci_device *pci_dev; /* Backend PCI device. */
struct rte_ether_addr mac[MLX5_MAX_MAC_ADDRESSES]; /* MAC addresses. */
BITFIELD_DECLARE(mac_own, uint64_t, MLX5_MAX_MAC_ADDRESSES);
port_id = mlx5_eth_find_next(port_id + 1, pci_dev))
int mlx5_args(struct mlx5_dev_config *config, struct rte_devargs *devargs);
struct mlx5_dev_ctx_shared *
-mlx5_alloc_shared_ibctx(const struct mlx5_dev_spawn_data *spawn,
- const struct mlx5_dev_config *config);
-void mlx5_free_shared_ibctx(struct mlx5_dev_ctx_shared *sh);
+mlx5_alloc_shared_dev_ctx(const struct mlx5_dev_spawn_data *spawn,
+ const struct mlx5_dev_config *config);
+void mlx5_free_shared_dev_ctx(struct mlx5_dev_ctx_shared *sh);
void mlx5_free_table_hash_list(struct mlx5_priv *priv);
int mlx5_alloc_table_hash_list(struct mlx5_priv *priv);
void mlx5_set_min_inline(struct mlx5_dev_spawn_data *spawn,
} flow_attr = {
.attr = {
.num_of_specs = 2,
- .port = (uint8_t)priv->ibv_port,
+ .port = (uint8_t)priv->dev_port,
},
.eth = {
.type = IBV_FLOW_SPEC_ETH,
*cache_resource = *resource;
/*
* Depending on rdma_core version the glue routine calls
- * either mlx5dv_dr_action_create_dest_ib_port(domain, ibv_port)
+ * either mlx5dv_dr_action_create_dest_ib_port(domain, dev_port)
* or mlx5dv_dr_action_create_dest_vport(domain, vport_id).
*/
cache_resource->action =
* This parameter is transferred to
* mlx5dv_dr_action_create_dest_ib_port().
*/
- *dst_port_id = priv->ibv_port;
+ *dst_port_id = priv->dev_port;
#else
/*
 * Legacy mode, no LAG configuration is supported.
/* Other members of attr will be ignored. */
dev_flow->verbs.attr.priority =
mlx5_flow_adjust_priority(dev, priority, subpriority);
- dev_flow->verbs.attr.port = (uint8_t)priv->ibv_port;
+ dev_flow->verbs.attr.port = (uint8_t)priv->dev_port;
return 0;
}
container_of(txq, struct mlx5_txq_ctrl, txq);
struct ibv_qp_attr mod = {
.qp_state = IBV_QPS_RESET,
- .port_num = (uint8_t)priv->ibv_port,
+ .port_num = (uint8_t)priv->dev_port,
};
struct ibv_qp *qp = txq_ctrl->obj->qp;
if (priv->sh) {
MKSTR(path, "%s/ports/%d/hw_counters/%s",
priv->sh->ibdev_path,
- priv->ibv_port,
+ priv->dev_port,
ctr_name);
fd = open(path, O_RDONLY);
if (fd != -1) {
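Each hw_counters file holds a single decimal value. A self-contained sketch of reading one such counter with plain libc (hypothetical helper name; the driver itself uses MKSTR and its own error handling):

	#include <fcntl.h>
	#include <stdint.h>
	#include <stdio.h>
	#include <stdlib.h>
	#include <unistd.h>

	static int
	read_hw_counter(const char *ibdev_path, unsigned int dev_port,
			const char *ctr_name, uint64_t *value)
	{
		char path[256];
		char buf[32];
		ssize_t n;
		int fd;

		/* e.g. <ibdev_path>/ports/1/hw_counters/out_of_buffer */
		snprintf(path, sizeof(path), "%s/ports/%u/hw_counters/%s",
			 ibdev_path, dev_port, ctr_name);
		fd = open(path, O_RDONLY);
		if (fd < 0)
			return -1;
		n = read(fd, buf, sizeof(buf) - 1);
		close(fd);
		if (n <= 0)
			return -1;
		buf[n] = '\0';
		*value = strtoull(buf, NULL, 10);
		return 0;
	}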
/* Enable datapath on secondary process. */
mlx5_mp_req_start_rxtx(dev);
if (priv->sh->intr_handle.fd >= 0) {
- priv->sh->port[priv->ibv_port - 1].ih_port_id =
+ priv->sh->port[priv->dev_port - 1].ih_port_id =
(uint32_t)dev->data->port_id;
} else {
DRV_LOG(INFO, "port %u starts without LSC and RMV interrupts.",
dev->data->dev_conf.intr_conf.rmv = 0;
}
if (priv->sh->intr_handle_devx.fd >= 0)
- priv->sh->port[priv->ibv_port - 1].devx_ih_port_id =
+ priv->sh->port[priv->dev_port - 1].devx_ih_port_id =
(uint32_t)dev->data->port_id;
return 0;
error:
/* All RX queue flags will be cleared in the flush interface. */
mlx5_flow_list_flush(dev, &priv->flows, true);
mlx5_rx_intr_vec_disable(dev);
- priv->sh->port[priv->ibv_port - 1].ih_port_id = RTE_MAX_ETHPORTS;
- priv->sh->port[priv->ibv_port - 1].devx_ih_port_id = RTE_MAX_ETHPORTS;
+ priv->sh->port[priv->dev_port - 1].ih_port_id = RTE_MAX_ETHPORTS;
+ priv->sh->port[priv->dev_port - 1].devx_ih_port_id = RTE_MAX_ETHPORTS;
mlx5_txq_stop(dev);
mlx5_rxq_stop(dev);
}
/* Move the QP to this state. */
.qp_state = IBV_QPS_INIT,
/* IB device port number. */
- .port_num = (uint8_t)priv->ibv_port,
+ .port_num = (uint8_t)priv->dev_port,
};
ret = mlx5_glue->modify_qp(tmpl.qp, &attr.mod,
(IBV_QP_STATE | IBV_QP_PORT));
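The same two-flag transition in plain libibverbs terms: moving a raw packet QP from RESET to INIT needs only the target state and the 1-based port binding. A hedged, self-contained sketch (hypothetical helper name):

	#include <infiniband/verbs.h>

	static int
	qp_reset_to_init(struct ibv_qp *qp, uint8_t dev_port)
	{
		struct ibv_qp_attr attr = {
			.qp_state = IBV_QPS_INIT,
			.port_num = dev_port, /* 1-based IB port number */
		};

		return ibv_modify_qp(qp, &attr, IBV_QP_STATE | IBV_QP_PORT);
	}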