Replace all occurrences of 'mlx5_ibv_shared' with 'mlx5_dev_ctx_shared'.
Signed-off-by: Ophir Munk <ophirmu@mellanox.com>
Acked-by: Matan Azrad <matan@mellanox.com>
struct rte_pci_device *pci_dev; /**< Backend PCI device. */
};
-static LIST_HEAD(, mlx5_ibv_shared) mlx5_ibv_list = LIST_HEAD_INITIALIZER();
+static LIST_HEAD(, mlx5_dev_ctx_shared) mlx5_ibv_list = LIST_HEAD_INITIALIZER();
static pthread_mutex_t mlx5_ibv_list_mutex = PTHREAD_MUTEX_INITIALIZER;
static struct mlx5_indexed_pool_config mlx5_ipool_cfg[] = {
* Initialize the shared aging list information per port.
*
* @param[in] sh
- * Pointer to mlx5_ibv_shared object.
+ * Pointer to mlx5_dev_ctx_shared object.
*/
static void
-mlx5_flow_aging_init(struct mlx5_ibv_shared *sh)
+mlx5_flow_aging_init(struct mlx5_dev_ctx_shared *sh)
{
uint32_t i;
struct mlx5_age_info *age_info;
* Initialize the counters management structure.
*
* @param[in] sh
- * Pointer to mlx5_ibv_shared object to free
+ * Pointer to mlx5_dev_ctx_shared object to free
*/
static void
-mlx5_flow_counters_mng_init(struct mlx5_ibv_shared *sh)
+mlx5_flow_counters_mng_init(struct mlx5_dev_ctx_shared *sh)
{
int i;
* Close and release all the resources of the counters management.
*
* @param[in] sh
- * Pointer to mlx5_ibv_shared object to free.
+ * Pointer to mlx5_dev_ctx_shared object to free.
*/
static void
-mlx5_flow_counters_mng_close(struct mlx5_ibv_shared *sh)
+mlx5_flow_counters_mng_close(struct mlx5_dev_ctx_shared *sh)
{
struct mlx5_counter_stats_mem_mng *mng;
int i;
* Initialize the flow resources' indexed mempool.
*
* @param[in] sh
- * Pointer to mlx5_ibv_shared object.
+ * Pointer to mlx5_dev_ctx_shared object.
* @param[in] sh
* Pointer to user dev config.
*/
static void
-mlx5_flow_ipool_create(struct mlx5_ibv_shared *sh,
+mlx5_flow_ipool_create(struct mlx5_dev_ctx_shared *sh,
const struct mlx5_dev_config *config __rte_unused)
{
uint8_t i;
* Release the flow resources' indexed mempool.
*
* @param[in] sh
- * Pointer to mlx5_ibv_shared object.
+ * Pointer to mlx5_dev_ctx_shared object.
*/
static void
-mlx5_flow_ipool_destroy(struct mlx5_ibv_shared *sh)
+mlx5_flow_ipool_destroy(struct mlx5_dev_ctx_shared *sh)
{
uint8_t i;
* between multiple ports of single IB device.
*
* @param sh
- * Pointer to mlx5_ibv_shared object.
+ * Pointer to mlx5_dev_ctx_shared object.
*/
static void
-mlx5_dev_shared_handler_install(struct mlx5_ibv_shared *sh)
+mlx5_dev_shared_handler_install(struct mlx5_dev_ctx_shared *sh)
{
int ret;
int flags;
* between multiple ports of single IB device.
*
* @param dev
- * Pointer to mlx5_ibv_shared object.
+ * Pointer to mlx5_dev_ctx_shared object.
*/
static void
-mlx5_dev_shared_handler_uninstall(struct mlx5_ibv_shared *sh)
+mlx5_dev_shared_handler_uninstall(struct mlx5_dev_ctx_shared *sh)
{
if (sh->intr_handle.fd >= 0)
mlx5_intr_callback_unregister(&sh->intr_handle,
* Pointer to device configuration structure.
*
* @return
- * Pointer to mlx5_ibv_shared object on success,
+ * Pointer to mlx5_dev_ctx_shared object on success,
* otherwise NULL and rte_errno is set.
*/
-static struct mlx5_ibv_shared *
+static struct mlx5_dev_ctx_shared *
mlx5_alloc_shared_ibctx(const struct mlx5_dev_spawn_data *spawn,
const struct mlx5_dev_config *config)
{
- struct mlx5_ibv_shared *sh;
+ struct mlx5_dev_ctx_shared *sh;
int dbmap_env;
int err = 0;
uint32_t i;
/* No device found, we have to create new shared context. */
MLX5_ASSERT(spawn->max_port);
sh = rte_zmalloc("ethdev shared ib context",
- sizeof(struct mlx5_ibv_shared) +
+ sizeof(struct mlx5_dev_ctx_shared) +
spawn->max_port *
sizeof(struct mlx5_ibv_shared_port),
RTE_CACHE_LINE_SIZE);
* all allocated resources and close handles.
*
* @param[in] sh
- * Pointer to mlx5_ibv_shared object to free
+ * Pointer to mlx5_dev_ctx_shared object to free
*/
static void
-mlx5_free_shared_ibctx(struct mlx5_ibv_shared *sh)
+mlx5_free_shared_ibctx(struct mlx5_dev_ctx_shared *sh)
{
pthread_mutex_lock(&mlx5_ibv_list_mutex);
#ifdef RTE_LIBRTE_MLX5_DEBUG
/* Check the object presence in the list. */
- struct mlx5_ibv_shared *lctx;
+ struct mlx5_dev_ctx_shared *lctx;
LIST_FOREACH(lctx, &mlx5_ibv_list, next)
if (lctx == sh)
static void
mlx5_free_table_hash_list(struct mlx5_priv *priv)
{
- struct mlx5_ibv_shared *sh = priv->sh;
+ struct mlx5_dev_ctx_shared *sh = priv->sh;
struct mlx5_flow_tbl_data_entry *tbl_data;
union mlx5_flow_tbl_key table_key = {
{
static int
mlx5_alloc_table_hash_list(struct mlx5_priv *priv)
{
- struct mlx5_ibv_shared *sh = priv->sh;
+ struct mlx5_dev_ctx_shared *sh = priv->sh;
char s[MLX5_HLIST_NAMESIZE];
int err = 0;
static int
mlx5_alloc_shared_dr(struct mlx5_priv *priv)
{
- struct mlx5_ibv_shared *sh = priv->sh;
+ struct mlx5_dev_ctx_shared *sh = priv->sh;
char s[MLX5_HLIST_NAMESIZE];
int err = 0;
static void
mlx5_free_shared_dr(struct mlx5_priv *priv)
{
- struct mlx5_ibv_shared *sh;
+ struct mlx5_dev_ctx_shared *sh;
if (!priv->dr_shared)
return;
mlx5_set_metadata_mask(struct rte_eth_dev *dev)
{
struct mlx5_priv *priv = dev->data->dev_private;
- struct mlx5_ibv_shared *sh = priv->sh;
+ struct mlx5_dev_ctx_shared *sh = priv->sh;
uint32_t meta, mark, reg_c0;
reg_c0 = ~priv->vport_meta_mask;
mlx5_dev_check_sibling_config(struct mlx5_priv *priv,
struct mlx5_dev_config *config)
{
- struct mlx5_ibv_shared *sh = priv->sh;
+ struct mlx5_dev_ctx_shared *sh = priv->sh;
struct mlx5_dev_config *sh_conf = NULL;
uint16_t port_id;
struct mlx5_dev_config config)
{
const struct mlx5_switch_info *switch_info = &spawn->info;
- struct mlx5_ibv_shared *sh = NULL;
+ struct mlx5_dev_ctx_shared *sh = NULL;
struct ibv_port_attr port_attr;
struct mlx5dv_context dv_attr = { .comp_mask = 0 };
struct rte_eth_dev *eth_dev = NULL;
#define MLX5_MP_NAME "net_mlx5_mp"
-LIST_HEAD(mlx5_dev_list, mlx5_ibv_shared);
+LIST_HEAD(mlx5_dev_list, mlx5_dev_ctx_shared);
/* Shared data between primary and secondary processes. */
struct mlx5_shared_data {
* Shared Infiniband device context for Master/Representors
* which belong to same IB device with multiple IB ports.
**/
-struct mlx5_ibv_shared {
- LIST_ENTRY(mlx5_ibv_shared) next;
+struct mlx5_dev_ctx_shared {
+ LIST_ENTRY(mlx5_dev_ctx_shared) next;
uint32_t refcnt;
uint32_t devx:1; /* Opened with DV. */
uint32_t max_port; /* Maximal IB device port index. */
char ibdev_name[IBV_SYSFS_NAME_MAX]; /* IB device name. */
char ibdev_path[IBV_SYSFS_PATH_MAX]; /* IB device path for secondary */
struct ibv_device_attr_ex device_attr; /* Device properties. */
- LIST_ENTRY(mlx5_ibv_shared) mem_event_cb;
+ LIST_ENTRY(mlx5_dev_ctx_shared) mem_event_cb;
/**< Called by memory event callback. */
struct mlx5_mr_share_cache share_cache;
/* Shared DV/DR flow data section. */
struct mlx5_priv {
struct rte_eth_dev_data *dev_data; /* Pointer to device data. */
- struct mlx5_ibv_shared *sh; /* Shared IB device context. */
+ struct mlx5_dev_ctx_shared *sh; /* Shared device context. */
uint32_t ibv_port; /* IB device port number. */
struct rte_pci_device *pci_dev; /* Backend PCI device. */
struct rte_ether_addr mac[MLX5_MAX_MAC_ADDRESSES]; /* MAC addresses. */
struct rte_flow *mlx5_flow_create_esw_table_zero_flow(struct rte_eth_dev *dev);
int mlx5_flow_create_drop_queue(struct rte_eth_dev *dev);
void mlx5_flow_delete_drop_queue(struct rte_eth_dev *dev);
-void mlx5_flow_async_pool_query_handle(struct mlx5_ibv_shared *sh,
+void mlx5_flow_async_pool_query_handle(struct mlx5_dev_ctx_shared *sh,
uint64_t async_id, int status);
-void mlx5_set_query_alarm(struct mlx5_ibv_shared *sh);
+void mlx5_set_query_alarm(struct mlx5_dev_ctx_shared *sh);
void mlx5_flow_query_alarm(void *arg);
uint32_t mlx5_counter_alloc(struct rte_eth_dev *dev);
void mlx5_counter_free(struct rte_eth_dev *dev, uint32_t cnt);
* Infiniband device shared context.
*/
static void
-mlx5_dev_interrupt_device_fatal(struct mlx5_ibv_shared *sh)
+mlx5_dev_interrupt_device_fatal(struct mlx5_dev_ctx_shared *sh)
{
uint32_t i;
void
mlx5_dev_interrupt_handler(void *cb_arg)
{
- struct mlx5_ibv_shared *sh = cb_arg;
+ struct mlx5_dev_ctx_shared *sh = cb_arg;
struct ibv_async_event event;
/* Read all message from the IB device and acknowledge them. */
(void)cb_arg;
return;
#else
- struct mlx5_ibv_shared *sh = cb_arg;
+ struct mlx5_dev_ctx_shared *sh = cb_arg;
union {
struct mlx5dv_devx_async_cmd_hdr cmd_resp;
uint8_t buf[MLX5_ST_SZ_BYTES(query_flow_counter_out) +
* Get number of all validate pools.
*
* @param[in] sh
- * Pointer to mlx5_ibv_shared object.
+ * Pointer to mlx5_dev_ctx_shared object.
*
* @return
* The number of all validate pools.
*/
static uint32_t
-mlx5_get_all_valid_pool_count(struct mlx5_ibv_shared *sh)
+mlx5_get_all_valid_pool_count(struct mlx5_dev_ctx_shared *sh)
{
int i;
uint32_t pools_n = 0;
* the counter pools.
*
* @param[in] sh
- * Pointer to mlx5_ibv_shared object.
+ * Pointer to mlx5_dev_ctx_shared object.
*/
void
-mlx5_set_query_alarm(struct mlx5_ibv_shared *sh)
+mlx5_set_query_alarm(struct mlx5_dev_ctx_shared *sh)
{
uint32_t pools_n, us;
void
mlx5_flow_query_alarm(void *arg)
{
- struct mlx5_ibv_shared *sh = arg;
+ struct mlx5_dev_ctx_shared *sh = arg;
struct mlx5_devx_obj *dcs;
uint16_t offset;
int ret;
* Check and callback event for new aged flow in the counter pool
*
* @param[in] sh
- * Pointer to mlx5_ibv_shared object.
+ * Pointer to mlx5_dev_ctx_shared object.
* @param[in] pool
* Pointer to Current counter pool.
*/
static void
-mlx5_flow_aging_check(struct mlx5_ibv_shared *sh,
+mlx5_flow_aging_check(struct mlx5_dev_ctx_shared *sh,
struct mlx5_flow_counter_pool *pool)
{
struct mlx5_priv *priv;
* query. This function is probably called by the host thread.
*
* @param[in] sh
- * The pointer to the shared IB device context.
+ * The pointer to the shared device context.
* @param[in] async_id
* The Devx async ID.
* @param[in] status
* The status of the completion.
*/
void
-mlx5_flow_async_pool_query_handle(struct mlx5_ibv_shared *sh,
+mlx5_flow_async_pool_query_handle(struct mlx5_dev_ctx_shared *sh,
uint64_t async_id, int status)
{
struct mlx5_flow_counter_pool *pool =
struct rte_flow_error *error __rte_unused)
{
struct mlx5_priv *priv = dev->data->dev_private;
- struct mlx5_ibv_shared *sh = priv->sh;
+ struct mlx5_dev_ctx_shared *sh = priv->sh;
return mlx5_devx_cmd_flow_dump(sh->fdb_domain, sh->rx_domain,
sh->tx_domain, file);
flow_dv_shared_lock(struct rte_eth_dev *dev)
{
struct mlx5_priv *priv = dev->data->dev_private;
- struct mlx5_ibv_shared *sh = priv->sh;
+ struct mlx5_dev_ctx_shared *sh = priv->sh;
if (sh->dv_refcnt > 1) {
int ret;
flow_dv_shared_unlock(struct rte_eth_dev *dev)
{
struct mlx5_priv *priv = dev->data->dev_private;
- struct mlx5_ibv_shared *sh = priv->sh;
+ struct mlx5_dev_ctx_shared *sh = priv->sh;
if (sh->dv_refcnt > 1) {
int ret;
struct rte_flow_error *error)
{
struct mlx5_priv *priv = dev->data->dev_private;
- struct mlx5_ibv_shared *sh = priv->sh;
+ struct mlx5_dev_ctx_shared *sh = priv->sh;
struct mlx5_flow_dv_encap_decap_resource *cache_resource;
struct mlx5dv_dr_domain *domain;
uint32_t idx = 0;
struct rte_flow_error *error)
{
struct mlx5_priv *priv = dev->data->dev_private;
- struct mlx5_ibv_shared *sh = priv->sh;
+ struct mlx5_dev_ctx_shared *sh = priv->sh;
struct mlx5_flow_dv_port_id_action_resource *cache_resource;
uint32_t idx = 0;
struct rte_flow_error *error)
{
struct mlx5_priv *priv = dev->data->dev_private;
- struct mlx5_ibv_shared *sh = priv->sh;
+ struct mlx5_dev_ctx_shared *sh = priv->sh;
struct mlx5_flow_dv_push_vlan_action_resource *cache_resource;
struct mlx5dv_dr_domain *domain;
uint32_t idx = 0;
struct rte_flow_error *error)
{
struct mlx5_priv *priv = dev->data->dev_private;
- struct mlx5_ibv_shared *sh = priv->sh;
+ struct mlx5_dev_ctx_shared *sh = priv->sh;
struct mlx5_flow_dv_modify_hdr_resource *cache_resource;
struct mlx5dv_dr_domain *ns;
uint32_t actions_len;
flow_dv_create_counter_stat_mem_mng(struct rte_eth_dev *dev, int raws_n)
{
struct mlx5_priv *priv = dev->data->dev_private;
- struct mlx5_ibv_shared *sh = priv->sh;
+ struct mlx5_dev_ctx_shared *sh = priv->sh;
struct mlx5_devx_mkey_attr mkey_attr;
struct mlx5_counter_stats_mem_mng *mem_mng;
volatile struct flow_counter_stats *raw_data;
struct rte_flow_error *error)
{
struct mlx5_priv *priv = dev->data->dev_private;
- struct mlx5_ibv_shared *sh = priv->sh;
+ struct mlx5_dev_ctx_shared *sh = priv->sh;
struct mlx5_flow_tbl_resource *tbl;
union mlx5_flow_tbl_key table_key = {
{
struct mlx5_flow_tbl_resource *tbl)
{
struct mlx5_priv *priv = dev->data->dev_private;
- struct mlx5_ibv_shared *sh = priv->sh;
+ struct mlx5_dev_ctx_shared *sh = priv->sh;
struct mlx5_flow_tbl_data_entry *tbl_data =
container_of(tbl, struct mlx5_flow_tbl_data_entry, tbl);
struct rte_flow_error *error)
{
struct mlx5_priv *priv = dev->data->dev_private;
- struct mlx5_ibv_shared *sh = priv->sh;
+ struct mlx5_dev_ctx_shared *sh = priv->sh;
struct mlx5_flow_dv_matcher *cache_matcher;
struct mlx5dv_flow_matcher_attr dv_attr = {
.type = IBV_FLOW_ATTR_NORMAL,
struct rte_flow_error *error)
{
struct mlx5_priv *priv = dev->data->dev_private;
- struct mlx5_ibv_shared *sh = priv->sh;
+ struct mlx5_dev_ctx_shared *sh = priv->sh;
struct mlx5_flow_dv_tag_resource *cache_resource;
struct mlx5_hlist_entry *entry;
uint32_t tag_idx)
{
struct mlx5_priv *priv = dev->data->dev_private;
- struct mlx5_ibv_shared *sh = priv->sh;
+ struct mlx5_dev_ctx_shared *sh = priv->sh;
struct mlx5_flow_dv_tag_resource *tag;
tag = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_TAG], tag_idx);
uint32_t color_reg_c_idx)
{
struct mlx5_priv *priv = dev->data->dev_private;
- struct mlx5_ibv_shared *sh = priv->sh;
+ struct mlx5_dev_ctx_shared *sh = priv->sh;
struct mlx5_flow_dv_match_params mask = {
.size = sizeof(mask.buf),
};
* Size of freed memory.
*/
static void
-mlx5_mr_mem_event_free_cb(struct mlx5_ibv_shared *sh,
+mlx5_mr_mem_event_free_cb(struct mlx5_dev_ctx_shared *sh,
const void *addr, size_t len)
{
const struct rte_memseg_list *msl;
mlx5_mr_mem_event_cb(enum rte_mem_event event_type, const void *addr,
size_t len, void *arg __rte_unused)
{
- struct mlx5_ibv_shared *sh;
+ struct mlx5_dev_ctx_shared *sh;
struct mlx5_dev_list *dev_list = &mlx5_shared_data->mem_event_cb_list;
/* Must be called from the primary process. */
struct mr_update_mp_data *data = opaque;
struct rte_eth_dev *dev = data->dev;
struct mlx5_priv *priv = dev->data->dev_private;
- struct mlx5_ibv_shared *sh = priv->sh;
+ struct mlx5_dev_ctx_shared *sh = priv->sh;
struct mlx5_mr_ctrl *mr_ctrl = data->mr_ctrl;
struct mlx5_mr *mr = NULL;
uintptr_t addr = (uintptr_t)memhdr->addr;
struct rte_eth_dev *dev;
struct mlx5_mr *mr;
struct mlx5_priv *priv;
- struct mlx5_ibv_shared *sh;
+ struct mlx5_dev_ctx_shared *sh;
dev = pci_dev_to_eth_dev(pdev);
if (!dev) {
{
struct rte_eth_dev *dev;
struct mlx5_priv *priv;
- struct mlx5_ibv_shared *sh;
+ struct mlx5_dev_ctx_shared *sh;
struct mlx5_mr *mr;
struct mr_cache_entry entry;