#endif
}
+/**
+ * Allocate a Protection Domain through the glue layer.
+ *
+ * Thin OS-abstraction wrapper: forwards directly to mlx5_glue->alloc_pd().
+ *
+ * @param ctx
+ *   Device context (presumably an ibv_context — confirm at call sites).
+ * @return
+ *   Whatever mlx5_glue->alloc_pd() returns (a PD object; NULL on failure
+ *   by verbs convention — NOTE(review): confirm against glue layer).
+ */
+__rte_internal
static inline void *
mlx5_os_alloc_pd(void *ctx)
{
return mlx5_glue->alloc_pd(ctx);
}
+/**
+ * Release a Protection Domain through the glue layer.
+ *
+ * Thin OS-abstraction wrapper: forwards directly to mlx5_glue->dealloc_pd().
+ *
+ * @param pd
+ *   PD previously obtained from mlx5_os_alloc_pd().
+ * @return
+ *   Value returned by mlx5_glue->dealloc_pd() (0 on success by verbs
+ *   convention — NOTE(review): confirm against glue layer).
+ */
+__rte_internal
static inline int
mlx5_os_dealloc_pd(void *pd)
{
return mlx5_glue->dealloc_pd(pd);
}
+/**
+ * Register a user memory region for DevX through the glue layer.
+ *
+ * Thin OS-abstraction wrapper: forwards directly to
+ * mlx5_glue->devx_umem_reg().
+ *
+ * @param ctx
+ *   Device context.
+ * @param addr
+ *   Start address of the memory region to register.
+ * @param size
+ *   Size of the region in bytes.
+ * @param access
+ *   Access flags, passed through unchanged (mlx5dv-defined — confirm
+ *   expected values against the glue layer).
+ * @return
+ *   The umem object returned by mlx5_glue->devx_umem_reg().
+ */
+__rte_internal
static inline void *
mlx5_os_umem_reg(void *ctx, void *addr, size_t size, uint32_t access)
{
return mlx5_glue->devx_umem_reg(ctx, addr, size, access);
}
+/**
+ * Deregister a user memory region through the glue layer.
+ *
+ * Thin OS-abstraction wrapper: forwards directly to
+ * mlx5_glue->devx_umem_dereg().
+ *
+ * @param pumem
+ *   umem object previously obtained from mlx5_os_umem_reg().
+ * @return
+ *   Value returned by mlx5_glue->devx_umem_dereg() (0 on success by
+ *   mlx5dv convention — NOTE(review): confirm against glue layer).
+ */
+__rte_internal
static inline int
mlx5_os_umem_dereg(void *pumem)
{
return mlx5_glue->devx_umem_dereg(pumem);
}
+/**
+ * Create a DevX event channel through the glue layer.
+ *
+ * Thin OS-abstraction wrapper: forwards directly to
+ * mlx5_glue->devx_create_event_channel().
+ *
+ * @param ctx
+ *   Device context (opaque; passed through unchanged).
+ * @param flags
+ *   Event channel creation flags (mlx5dv-defined; passed through unchanged).
+ * @return
+ *   The event channel object returned by the glue call (NULL on failure by
+ *   mlx5dv convention — NOTE(review): confirm against glue layer).
+ */
+__rte_internal
+static inline void *
+mlx5_os_devx_create_event_channel(void *ctx, int flags)
+{
+	return mlx5_glue->devx_create_event_channel(ctx, flags);
+}
+
+/**
+ * Destroy a DevX event channel through the glue layer.
+ *
+ * Thin OS-abstraction wrapper: forwards directly to
+ * mlx5_glue->devx_destroy_event_channel().
+ *
+ * @param eventc
+ *   Event channel previously obtained from
+ *   mlx5_os_devx_create_event_channel().
+ */
+__rte_internal
+static inline void
+mlx5_os_devx_destroy_event_channel(void *eventc)
+{
+	mlx5_glue->devx_destroy_event_channel(eventc);
+}
+
+/**
+ * Subscribe a DevX object to events on an event channel through the glue
+ * layer.
+ *
+ * Thin OS-abstraction wrapper: forwards directly to
+ * mlx5_glue->devx_subscribe_devx_event().
+ *
+ * @param eventc
+ *   Event channel previously obtained from
+ *   mlx5_os_devx_create_event_channel().
+ * @param obj
+ *   DevX object whose events are being subscribed to.
+ * @param events_sz
+ *   Size of the events_num array (in bytes per mlx5dv convention —
+ *   NOTE(review): confirm against glue layer).
+ * @param events_num
+ *   Array of event numbers to subscribe to.
+ * @param cookie
+ *   User cookie delivered back with each event.
+ * @return
+ *   Value returned by mlx5_glue->devx_subscribe_devx_event() (0 on success
+ *   by mlx5dv convention — NOTE(review): confirm against glue layer).
+ */
+__rte_internal
+static inline int
+mlx5_os_devx_subscribe_devx_event(void *eventc,
+				  void *obj,
+				  uint16_t events_sz, uint16_t events_num[],
+				  uint64_t cookie)
+{
+	return mlx5_glue->devx_subscribe_devx_event(eventc, obj, events_sz,
+						    events_num, cookie);
+}
+
/**
* Memory allocation optionally with alignment.
*
{
free(addr);
}
+
+/**
+ * Look up the Infiniband device matching a PCI address.
+ *
+ * @param addr
+ *   PCI address to match.
+ * @return
+ *   Pointer to the matching ibv_device (presumably NULL when no device
+ *   matches — NOTE(review): semantics inferred from the signature; confirm
+ *   in the definition).
+ */
+__rte_internal
+struct ibv_device *
+mlx5_os_get_ibv_device(struct rte_pci_addr *addr);
+
#endif /* RTE_PMD_MLX5_COMMON_OS_H_ */