* mlx4 driver initialization.
*/
-#include <assert.h>
-#include <dlfcn.h>
#include <errno.h>
#include <inttypes.h>
#include <stddef.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>
+#ifdef RTE_IBVERBS_LINK_DLOPEN
+#include <dlfcn.h>
+#endif
/* Verbs headers do not support -pedantic. */
#ifdef PEDANTIC
#endif
#include <rte_common.h>
-#include <rte_config.h>
#include <rte_dev.h>
#include <rte_errno.h>
-#include <rte_ethdev_driver.h>
-#include <rte_ethdev_pci.h>
+#include <ethdev_driver.h>
+#include <ethdev_pci.h>
#include <rte_ether.h>
#include <rte_flow.h>
#include <rte_interrupts.h>
#include "mlx4_rxtx.h"
#include "mlx4_utils.h"
+#ifdef MLX4_GLUE
+const struct mlx4_glue *mlx4_glue;
+#endif
+
static const char *MZ_MLX4_PMD_SHARED_DATA = "mlx4_pmd_shared_data";
/* Shared memory between primary and secondary processes. */
NULL,
};
-static void mlx4_dev_stop(struct rte_eth_dev *dev);
+static int mlx4_dev_stop(struct rte_eth_dev *dev);
/**
* Initialize shared data between primary and secondary process.
return ret;
}
-/**
- * Uninitialize shared data between primary and secondary process.
- *
- * The pointer of secondary process is dereferenced and primary process frees
- * the memzone.
- */
-static void
-mlx4_uninit_shared_data(void)
-{
- const struct rte_memzone *mz;
-
- rte_spinlock_lock(&mlx4_shared_data_lock);
- if (mlx4_shared_data) {
- if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
- mz = rte_memzone_lookup(MZ_MLX4_PMD_SHARED_DATA);
- rte_memzone_free(mz);
- } else {
- memset(&mlx4_local_data, 0, sizeof(mlx4_local_data));
- }
- mlx4_shared_data = NULL;
- }
- rte_spinlock_unlock(&mlx4_shared_data_lock);
-}
-
#ifdef HAVE_IBV_MLX4_BUF_ALLOCATORS
/**
* Verbs callback to allocate a memory. This function should allocate the space
socket = rxq->socket;
}
- assert(data != NULL);
+ MLX4_ASSERT(data != NULL);
ret = rte_malloc_socket(__func__, size, alignment, socket);
if (!ret && size)
rte_errno = ENOMEM;
static void
mlx4_free_verbs_buf(void *ptr, void *data __rte_unused)
{
- assert(data != NULL);
+ MLX4_ASSERT(data != NULL);
rte_free(ptr);
}
#endif
+/**
+ * Initialize process private data structure.
+ *
+ * @param dev
+ * Pointer to Ethernet device structure.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+int
+mlx4_proc_priv_init(struct rte_eth_dev *dev)
+{
+ struct mlx4_proc_priv *ppriv;
+ size_t ppriv_size;
+
+ mlx4_proc_priv_uninit(dev);
+ /*
+ * UAR register table follows the process private structure. BlueFlame
+ * registers for Tx queues are stored in the table.
+ */
+ ppriv_size = sizeof(struct mlx4_proc_priv) +
+ dev->data->nb_tx_queues * sizeof(void *);
+ ppriv = rte_zmalloc_socket("mlx4_proc_priv", ppriv_size,
+ RTE_CACHE_LINE_SIZE, dev->device->numa_node);
+ if (!ppriv) {
+ rte_errno = ENOMEM;
+ return -rte_errno;
+ }
+ ppriv->uar_table_sz = dev->data->nb_tx_queues;
+ dev->process_private = ppriv;
+ return 0;
+}
+
+/**
+ * Un-initialize process private data structure.
+ *
+ * @param dev
+ * Pointer to Ethernet device structure.
+ */
+void
+mlx4_proc_priv_uninit(struct rte_eth_dev *dev)
+{
+ if (!dev->process_private)
+ return;
+ rte_free(dev->process_private);
+ dev->process_private = NULL;
+}
+
/**
* DPDK callback for Ethernet device configuration.
*
goto exit;
}
ret = mlx4_intr_install(priv);
- if (ret)
+ if (ret) {
ERROR("%p: interrupt handler installation failed",
(void *)dev);
+ goto exit;
+ }
+ ret = mlx4_proc_priv_init(dev);
+ if (ret) {
+ ERROR("%p: process private data allocation failed",
+ (void *)dev);
+ goto exit;
+ }
exit:
return ret;
}
return 0;
DEBUG("%p: attaching configured flows to all RX queues", (void *)dev);
priv->started = 1;
- ret = mlx4_tx_uar_remap(dev, priv->ctx->cmd_fd);
- if (ret) {
- ERROR("%p: cannot remap UAR", (void *)dev);
- goto err;
- }
ret = mlx4_rss_init(priv);
if (ret) {
ERROR("%p: cannot initialize RSS resources: %s",
(void *)dev, strerror(-ret));
goto err;
}
-#ifndef NDEBUG
+#ifdef RTE_LIBRTE_MLX4_DEBUG
mlx4_mr_dump_dev(dev);
#endif
ret = mlx4_rxq_intr_enable(priv);
* @param dev
* Pointer to Ethernet device structure.
*/
-static void
+static int
mlx4_dev_stop(struct rte_eth_dev *dev)
{
struct mlx4_priv *priv = dev->data->dev_private;
-#ifdef HAVE_IBV_MLX4_UAR_MMAP_OFFSET
- const size_t page_size = sysconf(_SC_PAGESIZE);
- int i;
-#endif
if (!priv->started)
- return;
+ return 0;
DEBUG("%p: detaching flows from all RX queues", (void *)dev);
priv->started = 0;
dev->tx_pkt_burst = mlx4_tx_burst_removed;
mlx4_flow_sync(priv, NULL);
mlx4_rxq_intr_disable(priv);
mlx4_rss_deinit(priv);
-#ifdef HAVE_IBV_MLX4_UAR_MMAP_OFFSET
- for (i = 0; i != dev->data->nb_tx_queues; ++i) {
- struct txq *txq;
- txq = dev->data->tx_queues[i];
- if (!txq)
- continue;
- munmap((void *)RTE_ALIGN_FLOOR((uintptr_t)txq->msq.db,
- page_size), page_size);
- }
-#endif
+ return 0;
}
/**
* @param dev
* Pointer to Ethernet device structure.
*/
-static void
+static int
mlx4_dev_close(struct rte_eth_dev *dev)
{
struct mlx4_priv *priv = dev->data->dev_private;
unsigned int i;
+ if (rte_eal_process_type() == RTE_PROC_SECONDARY) {
+ rte_eth_dev_release_port(dev);
+ return 0;
+ }
DEBUG("%p: closing device \"%s\"",
(void *)dev,
((priv->ctx != NULL) ? priv->ctx->device->name : ""));
mlx4_flow_clean(priv);
mlx4_rss_deinit(priv);
for (i = 0; i != dev->data->nb_rx_queues; ++i)
- mlx4_rx_queue_release(dev->data->rx_queues[i]);
+ mlx4_rx_queue_release(dev, i);
for (i = 0; i != dev->data->nb_tx_queues; ++i)
- mlx4_tx_queue_release(dev->data->tx_queues[i]);
+ mlx4_tx_queue_release(dev, i);
+ mlx4_proc_priv_uninit(dev);
mlx4_mr_release(dev);
if (priv->pd != NULL) {
- assert(priv->ctx != NULL);
+ MLX4_ASSERT(priv->ctx != NULL);
claim_zero(mlx4_glue->dealloc_pd(priv->pd));
claim_zero(mlx4_glue->close_device(priv->ctx));
} else
- assert(priv->ctx == NULL);
+ MLX4_ASSERT(priv->ctx == NULL);
mlx4_intr_uninstall(priv);
memset(priv, 0, sizeof(*priv));
+	/* mac_addrs must not be freed because it is part of dev_private */
+ dev->data->mac_addrs = NULL;
+ return 0;
}
static const struct eth_dev_ops mlx4_dev_ops = {
.mac_addr_remove = mlx4_mac_addr_remove,
.mac_addr_add = mlx4_mac_addr_add,
.mac_addr_set = mlx4_mac_addr_set,
+ .set_mc_addr_list = mlx4_set_mc_addr_list,
.stats_get = mlx4_stats_get,
.stats_reset = mlx4_stats_reset,
.fw_version_get = mlx4_fw_version_get,
.flow_ctrl_get = mlx4_flow_ctrl_get,
.flow_ctrl_set = mlx4_flow_ctrl_set,
.mtu_set = mlx4_mtu_set,
- .filter_ctrl = mlx4_filter_ctrl,
+ .flow_ops_get = mlx4_flow_ops_get,
.rx_queue_intr_enable = mlx4_rx_intr_enable,
.rx_queue_intr_disable = mlx4_rx_intr_disable,
.is_removed = mlx4_is_removed,
&pci_addr->bus,
&pci_addr->devid,
&pci_addr->function) == 4) {
- ret = 0;
break;
}
}
static struct rte_pci_driver mlx4_driver;
-static int
-find_lower_va_bound(const struct rte_memseg_list *msl,
- const struct rte_memseg *ms, void *arg)
-{
- void **addr = arg;
-
- if (msl->external)
- return 0;
- if (*addr == NULL)
- *addr = ms->addr;
- else
- *addr = RTE_MIN(*addr, ms->addr);
-
- return 0;
-}
-
-/**
- * Reserve UAR address space for primary process.
- *
- * Process local resource is used by both primary and secondary to avoid
- * duplicate reservation. The space has to be available on both primary and
- * secondary process, TXQ UAR maps to this area using fixed mmap w/o double
- * check.
- *
- * @return
- * 0 on success, a negative errno value otherwise and rte_errno is set.
- */
-static int
-mlx4_uar_init_primary(void)
-{
- struct mlx4_shared_data *sd = mlx4_shared_data;
- void *addr = (void *)0;
-
- if (sd->uar_base)
- return 0;
- /* find out lower bound of hugepage segments */
- rte_memseg_walk(find_lower_va_bound, &addr);
- /* keep distance to hugepages to minimize potential conflicts. */
- addr = RTE_PTR_SUB(addr, (uintptr_t)(MLX4_UAR_OFFSET + MLX4_UAR_SIZE));
- /* anonymous mmap, no real memory consumption. */
- addr = mmap(addr, MLX4_UAR_SIZE,
- PROT_NONE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
- if (addr == MAP_FAILED) {
- ERROR("failed to reserve UAR address space, please"
- " adjust MLX4_UAR_SIZE or try --base-virtaddr");
- rte_errno = ENOMEM;
- return -rte_errno;
- }
- /* Accept either same addr or a new addr returned from mmap if target
- * range occupied.
- */
- INFO("reserved UAR address space: %p", addr);
- sd->uar_base = addr; /* for primary and secondary UAR re-mmap. */
- return 0;
-}
-
-/**
- * Unmap UAR address space reserved for primary process.
- */
-static void
-mlx4_uar_uninit_primary(void)
-{
- struct mlx4_shared_data *sd = mlx4_shared_data;
-
- if (!sd->uar_base)
- return;
- munmap(sd->uar_base, MLX4_UAR_SIZE);
- sd->uar_base = NULL;
-}
-
-/**
- * Reserve UAR address space for secondary process, align with primary process.
- *
- * @return
- * 0 on success, a negative errno value otherwise and rte_errno is set.
- */
-static int
-mlx4_uar_init_secondary(void)
-{
- struct mlx4_shared_data *sd = mlx4_shared_data;
- struct mlx4_local_data *ld = &mlx4_local_data;
- void *addr;
-
- if (ld->uar_base) { /* Already reserved. */
- assert(sd->uar_base == ld->uar_base);
- return 0;
- }
- assert(sd->uar_base);
- /* anonymous mmap, no real memory consumption. */
- addr = mmap(sd->uar_base, MLX4_UAR_SIZE,
- PROT_NONE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
- if (addr == MAP_FAILED) {
- ERROR("UAR mmap failed: %p size: %llu",
- sd->uar_base, MLX4_UAR_SIZE);
- rte_errno = ENXIO;
- return -rte_errno;
- }
- if (sd->uar_base != addr) {
- ERROR("UAR address %p size %llu occupied, please"
- " adjust MLX4_UAR_OFFSET or try EAL parameter"
- " --base-virtaddr",
- sd->uar_base, MLX4_UAR_SIZE);
- rte_errno = ENXIO;
- return -rte_errno;
- }
- ld->uar_base = addr;
- INFO("reserved UAR address space: %p", addr);
- return 0;
-}
-
-/**
- * Unmap UAR address space reserved for secondary process.
- */
-static void
-mlx4_uar_uninit_secondary(void)
-{
- struct mlx4_local_data *ld = &mlx4_local_data;
-
- if (!ld->uar_base)
- return;
- munmap(ld->uar_base, MLX4_UAR_SIZE);
- ld->uar_base = NULL;
-}
-
/**
* PMD global initialization.
*
{
struct mlx4_shared_data *sd;
struct mlx4_local_data *ld = &mlx4_local_data;
- int ret;
+ int ret = 0;
if (mlx4_init_shared_data())
return -rte_errno;
sd = mlx4_shared_data;
- assert(sd);
+ MLX4_ASSERT(sd);
rte_spinlock_lock(&sd->lock);
switch (rte_eal_process_type()) {
case RTE_PROC_PRIMARY:
rte_rwlock_init(&sd->mem_event_rwlock);
rte_mem_event_callback_register("MLX4_MEM_EVENT_CB",
mlx4_mr_mem_event_cb, NULL);
- mlx4_mp_init_primary();
- ret = mlx4_uar_init_primary();
+ ret = mlx4_mp_init_primary();
if (ret)
- goto error;
- sd->init_done = true;
+ goto out;
+ sd->init_done = 1;
break;
case RTE_PROC_SECONDARY:
if (ld->init_done)
break;
- mlx4_mp_init_secondary();
- ret = mlx4_uar_init_secondary();
+ ret = mlx4_mp_init_secondary();
if (ret)
- goto error;
+ goto out;
++sd->secondary_cnt;
- ld->init_done = true;
+ ld->init_done = 1;
break;
default:
break;
}
+out:
rte_spinlock_unlock(&sd->lock);
- return 0;
-error:
- switch (rte_eal_process_type()) {
- case RTE_PROC_PRIMARY:
- mlx4_uar_uninit_primary();
- mlx4_mp_uninit_primary();
- rte_mem_event_callback_unregister("MLX4_MEM_EVENT_CB", NULL);
- break;
- case RTE_PROC_SECONDARY:
- mlx4_uar_uninit_secondary();
- mlx4_mp_uninit_secondary();
- break;
- default:
- break;
- }
- rte_spinlock_unlock(&sd->lock);
- mlx4_uninit_shared_data();
- return -rte_errno;
+ return ret;
}
/**
struct ibv_context *attr_ctx = NULL;
struct ibv_device_attr device_attr;
struct ibv_device_attr_ex device_attr_ex;
+ struct rte_eth_dev *prev_dev = NULL;
struct mlx4_conf conf = {
.ports.present = 0,
.mr_ext_memseg_en = 1,
};
unsigned int vf;
int i;
+ char ifname[IF_NAMESIZE];
(void)pci_drv;
err = mlx4_init_once();
strerror(rte_errno));
return -rte_errno;
}
- assert(pci_drv == &mlx4_driver);
+ MLX4_ASSERT(pci_drv == &mlx4_driver);
list = mlx4_glue->get_device_list(&i);
if (list == NULL) {
rte_errno = errno;
- assert(rte_errno);
+ MLX4_ASSERT(rte_errno);
if (rte_errno == ENOSYS)
ERROR("cannot list devices, is ib_uverbs loaded?");
return -rte_errno;
}
- assert(i >= 0);
+ MLX4_ASSERT(i >= 0);
/*
* For each listed device, check related sysfs entry against
* the provided PCI ID.
ERROR("cannot use device, are drivers up to date?");
return -rte_errno;
}
- assert(err > 0);
+ MLX4_ASSERT(err > 0);
rte_errno = err;
return -rte_errno;
}
err = ENODEV;
goto error;
}
- assert(device_attr.max_sge >= MLX4_MAX_SGE);
+ MLX4_ASSERT(device_attr.max_sge >= MLX4_MAX_SGE);
for (i = 0; i < device_attr.phys_port_cnt; i++) {
uint32_t port = i + 1; /* ports are indexed from one */
struct ibv_context *ctx = NULL;
struct ibv_pd *pd = NULL;
struct mlx4_priv *priv = NULL;
struct rte_eth_dev *eth_dev = NULL;
- struct ether_addr mac;
+ struct rte_ether_addr mac;
char name[RTE_ETH_NAME_MAX_LEN];
/* If port is not enabled, skip. */
ERROR("can not attach rte ethdev");
rte_errno = ENOMEM;
err = rte_errno;
- goto error;
+ goto err_secondary;
}
priv = eth_dev->data->dev_private;
if (!priv->verbs_alloc_ctx.enabled) {
" from Verbs");
rte_errno = ENOTSUP;
err = rte_errno;
- goto error;
+ goto err_secondary;
}
eth_dev->device = &pci_dev->device;
eth_dev->dev_ops = &mlx4_dev_sec_ops;
+ err = mlx4_proc_priv_init(eth_dev);
+ if (err)
+ goto err_secondary;
/* Receive command fd from primary process. */
err = mlx4_mp_req_verbs_cmd_fd(eth_dev);
if (err < 0) {
err = rte_errno;
- goto error;
+ goto err_secondary;
}
/* Remap UAR for Tx queues. */
- err = mlx4_tx_uar_remap(eth_dev, err);
+ err = mlx4_tx_uar_init_secondary(eth_dev, err);
if (err) {
err = rte_errno;
- goto error;
+ goto err_secondary;
}
/*
* Ethdev pointer is still required as input since
claim_zero(mlx4_glue->close_device(ctx));
rte_eth_copy_pci_info(eth_dev, pci_dev);
rte_eth_dev_probing_finish(eth_dev);
+ prev_dev = eth_dev;
continue;
+err_secondary:
+ claim_zero(mlx4_glue->close_device(ctx));
+ rte_eth_dev_release_port(eth_dev);
+ if (prev_dev)
+ rte_eth_dev_release_port(prev_dev);
+ break;
}
/* Check port status. */
err = mlx4_glue->query_port(ctx, port, &port_attr);
priv->device_attr = device_attr;
priv->port = port;
priv->pd = pd;
- priv->mtu = ETHER_MTU;
+ priv->mtu = RTE_ETHER_MTU;
priv->vf = vf;
priv->hw_csum = !!(device_attr.device_cap_flags &
IBV_DEVICE_RAW_IP_CSUM);
" (error: %s)", strerror(err));
goto port_error;
}
- INFO("port %u MAC address is %02x:%02x:%02x:%02x:%02x:%02x",
- priv->port,
- mac.addr_bytes[0], mac.addr_bytes[1],
- mac.addr_bytes[2], mac.addr_bytes[3],
- mac.addr_bytes[4], mac.addr_bytes[5]);
+ INFO("port %u MAC address is " RTE_ETHER_ADDR_PRT_FMT,
+ priv->port, RTE_ETHER_ADDR_BYTES(&mac));
/* Register MAC address. */
priv->mac[0] = mac;
-#ifndef NDEBUG
- {
- char ifname[IF_NAMESIZE];
-
- if (mlx4_get_ifname(priv, &ifname) == 0)
- DEBUG("port %u ifname is \"%s\"",
- priv->port, ifname);
- else
- DEBUG("port %u ifname is unknown", priv->port);
+
+ if (mlx4_get_ifname(priv, &ifname) == 0) {
+ DEBUG("port %u ifname is \"%s\"",
+ priv->port, ifname);
+ priv->if_index = if_nametoindex(ifname);
+ } else {
+ DEBUG("port %u ifname is unknown", priv->port);
}
-#endif
+
/* Get actual MTU if possible. */
mlx4_mtu_get(priv, &priv->mtu);
DEBUG("port %u MTU is %u", priv->port, priv->mtu);
eth_dev->data->mac_addrs = priv->mac;
eth_dev->device = &pci_dev->device;
rte_eth_copy_pci_info(eth_dev, pci_dev);
+ eth_dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;
/* Initialize local interrupt handle for current port. */
- priv->intr_handle = (struct rte_intr_handle){
- .fd = -1,
- .type = RTE_INTR_HANDLE_EXT,
- };
+ memset(&priv->intr_handle, 0, sizeof(struct rte_intr_handle));
+ priv->intr_handle.fd = -1;
+ priv->intr_handle.type = RTE_INTR_HANDLE_EXT;
/*
* Override ethdev interrupt handle pointer with private
* handle instead of that of the parent PCI device used by
eth_dev->dev_ops = &mlx4_dev_ops;
#ifdef HAVE_IBV_MLX4_BUF_ALLOCATORS
/* Hint libmlx4 to use PMD allocator for data plane resources */
- struct mlx4dv_ctx_allocators alctr = {
- .alloc = &mlx4_alloc_verbs_buf,
- .free = &mlx4_free_verbs_buf,
- .data = priv,
- };
err = mlx4_glue->dv_set_context_attr
(ctx, MLX4DV_SET_CTX_ATTR_BUF_ALLOCATORS,
- (void *)((uintptr_t)&alctr));
+ (void *)((uintptr_t)&(struct mlx4dv_ctx_allocators){
+ .alloc = &mlx4_alloc_verbs_buf,
+ .free = &mlx4_free_verbs_buf,
+ .data = priv,
+ }));
if (err)
WARN("Verbs external allocator is not supported");
else
priv, mem_event_cb);
rte_rwlock_write_unlock(&mlx4_shared_data->mem_event_rwlock);
rte_eth_dev_probing_finish(eth_dev);
+ prev_dev = eth_dev;
continue;
port_error:
rte_free(priv);
eth_dev->data->mac_addrs = NULL;
rte_eth_dev_release_port(eth_dev);
}
+ if (prev_dev)
+ mlx4_dev_close(prev_dev);
break;
}
- /*
- * XXX if something went wrong in the loop above, there is a resource
- * leak (ctx, pd, priv, dpdk ethdev) but we can do nothing about it as
- * long as the dpdk does not provide a way to deallocate a ethdev and a
- * way to enumerate the registered ethdevs to free the previous ones.
- */
error:
if (attr_ctx)
claim_zero(mlx4_glue->close_device(attr_ctx));
return -err;
}
+/**
+ * DPDK callback to remove a PCI device.
+ *
+ * This function removes all Ethernet devices belonging to a given PCI device.
+ *
+ * @param[in] pci_dev
+ * Pointer to the PCI device.
+ *
+ * @return
+ * 0 on success, the function cannot fail.
+ */
+static int
+mlx4_pci_remove(struct rte_pci_device *pci_dev)
+{
+ uint16_t port_id;
+ int ret = 0;
+
+ RTE_ETH_FOREACH_DEV_OF(port_id, &pci_dev->device) {
+ /*
+ * mlx4_dev_close() is not registered to secondary process,
+ * call the close function explicitly for secondary process.
+ */
+ if (rte_eal_process_type() == RTE_PROC_SECONDARY)
+ ret |= mlx4_dev_close(&rte_eth_devices[port_id]);
+ else
+ ret |= rte_eth_dev_close(port_id);
+ }
+ return ret == 0 ? 0 : -EIO;
+}
+
static const struct rte_pci_id mlx4_pci_id_map[] = {
{
RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
},
.id_table = mlx4_pci_id_map,
.probe = mlx4_pci_probe,
- .drv_flags = RTE_PCI_DRV_INTR_LSC |
- RTE_PCI_DRV_INTR_RMV,
+ .remove = mlx4_pci_remove,
+ .drv_flags = RTE_PCI_DRV_INTR_LSC | RTE_PCI_DRV_INTR_RMV,
};
#ifdef RTE_IBVERBS_LINK_DLOPEN
#endif
+/* Initialize driver log type. */
+RTE_LOG_REGISTER_DEFAULT(mlx4_logtype, NOTICE)
+
/**
* Driver initialization routine.
*/
#ifdef RTE_IBVERBS_LINK_DLOPEN
if (mlx4_glue_init())
return;
- assert(mlx4_glue);
+ MLX4_ASSERT(mlx4_glue);
#endif
-#ifndef NDEBUG
+#ifdef RTE_LIBRTE_MLX4_DEBUG
/* Glue structure must not contain any NULL pointers. */
{
unsigned int i;
for (i = 0; i != sizeof(*mlx4_glue) / sizeof(void *); ++i)
- assert(((const void *const *)mlx4_glue)[i]);
+ MLX4_ASSERT(((const void *const *)mlx4_glue)[i]);
}
#endif
if (strcmp(mlx4_glue->version, MLX4_GLUE_VERSION)) {