Use the MLX4_ASSERT macro instead of the standard assert() clause.
Whether it performs a check depends on the RTE_LIBRTE_MLX4_DEBUG
configuration option. If RTE_LIBRTE_MLX4_DEBUG is enabled, MLX4_ASSERT
expands to RTE_VERIFY, bypassing the global CONFIG_RTE_ENABLE_ASSERT
option. If RTE_LIBRTE_MLX4_DEBUG is disabled, the global
CONFIG_RTE_ENABLE_ASSERT option can still make the assertion active,
since RTE_ASSERT calls RTE_VERIFY when that option is set.
Signed-off-by: Alexander Kozyrev <akozyrev@mellanox.com>
Acked-by: Viacheslav Ovsiienko <viacheslavo@mellanox.com>
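For reference, the net effect of the mlx4_utils.h hunk at the end of this
patch can be condensed into the sketch below (only the assert-related
macros are shown; RTE_VERIFY() and RTE_ASSERT() come from rte_debug.h,
and the surrounding DEBUG()/logging macros are untouched apart from the
comment):

    /* Condensed view of the new macros (sketch, not the literal hunk). */
    #ifdef RTE_LIBRTE_MLX4_DEBUG
    /*
     * Debug build: the check is always compiled in, regardless of
     * CONFIG_RTE_ENABLE_ASSERT.
     */
    #define MLX4_ASSERT(exp) RTE_VERIFY(exp)
    #define claim_zero(...) MLX4_ASSERT((__VA_ARGS__) == 0)
    #else
    /*
     * Non-debug build: active only when CONFIG_RTE_ENABLE_ASSERT is set,
     * because RTE_ASSERT() then expands to RTE_VERIFY(); claim_zero()
     * still evaluates its argument for side effects.
     */
    #define MLX4_ASSERT(exp) RTE_ASSERT(exp)
    #define claim_zero(...) (__VA_ARGS__)
    #endif /* RTE_LIBRTE_MLX4_DEBUG */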
* mlx4 driver initialization.
*/
-#include <assert.h>
#include <dlfcn.h>
#include <errno.h>
#include <inttypes.h>
socket = rxq->socket;
}
- assert(data != NULL);
+ MLX4_ASSERT(data != NULL);
ret = rte_malloc_socket(__func__, size, alignment, socket);
if (!ret && size)
rte_errno = ENOMEM;
static void
mlx4_free_verbs_buf(void *ptr, void *data __rte_unused)
{
- assert(data != NULL);
+ MLX4_ASSERT(data != NULL);
rte_free(ptr);
}
#endif
mlx4_proc_priv_uninit(dev);
mlx4_mr_release(dev);
if (priv->pd != NULL) {
- assert(priv->ctx != NULL);
+ MLX4_ASSERT(priv->ctx != NULL);
claim_zero(mlx4_glue->dealloc_pd(priv->pd));
claim_zero(mlx4_glue->close_device(priv->ctx));
} else
- assert(priv->ctx == NULL);
+ MLX4_ASSERT(priv->ctx == NULL);
mlx4_intr_uninstall(priv);
memset(priv, 0, sizeof(*priv));
}
if (mlx4_init_shared_data())
return -rte_errno;
sd = mlx4_shared_data;
- assert(sd);
+ MLX4_ASSERT(sd);
rte_spinlock_lock(&sd->lock);
switch (rte_eal_process_type()) {
case RTE_PROC_PRIMARY:
strerror(rte_errno));
return -rte_errno;
}
- assert(pci_drv == &mlx4_driver);
+ MLX4_ASSERT(pci_drv == &mlx4_driver);
list = mlx4_glue->get_device_list(&i);
if (list == NULL) {
rte_errno = errno;
- assert(rte_errno);
+ MLX4_ASSERT(rte_errno);
if (rte_errno == ENOSYS)
ERROR("cannot list devices, is ib_uverbs loaded?");
return -rte_errno;
}
- assert(i >= 0);
+ MLX4_ASSERT(i >= 0);
/*
* For each listed device, check related sysfs entry against
* the provided PCI ID.
ERROR("cannot use device, are drivers up to date?");
return -rte_errno;
}
- assert(err > 0);
+ MLX4_ASSERT(err > 0);
rte_errno = err;
return -rte_errno;
}
err = ENODEV;
goto error;
}
- assert(device_attr.max_sge >= MLX4_MAX_SGE);
+ MLX4_ASSERT(device_attr.max_sge >= MLX4_MAX_SGE);
for (i = 0; i < device_attr.phys_port_cnt; i++) {
uint32_t port = i + 1; /* ports are indexed from one */
struct ibv_context *ctx = NULL;
#ifdef RTE_IBVERBS_LINK_DLOPEN
if (mlx4_glue_init())
return;
- assert(mlx4_glue);
+ MLX4_ASSERT(mlx4_glue);
#endif
#ifdef RTE_LIBRTE_MLX4_DEBUG
/* Glue structure must not contain any NULL pointers. */
unsigned int i;
for (i = 0; i != sizeof(*mlx4_glue) / sizeof(void *); ++i)
- assert(((const void *const *)mlx4_glue)[i]);
+ MLX4_ASSERT(((const void *const *)mlx4_glue)[i]);
}
#endif
if (strcmp(mlx4_glue->version, MLX4_GLUE_VERSION)) {
* Miscellaneous control operations for mlx4 driver.
*/
-#include <assert.h>
#include <dirent.h>
#include <errno.h>
#include <linux/ethtool.h>
fc_conf->mode = RTE_FC_NONE;
ret = 0;
out:
- assert(ret >= 0);
+ MLX4_ASSERT(ret >= 0);
return -ret;
}
}
ret = 0;
out:
- assert(ret >= 0);
+ MLX4_ASSERT(ret >= 0);
return -ret;
}
*/
#include <arpa/inet.h>
-#include <assert.h>
#include <errno.h>
#include <stdalign.h>
#include <stddef.h>
mask = item->mask ?
(const uint8_t *)item->mask :
(const uint8_t *)proc->mask_default;
- assert(mask);
+ MLX4_ASSERT(mask);
/*
* Single-pass check to make sure that:
* - Mask is supported, no bits are set outside proc->mask_support.
struct mlx4_drop *drop = priv->drop;
if (drop) {
- assert(drop->refcnt);
- assert(drop->priv == priv);
+ MLX4_ASSERT(drop->refcnt);
+ MLX4_ASSERT(drop->priv == priv);
++drop->refcnt;
return drop;
}
static void
mlx4_drop_put(struct mlx4_drop *drop)
{
- assert(drop->refcnt);
+ MLX4_ASSERT(drop->refcnt);
if (--drop->refcnt)
return;
drop->priv->drop = NULL;
mlx4_rss_detach(flow->rss);
return 0;
}
- assert(flow->ibv_attr);
+ MLX4_ASSERT(flow->ibv_attr);
if (!flow->internal &&
!priv->isolated &&
flow->ibv_attr->priority == MLX4_FLOW_PRIORITY_LAST) {
}
qp = priv->drop->qp;
}
- assert(qp);
+ MLX4_ASSERT(qp);
if (flow->ibv_flow)
return 0;
flow->ibv_flow = mlx4_glue->create_flow(qp, flow->ibv_attr);
if (!flow->mac)
continue;
- assert(flow->ibv_attr->type == IBV_FLOW_ATTR_NORMAL);
- assert(flow->ibv_attr->num_of_specs == 1);
- assert(eth->type == IBV_FLOW_SPEC_ETH);
- assert(flow->rss);
+ MLX4_ASSERT(flow->ibv_attr->type ==
+ IBV_FLOW_ATTR_NORMAL);
+ MLX4_ASSERT(flow->ibv_attr->num_of_specs == 1);
+ MLX4_ASSERT(eth->type == IBV_FLOW_SPEC_ETH);
+ MLX4_ASSERT(flow->rss);
if (rule_vlan &&
(eth->val.vlan_tag != *rule_vlan ||
eth->mask.vlan_tag != RTE_BE16(0x0fff)))
if (flow->promisc)
break;
} else {
- assert(ETH_DEV(priv)->data->all_multicast);
+ MLX4_ASSERT(ETH_DEV(priv)->data->all_multicast);
if (flow->allmulti)
break;
}
}
if (flow && flow->internal) {
- assert(flow->rss);
+ MLX4_ASSERT(flow->rss);
if (flow->rss->queues != queues ||
memcmp(flow->rss->queue_id, action_rss.queue,
queues * sizeof(flow->rss->queue_id[0])))
pattern[1].spec = NULL;
pattern[1].mask = NULL;
} else {
- assert(ETH_DEV(priv)->data->all_multicast);
+ MLX4_ASSERT(ETH_DEV(priv)->data->all_multicast);
pattern[1].spec = &eth_allmulti;
pattern[1].mask = &eth_allmulti;
}
goto error;
}
}
- assert(flow->promisc || flow->allmulti);
+ MLX4_ASSERT(flow->promisc || flow->allmulti);
flow->select = 1;
}
error:
return ret;
}
if (!priv->started)
- assert(!priv->drop);
+ MLX4_ASSERT(!priv->drop);
return 0;
}
while ((flow = LIST_FIRST(&priv->flows)))
mlx4_flow_destroy(ETH_DEV(priv), flow, NULL);
- assert(LIST_EMPTY(&priv->rss));
+ MLX4_ASSERT(LIST_EMPTY(&priv->rss));
}
static const struct rte_flow_ops mlx4_flow_ops = {
* Interrupts handling for mlx4 driver.
*/
-#include <assert.h>
#include <errno.h>
#include <stdint.h>
#include <stdlib.h>
const struct rte_intr_conf *const intr_conf =
&ETH_DEV(priv)->data->dev_conf.intr_conf;
- assert(priv->intr_alarm == 1);
+ MLX4_ASSERT(priv->intr_alarm == 1);
priv->intr_alarm = 0;
if (intr_conf->lsc && !mlx4_link_status_check(priv))
_rte_eth_dev_callback_process(ETH_DEV(priv),
* Copyright 2019 Mellanox Technologies, Ltd
*/
-#include <assert.h>
#include <stdio.h>
#include <time.h>
uint32_t lkey;
int ret;
- assert(rte_eal_process_type() == RTE_PROC_PRIMARY);
+ MLX4_ASSERT(rte_eal_process_type() == RTE_PROC_PRIMARY);
if (!rte_eth_dev_is_valid_port(param->port_id)) {
rte_errno = ENODEV;
ERROR("port %u invalid port ID", param->port_id);
struct rte_eth_dev *dev;
int ret;
- assert(rte_eal_process_type() == RTE_PROC_SECONDARY);
+ MLX4_ASSERT(rte_eal_process_type() == RTE_PROC_SECONDARY);
if (!rte_eth_dev_is_valid_port(param->port_id)) {
rte_errno = ENODEV;
ERROR("port %u invalid port ID", param->port_id);
int ret;
int i;
- assert(rte_eal_process_type() == RTE_PROC_PRIMARY);
+ MLX4_ASSERT(rte_eal_process_type() == RTE_PROC_PRIMARY);
if (!mlx4_shared_data->secondary_cnt)
return;
if (type != MLX4_MP_REQ_START_RXTX && type != MLX4_MP_REQ_STOP_RXTX) {
struct timespec ts = {.tv_sec = MLX4_MP_REQ_TIMEOUT_SEC, .tv_nsec = 0};
int ret;
- assert(rte_eal_process_type() == RTE_PROC_SECONDARY);
+ MLX4_ASSERT(rte_eal_process_type() == RTE_PROC_SECONDARY);
mp_init_msg(dev, &mp_req, MLX4_MP_REQ_CREATE_MR);
req->args.addr = addr;
ret = rte_mp_request_sync(&mp_req, &mp_rep, &ts);
dev->data->port_id);
return -rte_errno;
}
- assert(mp_rep.nb_received == 1);
+ MLX4_ASSERT(mp_rep.nb_received == 1);
mp_res = &mp_rep.msgs[0];
res = (struct mlx4_mp_param *)mp_res->param;
ret = res->result;
struct timespec ts = {.tv_sec = MLX4_MP_REQ_TIMEOUT_SEC, .tv_nsec = 0};
int ret;
- assert(rte_eal_process_type() == RTE_PROC_SECONDARY);
+ MLX4_ASSERT(rte_eal_process_type() == RTE_PROC_SECONDARY);
mp_init_msg(dev, &mp_req, MLX4_MP_REQ_VERBS_CMD_FD);
ret = rte_mp_request_sync(&mp_req, &mp_rep, &ts);
if (ret) {
dev->data->port_id);
return -rte_errno;
}
- assert(mp_rep.nb_received == 1);
+ MLX4_ASSERT(mp_rep.nb_received == 1);
mp_res = &mp_rep.msgs[0];
res = (struct mlx4_mp_param *)mp_res->param;
if (res->result) {
ret = -rte_errno;
goto exit;
}
- assert(mp_res->num_fds == 1);
+ MLX4_ASSERT(mp_res->num_fds == 1);
ret = mp_res->fds[0];
DEBUG("port %u command FD from primary is %d",
dev->data->port_id, ret);
{
int ret;
- assert(rte_eal_process_type() == RTE_PROC_PRIMARY);
+ MLX4_ASSERT(rte_eal_process_type() == RTE_PROC_PRIMARY);
/* primary is allowed to not support IPC */
ret = rte_mp_action_register(MLX4_MP_NAME, mp_primary_handle);
void
mlx4_mp_uninit_primary(void)
{
- assert(rte_eal_process_type() == RTE_PROC_PRIMARY);
+ MLX4_ASSERT(rte_eal_process_type() == RTE_PROC_PRIMARY);
rte_mp_action_unregister(MLX4_MP_NAME);
}
int
mlx4_mp_init_secondary(void)
{
- assert(rte_eal_process_type() == RTE_PROC_SECONDARY);
+ MLX4_ASSERT(rte_eal_process_type() == RTE_PROC_SECONDARY);
return rte_mp_action_register(MLX4_MP_NAME, mp_secondary_handle);
}
void
mlx4_mp_uninit_secondary(void)
{
- assert(rte_eal_process_type() == RTE_PROC_SECONDARY);
+ MLX4_ASSERT(rte_eal_process_type() == RTE_PROC_SECONDARY);
rte_mp_action_unregister(MLX4_MP_NAME);
}
* Memory management functions for mlx4 driver.
*/
-#include <assert.h>
#include <errno.h>
#include <inttypes.h>
#include <stddef.h>
uint16_t n;
uint16_t base = 0;
- assert(bt != NULL);
+ MLX4_ASSERT(bt != NULL);
lkp_tbl = *bt->table;
n = bt->len;
/* First entry must be NULL for comparison. */
- assert(bt->len > 0 || (lkp_tbl[0].start == 0 &&
- lkp_tbl[0].lkey == UINT32_MAX));
+ MLX4_ASSERT(bt->len > 0 || (lkp_tbl[0].start == 0 &&
+ lkp_tbl[0].lkey == UINT32_MAX));
/* Binary search. */
do {
register uint16_t delta = n >> 1;
n -= delta;
}
} while (n > 1);
- assert(addr >= lkp_tbl[base].start);
+ MLX4_ASSERT(addr >= lkp_tbl[base].start);
*idx = base;
if (addr < lkp_tbl[base].end)
return lkp_tbl[base].lkey;
uint16_t idx = 0;
size_t shift;
- assert(bt != NULL);
- assert(bt->len <= bt->size);
- assert(bt->len > 0);
+ MLX4_ASSERT(bt != NULL);
+ MLX4_ASSERT(bt->len <= bt->size);
+ MLX4_ASSERT(bt->len > 0);
lkp_tbl = *bt->table;
/* Find out the slot for insertion. */
if (mr_btree_lookup(bt, &idx, entry->start) != UINT32_MAX) {
if (mr->msl == NULL) {
struct ibv_mr *ibv_mr = mr->ibv_mr;
- assert(mr->ms_bmp_n == 1);
- assert(mr->ms_n == 1);
- assert(base_idx == 0);
+ MLX4_ASSERT(mr->ms_bmp_n == 1);
+ MLX4_ASSERT(mr->ms_n == 1);
+ MLX4_ASSERT(base_idx == 0);
/*
* Can't search it from memseg list but get it directly from
* verbs MR as there's only one chunk.
msl = mr->msl;
ms = rte_fbarray_get(&msl->memseg_arr,
mr->ms_base_idx + idx);
- assert(msl->page_sz == ms->hugepage_sz);
+ MLX4_ASSERT(msl->page_sz == ms->hugepage_sz);
if (!start)
start = ms->addr_64;
end = ms->addr_64 + ms->hugepage_sz;
if (mr != NULL)
lkey = entry->lkey;
}
- assert(lkey == UINT32_MAX || (addr >= entry->start &&
- addr < entry->end));
+ MLX4_ASSERT(lkey == UINT32_MAX || (addr >= entry->start &&
+ addr < entry->end));
return lkey;
}
struct mlx4_mr_list free_list = LIST_HEAD_INITIALIZER(free_list);
/* Must be called from the primary process. */
- assert(rte_eal_process_type() == RTE_PROC_PRIMARY);
+ MLX4_ASSERT(rte_eal_process_type() == RTE_PROC_PRIMARY);
/*
* MR can't be freed with holding the lock because rte_free() could call
* memory free callback function. This will be a deadlock situation.
/* Fill in output data. */
mr_lookup_dev(dev, entry, addr);
/* Lookup can't fail. */
- assert(entry->lkey != UINT32_MAX);
+ MLX4_ASSERT(entry->lkey != UINT32_MAX);
rte_rwlock_read_unlock(&priv->mr.rwlock);
DEBUG("port %u MR CREATED by primary process for %p:\n"
" [0x%" PRIxPTR ", 0x%" PRIxPTR "), lkey=0x%x",
}
alloc_resources:
/* Addresses must be page-aligned. */
- assert(rte_is_aligned((void *)data.start, data.msl->page_sz));
- assert(rte_is_aligned((void *)data.end, data.msl->page_sz));
+ MLX4_ASSERT(rte_is_aligned((void *)data.start, data.msl->page_sz));
+ MLX4_ASSERT(rte_is_aligned((void *)data.end, data.msl->page_sz));
msl = data.msl;
ms = rte_mem_virt2memseg((void *)data.start, msl);
len = data.end - data.start;
- assert(msl->page_sz == ms->hugepage_sz);
+ MLX4_ASSERT(msl->page_sz == ms->hugepage_sz);
/* Number of memsegs in the range. */
ms_n = len / msl->page_sz;
DEBUG("port %u extending %p to [0x%" PRIxPTR ", 0x%" PRIxPTR "),"
mr_free(mr);
goto alloc_resources;
}
- assert(data.msl == data_re.msl);
+ MLX4_ASSERT(data.msl == data_re.msl);
rte_rwlock_write_lock(&priv->mr.rwlock);
/*
* Check the address is really missing. If other thread already created
}
len = data.end - data.start;
mr->ms_bmp_n = len / msl->page_sz;
- assert(ms_idx_shift + mr->ms_bmp_n <= ms_n);
+ MLX4_ASSERT(ms_idx_shift + mr->ms_bmp_n <= ms_n);
/*
* Finally create a verbs MR for the memory chunk. ibv_reg_mr() can be
* called with holding the memory lock because it doesn't use
rte_errno = EINVAL;
goto err_mrlock;
}
- assert((uintptr_t)mr->ibv_mr->addr == data.start);
- assert(mr->ibv_mr->length == len);
+ MLX4_ASSERT((uintptr_t)mr->ibv_mr->addr == data.start);
+ MLX4_ASSERT(mr->ibv_mr->length == len);
LIST_INSERT_HEAD(&priv->mr.mr_list, mr, mr);
DEBUG("port %u MR CREATED (%p) for %p:\n"
" [0x%" PRIxPTR ", 0x%" PRIxPTR "),"
/* Fill in output data. */
mr_lookup_dev(dev, entry, addr);
/* Lookup can't fail. */
- assert(entry->lkey != UINT32_MAX);
+ MLX4_ASSERT(entry->lkey != UINT32_MAX);
rte_rwlock_write_unlock(&priv->mr.rwlock);
rte_mcfg_mem_read_unlock();
return entry->lkey;
dev->data->port_id, addr, len);
msl = rte_mem_virt2memseg_list(addr);
/* addr and len must be page-aligned. */
- assert((uintptr_t)addr == RTE_ALIGN((uintptr_t)addr, msl->page_sz));
- assert(len == RTE_ALIGN(len, msl->page_sz));
+ MLX4_ASSERT((uintptr_t)addr ==
+ RTE_ALIGN((uintptr_t)addr, msl->page_sz));
+ MLX4_ASSERT(len == RTE_ALIGN(len, msl->page_sz));
ms_n = len / msl->page_sz;
rte_rwlock_write_lock(&priv->mr.rwlock);
/* Clear bits of freed memsegs from MR. */
mr = mr_lookup_dev_list(dev, &entry, start);
if (mr == NULL)
continue;
- assert(mr->msl); /* Can't be external memory. */
+ MLX4_ASSERT(mr->msl); /* Can't be external memory. */
ms = rte_mem_virt2memseg((void *)start, msl);
- assert(ms != NULL);
- assert(msl->page_sz == ms->hugepage_sz);
+ MLX4_ASSERT(ms != NULL);
+ MLX4_ASSERT(msl->page_sz == ms->hugepage_sz);
ms_idx = rte_fbarray_find_idx(&msl->memseg_arr, ms);
pos = ms_idx - mr->ms_base_idx;
- assert(rte_bitmap_get(mr->ms_bmp, pos));
- assert(pos < mr->ms_bmp_n);
+ MLX4_ASSERT(rte_bitmap_get(mr->ms_bmp, pos));
+ MLX4_ASSERT(pos < mr->ms_bmp_n);
DEBUG("port %u MR(%p): clear bitmap[%u] for addr %p",
dev->data->port_id, (void *)mr, pos, (void *)start);
rte_bitmap_clear(mr->ms_bmp, pos);
struct mlx4_dev_list *dev_list = &mlx4_shared_data->mem_event_cb_list;
/* Must be called from the primary process. */
- assert(rte_eal_process_type() == RTE_PROC_PRIMARY);
+ MLX4_ASSERT(rte_eal_process_type() == RTE_PROC_PRIMARY);
switch (event_type) {
case RTE_MEM_EVENT_FREE:
rte_rwlock_read_lock(&mlx4_shared_data->mem_event_rwlock);
struct mlx4_mr_cache entry;
uint32_t lkey;
- assert(rte_eal_process_type() == RTE_PROC_PRIMARY);
+ MLX4_ASSERT(rte_eal_process_type() == RTE_PROC_PRIMARY);
/* If already registered, it should return. */
rte_rwlock_read_lock(&priv->mr.rwlock);
lkey = mr_lookup_dev(dev, &entry, addr);
* Rx queues configuration for mlx4 driver.
*/
-#include <assert.h>
#include <errno.h>
#include <stddef.h>
#include <stdint.h>
void
mlx4_rss_put(struct mlx4_rss *rss)
{
- assert(rss->refcnt);
+ MLX4_ASSERT(rss->refcnt);
if (--rss->refcnt)
return;
- assert(!rss->usecnt);
- assert(!rss->qp);
- assert(!rss->ind);
+ MLX4_ASSERT(!rss->usecnt);
+ MLX4_ASSERT(!rss->qp);
+ MLX4_ASSERT(!rss->ind);
LIST_REMOVE(rss, next);
rte_free(rss);
}
int
mlx4_rss_attach(struct mlx4_rss *rss)
{
- assert(rss->refcnt);
+ MLX4_ASSERT(rss->refcnt);
if (rss->usecnt++) {
- assert(rss->qp);
- assert(rss->ind);
+ MLX4_ASSERT(rss->qp);
+ MLX4_ASSERT(rss->ind);
return 0;
}
struct rte_eth_dev *dev = ETH_DEV(priv);
unsigned int i;
- assert(rss->refcnt);
- assert(rss->qp);
- assert(rss->ind);
+ MLX4_ASSERT(rss->refcnt);
+ MLX4_ASSERT(rss->qp);
+ MLX4_ASSERT(rss->ind);
if (--rss->usecnt)
return;
claim_zero(mlx4_glue->destroy_qp(rss->qp));
/* Attach the configured Rx queues. */
if (rxq) {
- assert(!rxq->usecnt);
+ MLX4_ASSERT(!rxq->usecnt);
ret = mlx4_rxq_attach(rxq);
if (!ret) {
wq_num = rxq->wq->wq_num;
struct rxq *rxq = ETH_DEV(priv)->data->rx_queues[i];
if (rxq) {
- assert(rxq->usecnt == 1);
+ MLX4_ASSERT(rxq->usecnt == 1);
mlx4_rxq_detach(rxq);
}
}
mlx4_rxq_attach(struct rxq *rxq)
{
if (rxq->usecnt++) {
- assert(rxq->cq);
- assert(rxq->wq);
- assert(rxq->wqes);
- assert(rxq->rq_db);
+ MLX4_ASSERT(rxq->cq);
+ MLX4_ASSERT(rxq->wq);
+ MLX4_ASSERT(rxq->wqes);
+ MLX4_ASSERT(rxq->rq_db);
return 0;
}
unsigned int i;
int ret;
- assert(rte_is_power_of_2(elts_n));
+ MLX4_ASSERT(rte_is_power_of_2(elts_n));
priv->verbs_alloc_ctx.type = MLX4_VERBS_ALLOC_TYPE_RX_QUEUE;
priv->verbs_alloc_ctx.obj = rxq;
cq = mlx4_glue->create_cq(priv->ctx, elts_n / sges_n, NULL,
goto error;
}
/* Headroom is reserved by rte_pktmbuf_alloc(). */
- assert(buf->data_off == RTE_PKTMBUF_HEADROOM);
+ MLX4_ASSERT(buf->data_off == RTE_PKTMBUF_HEADROOM);
/* Buffer is supposed to be empty. */
- assert(rte_pktmbuf_data_len(buf) == 0);
- assert(rte_pktmbuf_pkt_len(buf) == 0);
+ MLX4_ASSERT(rte_pktmbuf_data_len(buf) == 0);
+ MLX4_ASSERT(rte_pktmbuf_pkt_len(buf) == 0);
/* Only the first segment keeps headroom. */
if (i % sges_n)
buf->data_off = 0;
.socket = socket,
};
/* Enable scattered packets support for this queue if necessary. */
- assert(mb_len >= RTE_PKTMBUF_HEADROOM);
+ MLX4_ASSERT(mb_len >= RTE_PKTMBUF_HEADROOM);
if (dev->data->dev_conf.rxmode.max_rx_pkt_len <=
(mb_len - RTE_PKTMBUF_HEADROOM)) {
;
ret = rte_errno;
mlx4_rx_queue_release(rxq);
rte_errno = ret;
- assert(rte_errno > 0);
+ MLX4_ASSERT(rte_errno > 0);
return -rte_errno;
}
ETH_DEV(priv)->data->rx_queues[i] = NULL;
break;
}
- assert(!rxq->cq);
- assert(!rxq->wq);
- assert(!rxq->wqes);
- assert(!rxq->rq_db);
+ MLX4_ASSERT(!rxq->cq);
+ MLX4_ASSERT(!rxq->wq);
+ MLX4_ASSERT(!rxq->wqes);
+ MLX4_ASSERT(!rxq->rq_db);
if (rxq->channel)
claim_zero(mlx4_glue->destroy_comp_channel(rxq->channel));
mlx4_mr_btree_free(&rxq->mr_ctrl.cache_bh);
* Data plane functions for mlx4 driver.
*/
-#include <assert.h>
#include <stdbool.h>
#include <stdint.h>
#include <string.h>
uint32_t stamp = sq->stamp;
int32_t size = (intptr_t)end - (intptr_t)start;
- assert(start != end);
+ MLX4_ASSERT(start != end);
/* Hold SQ ring wrap around. */
if (size < 0) {
size = (int32_t)sq->size + size;
volatile struct mlx4_wqe_ctrl_seg *ctrl;
struct txq_elt *elt;
- assert(txq->elts_comp_cd != 0);
+ MLX4_ASSERT(txq->elts_comp_cd != 0);
if (likely(max >= txq->elts_comp_cd_init))
mlx4_txq_complete(txq, elts_m, sq);
max = elts_n - max;
- assert(max >= 1);
- assert(max <= elts_n);
+ MLX4_ASSERT(max >= 1);
+ MLX4_ASSERT(max <= elts_n);
/* Always leave one free entry in the ring. */
--max;
if (max > pkts_n)
* ownership bit.
*/
rte_rmb();
- assert(!(cqe->owner_sr_opcode & MLX4_CQE_IS_SEND_MASK));
- assert((cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) !=
- MLX4_CQE_OPCODE_ERROR);
+ MLX4_ASSERT(!(cqe->owner_sr_opcode & MLX4_CQE_IS_SEND_MASK));
+ MLX4_ASSERT((cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) !=
+ MLX4_CQE_OPCODE_ERROR);
ret = rte_be_to_cpu_32(cqe->byte_cnt);
++cq->cons_index;
out:
break;
}
while (pkt != seg) {
- assert(pkt != (*rxq->elts)[idx]);
+ MLX4_ASSERT(pkt != (*rxq->elts)[idx]);
rep = pkt->next;
pkt->next = NULL;
pkt->nb_segs = 1;
goto skip;
}
pkt = seg;
- assert(len >= (rxq->crc_present << 2));
+ MLX4_ASSERT(len >= (rxq->crc_present << 2));
/* Update packet information. */
pkt->packet_type =
rxq_cq_to_pkt_type(cqe, rxq->l2tun_offload);
* Tx queues configuration for mlx4 driver.
*/
-#include <assert.h>
#include <errno.h>
#include <stddef.h>
#include <stdint.h>
struct mlx4_priv *priv = txq->priv;
struct mlx4_proc_priv *ppriv = MLX4_PROC_PRIV(PORT_ID(priv));
- assert(rte_eal_process_type() == RTE_PROC_PRIMARY);
- assert(ppriv);
+ MLX4_ASSERT(rte_eal_process_type() == RTE_PROC_PRIMARY);
+ MLX4_ASSERT(ppriv);
ppriv->uar_table[txq->stats.idx] = txq->msq.db;
}
uintptr_t offset;
const size_t page_size = sysconf(_SC_PAGESIZE);
- assert(ppriv);
+ MLX4_ASSERT(ppriv);
/*
* As rdma-core, UARs are mapped in size of OS page
* size. Ref to libmlx4 function: mlx4_init_context()
unsigned int i;
int ret;
- assert(rte_eal_process_type() == RTE_PROC_SECONDARY);
+ MLX4_ASSERT(rte_eal_process_type() == RTE_PROC_SECONDARY);
for (i = 0; i != txqs_n; ++i) {
txq = dev->data->tx_queues[i];
if (!txq)
continue;
- assert(txq->stats.idx == (uint16_t)i);
+ MLX4_ASSERT(txq->stats.idx == (uint16_t)i);
ret = txq_uar_init_secondary(txq, fd);
if (ret)
goto error;
mlx4_tx_uar_init_secondary(struct rte_eth_dev *dev __rte_unused,
int fd __rte_unused)
{
- assert(rte_eal_process_type() == RTE_PROC_SECONDARY);
+ MLX4_ASSERT(rte_eal_process_type() == RTE_PROC_SECONDARY);
ERROR("UAR remap is not supported");
rte_errno = ENOTSUP;
return -rte_errno;
while (elts_tail != elts_head) {
struct txq_elt *elt = &(*elts)[elts_tail++ & elts_m];
- assert(elt->buf != NULL);
+ MLX4_ASSERT(elt->buf != NULL);
rte_pktmbuf_free(elt->buf);
elt->buf = NULL;
elt->wqe = NULL;
ret = rte_errno;
mlx4_tx_queue_release(txq);
rte_errno = ret;
- assert(rte_errno > 0);
+ MLX4_ASSERT(rte_errno > 0);
priv->verbs_alloc_ctx.type = MLX4_VERBS_ALLOC_TYPE_NONE;
return -rte_errno;
}
* Utility functions used by the mlx4 driver.
*/
-#include <assert.h>
#include <errno.h>
#include <fcntl.h>
#include <stddef.h>
if (ret != -1 && !fcntl(fd, F_SETFL, ret | O_NONBLOCK))
return 0;
- assert(errno);
+ MLX4_ASSERT(errno);
rte_errno = errno;
return -rte_errno;
}
#ifndef MLX4_UTILS_H_
#define MLX4_UTILS_H_
-#include <assert.h>
#include <stddef.h>
#include <stdio.h>
__func__, \
RTE_FMT_TAIL(__VA_ARGS__,)))
#define DEBUG(...) PMD_DRV_LOG(DEBUG, __VA_ARGS__)
-#define claim_zero(...) assert((__VA_ARGS__) == 0)
+#define MLX4_ASSERT(exp) RTE_VERIFY(exp)
+#define claim_zero(...) MLX4_ASSERT((__VA_ARGS__) == 0)
#else /* RTE_LIBRTE_MLX4_DEBUG */
/*
- * Like assert(), DEBUG() becomes a no-op and claim_zero() does not perform
+ * Like MLX4_ASSERT(), DEBUG() becomes a no-op and claim_zero() does not perform
* any check when debugging is disabled.
*/
RTE_FMT_HEAD(__VA_ARGS__,) "\n", \
RTE_FMT_TAIL(__VA_ARGS__,)))
#define DEBUG(...) (void)0
+#define MLX4_ASSERT(exp) RTE_ASSERT(exp)
#define claim_zero(...) (__VA_ARGS__)
#endif /* RTE_LIBRTE_MLX4_DEBUG */