Use the MLX5_ASSERT macros instead of the standard assert() macro.
MLX5_ASSERT depends on the RTE_LIBRTE_MLX5_DEBUG configuration option.
If RTE_LIBRTE_MLX5_DEBUG is enabled, MLX5_ASSERT expands to RTE_VERIFY,
bypassing the global CONFIG_RTE_ENABLE_ASSERT option.
If RTE_LIBRTE_MLX5_DEBUG is disabled, the global CONFIG_RTE_ENABLE_ASSERT
option can still activate the assertion, since RTE_ASSERT calls
RTE_VERIFY when that option is set.
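A minimal sketch (not part of the diff) of how the new macro resolves,
assuming only the definitions added to mlx5_common.h by this patch; the
call site variable "sh" is illustrative:

    #ifdef RTE_LIBRTE_MLX5_DEBUG
    /* Debug build: always-on check, panics via RTE_VERIFY() even when
     * CONFIG_RTE_ENABLE_ASSERT is not set.
     */
    #define MLX5_ASSERT(exp) RTE_VERIFY(exp)
    #else
    /* Non-debug build: no-op unless CONFIG_RTE_ENABLE_ASSERT is set,
     * in which case RTE_ASSERT() itself expands to RTE_VERIFY().
     */
    #define MLX5_ASSERT(exp) RTE_ASSERT(exp)
    #endif

    MLX5_ASSERT(sh != NULL); /* checked or compiled out per build flags */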
Signed-off-by: Alexander Kozyrev <akozyrev@mellanox.com>
Acked-by: Viacheslav Ovsiienko <viacheslavo@mellanox.com>
unsigned int i;
for (i = 0; i != sizeof(*mlx5_glue) / sizeof(void *); ++i)
- assert(((const void *const *)mlx5_glue)[i]);
+ MLX5_ASSERT(((const void *const *)mlx5_glue)[i]);
}
#endif
if (strcmp(mlx5_glue->version, MLX5_GLUE_VERSION)) {
#ifndef RTE_PMD_MLX5_COMMON_H_
#define RTE_PMD_MLX5_COMMON_H_
-#include <assert.h>
#include <stdio.h>
#include <rte_pci.h>
/* Bit-field manipulation. */
#define BITFIELD_DECLARE(bf, type, size) \
type bf[(((size_t)(size) / (sizeof(type) * CHAR_BIT)) + \
- !!((size_t)(size) % (sizeof(type) * CHAR_BIT)))]
+ !!((size_t)(size) % (sizeof(type) * CHAR_BIT)))]
#define BITFIELD_DEFINE(bf, type, size) \
BITFIELD_DECLARE((bf), type, (size)) = { 0 }
#define BITFIELD_SET(bf, b) \
- (assert((size_t)(b) < (sizeof(bf) * CHAR_BIT)), \
- (void)((bf)[((b) / (sizeof((bf)[0]) * CHAR_BIT))] |= \
- ((size_t)1 << ((b) % (sizeof((bf)[0]) * CHAR_BIT)))))
+ (void)((bf)[((b) / (sizeof((bf)[0]) * CHAR_BIT))] |= \
+ ((size_t)1 << ((b) % (sizeof((bf)[0]) * CHAR_BIT))))
#define BITFIELD_RESET(bf, b) \
- (assert((size_t)(b) < (sizeof(bf) * CHAR_BIT)), \
- (void)((bf)[((b) / (sizeof((bf)[0]) * CHAR_BIT))] &= \
- ~((size_t)1 << ((b) % (sizeof((bf)[0]) * CHAR_BIT)))))
+ (void)((bf)[((b) / (sizeof((bf)[0]) * CHAR_BIT))] &= \
+ ~((size_t)1 << ((b) % (sizeof((bf)[0]) * CHAR_BIT))))
#define BITFIELD_ISSET(bf, b) \
- (assert((size_t)(b) < (sizeof(bf) * CHAR_BIT)), \
- !!(((bf)[((b) / (sizeof((bf)[0]) * CHAR_BIT))] & \
- ((size_t)1 << ((b) % (sizeof((bf)[0]) * CHAR_BIT))))))
+ !!(((bf)[((b) / (sizeof((bf)[0]) * CHAR_BIT))] & \
+ ((size_t)1 << ((b) % (sizeof((bf)[0]) * CHAR_BIT)))))
/*
* Helper macros to work around __VA_ARGS__ limitations in a C99 compliant
#ifdef RTE_LIBRTE_MLX5_DEBUG
#define DEBUG(...) DRV_LOG(DEBUG, __VA_ARGS__)
-#define claim_zero(...) assert((__VA_ARGS__) == 0)
-#define claim_nonzero(...) assert((__VA_ARGS__) != 0)
+#define MLX5_ASSERT(exp) RTE_VERIFY(exp)
+#define claim_zero(...) MLX5_ASSERT((__VA_ARGS__) == 0)
+#define claim_nonzero(...) MLX5_ASSERT((__VA_ARGS__) != 0)
#else /* RTE_LIBRTE_MLX5_DEBUG */
#define DEBUG(...) (void)0
+#define MLX5_ASSERT(exp) RTE_ASSERT(exp)
#define claim_zero(...) (__VA_ARGS__)
#define claim_nonzero(...) (__VA_ARGS__)
if (ret)
return ret;
}
- assert(rx_domain);
+ MLX5_ASSERT(rx_domain);
ret = mlx5_glue->dr_dump_domain(file, rx_domain);
if (ret)
return ret;
- assert(tx_domain);
+ MLX5_ASSERT(tx_domain);
ret = mlx5_glue->dr_dump_domain(file, tx_domain);
#else
ret = ENOTSUP;
MLX5_SET(qpc, qpc, log_page_size, attr->log_page_size -
MLX5_ADAPTER_PAGE_SHIFT);
if (attr->sq_size) {
- RTE_ASSERT(RTE_IS_POWER_OF_2(attr->sq_size));
+ MLX5_ASSERT(RTE_IS_POWER_OF_2(attr->sq_size));
MLX5_SET(qpc, qpc, cqn_snd, attr->cqn);
MLX5_SET(qpc, qpc, log_sq_size,
rte_log2_u32(attr->sq_size));
MLX5_SET(qpc, qpc, no_sq, 1);
}
if (attr->rq_size) {
- RTE_ASSERT(RTE_IS_POWER_OF_2(attr->rq_size));
+ MLX5_ASSERT(RTE_IS_POWER_OF_2(attr->rq_size));
MLX5_SET(qpc, qpc, cqn_rcv, attr->cqn);
MLX5_SET(qpc, qpc, log_rq_stride, attr->log_rq_stride -
MLX5_LOG_RQ_STRIDE_SHIFT);
int ret;
ret = mlx5_nl_mac_addr_modify(nlsk_fd, iface_idx, mac, 1);
- if (!ret)
+ if (!ret) {
+ MLX5_ASSERT((size_t)(index) < sizeof(mac_own) * CHAR_BIT);
BITFIELD_SET(mac_own, index);
+ }
if (ret == -EEXIST)
return 0;
return ret;
mlx5_nl_mac_addr_remove(int nlsk_fd, unsigned int iface_idx, uint64_t *mac_own,
struct rte_ether_addr *mac, uint32_t index)
{
+ MLX5_ASSERT((size_t)(index) < sizeof(mac_own) * CHAR_BIT);
BITFIELD_RESET(mac_own, index);
return mlx5_nl_mac_addr_modify(nlsk_fd, iface_idx, mac, 0);
}
for (i = n - 1; i >= 0; --i) {
struct rte_ether_addr *m = &mac_addrs[i];
+ MLX5_ASSERT((size_t)(i) < sizeof(mac_own) * CHAR_BIT);
if (BITFIELD_ISSET(mac_own, i))
mlx5_nl_mac_addr_remove(nlsk_fd, iface_idx, mac_own, m,
i);
uint32_t sn = MLX5_NL_SN_GENERATE;
int ret;
- assert(!(flags & ~(IFF_PROMISC | IFF_ALLMULTI)));
+ MLX5_ASSERT(!(flags & ~(IFF_PROMISC | IFF_ALLMULTI)));
if (nlsk_fd < 0)
return 0;
ret = mlx5_nl_send(nlsk_fd, &req.hdr, sn);
/* We have some E-Switch configuration. */
mlx5_nl_check_switch_info(num_vf_set, &info);
}
- assert(!(info.master && info.representor));
+ MLX5_ASSERT(!(info.master && info.representor));
memcpy(arg, &info, sizeof(info));
return 0;
error:
nl_attr_put(nlh, IFLA_VLAN_ID, &tag, sizeof(tag));
nl_attr_nest_end(nlh, na_vlan);
nl_attr_nest_end(nlh, na_info);
- assert(sizeof(buf) >= nlh->nlmsg_len);
+ MLX5_ASSERT(sizeof(buf) >= nlh->nlmsg_len);
ret = mlx5_nl_send(vmwa->nl_socket, nlh, sn);
if (ret >= 0)
ret = mlx5_nl_recv(vmwa->nl_socket, sn, NULL, NULL);
#ifndef RTE_PMD_MLX5_PRM_H_
#define RTE_PMD_MLX5_PRM_H_
-#include <assert.h>
/* Verbs header. */
/* ISO C doesn't support unnamed structs/unions, disabling -pedantic. */
#ifdef PEDANTIC
#define MLX5_SET64(typ, p, fld, v) \
do { \
- assert(__mlx5_bit_sz(typ, fld) == 64); \
+ MLX5_ASSERT(__mlx5_bit_sz(typ, fld) == 64); \
*((__be64 *)(p) + __mlx5_64_off(typ, fld)) = \
rte_cpu_to_be_64(v); \
} while (0)
#include <stddef.h>
#include <unistd.h>
#include <string.h>
-#include <assert.h>
#include <stdint.h>
#include <stdlib.h>
#include <errno.h>
if (pool->curr == pool->last) {
size = pool->curr - pool->free_arr;
size2 = size * MLX5_ID_GENERATION_ARRAY_FACTOR;
- assert(size2 > size);
+ MLX5_ASSERT(size2 > size);
mem = rte_malloc("", size2 * sizeof(uint32_t), 0);
if (!mem) {
DRV_LOG(ERR, "can't allocate mem for id pool");
char *env;
int value;
- assert(rte_eal_process_type() == RTE_PROC_PRIMARY);
+ MLX5_ASSERT(rte_eal_process_type() == RTE_PROC_PRIMARY);
/* Get environment variable to store. */
env = getenv(MLX5_SHUT_UP_BF);
value = env ? !!strcmp(env, "0") : MLX5_ARG_UNSET;
static void
mlx5_restore_doorbell_mapping_env(int value)
{
- assert(rte_eal_process_type() == RTE_PROC_PRIMARY);
+ MLX5_ASSERT(rte_eal_process_type() == RTE_PROC_PRIMARY);
/* Restore the original environment variable state. */
if (value == MLX5_ARG_UNSET)
unsetenv(MLX5_SHUT_UP_BF);
struct mlx5_devx_tis_attr tis_attr = { 0 };
#endif
- assert(spawn);
+ MLX5_ASSERT(spawn);
/* Secondary process should not create the shared context. */
- assert(rte_eal_process_type() == RTE_PROC_PRIMARY);
+ MLX5_ASSERT(rte_eal_process_type() == RTE_PROC_PRIMARY);
pthread_mutex_lock(&mlx5_ibv_list_mutex);
/* Search for IB context by device name. */
LIST_FOREACH(sh, &mlx5_ibv_list, next) {
}
}
/* No device found, we have to create new shared context. */
- assert(spawn->max_port);
+ MLX5_ASSERT(spawn->max_port);
sh = rte_zmalloc("ethdev shared ib context",
sizeof(struct mlx5_ibv_shared) +
spawn->max_port *
return sh;
error:
pthread_mutex_unlock(&mlx5_ibv_list_mutex);
- assert(sh);
+ MLX5_ASSERT(sh);
if (sh->tis)
claim_zero(mlx5_devx_cmd_destroy(sh->tis));
if (sh->td)
if (sh->flow_id_pool)
mlx5_flow_id_pool_release(sh->flow_id_pool);
rte_free(sh);
- assert(err > 0);
+ MLX5_ASSERT(err > 0);
rte_errno = err;
return NULL;
}
LIST_FOREACH(lctx, &mlx5_ibv_list, next)
if (lctx == sh)
break;
- assert(lctx);
+ MLX5_ASSERT(lctx);
if (lctx != sh) {
DRV_LOG(ERR, "Freeing non-existing shared IB context");
goto exit;
}
#endif
- assert(sh);
- assert(sh->refcnt);
+ MLX5_ASSERT(sh);
+ MLX5_ASSERT(sh->refcnt);
/* Secondary process should not free the shared context. */
- assert(rte_eal_process_type() == RTE_PROC_PRIMARY);
+ MLX5_ASSERT(rte_eal_process_type() == RTE_PROC_PRIMARY);
if (--sh->refcnt)
goto exit;
/* Release created Memory Regions. */
* Only primary process handles async device events.
**/
mlx5_flow_counters_mng_close(sh);
- assert(!sh->intr_cnt);
+ MLX5_ASSERT(!sh->intr_cnt);
if (sh->intr_cnt)
mlx5_intr_callback_unregister
(&sh->intr_handle, mlx5_dev_interrupt_handler, sh);
if (pos) {
tbl_data = container_of(pos, struct mlx5_flow_tbl_data_entry,
entry);
- assert(tbl_data);
+ MLX5_ASSERT(tbl_data);
mlx5_hlist_remove(sh->flow_tbls, pos);
rte_free(tbl_data);
}
if (pos) {
tbl_data = container_of(pos, struct mlx5_flow_tbl_data_entry,
entry);
- assert(tbl_data);
+ MLX5_ASSERT(tbl_data);
mlx5_hlist_remove(sh->flow_tbls, pos);
rte_free(tbl_data);
}
if (pos) {
tbl_data = container_of(pos, struct mlx5_flow_tbl_data_entry,
entry);
- assert(tbl_data);
+ MLX5_ASSERT(tbl_data);
mlx5_hlist_remove(sh->flow_tbls, pos);
rte_free(tbl_data);
}
char s[MLX5_HLIST_NAMESIZE];
int err = 0;
- assert(sh);
+ MLX5_ASSERT(sh);
snprintf(s, sizeof(s), "%s_flow_table", priv->sh->ibdev_name);
sh->flow_tbls = mlx5_hlist_create(s, MLX5_FLOW_TABLE_HLIST_ARRAY_SIZE);
if (!sh->flow_tbls) {
return;
priv->dr_shared = 0;
sh = priv->sh;
- assert(sh);
+ MLX5_ASSERT(sh);
#ifdef HAVE_MLX5DV_DR
- assert(sh->dv_refcnt);
+ MLX5_ASSERT(sh->dv_refcnt);
if (sh->dv_refcnt && --sh->dv_refcnt)
return;
if (sh->rx_domain) {
socket = ctrl->socket;
}
- assert(data != NULL);
+ MLX5_ASSERT(data != NULL);
ret = rte_malloc_socket(__func__, size, alignment, socket);
if (!ret && size)
rte_errno = ENOMEM;
static void
mlx5_free_verbs_buf(void *ptr, void *data __rte_unused)
{
- assert(data != NULL);
+ MLX5_ASSERT(data != NULL);
rte_free(ptr);
}
mlx5_udp_tunnel_port_add(struct rte_eth_dev *dev __rte_unused,
struct rte_eth_udp_tunnel *udp_tunnel)
{
- assert(udp_tunnel != NULL);
+ MLX5_ASSERT(udp_tunnel != NULL);
if (udp_tunnel->prot_type == RTE_TUNNEL_TYPE_VXLAN &&
udp_tunnel->udp_port == 4789)
return 0;
if (mlx5_init_shared_data())
return -rte_errno;
sd = mlx5_shared_data;
- assert(sd);
+ MLX5_ASSERT(sd);
rte_spinlock_lock(&sd->lock);
switch (rte_eal_process_type()) {
case RTE_PROC_PRIMARY:
default:
meta = 0;
mark = 0;
- assert(false);
+ MLX5_ASSERT(false);
break;
}
if (sh->dv_mark_mask && sh->dv_mark_mask != mark)
; /* Empty. */
/* Find the first clear bit. */
j = rte_bsf64(~page->dbr_bitmap[i]);
- assert(i < (MLX5_DBR_PER_PAGE / 64));
+ MLX5_ASSERT(i < (MLX5_DBR_PER_PAGE / 64));
page->dbr_bitmap[i] |= (1 << j);
page->dbr_count++;
*dbr_page = page;
struct mlx5_dev_config *sh_conf = NULL;
uint16_t port_id;
- assert(sh);
+ MLX5_ASSERT(sh);
/* Nothing to compare for the single/first device. */
if (sh->refcnt == 1)
return 0;
* is permanent throughout the lifetime of device. So, we may store
* the ifindex here and use the cached value further.
*/
- assert(spawn->ifindex);
+ MLX5_ASSERT(spawn->ifindex);
priv->if_index = spawn->ifindex;
eth_dev->data->dev_private = priv;
priv->dev_data = eth_dev->data;
}
if (sh)
mlx5_free_shared_ibctx(sh);
- assert(err > 0);
+ MLX5_ASSERT(err > 0);
rte_errno = err;
return NULL;
}
if (!file)
return -1;
/* Use safe format to check maximal buffer length. */
- assert(atol(RTE_STR(IF_NAMESIZE)) == IF_NAMESIZE);
+ MLX5_ASSERT(atol(RTE_STR(IF_NAMESIZE)) == IF_NAMESIZE);
while (fscanf(file, "%" RTE_STR(IF_NAMESIZE) "s", ifname) == 1) {
char tmp_str[IF_NAMESIZE + 32];
struct rte_pci_addr pci_addr;
strerror(rte_errno));
return -rte_errno;
}
- assert(pci_drv == &mlx5_driver);
+ MLX5_ASSERT(pci_drv == &mlx5_driver);
errno = 0;
ibv_list = mlx5_glue->get_device_list(&ret);
if (!ibv_list) {
* it may be E-Switch master device and representors.
* We have to perform identification trough the ports.
*/
- assert(nl_rdma >= 0);
- assert(ns == 0);
- assert(nd == 1);
- assert(np);
+ MLX5_ASSERT(nl_rdma >= 0);
+ MLX5_ASSERT(ns == 0);
+ MLX5_ASSERT(nd == 1);
+ MLX5_ASSERT(np);
for (i = 1; i <= np; ++i) {
list[ns].max_port = np;
list[ns].ibv_port = i;
goto exit;
}
}
- assert(ns);
+ MLX5_ASSERT(ns);
/*
* Sort list to probe devices in natural order for users convenience
* (i.e. master first, then representors from lowest to highest ID).
close(nl_route);
if (list)
rte_free(list);
- assert(ibv_list);
+ MLX5_ASSERT(ibv_list);
mlx5_glue->free_device_list(ibv_list);
return ret;
}
*/
#include <stddef.h>
-#include <assert.h>
#include <inttypes.h>
#include <unistd.h>
#include <stdbool.h>
unsigned int dev_port_prev = ~0u;
char match[IF_NAMESIZE] = "";
- assert(ibdev_path);
+ MLX5_ASSERT(ibdev_path);
{
MKSTR(path, "%s/device/net", ibdev_path);
struct mlx5_priv *priv = dev->data->dev_private;
unsigned int ifindex;
- assert(priv);
- assert(priv->sh);
+ MLX5_ASSERT(priv);
+ MLX5_ASSERT(priv->sh);
ifindex = mlx5_ifindex(dev);
if (!ifindex) {
if (!priv->representor)
struct mlx5_priv *priv = dev->data->dev_private;
unsigned int ifindex;
- assert(priv);
- assert(priv->if_index);
+ MLX5_ASSERT(priv);
+ MLX5_ASSERT(priv->if_index);
ifindex = priv->if_index;
if (!ifindex)
rte_errno = ENXIO;
inlen = (config->txq_inline_max == MLX5_ARG_UNSET) ?
MLX5_SEND_DEF_INLINE_LEN :
(unsigned int)config->txq_inline_max;
- assert(config->txq_inline_min >= 0);
+ MLX5_ASSERT(config->txq_inline_min >= 0);
inlen = RTE_MAX(inlen, (unsigned int)config->txq_inline_min);
inlen = RTE_MIN(inlen, MLX5_WQE_SIZE_MAX +
MLX5_ESEG_MIN_INLINE_SIZE -
priv->pf_bond > MLX5_PORT_ID_BONDING_PF_MASK) {
DRV_LOG(ERR, "can't update switch port ID"
" for bonding device");
- assert(false);
+ MLX5_ASSERT(false);
return -ENODEV;
}
info->switch_info.port_id |=
priv = dev->data->dev_private;
domain_id = priv->domain_id;
- assert(priv->representor);
+ MLX5_ASSERT(priv->representor);
MLX5_ETH_FOREACH_DEV(port_id, priv->pci_dev) {
struct mlx5_priv *opriv =
rte_eth_devices[port_id].data->dev_private;
continue;
}
dev = &rte_eth_devices[sh->port[i].ih_port_id];
- assert(dev);
+ MLX5_ASSERT(dev);
if (dev->data->dev_conf.intr_conf.rmv)
_rte_eth_dev_callback_process
(dev, RTE_ETH_EVENT_INTR_RMV, NULL);
mlx5_dev_interrupt_device_fatal(sh);
continue;
}
- assert(tmp && (tmp <= sh->max_port));
+ MLX5_ASSERT(tmp && (tmp <= sh->max_port));
if (!tmp) {
/* Unsupported devive level event. */
mlx5_glue->ack_async_event(&event);
/* Retrieve ethernet device descriptor. */
tmp = sh->port[tmp - 1].ih_port_id;
dev = &rte_eth_devices[tmp];
- assert(dev);
+ MLX5_ASSERT(dev);
if ((event.event_type == IBV_EVENT_PORT_ACTIVE ||
event.event_type == IBV_EVENT_PORT_ERR) &&
dev->data->dev_conf.intr_conf.lsc) {
if (ret != -EAGAIN) {
DRV_LOG(INFO, "failed to unregister interrupt"
" handler (error: %d)", ret);
- assert(false);
+ MLX5_ASSERT(false);
return;
}
if (twait) {
* on first iteration.
*/
twait = rte_get_timer_hz();
- assert(twait);
+ MLX5_ASSERT(twait);
}
/*
* Timeout elapsed, show message (once a second) and retry.
if (rte_eal_process_type() != RTE_PROC_PRIMARY)
return;
pthread_mutex_lock(&sh->intr_mutex);
- assert(priv->ibv_port);
- assert(priv->ibv_port <= sh->max_port);
- assert(dev->data->port_id < RTE_MAX_ETHPORTS);
+ MLX5_ASSERT(priv->ibv_port);
+ MLX5_ASSERT(priv->ibv_port <= sh->max_port);
+ MLX5_ASSERT(dev->data->port_id < RTE_MAX_ETHPORTS);
if (sh->port[priv->ibv_port - 1].ih_port_id >= RTE_MAX_ETHPORTS)
goto exit;
- assert(sh->port[priv->ibv_port - 1].ih_port_id ==
+ MLX5_ASSERT(sh->port[priv->ibv_port - 1].ih_port_id ==
(uint32_t)dev->data->port_id);
- assert(sh->intr_cnt);
+ MLX5_ASSERT(sh->intr_cnt);
sh->port[priv->ibv_port - 1].ih_port_id = RTE_MAX_ETHPORTS;
if (!sh->intr_cnt || --sh->intr_cnt)
goto exit;
if (rte_eal_process_type() != RTE_PROC_PRIMARY)
return;
pthread_mutex_lock(&sh->intr_mutex);
- assert(priv->ibv_port);
- assert(priv->ibv_port <= sh->max_port);
- assert(dev->data->port_id < RTE_MAX_ETHPORTS);
+ MLX5_ASSERT(priv->ibv_port);
+ MLX5_ASSERT(priv->ibv_port <= sh->max_port);
+ MLX5_ASSERT(dev->data->port_id < RTE_MAX_ETHPORTS);
if (sh->port[priv->ibv_port - 1].devx_ih_port_id >= RTE_MAX_ETHPORTS)
goto exit;
- assert(sh->port[priv->ibv_port - 1].devx_ih_port_id ==
- (uint32_t)dev->data->port_id);
+ MLX5_ASSERT(sh->port[priv->ibv_port - 1].devx_ih_port_id ==
+ (uint32_t)dev->data->port_id);
sh->port[priv->ibv_port - 1].devx_ih_port_id = RTE_MAX_ETHPORTS;
if (!sh->devx_intr_cnt || --sh->devx_intr_cnt)
goto exit;
if (rte_eal_process_type() != RTE_PROC_PRIMARY)
return;
pthread_mutex_lock(&sh->intr_mutex);
- assert(priv->ibv_port);
- assert(priv->ibv_port <= sh->max_port);
- assert(dev->data->port_id < RTE_MAX_ETHPORTS);
+ MLX5_ASSERT(priv->ibv_port);
+ MLX5_ASSERT(priv->ibv_port <= sh->max_port);
+ MLX5_ASSERT(dev->data->port_id < RTE_MAX_ETHPORTS);
if (sh->port[priv->ibv_port - 1].ih_port_id < RTE_MAX_ETHPORTS) {
/* The handler is already installed for this port. */
- assert(sh->intr_cnt);
+ MLX5_ASSERT(sh->intr_cnt);
goto exit;
}
if (sh->intr_cnt) {
goto exit;
}
/* No shared handler installed. */
- assert(sh->ctx->async_fd > 0);
+ MLX5_ASSERT(sh->ctx->async_fd > 0);
flags = fcntl(sh->ctx->async_fd, F_GETFL);
ret = fcntl(sh->ctx->async_fd, F_SETFL, flags | O_NONBLOCK);
if (ret) {
if (rte_eal_process_type() != RTE_PROC_PRIMARY)
return;
pthread_mutex_lock(&sh->intr_mutex);
- assert(priv->ibv_port);
- assert(priv->ibv_port <= sh->max_port);
- assert(dev->data->port_id < RTE_MAX_ETHPORTS);
+ MLX5_ASSERT(priv->ibv_port);
+ MLX5_ASSERT(priv->ibv_port <= sh->max_port);
+ MLX5_ASSERT(dev->data->port_id < RTE_MAX_ETHPORTS);
if (sh->port[priv->ibv_port - 1].devx_ih_port_id < RTE_MAX_ETHPORTS) {
/* The handler is already installed for this port. */
- assert(sh->devx_intr_cnt);
+ MLX5_ASSERT(sh->devx_intr_cnt);
goto exit;
}
if (sh->devx_intr_cnt) {
{
eth_rx_burst_t rx_pkt_burst = mlx5_rx_burst;
- assert(dev != NULL);
+ MLX5_ASSERT(dev != NULL);
if (mlx5_check_vec_rx_support(dev) > 0) {
rx_pkt_burst = mlx5_rx_burst_vec;
DRV_LOG(DEBUG, "port %u selected Rx vectorized function",
mlx5_sysfs_check_switch_info(device_dir, &data);
}
*info = data;
- assert(!(data.master && data.representor));
+ MLX5_ASSERT(!(data.master && data.representor));
if (data.master && data.representor) {
DRV_LOG(ERR, "ifindex %u device is recognized as master"
" and as representor", ifindex);
return priv->mtr_color_reg != REG_C_2 ? REG_C_2 :
REG_C_3;
case MLX5_MTR_COLOR:
- RTE_ASSERT(priv->mtr_color_reg != REG_NONE);
+ MLX5_ASSERT(priv->mtr_color_reg != REG_NONE);
return priv->mtr_color_reg;
case MLX5_COPY_MARK:
/*
}
return config->flow_mreg_c[id + start_reg - REG_C_0];
}
- assert(false);
+ MLX5_ASSERT(false);
return rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
NULL, "invalid feature name");
{
unsigned int i;
- assert(nic_mask);
+ MLX5_ASSERT(nic_mask);
for (i = 0; i < size; ++i)
if ((nic_mask[i] | mask[i]) != nic_mask[i])
return rte_flow_error_set(error, ENOTSUP,
const int tunnel = !!(dev_flow->layers & MLX5_FLOW_LAYER_TUNNEL);
unsigned int i;
- assert(dev->data->dev_started);
+ MLX5_ASSERT(dev->data->dev_started);
for (i = 0; i != flow->rss.queue_num; ++i) {
int idx = (*flow->rss.queue)[i];
struct mlx5_rxq_ctrl *rxq_ctrl =
MLX5_FLOW_LAYER_OUTER_L4;
int ret;
- assert(flow_mask);
+ MLX5_ASSERT(flow_mask);
if (target_protocol != 0xff && target_protocol != IPPROTO_TCP)
return rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ITEM, item,
ret = mlx5_flow_id_get(priv->qrss_id_pool, &qrss_id);
if (ret)
return 0;
- assert(qrss_id);
+ MLX5_ASSERT(qrss_id);
return qrss_id;
}
const struct mlx5_flow_driver_ops *fops;
enum mlx5_flow_drv_type type = flow->drv_type;
- assert(type > MLX5_FLOW_TYPE_MIN && type < MLX5_FLOW_TYPE_MAX);
+ MLX5_ASSERT(type > MLX5_FLOW_TYPE_MIN && type < MLX5_FLOW_TYPE_MAX);
fops = flow_get_drv_ops(type);
return fops->prepare(attr, items, actions, error);
}
const struct mlx5_flow_driver_ops *fops;
enum mlx5_flow_drv_type type = dev_flow->flow->drv_type;
- assert(type > MLX5_FLOW_TYPE_MIN && type < MLX5_FLOW_TYPE_MAX);
+ MLX5_ASSERT(type > MLX5_FLOW_TYPE_MIN && type < MLX5_FLOW_TYPE_MAX);
fops = flow_get_drv_ops(type);
return fops->translate(dev, dev_flow, attr, items, actions, error);
}
const struct mlx5_flow_driver_ops *fops;
enum mlx5_flow_drv_type type = flow->drv_type;
- assert(type > MLX5_FLOW_TYPE_MIN && type < MLX5_FLOW_TYPE_MAX);
+ MLX5_ASSERT(type > MLX5_FLOW_TYPE_MIN && type < MLX5_FLOW_TYPE_MAX);
fops = flow_get_drv_ops(type);
return fops->apply(dev, flow, error);
}
const struct mlx5_flow_driver_ops *fops;
enum mlx5_flow_drv_type type = flow->drv_type;
- assert(type > MLX5_FLOW_TYPE_MIN && type < MLX5_FLOW_TYPE_MAX);
+ MLX5_ASSERT(type > MLX5_FLOW_TYPE_MIN && type < MLX5_FLOW_TYPE_MAX);
fops = flow_get_drv_ops(type);
fops->remove(dev, flow);
}
enum mlx5_flow_drv_type type = flow->drv_type;
flow_mreg_split_qrss_release(dev, flow);
- assert(type > MLX5_FLOW_TYPE_MIN && type < MLX5_FLOW_TYPE_MAX);
+ MLX5_ASSERT(type > MLX5_FLOW_TYPE_MIN && type < MLX5_FLOW_TYPE_MAX);
fops = flow_get_drv_ops(type);
fops->destroy(dev, flow);
}
static const struct rte_flow_item *
find_port_id_item(const struct rte_flow_item *item)
{
- assert(item);
+ MLX5_ASSERT(item);
for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
if (item->type == RTE_FLOW_ITEM_TYPE_PORT_ID)
return item;
{
int actions_n = 0;
- assert(mtr);
+ MLX5_ASSERT(mtr);
*mtr = 0;
for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
switch (actions->type) {
return NULL;
cp_mreg.src = ret;
/* Check if already registered. */
- assert(priv->mreg_cp_tbl);
+ MLX5_ASSERT(priv->mreg_cp_tbl);
mcp_res = (void *)mlx5_hlist_lookup(priv->mreg_cp_tbl, mark_id);
if (mcp_res) {
/* For non-default rule. */
if (mark_id != MLX5_DEFAULT_COPY_ID)
mcp_res->refcnt++;
- assert(mark_id != MLX5_DEFAULT_COPY_ID || mcp_res->refcnt == 1);
+ MLX5_ASSERT(mark_id != MLX5_DEFAULT_COPY_ID ||
+ mcp_res->refcnt == 1);
return mcp_res;
}
/* Provide the full width of FLAG specific value. */
mcp_res->hlist_ent.key = mark_id;
ret = mlx5_hlist_insert(priv->mreg_cp_tbl,
&mcp_res->hlist_ent);
- assert(!ret);
+ MLX5_ASSERT(!ret);
if (ret)
goto error;
return mcp_res;
if (!mcp_res || !priv->mreg_cp_tbl)
return;
if (flow->copy_applied) {
- assert(mcp_res->appcnt);
+ MLX5_ASSERT(mcp_res->appcnt);
flow->copy_applied = 0;
--mcp_res->appcnt;
if (!mcp_res->appcnt)
*/
if (--mcp_res->refcnt)
return;
- assert(mcp_res->flow);
+ MLX5_ASSERT(mcp_res->flow);
flow_list_destroy(dev, NULL, mcp_res->flow);
mlx5_hlist_remove(priv->mreg_cp_tbl, &mcp_res->hlist_ent);
rte_free(mcp_res);
if (!mcp_res || !flow->copy_applied)
return;
- assert(mcp_res->appcnt);
+ MLX5_ASSERT(mcp_res->appcnt);
--mcp_res->appcnt;
flow->copy_applied = 0;
if (!mcp_res->appcnt)
MLX5_DEFAULT_COPY_ID);
if (!mcp_res)
return;
- assert(mcp_res->flow);
+ MLX5_ASSERT(mcp_res->flow);
flow_list_destroy(dev, NULL, mcp_res->flow);
mlx5_hlist_remove(priv->mreg_cp_tbl, &mcp_res->hlist_ent);
rte_free(mcp_res);
actions_rx++;
set_tag = (void *)actions_rx;
set_tag->id = mlx5_flow_get_reg_id(dev, MLX5_HAIRPIN_RX, 0, NULL);
- assert(set_tag->id > REG_NONE);
+ MLX5_ASSERT(set_tag->id > REG_NONE);
set_tag->data = *flow_id;
tag_action->conf = set_tag;
/* Create Tx item list. */
tag_item = (void *)addr;
tag_item->data = *flow_id;
tag_item->id = mlx5_flow_get_reg_id(dev, MLX5_HAIRPIN_TX, 0, NULL);
- assert(set_tag->id > REG_NONE);
+ MLX5_ASSERT(set_tag->id > REG_NONE);
item->spec = tag_item;
addr += sizeof(struct mlx5_rte_flow_item_tag);
tag_item = (void *)addr;
external, error);
if (ret < 0)
goto exit;
- assert(dev_flow);
+ MLX5_ASSERT(dev_flow);
if (qrss) {
const struct rte_flow_attr q_attr = {
.group = MLX5_FLOW_MREG_ACT_TABLE_GROUP,
*/
if (qrss_id) {
/* Not meter subflow. */
- assert(!mtr_sfx);
+ MLX5_ASSERT(!mtr_sfx);
/*
* Put unique id in prefix flow due to it is destroyed
* after suffix flow and id will be freed after there
external, error);
if (ret < 0)
goto exit;
- assert(dev_flow);
+ MLX5_ASSERT(dev_flow);
dev_flow->hash_fields = hash_fields;
}
ret = flow_create_split_meter(dev, flow, attr, items,
actions, external, error);
- assert(ret <= 0);
+ MLX5_ASSERT(ret <= 0);
return ret;
}
flow->drv_type = flow_get_drv_type(dev, attr);
if (hairpin_id != 0)
flow->hairpin_flow_id = hairpin_id;
- assert(flow->drv_type > MLX5_FLOW_TYPE_MIN &&
- flow->drv_type < MLX5_FLOW_TYPE_MAX);
+ MLX5_ASSERT(flow->drv_type > MLX5_FLOW_TYPE_MIN &&
+ flow->drv_type < MLX5_FLOW_TYPE_MAX);
flow->rss.queue = (void *)(flow + 1);
if (rss) {
/*
items, rss->types,
mlx5_support_expansion,
graph_root);
- assert(ret > 0 &&
+ MLX5_ASSERT(ret > 0 &&
(unsigned int)ret < sizeof(expand_buffer.buffer));
} else {
buf->entries = 1;
hairpin_id);
return NULL;
error:
- assert(flow);
+ MLX5_ASSERT(flow);
flow_mreg_del_copy_action(dev, flow);
ret = rte_errno; /* Save rte_errno before cleanup. */
if (flow->hairpin_flow_id)
mlx5_flow_id_release(priv->sh->flow_id_pool,
flow->hairpin_flow_id);
- assert(flow);
+ MLX5_ASSERT(flow);
flow_drv_destroy(dev, flow);
rte_free(flow);
rte_errno = ret; /* Restore rte_errno. */
const struct mlx5_flow_driver_ops *fops;
enum mlx5_flow_drv_type ftype = flow->drv_type;
- assert(ftype > MLX5_FLOW_TYPE_MIN && ftype < MLX5_FLOW_TYPE_MAX);
+ MLX5_ASSERT(ftype > MLX5_FLOW_TYPE_MIN && ftype < MLX5_FLOW_TYPE_MAX);
fops = flow_get_drv_ops(ftype);
return fops->query(dev, flow, actions, data, error);
struct mlx5_priv *priv = dev->data->dev_private;
struct rte_flow *flow = NULL;
- assert(fdir_flow);
+ MLX5_ASSERT(fdir_flow);
TAILQ_FOREACH(flow, &priv->flows, next) {
if (flow->fdir && !flow_fdir_cmp(flow->fdir, fdir_flow)) {
DRV_LOG(DEBUG, "port %u found FDIR flow %p",
NULL);
if (!flow)
goto error;
- assert(!flow->fdir);
+ MLX5_ASSERT(!flow->fdir);
flow->fdir = fdir_flow;
DRV_LOG(DEBUG, "port %u created FDIR flow %p",
dev->data->port_id, (void *)flow);
uint8_t next_protocol, uint64_t *item_flags,
int *tunnel)
{
- assert(item->type == RTE_FLOW_ITEM_TYPE_IPV4 ||
- item->type == RTE_FLOW_ITEM_TYPE_IPV6);
+ MLX5_ASSERT(item->type == RTE_FLOW_ITEM_TYPE_IPV4 ||
+ item->type == RTE_FLOW_ITEM_TYPE_IPV6);
if (next_protocol == IPPROTO_IPIP) {
*item_flags |= MLX5_FLOW_LAYER_IPIP;
*tunnel = 1;
int ret;
ret = pthread_mutex_lock(&sh->dv_mutex);
- assert(!ret);
+ MLX5_ASSERT(!ret);
(void)ret;
}
}
int ret;
ret = pthread_mutex_unlock(&sh->dv_mutex);
- assert(!ret);
+ MLX5_ASSERT(!ret);
(void)ret;
}
}
ret = rte_be_to_cpu_32(*(const unaligned_uint32_t *)data);
break;
default:
- assert(false);
+ MLX5_ASSERT(false);
ret = 0;
break;
}
* The fields should be presented as in big-endian format either.
* Mask must be always present, it defines the actual field width.
*/
- assert(item->mask);
- assert(field->size);
+ MLX5_ASSERT(item->mask);
+ MLX5_ASSERT(field->size);
do {
unsigned int size_b;
unsigned int off_b;
off_b = rte_bsf32(mask);
size_b = sizeof(uint32_t) * CHAR_BIT -
off_b - __builtin_clz(mask);
- assert(size_b);
+ MLX5_ASSERT(size_b);
size_b = size_b == sizeof(uint32_t) * CHAR_BIT ? 0 : size_b;
actions[i].action_type = type;
actions[i].field = field->id;
/* Convert entire record to expected big-endian format. */
actions[i].data0 = rte_cpu_to_be_32(actions[i].data0);
if (type == MLX5_MODIFICATION_TYPE_COPY) {
- assert(dcopy);
+ MLX5_ASSERT(dcopy);
actions[i].dst_field = dcopy->id;
actions[i].dst_offset =
(int)dcopy->offset < 0 ? off_b : dcopy->offset;
/* Convert entire record to big-endian format. */
actions[i].data1 = rte_cpu_to_be_32(actions[i].data1);
} else {
- assert(item->spec);
+ MLX5_ASSERT(item->spec);
data = flow_dv_fetch_field((const uint8_t *)item->spec +
field->offset, field->size);
/* Shift out the trailing masked bits from data. */
return rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ACTION, NULL,
"too many items to modify");
- assert(conf->id != REG_NONE);
- assert(conf->id < RTE_DIM(reg_to_field));
+ MLX5_ASSERT(conf->id != REG_NONE);
+ MLX5_ASSERT(conf->id < RTE_DIM(reg_to_field));
actions[i].action_type = MLX5_MODIFICATION_TYPE_SET;
actions[i].field = reg_to_field[conf->id];
actions[i].data0 = rte_cpu_to_be_32(actions[i].data0);
ret = mlx5_flow_get_reg_id(dev, MLX5_APP_TAG, conf->index, error);
if (ret < 0)
return ret;
- assert(ret != REG_NONE);
- assert((unsigned int)ret < RTE_DIM(reg_to_field));
+ MLX5_ASSERT(ret != REG_NONE);
+ MLX5_ASSERT((unsigned int)ret < RTE_DIM(reg_to_field));
reg_type = reg_to_field[ret];
- assert(reg_type > 0);
+ MLX5_ASSERT(reg_type > 0);
reg_c_x[0] = (struct field_modify_info){4, 0, reg_type};
return flow_dv_convert_modify_action(&item, reg_c_x, NULL, resource,
MLX5_MODIFICATION_TYPE_SET, error);
struct mlx5_priv *priv = dev->data->dev_private;
uint32_t reg_c0 = priv->sh->dv_regc0_mask;
- assert(reg_c0);
- assert(priv->config.dv_xmeta_en != MLX5_XMETA_MODE_LEGACY);
+ MLX5_ASSERT(reg_c0);
+ MLX5_ASSERT(priv->config.dv_xmeta_en != MLX5_XMETA_MODE_LEGACY);
if (conf->dst == REG_C_0) {
/* Copy to reg_c[0], within mask only. */
reg_dst.offset = rte_bsf32(reg_c0);
reg = mlx5_flow_get_reg_id(dev, MLX5_FLOW_MARK, 0, error);
if (reg < 0)
return reg;
- assert(reg > 0);
+ MLX5_ASSERT(reg > 0);
if (reg == REG_C_0) {
uint32_t msk_c0 = priv->sh->dv_regc0_mask;
uint32_t shl_c0 = rte_bsf32(msk_c0);
uint32_t msk_c0 = priv->sh->dv_regc0_mask;
uint32_t shl_c0;
- assert(msk_c0);
+ MLX5_ASSERT(msk_c0);
#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
shl_c0 = rte_bsf32(msk_c0);
#else
#endif
mask <<= shl_c0;
data <<= shl_c0;
- assert(!(~msk_c0 & rte_cpu_to_be_32(mask)));
+ MLX5_ASSERT(!(~msk_c0 & rte_cpu_to_be_32(mask)));
}
reg_c_x[0] = (struct field_modify_info){4, 0, reg_to_field[reg]};
/* The routine expects parameters in memory as big-endian ones. */
ret = mlx5_flow_get_reg_id(dev, MLX5_APP_TAG, spec->index, error);
if (ret < 0)
return ret;
- assert(ret != REG_NONE);
+ MLX5_ASSERT(ret != REG_NONE);
return 0;
}
ret = mlx5_flow_get_reg_id(dev, MLX5_FLOW_MARK, 0, error);
if (ret < 0)
return ret;
- assert(ret > 0);
+ MLX5_ASSERT(ret > 0);
if (action_flags & MLX5_FLOW_ACTION_MARK)
return rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ACTION, NULL,
ret = mlx5_flow_get_reg_id(dev, MLX5_FLOW_MARK, 0, error);
if (ret < 0)
return ret;
- assert(ret > 0);
+ MLX5_ASSERT(ret > 0);
if (!mark)
return rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ACTION, action,
container_of(tbl, struct mlx5_flow_tbl_data_entry, tbl);
int cnt;
- assert(tbl);
+ MLX5_ASSERT(tbl);
cnt = rte_atomic32_read(&tbl_data->jump.refcnt);
if (!cnt) {
tbl_data->jump.action =
DRV_LOG(DEBUG, "new jump table resource %p: refcnt %d++",
(void *)&tbl_data->jump, cnt);
} else {
- assert(tbl_data->jump.action);
+ MLX5_ASSERT(tbl_data->jump.action);
DRV_LOG(DEBUG, "existed jump table resource %p: refcnt %d++",
(void *)&tbl_data->jump, cnt);
}
MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_7, data);
break;
default:
- assert(false);
+ MLX5_ASSERT(false);
break;
}
}
&rte_flow_item_mark_mask;
mask = mark->id & priv->sh->dv_mark_mask;
mark = (const void *)item->spec;
- assert(mark);
+ MLX5_ASSERT(mark);
value = mark->id & priv->sh->dv_mark_mask & mask;
if (mask) {
enum modify_reg reg;
/* Get the metadata register index for the mark. */
reg = mlx5_flow_get_reg_id(dev, MLX5_FLOW_MARK, 0, NULL);
- assert(reg > 0);
+ MLX5_ASSERT(reg > 0);
if (reg == REG_C_0) {
struct mlx5_priv *priv = dev->data->dev_private;
uint32_t msk_c0 = priv->sh->dv_regc0_mask;
#endif
value <<= shl_c0;
mask <<= shl_c0;
- assert(msk_c0);
- assert(!(~msk_c0 & mask));
+ MLX5_ASSERT(msk_c0);
+ MLX5_ASSERT(!(~msk_c0 & mask));
}
flow_dv_match_meta_reg(matcher, key, reg, value, mask);
}
const struct mlx5_rte_flow_item_tag *tag_m = item->mask;
uint32_t mask, value;
- assert(tag_v);
+ MLX5_ASSERT(tag_v);
value = tag_v->data;
mask = tag_m ? tag_m->data : UINT32_MAX;
if (tag_v->id == REG_C_0) {
const struct rte_flow_item_tag *tag_m = item->mask;
enum modify_reg reg;
- assert(tag_v);
+ MLX5_ASSERT(tag_v);
tag_m = tag_m ? tag_m : &rte_flow_item_tag_mask;
/* Get the metadata register index for the tag. */
reg = mlx5_flow_get_reg_id(dev, MLX5_APP_TAG, tag_v->index, NULL);
- assert(reg > 0);
+ MLX5_ASSERT(reg > 0);
flow_dv_match_meta_reg(matcher, key, reg, tag_v->data, tag_m->data);
}
struct mlx5_priv *priv = dev->data->dev_private;
struct mlx5_ibv_shared *sh = priv->sh;
- assert(tag);
+ MLX5_ASSERT(tag);
DRV_LOG(DEBUG, "port %u tag %p: refcnt %d--",
dev->data->port_id, (void *)tag,
rte_atomic32_read(&tag->refcnt));
action_flags |= MLX5_FLOW_ACTION_DROP;
break;
case RTE_FLOW_ACTION_TYPE_QUEUE:
- assert(flow->rss.queue);
+ MLX5_ASSERT(flow->rss.queue);
queue = actions->conf;
flow->rss.queue_num = 1;
(*flow->rss.queue)[0] = queue->index;
action_flags |= MLX5_FLOW_ACTION_QUEUE;
break;
case RTE_FLOW_ACTION_TYPE_RSS:
- assert(flow->rss.queue);
+ MLX5_ASSERT(flow->rss.queue);
rss = actions->conf;
if (flow->rss.queue)
memcpy((*flow->rss.queue), rss->queue,
break;
case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP:
/* of_vlan_push action handled this action */
- assert(action_flags & MLX5_FLOW_ACTION_OF_PUSH_VLAN);
+ MLX5_ASSERT(action_flags &
+ MLX5_FLOW_ACTION_OF_PUSH_VLAN);
break;
case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID:
if (action_flags & MLX5_FLOW_ACTION_OF_PUSH_VLAN)
match_value, NULL))
return -rte_errno;
}
- assert(!flow_dv_check_valid_spec(matcher.mask.buf,
- dev_flow->dv.value.buf));
+#ifdef RTE_LIBRTE_MLX5_DEBUG
+ MLX5_ASSERT(!flow_dv_check_valid_spec(matcher.mask.buf,
+ dev_flow->dv.value.buf));
+#endif
dev_flow->layers = item_flags;
if (action_flags & MLX5_FLOW_ACTION_RSS)
flow_dv_hashfields_set(dev_flow);
(MLX5_FLOW_ACTION_QUEUE | MLX5_FLOW_ACTION_RSS)) {
struct mlx5_hrxq *hrxq;
- assert(flow->rss.queue);
+ MLX5_ASSERT(flow->rss.queue);
hrxq = mlx5_hrxq_get(dev, flow->rss.key,
MLX5_RSS_HASH_KEY_LEN,
dev_flow->hash_fields,
{
struct mlx5_flow_dv_matcher *matcher = flow->dv.matcher;
- assert(matcher->matcher_object);
+ MLX5_ASSERT(matcher->matcher_object);
DRV_LOG(DEBUG, "port %u matcher %p: refcnt %d--",
dev->data->port_id, (void *)matcher,
rte_atomic32_read(&matcher->refcnt));
struct mlx5_flow_dv_encap_decap_resource *cache_resource =
flow->dv.encap_decap;
- assert(cache_resource->verbs_action);
+ MLX5_ASSERT(cache_resource->verbs_action);
DRV_LOG(DEBUG, "encap/decap resource %p: refcnt %d--",
(void *)cache_resource,
rte_atomic32_read(&cache_resource->refcnt));
container_of(cache_resource,
struct mlx5_flow_tbl_data_entry, jump);
- assert(cache_resource->action);
+ MLX5_ASSERT(cache_resource->action);
DRV_LOG(DEBUG, "jump table resource %p: refcnt %d--",
(void *)cache_resource,
rte_atomic32_read(&cache_resource->refcnt));
struct mlx5_flow_dv_modify_hdr_resource *cache_resource =
flow->dv.modify_hdr;
- assert(cache_resource->verbs_action);
+ MLX5_ASSERT(cache_resource->verbs_action);
DRV_LOG(DEBUG, "modify-header resource %p: refcnt %d--",
(void *)cache_resource,
rte_atomic32_read(&cache_resource->refcnt));
struct mlx5_flow_dv_port_id_action_resource *cache_resource =
flow->dv.port_id_action;
- assert(cache_resource->action);
+ MLX5_ASSERT(cache_resource->action);
DRV_LOG(DEBUG, "port ID action resource %p: refcnt %d--",
(void *)cache_resource,
rte_atomic32_read(&cache_resource->refcnt));
struct mlx5_flow_dv_push_vlan_action_resource *cache_resource =
flow->dv.push_vlan_res;
- assert(cache_resource->action);
+ MLX5_ASSERT(cache_resource->action);
DRV_LOG(DEBUG, "push VLAN action resource %p: refcnt %d--",
(void *)cache_resource,
rte_atomic32_read(&cache_resource->refcnt));
NULL, "Meter object is being used.");
/* Get the meter profile. */
fmp = fm->profile;
- RTE_ASSERT(fmp);
+ MLX5_ASSERT(fmp);
/* Update dependencies. */
fmp->ref_cnt--;
/* Remove from the flow meter list. */
goto error;
}
if (!fm->ref_cnt++) {
- RTE_ASSERT(!fm->mfts->meter_action);
+ MLX5_ASSERT(!fm->mfts->meter_action);
fm->attr = *attr;
/* This also creates the meter object. */
fm->mfts->meter_action = mlx5_flow_meter_action_create(priv,
if (!fm->mfts->meter_action)
goto error_detach;
} else {
- RTE_ASSERT(fm->mfts->meter_action);
+ MLX5_ASSERT(fm->mfts->meter_action);
if (attr->transfer != fm->attr.transfer ||
attr->ingress != fm->attr.ingress ||
attr->egress != fm->attr.egress) {
{
const struct rte_flow_attr attr = { 0 };
- RTE_ASSERT(fm->ref_cnt);
+ MLX5_ASSERT(fm->ref_cnt);
if (--fm->ref_cnt)
return;
if (fm->mfts->meter_action)
TAILQ_FOREACH_SAFE(fm, fms, next, tmp) {
/* Meter object must not have any owner. */
- RTE_ASSERT(!fm->ref_cnt);
+ MLX5_ASSERT(!fm->ref_cnt);
/* Get meter profile. */
fmp = fm->profile;
if (fmp == NULL)
}
TAILQ_FOREACH_SAFE(fmp, fmps, next, tmp) {
/* Check unused. */
- RTE_ASSERT(!fmp->ref_cnt);
+ MLX5_ASSERT(!fmp->ref_cnt);
/* Remove from list. */
TAILQ_REMOVE(&priv->flow_meter_profiles, fmp, next);
rte_free(fmp);
if (!verbs)
return;
- assert(verbs->specs);
+ MLX5_ASSERT(verbs->specs);
dst = (void *)(verbs->specs + verbs->size);
memcpy(dst, src, size);
++verbs->attr->num_of_specs;
} else {
struct mlx5_hrxq *hrxq;
- assert(flow->rss.queue);
+ MLX5_ASSERT(flow->rss.queue);
hrxq = mlx5_hrxq_get(dev, flow->rss.key,
MLX5_RSS_HASH_KEY_LEN,
dev_flow->hash_fields,
*/
#include <stddef.h>
-#include <assert.h>
#include <stdint.h>
#include <string.h>
#include <inttypes.h>
struct mlx5_priv *priv = dev->data->dev_private;
const int vf = priv->config.vf;
- assert(index < MLX5_MAX_MAC_ADDRESSES);
+ MLX5_ASSERT(index < MLX5_MAX_MAC_ADDRESSES);
if (rte_is_zero_ether_addr(&dev->data->mac_addrs[index]))
return;
if (vf)
const int vf = priv->config.vf;
unsigned int i;
- assert(index < MLX5_MAX_MAC_ADDRESSES);
+ MLX5_ASSERT(index < MLX5_MAX_MAC_ADDRESSES);
if (rte_is_zero_ether_addr(mac)) {
rte_errno = EINVAL;
return -rte_errno;
* Copyright 2019 Mellanox Technologies, Ltd
*/
-#include <assert.h>
#include <stdio.h>
#include <time.h>
uint32_t lkey;
int ret;
- assert(rte_eal_process_type() == RTE_PROC_PRIMARY);
+ MLX5_ASSERT(rte_eal_process_type() == RTE_PROC_PRIMARY);
if (!rte_eth_dev_is_valid_port(param->port_id)) {
rte_errno = ENODEV;
DRV_LOG(ERR, "port %u invalid port ID", param->port_id);
struct rte_eth_dev *dev;
int ret;
- assert(rte_eal_process_type() == RTE_PROC_SECONDARY);
+ MLX5_ASSERT(rte_eal_process_type() == RTE_PROC_SECONDARY);
if (!rte_eth_dev_is_valid_port(param->port_id)) {
rte_errno = ENODEV;
DRV_LOG(ERR, "port %u invalid port ID", param->port_id);
int ret;
int i;
- assert(rte_eal_process_type() == RTE_PROC_PRIMARY);
+ MLX5_ASSERT(rte_eal_process_type() == RTE_PROC_PRIMARY);
if (!mlx5_shared_data->secondary_cnt)
return;
if (type != MLX5_MP_REQ_START_RXTX && type != MLX5_MP_REQ_STOP_RXTX) {
struct timespec ts = {.tv_sec = MLX5_MP_REQ_TIMEOUT_SEC, .tv_nsec = 0};
int ret;
- assert(rte_eal_process_type() == RTE_PROC_SECONDARY);
+ MLX5_ASSERT(rte_eal_process_type() == RTE_PROC_SECONDARY);
mp_init_msg(dev, &mp_req, MLX5_MP_REQ_CREATE_MR);
req->args.addr = addr;
ret = rte_mp_request_sync(&mp_req, &mp_rep, &ts);
dev->data->port_id);
return -rte_errno;
}
- assert(mp_rep.nb_received == 1);
+ MLX5_ASSERT(mp_rep.nb_received == 1);
mp_res = &mp_rep.msgs[0];
res = (struct mlx5_mp_param *)mp_res->param;
ret = res->result;
struct timespec ts = {.tv_sec = MLX5_MP_REQ_TIMEOUT_SEC, .tv_nsec = 0};
int ret;
- assert(rte_eal_process_type() == RTE_PROC_SECONDARY);
+ MLX5_ASSERT(rte_eal_process_type() == RTE_PROC_SECONDARY);
mp_init_msg(dev, &mp_req, MLX5_MP_REQ_QUEUE_STATE_MODIFY);
req->args.state_modify = *sm;
ret = rte_mp_request_sync(&mp_req, &mp_rep, &ts);
dev->data->port_id);
return -rte_errno;
}
- assert(mp_rep.nb_received == 1);
+ MLX5_ASSERT(mp_rep.nb_received == 1);
mp_res = &mp_rep.msgs[0];
res = (struct mlx5_mp_param *)mp_res->param;
ret = res->result;
struct timespec ts = {.tv_sec = MLX5_MP_REQ_TIMEOUT_SEC, .tv_nsec = 0};
int ret;
- assert(rte_eal_process_type() == RTE_PROC_SECONDARY);
+ MLX5_ASSERT(rte_eal_process_type() == RTE_PROC_SECONDARY);
mp_init_msg(dev, &mp_req, MLX5_MP_REQ_VERBS_CMD_FD);
ret = rte_mp_request_sync(&mp_req, &mp_rep, &ts);
if (ret) {
dev->data->port_id);
return -rte_errno;
}
- assert(mp_rep.nb_received == 1);
+ MLX5_ASSERT(mp_rep.nb_received == 1);
mp_res = &mp_rep.msgs[0];
res = (struct mlx5_mp_param *)mp_res->param;
if (res->result) {
ret = -rte_errno;
goto exit;
}
- assert(mp_res->num_fds == 1);
+ MLX5_ASSERT(mp_res->num_fds == 1);
ret = mp_res->fds[0];
DRV_LOG(DEBUG, "port %u command FD from primary is %d",
dev->data->port_id, ret);
{
int ret;
- assert(rte_eal_process_type() == RTE_PROC_PRIMARY);
+ MLX5_ASSERT(rte_eal_process_type() == RTE_PROC_PRIMARY);
/* primary is allowed to not support IPC */
ret = rte_mp_action_register(MLX5_MP_NAME, mp_primary_handle);
void
mlx5_mp_uninit_primary(void)
{
- assert(rte_eal_process_type() == RTE_PROC_PRIMARY);
+ MLX5_ASSERT(rte_eal_process_type() == RTE_PROC_PRIMARY);
rte_mp_action_unregister(MLX5_MP_NAME);
}
int
mlx5_mp_init_secondary(void)
{
- assert(rte_eal_process_type() == RTE_PROC_SECONDARY);
+ MLX5_ASSERT(rte_eal_process_type() == RTE_PROC_SECONDARY);
return rte_mp_action_register(MLX5_MP_NAME, mp_secondary_handle);
}
void
mlx5_mp_uninit_secondary(void)
{
- assert(rte_eal_process_type() == RTE_PROC_SECONDARY);
+ MLX5_ASSERT(rte_eal_process_type() == RTE_PROC_SECONDARY);
rte_mp_action_unregister(MLX5_MP_NAME);
}
uint16_t n;
uint16_t base = 0;
- assert(bt != NULL);
+ MLX5_ASSERT(bt != NULL);
lkp_tbl = *bt->table;
n = bt->len;
/* First entry must be NULL for comparison. */
- assert(bt->len > 0 || (lkp_tbl[0].start == 0 &&
- lkp_tbl[0].lkey == UINT32_MAX));
+ MLX5_ASSERT(bt->len > 0 || (lkp_tbl[0].start == 0 &&
+ lkp_tbl[0].lkey == UINT32_MAX));
/* Binary search. */
do {
register uint16_t delta = n >> 1;
n -= delta;
}
} while (n > 1);
- assert(addr >= lkp_tbl[base].start);
+ MLX5_ASSERT(addr >= lkp_tbl[base].start);
*idx = base;
if (addr < lkp_tbl[base].end)
return lkp_tbl[base].lkey;
uint16_t idx = 0;
size_t shift;
- assert(bt != NULL);
- assert(bt->len <= bt->size);
- assert(bt->len > 0);
+ MLX5_ASSERT(bt != NULL);
+ MLX5_ASSERT(bt->len <= bt->size);
+ MLX5_ASSERT(bt->len > 0);
lkp_tbl = *bt->table;
/* Find out the slot for insertion. */
if (mr_btree_lookup(bt, &idx, entry->start) != UINT32_MAX) {
rte_errno = EINVAL;
return -rte_errno;
}
- assert(!bt->table && !bt->size);
+ MLX5_ASSERT(!bt->table && !bt->size);
memset(bt, 0, sizeof(*bt));
bt->table = rte_calloc_socket("B-tree table",
n, sizeof(struct mlx5_mr_cache),
if (mr->msl == NULL) {
struct ibv_mr *ibv_mr = mr->ibv_mr;
- assert(mr->ms_bmp_n == 1);
- assert(mr->ms_n == 1);
- assert(base_idx == 0);
+ MLX5_ASSERT(mr->ms_bmp_n == 1);
+ MLX5_ASSERT(mr->ms_n == 1);
+ MLX5_ASSERT(base_idx == 0);
/*
* Can't search it from memseg list but get it directly from
* verbs MR as there's only one chunk.
msl = mr->msl;
ms = rte_fbarray_get(&msl->memseg_arr,
mr->ms_base_idx + idx);
- assert(msl->page_sz == ms->hugepage_sz);
+ MLX5_ASSERT(msl->page_sz == ms->hugepage_sz);
if (!start)
start = ms->addr_64;
end = ms->addr_64 + ms->hugepage_sz;
if (mr != NULL)
lkey = entry->lkey;
}
- assert(lkey == UINT32_MAX || (addr >= entry->start &&
- addr < entry->end));
+ MLX5_ASSERT(lkey == UINT32_MAX || (addr >= entry->start &&
+ addr < entry->end));
return lkey;
}
struct mlx5_mr_list free_list = LIST_HEAD_INITIALIZER(free_list);
/* Must be called from the primary process. */
- assert(rte_eal_process_type() == RTE_PROC_PRIMARY);
+ MLX5_ASSERT(rte_eal_process_type() == RTE_PROC_PRIMARY);
/*
* MR can't be freed with holding the lock because rte_free() could call
* memory free callback function. This will be a deadlock situation.
/* Fill in output data. */
mr_lookup_dev(priv->sh, entry, addr);
/* Lookup can't fail. */
- assert(entry->lkey != UINT32_MAX);
+ MLX5_ASSERT(entry->lkey != UINT32_MAX);
rte_rwlock_read_unlock(&priv->sh->mr.rwlock);
DEBUG("port %u MR CREATED by primary process for %p:\n"
" [0x%" PRIxPTR ", 0x%" PRIxPTR "), lkey=0x%x",
}
alloc_resources:
/* Addresses must be page-aligned. */
- assert(rte_is_aligned((void *)data.start, data.msl->page_sz));
- assert(rte_is_aligned((void *)data.end, data.msl->page_sz));
+ MLX5_ASSERT(rte_is_aligned((void *)data.start, data.msl->page_sz));
+ MLX5_ASSERT(rte_is_aligned((void *)data.end, data.msl->page_sz));
msl = data.msl;
ms = rte_mem_virt2memseg((void *)data.start, msl);
len = data.end - data.start;
- assert(msl->page_sz == ms->hugepage_sz);
+ MLX5_ASSERT(msl->page_sz == ms->hugepage_sz);
/* Number of memsegs in the range. */
ms_n = len / msl->page_sz;
DEBUG("port %u extending %p to [0x%" PRIxPTR ", 0x%" PRIxPTR "),"
mr_free(mr);
goto alloc_resources;
}
- assert(data.msl == data_re.msl);
+ MLX5_ASSERT(data.msl == data_re.msl);
rte_rwlock_write_lock(&sh->mr.rwlock);
/*
* Check the address is really missing. If other thread already created
}
len = data.end - data.start;
mr->ms_bmp_n = len / msl->page_sz;
- assert(ms_idx_shift + mr->ms_bmp_n <= ms_n);
+ MLX5_ASSERT(ms_idx_shift + mr->ms_bmp_n <= ms_n);
/*
* Finally create a verbs MR for the memory chunk. ibv_reg_mr() can be
* called with holding the memory lock because it doesn't use
rte_errno = EINVAL;
goto err_mrlock;
}
- assert((uintptr_t)mr->ibv_mr->addr == data.start);
- assert(mr->ibv_mr->length == len);
+ MLX5_ASSERT((uintptr_t)mr->ibv_mr->addr == data.start);
+ MLX5_ASSERT(mr->ibv_mr->length == len);
LIST_INSERT_HEAD(&sh->mr.mr_list, mr, mr);
DEBUG("port %u MR CREATED (%p) for %p:\n"
" [0x%" PRIxPTR ", 0x%" PRIxPTR "),"
/* Fill in output data. */
mr_lookup_dev(sh, entry, addr);
/* Lookup can't fail. */
- assert(entry->lkey != UINT32_MAX);
+ MLX5_ASSERT(entry->lkey != UINT32_MAX);
rte_rwlock_write_unlock(&sh->mr.rwlock);
rte_mcfg_mem_read_unlock();
return entry->lkey;
sh->ibdev_name, addr, len);
msl = rte_mem_virt2memseg_list(addr);
/* addr and len must be page-aligned. */
- assert((uintptr_t)addr == RTE_ALIGN((uintptr_t)addr, msl->page_sz));
- assert(len == RTE_ALIGN(len, msl->page_sz));
+ MLX5_ASSERT((uintptr_t)addr ==
+ RTE_ALIGN((uintptr_t)addr, msl->page_sz));
+ MLX5_ASSERT(len == RTE_ALIGN(len, msl->page_sz));
ms_n = len / msl->page_sz;
rte_rwlock_write_lock(&sh->mr.rwlock);
/* Clear bits of freed memsegs from MR. */
mr = mr_lookup_dev_list(sh, &entry, start);
if (mr == NULL)
continue;
- assert(mr->msl); /* Can't be external memory. */
+ MLX5_ASSERT(mr->msl); /* Can't be external memory. */
ms = rte_mem_virt2memseg((void *)start, msl);
- assert(ms != NULL);
- assert(msl->page_sz == ms->hugepage_sz);
+ MLX5_ASSERT(ms != NULL);
+ MLX5_ASSERT(msl->page_sz == ms->hugepage_sz);
ms_idx = rte_fbarray_find_idx(&msl->memseg_arr, ms);
pos = ms_idx - mr->ms_base_idx;
- assert(rte_bitmap_get(mr->ms_bmp, pos));
- assert(pos < mr->ms_bmp_n);
+ MLX5_ASSERT(rte_bitmap_get(mr->ms_bmp, pos));
+ MLX5_ASSERT(pos < mr->ms_bmp_n);
DEBUG("device %s MR(%p): clear bitmap[%u] for addr %p",
sh->ibdev_name, (void *)mr, pos, (void *)start);
rte_bitmap_clear(mr->ms_bmp, pos);
struct mlx5_dev_list *dev_list = &mlx5_shared_data->mem_event_cb_list;
/* Must be called from the primary process. */
- assert(rte_eal_process_type() == RTE_PROC_PRIMARY);
+ MLX5_ASSERT(rte_eal_process_type() == RTE_PROC_PRIMARY);
switch (event_type) {
case RTE_MEM_EVENT_FREE:
rte_rwlock_write_lock(&mlx5_shared_data->mem_event_rwlock);
struct mlx5_mr_cache entry;
uint32_t lkey;
- assert(rte_eal_process_type() == RTE_PROC_PRIMARY);
+ MLX5_ASSERT(rte_eal_process_type() == RTE_PROC_PRIMARY);
/* If already registered, it should return. */
rte_rwlock_read_lock(&sh->mr.rwlock);
lkey = mr_lookup_dev(sh, &entry, addr);
#include <stdint.h>
#include <errno.h>
#include <string.h>
-#include <assert.h>
/* Verbs header. */
/* ISO C doesn't support unnamed structs/unions, disabling -pedantic. */
pos = i % RTE_RETA_GROUP_SIZE;
if (((reta_conf[idx].mask >> i) & 0x1) == 0)
continue;
- assert(reta_conf[idx].reta[pos] < priv->rxqs_n);
+ MLX5_ASSERT(reta_conf[idx].reta[pos] < priv->rxqs_n);
(*priv->reta_idx)[i] = reta_conf[idx].reta[pos];
}
if (dev->data->dev_started) {
*/
#include <stddef.h>
-#include <assert.h>
#include <errno.h>
#include <string.h>
#include <stdint.h>
++n;
}
/* Multi-Packet RQ can't be partially configured. */
- assert(n == 0 || n == n_ibv);
+ MLX5_ASSERT(n == 0 || n == n_ibv);
return n == n_ibv;
}
goto error;
}
/* Headroom is reserved by rte_pktmbuf_alloc(). */
- assert(DATA_OFF(buf) == RTE_PKTMBUF_HEADROOM);
+ MLX5_ASSERT(DATA_OFF(buf) == RTE_PKTMBUF_HEADROOM);
/* Buffer is supposed to be empty. */
- assert(rte_pktmbuf_data_len(buf) == 0);
- assert(rte_pktmbuf_pkt_len(buf) == 0);
- assert(!buf->next);
+ MLX5_ASSERT(rte_pktmbuf_data_len(buf) == 0);
+ MLX5_ASSERT(rte_pktmbuf_pkt_len(buf) == 0);
+ MLX5_ASSERT(!buf->next);
/* Only the first segment keeps headroom. */
if (i % sges_n)
SET_DATA_OFF(buf, 0);
rxq->port_id, rxq->idx);
if (rxq->mprq_bufs == NULL)
return;
- assert(mlx5_rxq_check_vec_support(rxq) < 0);
+ MLX5_ASSERT(mlx5_rxq_check_vec_support(rxq) < 0);
for (i = 0; (i != (1u << rxq->elts_n)); ++i) {
if ((*rxq->mprq_bufs)[i] != NULL)
mlx5_mprq_buf_free((*rxq->mprq_bufs)[i]);
{
struct mlx5_devx_modify_rq_attr rq_attr = { 0 };
- assert(rxq_obj);
+ MLX5_ASSERT(rxq_obj);
rq_attr.state = MLX5_RQC_STATE_RST;
rq_attr.rq_state = MLX5_RQC_STATE_RDY;
mlx5_devx_cmd_modify_rq(rxq_obj->rq, &rq_attr);
static int
mlx5_rxq_obj_release(struct mlx5_rxq_obj *rxq_obj)
{
- assert(rxq_obj);
+ MLX5_ASSERT(rxq_obj);
if (rte_atomic32_dec_and_test(&rxq_obj->refcnt)) {
switch (rxq_obj->type) {
case MLX5_RXQ_OBJ_TYPE_IBV:
- assert(rxq_obj->wq);
- assert(rxq_obj->cq);
+ MLX5_ASSERT(rxq_obj->wq);
+ MLX5_ASSERT(rxq_obj->cq);
rxq_free_elts(rxq_obj->rxq_ctrl);
claim_zero(mlx5_glue->destroy_wq(rxq_obj->wq));
claim_zero(mlx5_glue->destroy_cq(rxq_obj->cq));
break;
case MLX5_RXQ_OBJ_TYPE_DEVX_RQ:
- assert(rxq_obj->cq);
- assert(rxq_obj->rq);
+ MLX5_ASSERT(rxq_obj->cq);
+ MLX5_ASSERT(rxq_obj->rq);
rxq_free_elts(rxq_obj->rxq_ctrl);
claim_zero(mlx5_devx_cmd_destroy(rxq_obj->rq));
rxq_release_rq_resources(rxq_obj->rxq_ctrl);
claim_zero(mlx5_glue->destroy_cq(rxq_obj->cq));
break;
case MLX5_RXQ_OBJ_TYPE_DEVX_HAIRPIN:
- assert(rxq_obj->rq);
+ MLX5_ASSERT(rxq_obj->rq);
rxq_obj_hairpin_release(rxq_obj);
break;
}
struct mlx5_rxq_obj *tmpl = NULL;
int ret = 0;
- assert(rxq_data);
- assert(!rxq_ctrl->obj);
+ MLX5_ASSERT(rxq_data);
+ MLX5_ASSERT(!rxq_ctrl->obj);
tmpl = rte_calloc_socket(__func__, 1, sizeof(*tmpl), 0,
rxq_ctrl->socket);
if (!tmpl) {
int ret = 0;
struct mlx5dv_obj obj;
- assert(rxq_data);
- assert(!rxq_ctrl->obj);
+ MLX5_ASSERT(rxq_data);
+ MLX5_ASSERT(!rxq_ctrl->obj);
if (type == MLX5_RXQ_OBJ_TYPE_DEVX_HAIRPIN)
return mlx5_rxq_obj_hairpin_new(dev, idx);
priv->verbs_alloc_ctx.type = MLX5_VERBS_ALLOC_TYPE_RX_QUEUE;
if (strd_sz_n < rxq->strd_sz_n)
strd_sz_n = rxq->strd_sz_n;
}
- assert(strd_num_n && strd_sz_n);
+ MLX5_ASSERT(strd_num_n && strd_sz_n);
buf_len = (1 << strd_num_n) * (1 << strd_sz_n);
obj_size = sizeof(struct mlx5_mprq_buf) + buf_len + (1 << strd_num_n) *
sizeof(struct rte_mbuf_ext_shared_info) + RTE_PKTMBUF_HEADROOM;
MLX5_MAX_TCP_HDR_OFFSET)
max_lro_size -= MLX5_MAX_TCP_HDR_OFFSET;
max_lro_size = RTE_MIN(max_lro_size, MLX5_MAX_LRO_SIZE);
- assert(max_lro_size >= MLX5_LRO_SEG_CHUNK_SIZE);
+ MLX5_ASSERT(max_lro_size >= MLX5_LRO_SEG_CHUNK_SIZE);
max_lro_size /= MLX5_LRO_SEG_CHUNK_SIZE;
if (priv->max_lro_msg_size)
priv->max_lro_msg_size =
if (!(*priv->rxqs)[idx])
return 0;
rxq_ctrl = container_of((*priv->rxqs)[idx], struct mlx5_rxq_ctrl, rxq);
- assert(rxq_ctrl->priv);
+ MLX5_ASSERT(rxq_ctrl->priv);
if (rxq_ctrl->obj && !mlx5_rxq_obj_release(rxq_ctrl->obj))
rxq_ctrl->obj = NULL;
if (rte_atomic32_dec_and_test(&rxq_ctrl->refcnt)) {
* Copyright 2015-2019 Mellanox Technologies, Ltd
*/
-#include <assert.h>
#include <stdint.h>
#include <string.h>
#include <stdlib.h>
byte_count = DATA_LEN(buf);
}
/* scat->addr must be able to store a pointer. */
- assert(sizeof(scat->addr) >= sizeof(uintptr_t));
+ MLX5_ASSERT(sizeof(scat->addr) >= sizeof(uintptr_t));
*scat = (struct mlx5_wqe_data_seg){
.addr = rte_cpu_to_be_64(addr),
.byte_count = rte_cpu_to_be_32(byte_count),
break;
}
while (pkt != seg) {
- assert(pkt != (*rxq->elts)[idx]);
+ MLX5_ASSERT(pkt != (*rxq->elts)[idx]);
rep = NEXT(pkt);
NEXT(pkt) = NULL;
NB_SEGS(pkt) = 1;
break;
}
pkt = seg;
- assert(len >= (rxq->crc_present << 2));
+ MLX5_ASSERT(len >= (rxq->crc_present << 2));
pkt->ol_flags &= EXT_ATTACHED_MBUF;
/* If compressed, take hash result from mini-CQE. */
rss_hash_res = rte_be_to_cpu_32(mcqe == NULL ?
&((volatile struct mlx5_wqe_mprq *)rxq->wqes)[rq_idx].dseg;
void *addr;
- assert(rep != NULL);
+ MLX5_ASSERT(rep != NULL);
/* Replace MPRQ buf. */
(*rxq->mprq_bufs)[rq_idx] = rep;
/* Replace WQE. */
byte_cnt = ret;
strd_cnt = (byte_cnt & MLX5_MPRQ_STRIDE_NUM_MASK) >>
MLX5_MPRQ_STRIDE_NUM_SHIFT;
- assert(strd_cnt);
+ MLX5_ASSERT(strd_cnt);
consumed_strd += strd_cnt;
if (byte_cnt & MLX5_MPRQ_FILLER_MASK)
continue;
/* mini-CQE for MPRQ doesn't have hash result. */
strd_idx = rte_be_to_cpu_16(mcqe->stride_idx);
}
- assert(strd_idx < strd_n);
- assert(!((rte_be_to_cpu_16(cqe->wqe_id) ^ rq_ci) & wq_mask));
+ MLX5_ASSERT(strd_idx < strd_n);
+ MLX5_ASSERT(!((rte_be_to_cpu_16(cqe->wqe_id) ^ rq_ci) &
+ wq_mask));
lro_num_seg = cqe->lro_num_seg;
/*
* Currently configured to receive a packet per a stride. But if
break;
}
len = (byte_cnt & MLX5_MPRQ_LEN_MASK) >> MLX5_MPRQ_LEN_SHIFT;
- assert((int)len >= (rxq->crc_present << 2));
+ MLX5_ASSERT((int)len >= (rxq->crc_present << 2));
if (rxq->crc_present)
len -= RTE_ETHER_CRC_LEN;
offset = strd_idx * strd_sz + strd_shift;
/* Increment the refcnt of the whole chunk. */
rte_atomic16_add_return(&buf->refcnt, 1);
- assert((uint16_t)rte_atomic16_read(&buf->refcnt) <=
- strd_n + 1);
+ MLX5_ASSERT((uint16_t)rte_atomic16_read(&buf->refcnt) <=
+ strd_n + 1);
buf_addr = RTE_PTR_SUB(addr, headroom_sz);
/*
* MLX5 device doesn't use iova but it is necessary in a
buf_len, shinfo);
/* Set mbuf head-room. */
pkt->data_off = headroom_sz;
- assert(pkt->ol_flags == EXT_ATTACHED_MBUF);
+ MLX5_ASSERT(pkt->ol_flags == EXT_ATTACHED_MBUF);
/*
* Prevent potential overflow due to MTU change through
* kernel interface.
* copying pointers to temporary array
* for rte_mempool_put_bulk() calls.
*/
- assert(pkts);
- assert(pkts_n);
+ MLX5_ASSERT(pkts);
+ MLX5_ASSERT(pkts_n);
for (;;) {
for (;;) {
/*
*/
mbuf = rte_pktmbuf_prefree_seg(*pkts);
if (likely(mbuf != NULL)) {
- assert(mbuf == *pkts);
+ MLX5_ASSERT(mbuf == *pkts);
if (likely(n_free != 0)) {
if (unlikely(pool != mbuf->pool))
/* From different pool. */
* This loop is implemented to avoid multiple
* inlining of rte_mempool_put_bulk().
*/
- assert(pool);
- assert(p_free);
- assert(n_free);
+ MLX5_ASSERT(pool);
+ MLX5_ASSERT(p_free);
+ MLX5_ASSERT(n_free);
/*
* Free the array of pre-freed mbufs
* belonging to the same memory pool.
{
uint16_t n_elts = tail - txq->elts_tail;
- assert(n_elts);
- assert(n_elts <= txq->elts_s);
+ MLX5_ASSERT(n_elts);
+ MLX5_ASSERT(n_elts <= txq->elts_s);
/*
* Implement a loop to support ring buffer wraparound
* with single inlining of mlx5_tx_free_mbuf().
part = txq->elts_s - (txq->elts_tail & txq->elts_m);
part = RTE_MIN(part, n_elts);
- assert(part);
- assert(part <= txq->elts_s);
+ MLX5_ASSERT(part);
+ MLX5_ASSERT(part <= txq->elts_s);
mlx5_tx_free_mbuf(&txq->elts[txq->elts_tail & txq->elts_m],
part, olx);
txq->elts_tail += part;
unsigned int part;
struct rte_mbuf **elts = (struct rte_mbuf **)txq->elts;
- assert(pkts);
- assert(pkts_n);
+ MLX5_ASSERT(pkts);
+ MLX5_ASSERT(pkts_n);
part = txq->elts_s - (txq->elts_head & txq->elts_m);
- assert(part);
- assert(part <= txq->elts_s);
+ MLX5_ASSERT(part);
+ MLX5_ASSERT(part <= txq->elts_s);
/* This code is a good candidate for vectorizing with SIMD. */
rte_memcpy((void *)(elts + (txq->elts_head & txq->elts_m)),
(void *)pkts,
tail = txq->fcqs[(txq->cq_ci - 1) & txq->cqe_m];
if (likely(tail != txq->elts_tail)) {
mlx5_tx_free_elts(txq, tail, olx);
- assert(tail == txq->elts_tail);
+ MLX5_ASSERT(tail == txq->elts_tail);
}
}
}
if (unlikely(ret != MLX5_CQE_STATUS_SW_OWN)) {
if (likely(ret != MLX5_CQE_STATUS_ERR)) {
/* No new CQEs in completion queue. */
- assert(ret == MLX5_CQE_STATUS_HW_OWN);
+ MLX5_ASSERT(ret == MLX5_CQE_STATUS_HW_OWN);
break;
}
/*
continue;
}
/* Normal transmit completion. */
- assert(ci != txq->cq_pi);
- assert((txq->fcqs[ci & txq->cqe_m] >> 16) == cqe->wqe_counter);
+ MLX5_ASSERT(ci != txq->cq_pi);
+ MLX5_ASSERT((txq->fcqs[ci & txq->cqe_m] >> 16) ==
+ cqe->wqe_counter);
++ci;
last_cqe = cqe;
/*
txq->fcqs[txq->cq_pi++ & txq->cqe_m] = head;
#endif
/* A CQE slot must always be available. */
- assert((txq->cq_pi - txq->cq_ci) <= txq->cqe_s);
+ MLX5_ASSERT((txq->cq_pi - txq->cq_ci) <= txq->cqe_s);
}
}
* We should get here only if device support
* this feature correctly.
*/
- assert(txq->vlan_en);
+ MLX5_ASSERT(txq->vlan_en);
es->inline_hdr = rte_cpu_to_be_32(MLX5_ETH_WQE_VLAN_INSERT |
loc->mbuf->vlan_tci);
} else {
loc->mbuf->vlan_tci);
pdst += sizeof(struct rte_vlan_hdr);
/* Copy the rest two bytes from packet data. */
- assert(pdst == RTE_PTR_ALIGN(pdst, sizeof(uint16_t)));
+ MLX5_ASSERT(pdst == RTE_PTR_ALIGN(pdst, sizeof(uint16_t)));
*(uint16_t *)pdst = *(unaligned_uint16_t *)psrc;
} else {
/* Fill the gap in the title WQEBB with inline data. */
loc->mbuf->vlan_tci);
pdst += sizeof(struct rte_vlan_hdr);
/* Copy the rest two bytes from packet data. */
- assert(pdst == RTE_PTR_ALIGN(pdst, sizeof(uint16_t)));
+ MLX5_ASSERT(pdst == RTE_PTR_ALIGN(pdst, sizeof(uint16_t)));
*(uint16_t *)pdst = *(unaligned_uint16_t *)psrc;
psrc += sizeof(uint16_t);
} else {
psrc += sizeof(rte_v128u32_t);
}
pdst = (uint8_t *)(es + 2);
- assert(inlen >= MLX5_ESEG_MIN_INLINE_SIZE);
- assert(pdst < (uint8_t *)txq->wqes_end);
+ MLX5_ASSERT(inlen >= MLX5_ESEG_MIN_INLINE_SIZE);
+ MLX5_ASSERT(pdst < (uint8_t *)txq->wqes_end);
inlen -= MLX5_ESEG_MIN_INLINE_SIZE;
if (!inlen) {
- assert(pdst == RTE_PTR_ALIGN(pdst, MLX5_WSEG_SIZE));
+ MLX5_ASSERT(pdst == RTE_PTR_ALIGN(pdst, MLX5_WSEG_SIZE));
return (struct mlx5_wqe_dseg *)pdst;
}
/*
unsigned int part, dlen, copy = 0;
uint8_t *psrc;
- assert(len);
- assert(must <= len);
+ MLX5_ASSERT(len);
+ MLX5_ASSERT(must <= len);
do {
/* Allow zero length packets, must check first. */
dlen = rte_pktmbuf_data_len(loc->mbuf);
loc->mbuf = mbuf->next;
rte_pktmbuf_free_seg(mbuf);
loc->mbuf_off = 0;
- assert(loc->mbuf_nseg > 1);
- assert(loc->mbuf);
+ MLX5_ASSERT(loc->mbuf_nseg > 1);
+ MLX5_ASSERT(loc->mbuf);
--loc->mbuf_nseg;
if (loc->mbuf->ol_flags & PKT_TX_DYNF_NOINLINE) {
unsigned int diff;
loc->mbuf = mbuf->next;
rte_pktmbuf_free_seg(mbuf);
loc->mbuf_off = 0;
- assert(loc->mbuf_nseg >= 1);
+ MLX5_ASSERT(loc->mbuf_nseg >= 1);
--loc->mbuf_nseg;
}
return copy;
sizeof(struct rte_vlan_hdr) +
2 * RTE_ETHER_ADDR_LEN),
"invalid Ethernet Segment data size");
- assert(inlen >= MLX5_ESEG_MIN_INLINE_SIZE);
+ MLX5_ASSERT(inlen >= MLX5_ESEG_MIN_INLINE_SIZE);
pdst = (uint8_t *)&es->inline_data;
if (MLX5_TXOFF_CONFIG(VLAN) && vlan) {
/* Implement VLAN tag insertion as part inline data. */
pdst += sizeof(struct rte_vlan_hdr);
tlen += 2 * RTE_ETHER_ADDR_LEN + sizeof(struct rte_vlan_hdr);
}
- assert(pdst < (uint8_t *)txq->wqes_end);
+ MLX5_ASSERT(pdst < (uint8_t *)txq->wqes_end);
/*
* The WQEBB space availability is checked by caller.
* Here we should be aware of WQE ring buffer wraparound only.
*/
part = (uint8_t *)txq->wqes_end - pdst;
part = RTE_MIN(part, inlen - tlen);
- assert(part);
+ MLX5_ASSERT(part);
do {
unsigned int copy;
unsigned int olx __rte_unused)
{
- assert(len);
+ MLX5_ASSERT(len);
dseg->bcount = rte_cpu_to_be_32(len);
dseg->lkey = mlx5_tx_mb2mr(txq, loc->mbuf);
dseg->pbuf = rte_cpu_to_be_64((uintptr_t)buf);
{
uintptr_t dst, src;
- assert(len);
+ MLX5_ASSERT(len);
if (len > MLX5_DSEG_MIN_INLINE_SIZE) {
dseg->bcount = rte_cpu_to_be_32(len);
dseg->lkey = mlx5_tx_mb2mr(txq, loc->mbuf);
src = (uintptr_t)buf;
if (len & 0x08) {
#ifdef RTE_ARCH_STRICT_ALIGN
- assert(dst == RTE_PTR_ALIGN(dst, sizeof(uint32_t)));
+ MLX5_ASSERT(dst == RTE_PTR_ALIGN(dst, sizeof(uint32_t)));
*(uint32_t *)dst = *(unaligned_uint32_t *)src;
dst += sizeof(uint32_t);
src += sizeof(uint32_t);
unsigned int part;
uint8_t *pdst;
- assert(len > MLX5_ESEG_MIN_INLINE_SIZE);
+ MLX5_ASSERT(len > MLX5_ESEG_MIN_INLINE_SIZE);
static_assert(MLX5_DSEG_MIN_INLINE_SIZE ==
(2 * RTE_ETHER_ADDR_LEN),
"invalid Data Segment data size");
pdst += MLX5_DSEG_MIN_INLINE_SIZE;
len -= MLX5_DSEG_MIN_INLINE_SIZE;
/* Insert VLAN ethertype + VLAN tag. Pointer is aligned. */
- assert(pdst == RTE_PTR_ALIGN(pdst, MLX5_WSEG_SIZE));
+ MLX5_ASSERT(pdst == RTE_PTR_ALIGN(pdst, MLX5_WSEG_SIZE));
if (unlikely(pdst >= (uint8_t *)txq->wqes_end))
pdst = (uint8_t *)txq->wqes;
*(uint32_t *)pdst = rte_cpu_to_be_32((RTE_ETHER_TYPE_VLAN << 16) |
struct mlx5_wqe_dseg *restrict dseg;
unsigned int ds;
- assert((rte_pktmbuf_pkt_len(loc->mbuf) + vlan) >= inlen);
+ MLX5_ASSERT((rte_pktmbuf_pkt_len(loc->mbuf) + vlan) >= inlen);
loc->mbuf_nseg = NB_SEGS(loc->mbuf);
loc->mbuf_off = 0;
* A non-zero offset means there is some data
* remaining in the packet.
*/
- assert(loc->mbuf_off < rte_pktmbuf_data_len(loc->mbuf));
- assert(rte_pktmbuf_data_len(loc->mbuf));
+ MLX5_ASSERT(loc->mbuf_off < rte_pktmbuf_data_len(loc->mbuf));
+ MLX5_ASSERT(rte_pktmbuf_data_len(loc->mbuf));
dptr = rte_pktmbuf_mtod_offset(loc->mbuf, uint8_t *,
loc->mbuf_off);
dlen = rte_pktmbuf_data_len(loc->mbuf) - loc->mbuf_off;
dseg = (struct mlx5_wqe_dseg *)txq->wqes;
mlx5_tx_dseg_iptr(txq, loc, dseg, dptr, dlen, olx);
/* Store the mbuf to be freed on completion. */
- assert(loc->elts_free);
+ MLX5_ASSERT(loc->elts_free);
txq->elts[txq->elts_head++ & txq->elts_m] = loc->mbuf;
--loc->elts_free;
++dseg;
(txq, loc, dseg,
rte_pktmbuf_mtod(loc->mbuf, uint8_t *),
rte_pktmbuf_data_len(loc->mbuf), olx);
- assert(loc->elts_free);
+ MLX5_ASSERT(loc->elts_free);
txq->elts[txq->elts_head++ & txq->elts_m] = loc->mbuf;
--loc->elts_free;
++dseg;
inlen <= MLX5_ESEG_MIN_INLINE_SIZE ||
inlen > (dlen + vlan)))
return MLX5_TXCMP_CODE_ERROR;
- assert(inlen >= txq->inlen_mode);
+ MLX5_ASSERT(inlen >= txq->inlen_mode);
/*
* Check whether there are enough free WQEBBs:
* - Control Segment
struct mlx5_wqe *restrict wqe;
unsigned int ds, nseg;
- assert(NB_SEGS(loc->mbuf) > 1);
+ MLX5_ASSERT(NB_SEGS(loc->mbuf) > 1);
/*
* No inline at all, it means the CPU cycles saving
* is prioritized at configuration, we should not
struct mlx5_wqe *restrict wqe;
unsigned int ds, inlen, dlen, vlan = 0;
- assert(MLX5_TXOFF_CONFIG(INLINE));
- assert(NB_SEGS(loc->mbuf) > 1);
+ MLX5_ASSERT(MLX5_TXOFF_CONFIG(INLINE));
+ MLX5_ASSERT(NB_SEGS(loc->mbuf) > 1);
/*
* First calculate data length to be inlined
* to estimate the required space for WQE.
/* Check against minimal length. */
if (inlen <= MLX5_ESEG_MIN_INLINE_SIZE)
return MLX5_TXCMP_CODE_ERROR;
- assert(txq->inlen_send >= MLX5_ESEG_MIN_INLINE_SIZE);
+ MLX5_ASSERT(txq->inlen_send >= MLX5_ESEG_MIN_INLINE_SIZE);
if (inlen > txq->inlen_send ||
loc->mbuf->ol_flags & PKT_TX_DYNF_NOINLINE) {
struct rte_mbuf *mbuf;
* inlining is required.
*/
if (txq->inlen_mode) {
- assert(txq->inlen_mode >= MLX5_ESEG_MIN_INLINE_SIZE);
- assert(txq->inlen_mode <= txq->inlen_send);
+ MLX5_ASSERT(txq->inlen_mode >=
+ MLX5_ESEG_MIN_INLINE_SIZE);
+ MLX5_ASSERT(txq->inlen_mode <= txq->inlen_send);
inlen = txq->inlen_mode;
} else {
if (loc->mbuf->ol_flags & PKT_TX_DYNF_NOINLINE ||
do {
smlen = nxlen;
mbuf = NEXT(mbuf);
- assert(mbuf);
+ MLX5_ASSERT(mbuf);
nxlen = rte_pktmbuf_data_len(mbuf);
nxlen += smlen;
} while (unlikely(nxlen < inlen));
inlen = nxlen;
mbuf = NEXT(mbuf);
/* This must not be the end of the packet. */
- assert(mbuf);
+ MLX5_ASSERT(mbuf);
nxlen = inlen + rte_pktmbuf_data_len(mbuf);
} while (unlikely(nxlen < txq->inlen_send));
}
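
The do/while loops above walk the mbuf chain via NEXT() and rte_pktmbuf_data_len(), accumulating nxlen until the requested inline length is covered. A self-contained sketch of that scan, with the mbuf API replaced by a plain linked list (all names invented):

#include <stddef.h>
#include <stdio.h>

struct seg {
        struct seg *next;
        unsigned int data_len;
};

/* Accumulate segment lengths until "target" bytes are covered or the
 * chain ends; mirrors the nxlen accumulation in the hunks above. */
static unsigned int
scan_inline_len(const struct seg *s, unsigned int target)
{
        unsigned int len = 0;

        while (s != NULL && len < target) {
                len += s->data_len;
                s = s->next;
        }
        return len;
}

int
main(void)
{
        struct seg c = { NULL, 300 };
        struct seg b = { &c, 200 };
        struct seg a = { &b, 100 };

        printf("%u\n", scan_inline_len(&a, 550));       /* 600 */
        return 0;
}
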
* Estimate the number of Data Segments conservatively,
* supposing that no mbufs are freed during inlining.
*/
- assert(inlen <= txq->inlen_send);
+ MLX5_ASSERT(inlen <= txq->inlen_send);
ds = NB_SEGS(loc->mbuf) + 2 + (inlen -
MLX5_ESEG_MIN_INLINE_SIZE +
MLX5_WSEG_SIZE +
struct mlx5_txq_local *restrict loc,
unsigned int olx)
{
- assert(loc->elts_free && loc->wqe_free);
- assert(pkts_n > loc->pkts_sent);
+ MLX5_ASSERT(loc->elts_free && loc->wqe_free);
+ MLX5_ASSERT(pkts_n > loc->pkts_sent);
pkts += loc->pkts_sent + 1;
pkts_n -= loc->pkts_sent;
for (;;) {
enum mlx5_txcmp_code ret;
- assert(NB_SEGS(loc->mbuf) > 1);
+ MLX5_ASSERT(NB_SEGS(loc->mbuf) > 1);
/*
* Estimate the number of free elts quickly but
* conservatively. Some segment may be fully inlined
return MLX5_TXCMP_CODE_TSO;
return MLX5_TXCMP_CODE_SINGLE;
}
- assert(false);
+ MLX5_ASSERT(false);
}
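
Several converted assertions take the MLX5_ASSERT(false) form: they mark return-code branches the dispatch logic is never expected to reach, so debug builds abort loudly instead of silently continuing. A standalone illustration of the pattern with a local stand-in macro (MY_ASSERT/MY_DEBUG are hypothetical, not the driver's):

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

/* Local stand-in for a debug-only assertion macro. */
#ifdef MY_DEBUG
#define MY_ASSERT(exp) do { if (!(exp)) abort(); } while (0)
#else
#define MY_ASSERT(exp) ((void)0)
#endif

enum code { CODE_A, CODE_B };

static const char *
name(enum code c)
{
        switch (c) {
        case CODE_A:
                return "A";
        case CODE_B:
                return "B";
        }
        MY_ASSERT(false);       /* unreachable if the enum is handled above */
        return "?";
}

int
main(void)
{
        printf("%s\n", name(CODE_B));
        return 0;
}
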
/**
struct mlx5_txq_local *restrict loc,
unsigned int olx)
{
- assert(loc->elts_free && loc->wqe_free);
- assert(pkts_n > loc->pkts_sent);
+ MLX5_ASSERT(loc->elts_free && loc->wqe_free);
+ MLX5_ASSERT(pkts_n > loc->pkts_sent);
pkts += loc->pkts_sent + 1;
pkts_n -= loc->pkts_sent;
for (;;) {
unsigned int ds, dlen, hlen, ntcp, vlan = 0;
uint8_t *dptr;
- assert(NB_SEGS(loc->mbuf) == 1);
+ MLX5_ASSERT(NB_SEGS(loc->mbuf) == 1);
dlen = rte_pktmbuf_data_len(loc->mbuf);
if (MLX5_TXOFF_CONFIG(VLAN) &&
loc->mbuf->ol_flags & PKT_TX_VLAN_PKT) {
return MLX5_TXCMP_CODE_SINGLE;
/* Continue with the next TSO packet. */
}
- assert(false);
+ MLX5_ASSERT(false);
}
/**
return false;
/* There must be no VLAN packets in eMPW loop. */
if (MLX5_TXOFF_CONFIG(VLAN))
- assert(!(loc->mbuf->ol_flags & PKT_TX_VLAN_PKT));
+ MLX5_ASSERT(!(loc->mbuf->ol_flags & PKT_TX_VLAN_PKT));
return true;
}
unsigned int slen,
unsigned int olx __rte_unused)
{
- assert(!MLX5_TXOFF_CONFIG(INLINE));
+ MLX5_ASSERT(!MLX5_TXOFF_CONFIG(INLINE));
#ifdef MLX5_PMD_SOFT_COUNTERS
/* Update sent data bytes counter. */
txq->stats.obytes += slen;
unsigned int slen,
unsigned int olx __rte_unused)
{
- assert(MLX5_TXOFF_CONFIG(INLINE));
- assert((len % MLX5_WSEG_SIZE) == 0);
+ MLX5_ASSERT(MLX5_TXOFF_CONFIG(INLINE));
+ MLX5_ASSERT((len % MLX5_WSEG_SIZE) == 0);
#ifdef MLX5_PMD_SOFT_COUNTERS
/* Update sent data bytes counter. */
txq->stats.obytes += slen;
* and sends single-segment packet with eMPW opcode
* without data inlining.
*/
- assert(!MLX5_TXOFF_CONFIG(INLINE));
- assert(MLX5_TXOFF_CONFIG(EMPW));
- assert(loc->elts_free && loc->wqe_free);
- assert(pkts_n > loc->pkts_sent);
+ MLX5_ASSERT(!MLX5_TXOFF_CONFIG(INLINE));
+ MLX5_ASSERT(MLX5_TXOFF_CONFIG(EMPW));
+ MLX5_ASSERT(loc->elts_free && loc->wqe_free);
+ MLX5_ASSERT(pkts_n > loc->pkts_sent);
static_assert(MLX5_EMPW_MIN_PACKETS >= 2, "invalid min size");
pkts += loc->pkts_sent + 1;
pkts_n -= loc->pkts_sent;
unsigned int slen = 0;
next_empw:
- assert(NB_SEGS(loc->mbuf) == 1);
+ MLX5_ASSERT(NB_SEGS(loc->mbuf) == 1);
part = RTE_MIN(pkts_n, MLX5_TXOFF_CONFIG(MPW) ?
MLX5_MPW_MAX_PACKETS :
MLX5_EMPW_MAX_PACKETS);
return MLX5_TXCMP_CODE_EXIT;
return MLX5_TXCMP_CODE_MULTI;
}
- assert(NB_SEGS(loc->mbuf) == 1);
+ MLX5_ASSERT(NB_SEGS(loc->mbuf) == 1);
if (ret == MLX5_TXCMP_CODE_TSO) {
part -= loop;
mlx5_tx_sdone_empw(txq, loc, part, slen, olx);
return MLX5_TXCMP_CODE_SINGLE;
}
if (ret != MLX5_TXCMP_CODE_EMPW) {
- assert(false);
+ MLX5_ASSERT(false);
part -= loop;
mlx5_tx_sdone_empw(txq, loc, part, slen, olx);
return MLX5_TXCMP_CODE_ERROR;
* - packets length (legacy MPW only)
*/
if (!mlx5_tx_match_empw(txq, eseg, loc, dlen, olx)) {
- assert(loop);
+ MLX5_ASSERT(loop);
part -= loop;
mlx5_tx_sdone_empw(txq, loc, part, slen, olx);
if (unlikely(!loc->elts_free ||
dseg = (struct mlx5_wqe_dseg *)txq->wqes;
}
/* eMPW is built successfully, update loop parameters. */
- assert(!loop);
- assert(pkts_n >= part);
+ MLX5_ASSERT(!loop);
+ MLX5_ASSERT(pkts_n >= part);
#ifdef MLX5_PMD_SOFT_COUNTERS
/* Update sent data bytes counter. */
txq->stats.obytes += slen;
return ret;
/* Continue sending eMPW batches. */
}
- assert(false);
+ MLX5_ASSERT(false);
}
/**
* and sends single-segment packet with eMPW opcode
* with data inlining.
*/
- assert(MLX5_TXOFF_CONFIG(INLINE));
- assert(MLX5_TXOFF_CONFIG(EMPW));
- assert(loc->elts_free && loc->wqe_free);
- assert(pkts_n > loc->pkts_sent);
+ MLX5_ASSERT(MLX5_TXOFF_CONFIG(INLINE));
+ MLX5_ASSERT(MLX5_TXOFF_CONFIG(EMPW));
+ MLX5_ASSERT(loc->elts_free && loc->wqe_free);
+ MLX5_ASSERT(pkts_n > loc->pkts_sent);
static_assert(MLX5_EMPW_MIN_PACKETS >= 2, "invalid min size");
pkts += loc->pkts_sent + 1;
pkts_n -= loc->pkts_sent;
unsigned int room, part, nlim;
unsigned int slen = 0;
- assert(NB_SEGS(loc->mbuf) == 1);
+ MLX5_ASSERT(NB_SEGS(loc->mbuf) == 1);
/*
* Limits the number of packets in one WQE
* to improve CQE latency generation.
uint8_t *dptr = rte_pktmbuf_mtod(loc->mbuf, uint8_t *);
unsigned int tlen;
- assert(room >= MLX5_WQE_DSEG_SIZE);
- assert((room % MLX5_WQE_DSEG_SIZE) == 0);
- assert((uintptr_t)dseg < (uintptr_t)txq->wqes_end);
+ MLX5_ASSERT(room >= MLX5_WQE_DSEG_SIZE);
+ MLX5_ASSERT((room % MLX5_WQE_DSEG_SIZE) == 0);
+ MLX5_ASSERT((uintptr_t)dseg < (uintptr_t)txq->wqes_end);
/*
* Some Tx offloads may cause an error if
* packet is not long enough, check against
* mlx5_tx_able_to_empw() and packet
* fits into inline length guaranteed.
*/
- assert((dlen + sizeof(struct rte_vlan_hdr)) <=
- txq->inlen_empw);
+ MLX5_ASSERT((dlen +
+ sizeof(struct rte_vlan_hdr)) <=
+ txq->inlen_empw);
tlen += sizeof(struct rte_vlan_hdr);
if (room < tlen)
break;
dptr, dlen, olx);
}
tlen = RTE_ALIGN(tlen, MLX5_WSEG_SIZE);
- assert(room >= tlen);
+ MLX5_ASSERT(room >= tlen);
room -= tlen;
/*
* Packet data are completely inlined,
* Non-inlinable VLAN packets are
* processed outside of this routine.
*/
- assert(room >= MLX5_WQE_DSEG_SIZE);
+ MLX5_ASSERT(room >= MLX5_WQE_DSEG_SIZE);
if (MLX5_TXOFF_CONFIG(VLAN))
- assert(!(loc->mbuf->ol_flags &
- PKT_TX_VLAN_PKT));
+ MLX5_ASSERT(!(loc->mbuf->ol_flags &
+ PKT_TX_VLAN_PKT));
mlx5_tx_dseg_ptr(txq, loc, dseg, dptr, dlen, olx);
/* We have to store mbuf in elts.*/
txq->elts[txq->elts_head++ & txq->elts_m] = loc->mbuf;
return MLX5_TXCMP_CODE_EXIT;
return MLX5_TXCMP_CODE_MULTI;
}
- assert(NB_SEGS(loc->mbuf) == 1);
+ MLX5_ASSERT(NB_SEGS(loc->mbuf) == 1);
if (ret == MLX5_TXCMP_CODE_TSO) {
part -= room;
mlx5_tx_idone_empw(txq, loc, part, slen, olx);
return MLX5_TXCMP_CODE_SINGLE;
}
if (ret != MLX5_TXCMP_CODE_EMPW) {
- assert(false);
+ MLX5_ASSERT(false);
part -= room;
mlx5_tx_idone_empw(txq, loc, part, slen, olx);
return MLX5_TXCMP_CODE_ERROR;
* We get here to close an existing eMPW
* session and start the new one.
*/
- assert(pkts_n);
+ MLX5_ASSERT(pkts_n);
part -= room;
if (unlikely(!part))
return MLX5_TXCMP_CODE_EXIT;
return MLX5_TXCMP_CODE_EXIT;
/* Continue the loop with new eMPW session. */
}
- assert(false);
+ MLX5_ASSERT(false);
}
/**
* Subroutine is the part of mlx5_tx_burst_single()
* and sends single-segment packet with SEND opcode.
*/
- assert(loc->elts_free && loc->wqe_free);
- assert(pkts_n > loc->pkts_sent);
+ MLX5_ASSERT(loc->elts_free && loc->wqe_free);
+ MLX5_ASSERT(pkts_n > loc->pkts_sent);
pkts += loc->pkts_sent + 1;
pkts_n -= loc->pkts_sent;
for (;;) {
struct mlx5_wqe *restrict wqe;
enum mlx5_txcmp_code ret;
- assert(NB_SEGS(loc->mbuf) == 1);
+ MLX5_ASSERT(NB_SEGS(loc->mbuf) == 1);
if (MLX5_TXOFF_CONFIG(INLINE)) {
unsigned int inlen, vlan = 0;
* Otherwise we would do extra check for data
* size to avoid crashes due to length overflow.
*/
- assert(txq->inlen_send >= MLX5_ESEG_MIN_INLINE_SIZE);
+ MLX5_ASSERT(txq->inlen_send >=
+ MLX5_ESEG_MIN_INLINE_SIZE);
if (inlen <= txq->inlen_send) {
unsigned int seg_n, wqe_n;
* WQE ring buffer to inline partially.
*/
single_min_inline:
- assert(txq->inlen_send >= txq->inlen_mode);
- assert(inlen > txq->inlen_mode);
- assert(txq->inlen_mode >=
- MLX5_ESEG_MIN_INLINE_SIZE);
+ MLX5_ASSERT(txq->inlen_send >= txq->inlen_mode);
+ MLX5_ASSERT(inlen > txq->inlen_mode);
+ MLX5_ASSERT(txq->inlen_mode >=
+ MLX5_ESEG_MIN_INLINE_SIZE);
/*
* Check whether there are enough free WQEBBs:
* - Control Segment
txq->wqe_ci += (ds + 3) / 4;
loc->wqe_free -= (ds + 3) / 4;
/* We have to store mbuf in elts.*/
- assert(MLX5_TXOFF_CONFIG(INLINE));
+ MLX5_ASSERT(MLX5_TXOFF_CONFIG(INLINE));
txq->elts[txq->elts_head++ & txq->elts_m] =
loc->mbuf;
--loc->elts_free;
* comparing with txq->inlen_send. We should
* not get overflow here.
*/
- assert(inlen > MLX5_ESEG_MIN_INLINE_SIZE);
+ MLX5_ASSERT(inlen > MLX5_ESEG_MIN_INLINE_SIZE);
dlen = inlen - MLX5_ESEG_MIN_INLINE_SIZE;
mlx5_tx_dseg_ptr(txq, loc, &wqe->dseg[1],
dptr, dlen, olx);
++txq->wqe_ci;
--loc->wqe_free;
/* We have to store mbuf in elts.*/
- assert(MLX5_TXOFF_CONFIG(INLINE));
+ MLX5_ASSERT(MLX5_TXOFF_CONFIG(INLINE));
txq->elts[txq->elts_head++ & txq->elts_m] =
loc->mbuf;
--loc->elts_free;
* if no inlining is configured, this is done
* by the calling routine in a batch copy.
*/
- assert(!MLX5_TXOFF_CONFIG(INLINE));
+ MLX5_ASSERT(!MLX5_TXOFF_CONFIG(INLINE));
--loc->elts_free;
#ifdef MLX5_PMD_SOFT_COUNTERS
/* Update sent data bytes counter. */
if (unlikely(ret != MLX5_TXCMP_CODE_SINGLE))
return ret;
}
- assert(false);
+ MLX5_ASSERT(false);
}
static __rte_always_inline enum mlx5_txcmp_code
ret = mlx5_tx_able_to_empw(txq, loc, olx, false);
if (ret == MLX5_TXCMP_CODE_SINGLE)
goto ordinary_send;
- assert(ret == MLX5_TXCMP_CODE_EMPW);
+ MLX5_ASSERT(ret == MLX5_TXCMP_CODE_EMPW);
for (;;) {
/* Optimize for inline/no inline eMPW send. */
ret = (MLX5_TXOFF_CONFIG(INLINE)) ?
if (ret != MLX5_TXCMP_CODE_SINGLE)
return ret;
/* The resources to send one packet should remain. */
- assert(loc->elts_free && loc->wqe_free);
+ MLX5_ASSERT(loc->elts_free && loc->wqe_free);
ordinary_send:
ret = mlx5_tx_burst_single_send(txq, pkts, pkts_n, loc, olx);
- assert(ret != MLX5_TXCMP_CODE_SINGLE);
+ MLX5_ASSERT(ret != MLX5_TXCMP_CODE_SINGLE);
if (ret != MLX5_TXCMP_CODE_EMPW)
return ret;
/* The resources to send one packet should remain. */
- assert(loc->elts_free && loc->wqe_free);
+ MLX5_ASSERT(loc->elts_free && loc->wqe_free);
}
}
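
The loop above alternates between the eMPW and ordinary senders based on the mlx5_txcmp_code each routine returns for the packet that stopped it. A minimal sketch of that return-code dispatch shape (enum values and senders invented for the example):

#include <stdio.h>

enum txcmp { CODE_EXIT, CODE_ERROR, CODE_SINGLE, CODE_MULTI };

/* Each "sender" consumes packets until it meets one it cannot handle
 * and reports which kind of sender should take over. */
static enum txcmp
send_single(int *left)
{
        return (*left)-- > 1 ? CODE_MULTI : CODE_EXIT;
}

static enum txcmp
send_multi(int *left)
{
        return (*left)-- > 1 ? CODE_SINGLE : CODE_EXIT;
}

int
main(void)
{
        int left = 5;
        enum txcmp ret = CODE_SINGLE;

        do {
                ret = (ret == CODE_SINGLE) ?
                      send_single(&left) : send_multi(&left);
        } while (ret != CODE_EXIT && ret != CODE_ERROR);
        printf("remaining: %d\n", left);        /* 0 */
        return 0;
}
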
enum mlx5_txcmp_code ret;
unsigned int part;
- assert(txq->elts_s >= (uint16_t)(txq->elts_head - txq->elts_tail));
- assert(txq->wqe_s >= (uint16_t)(txq->wqe_ci - txq->wqe_pi));
+ MLX5_ASSERT(txq->elts_s >= (uint16_t)(txq->elts_head - txq->elts_tail));
+ MLX5_ASSERT(txq->wqe_s >= (uint16_t)(txq->wqe_ci - txq->wqe_pi));
if (unlikely(!pkts_n))
return 0;
loc.pkts_sent = 0;
* - data inlining into WQEs, one packet may require multiple
* WQEBBs, the WQEs become the limiting factor.
*/
- assert(txq->elts_s >= (uint16_t)(txq->elts_head - txq->elts_tail));
+ MLX5_ASSERT(txq->elts_s >= (uint16_t)(txq->elts_head - txq->elts_tail));
loc.elts_free = txq->elts_s -
(uint16_t)(txq->elts_head - txq->elts_tail);
- assert(txq->wqe_s >= (uint16_t)(txq->wqe_ci - txq->wqe_pi));
+ MLX5_ASSERT(txq->wqe_s >= (uint16_t)(txq->wqe_ci - txq->wqe_pi));
loc.wqe_free = txq->wqe_s -
(uint16_t)(txq->wqe_ci - txq->wqe_pi);
if (unlikely(!loc.elts_free || !loc.wqe_free))
* per WQE, do it in dedicated routine.
*/
enter_send_multi:
- assert(loc.pkts_sent >= loc.pkts_copy);
+ MLX5_ASSERT(loc.pkts_sent >= loc.pkts_copy);
part = loc.pkts_sent - loc.pkts_copy;
if (!MLX5_TXOFF_CONFIG(INLINE) && part) {
/*
part, olx);
loc.pkts_copy = loc.pkts_sent;
}
- assert(pkts_n > loc.pkts_sent);
+ MLX5_ASSERT(pkts_n > loc.pkts_sent);
ret = mlx5_tx_burst_mseg(txq, pkts, pkts_n, &loc, olx);
if (!MLX5_TXOFF_CONFIG(INLINE))
loc.pkts_copy = loc.pkts_sent;
goto enter_send_tso;
}
/* We must not get here. Something is going wrong. */
- assert(false);
+ MLX5_ASSERT(false);
txq->stats.oerrors++;
break;
}
* in dedicated branch.
*/
enter_send_tso:
- assert(NB_SEGS(loc.mbuf) == 1);
- assert(pkts_n > loc.pkts_sent);
+ MLX5_ASSERT(NB_SEGS(loc.mbuf) == 1);
+ MLX5_ASSERT(pkts_n > loc.pkts_sent);
ret = mlx5_tx_burst_tso(txq, pkts, pkts_n, &loc, olx);
/*
* These returned code checks are supposed
goto enter_send_multi;
}
/* We must not get here. Something is going wrong. */
- assert(false);
+ MLX5_ASSERT(false);
txq->stats.oerrors++;
break;
}
* offloads are requested at SQ configuration time).
*/
enter_send_single:
- assert(pkts_n > loc.pkts_sent);
+ MLX5_ASSERT(pkts_n > loc.pkts_sent);
ret = mlx5_tx_burst_single(txq, pkts, pkts_n, &loc, olx);
/*
* These returned code checks are supposed
goto enter_send_tso;
}
/* We must not get here. Something is going wrong. */
- assert(false);
+ MLX5_ASSERT(false);
txq->stats.oerrors++;
break;
}
* - doorbell the hardware
* - copy the rest of mbufs to elts (if any)
*/
- assert(MLX5_TXOFF_CONFIG(INLINE) || loc.pkts_sent >= loc.pkts_copy);
+ MLX5_ASSERT(MLX5_TXOFF_CONFIG(INLINE) ||
+ loc.pkts_sent >= loc.pkts_copy);
/* Take a shortcut if nothing is sent. */
if (unlikely(loc.pkts_sent == loc.pkts_loop))
goto burst_exit;
mlx5_tx_copy_elts(txq, pkts + loc.pkts_copy, part, olx);
loc.pkts_copy = loc.pkts_sent;
}
- assert(txq->elts_s >= (uint16_t)(txq->elts_head - txq->elts_tail));
- assert(txq->wqe_s >= (uint16_t)(txq->wqe_ci - txq->wqe_pi));
+ MLX5_ASSERT(txq->elts_s >= (uint16_t)(txq->elts_head - txq->elts_tail));
+ MLX5_ASSERT(txq->wqe_s >= (uint16_t)(txq->wqe_ci - txq->wqe_pi));
if (pkts_n > loc.pkts_sent) {
/*
* If the burst size is large there might be not enough CQEs
"invalid WQE Data Segment size");
static_assert(MLX5_WQE_SIZE == 4 * MLX5_WSEG_SIZE,
"invalid WQE size");
- assert(priv);
+ MLX5_ASSERT(priv);
if (tx_offloads & DEV_TX_OFFLOAD_MULTI_SEGS) {
/* We should support Multi-Segment Packets. */
olx |= MLX5_TXOFF_CONFIG_MULTI;
* Copyright 2017 Mellanox Technologies, Ltd
*/
-#include <assert.h>
#include <stdint.h>
#include <string.h>
#include <stdlib.h>
&((volatile struct mlx5_wqe_data_seg *)rxq->wqes)[elts_idx];
unsigned int i;
- assert(n >= MLX5_VPMD_RXQ_RPLNSH_THRESH(q_n));
- assert(n <= (uint16_t)(q_n - (rxq->rq_ci - rxq->rq_pi)));
- assert(MLX5_VPMD_RXQ_RPLNSH_THRESH(q_n) > MLX5_VPMD_DESCS_PER_LOOP);
+ MLX5_ASSERT(n >= MLX5_VPMD_RXQ_RPLNSH_THRESH(q_n));
+ MLX5_ASSERT(n <= (uint16_t)(q_n - (rxq->rq_ci - rxq->rq_pi)));
+ MLX5_ASSERT(MLX5_VPMD_RXQ_RPLNSH_THRESH(q_n) >
+ MLX5_VPMD_DESCS_PER_LOOP);
/* Not to cross queue end. */
n = RTE_MIN(n - MLX5_VPMD_DESCS_PER_LOOP, q_n - elts_idx);
if (rte_mempool_get_bulk(rxq->mp, (void *)elts, n) < 0) {
#ifndef RTE_PMD_MLX5_RXTX_VEC_ALTIVEC_H_
#define RTE_PMD_MLX5_RXTX_VEC_ALTIVEC_H_
-#include <assert.h>
#include <stdint.h>
#include <string.h>
#include <stdlib.h>
const vector unsigned short cqe_sel_mask2 =
(vector unsigned short){0, 0, 0xffff, 0, 0, 0, 0, 0};
- assert(rxq->sges_n == 0);
- assert(rxq->cqe_n == rxq->elts_n);
+ MLX5_ASSERT(rxq->sges_n == 0);
+ MLX5_ASSERT(rxq->cqe_n == rxq->elts_n);
cq = &(*rxq->cqes)[cq_idx];
rte_prefetch0(cq);
rte_prefetch0(cq + 1);
if (!pkts_n)
return rcvd_pkt;
/* At this point, there shouldn't be any remaining packets. */
- assert(rxq->decompressed == 0);
+ MLX5_ASSERT(rxq->decompressed == 0);
/*
* A. load first Qword (8bytes) in one loop.
if (unlikely(!nocmp_n && comp_idx == MLX5_VPMD_DESCS_PER_LOOP))
return rcvd_pkt;
/* Update the consumer indexes for non-compressed CQEs. */
- assert(nocmp_n <= pkts_n);
+ MLX5_ASSERT(nocmp_n <= pkts_n);
rxq->cq_ci += nocmp_n;
rxq->rq_pi += nocmp_n;
rcvd_pkt += nocmp_n;
#endif
/* Decompress the last CQE if compressed. */
if (comp_idx < MLX5_VPMD_DESCS_PER_LOOP && comp_idx == n) {
- assert(comp_idx == (nocmp_n % MLX5_VPMD_DESCS_PER_LOOP));
+ MLX5_ASSERT(comp_idx == (nocmp_n % MLX5_VPMD_DESCS_PER_LOOP));
rxq->decompressed =
rxq_cq_decompress_v(rxq, &cq[nocmp_n], &elts[nocmp_n]);
/* Return more packets if needed. */
#ifndef RTE_PMD_MLX5_RXTX_VEC_NEON_H_
#define RTE_PMD_MLX5_RXTX_VEC_NEON_H_
-#include <assert.h>
#include <stdint.h>
#include <string.h>
#include <stdlib.h>
};
const uint32x4_t flow_mark_adj = { 0, 0, 0, rxq->mark * (-1) };
- assert(rxq->sges_n == 0);
- assert(rxq->cqe_n == rxq->elts_n);
+ MLX5_ASSERT(rxq->sges_n == 0);
+ MLX5_ASSERT(rxq->cqe_n == rxq->elts_n);
cq = &(*rxq->cqes)[cq_idx];
rte_prefetch_non_temporal(cq);
rte_prefetch_non_temporal(cq + 1);
if (!pkts_n)
return rcvd_pkt;
/* At this point, there shouldn't be any remaining packets. */
- assert(rxq->decompressed == 0);
+ MLX5_ASSERT(rxq->decompressed == 0);
/*
* Note that vectors have reverse order - {v3, v2, v1, v0}, because
* there's no instruction to count trailing zeros. __builtin_clzl() is
if (unlikely(!nocmp_n && comp_idx == MLX5_VPMD_DESCS_PER_LOOP))
return rcvd_pkt;
/* Update the consumer indexes for non-compressed CQEs. */
- assert(nocmp_n <= pkts_n);
+ MLX5_ASSERT(nocmp_n <= pkts_n);
rxq->cq_ci += nocmp_n;
rxq->rq_pi += nocmp_n;
rcvd_pkt += nocmp_n;
#endif
/* Decompress the last CQE if compressed. */
if (comp_idx < MLX5_VPMD_DESCS_PER_LOOP && comp_idx == n) {
- assert(comp_idx == (nocmp_n % MLX5_VPMD_DESCS_PER_LOOP));
+ MLX5_ASSERT(comp_idx == (nocmp_n % MLX5_VPMD_DESCS_PER_LOOP));
rxq->decompressed = rxq_cq_decompress_v(rxq, &cq[nocmp_n],
&elts[nocmp_n]);
/* Return more packets if needed. */
#ifndef RTE_PMD_MLX5_RXTX_VEC_SSE_H_
#define RTE_PMD_MLX5_RXTX_VEC_SSE_H_
-#include <assert.h>
#include <stdint.h>
#include <string.h>
#include <stdlib.h>
rxq->crc_present * RTE_ETHER_CRC_LEN);
const __m128i flow_mark_adj = _mm_set_epi32(rxq->mark * (-1), 0, 0, 0);
- assert(rxq->sges_n == 0);
- assert(rxq->cqe_n == rxq->elts_n);
+ MLX5_ASSERT(rxq->sges_n == 0);
+ MLX5_ASSERT(rxq->cqe_n == rxq->elts_n);
cq = &(*rxq->cqes)[cq_idx];
rte_prefetch0(cq);
rte_prefetch0(cq + 1);
if (!pkts_n)
return rcvd_pkt;
/* At this point, there shouldn't be any remaining packets. */
- assert(rxq->decompressed == 0);
+ MLX5_ASSERT(rxq->decompressed == 0);
/*
* A. load first Qword (8bytes) in one loop.
* B. copy 4 mbuf pointers from elts ring to returning pkts.
if (unlikely(!nocmp_n && comp_idx == MLX5_VPMD_DESCS_PER_LOOP))
return rcvd_pkt;
/* Update the consumer indexes for non-compressed CQEs. */
- assert(nocmp_n <= pkts_n);
+ MLX5_ASSERT(nocmp_n <= pkts_n);
rxq->cq_ci += nocmp_n;
rxq->rq_pi += nocmp_n;
rcvd_pkt += nocmp_n;
#endif
/* Decompress the last CQE if compressed. */
if (comp_idx < MLX5_VPMD_DESCS_PER_LOOP && comp_idx == n) {
- assert(comp_idx == (nocmp_n % MLX5_VPMD_DESCS_PER_LOOP));
+ MLX5_ASSERT(comp_idx == (nocmp_n % MLX5_VPMD_DESCS_PER_LOOP));
rxq->decompressed = rxq_cq_decompress_v(rxq, &cq[nocmp_n],
&elts[nocmp_n]);
/* Return more packets if needed. */
static int
mlx5_pmd_interrupt_handler_install(void)
{
- assert(server_socket);
+ MLX5_ASSERT(server_socket);
server_intr_handle.fd = server_socket;
server_intr_handle.type = RTE_INTR_HANDLE_EXT;
return rte_intr_callback_register(&server_intr_handle,
int ret = -1;
int flags;
- assert(rte_eal_process_type() == RTE_PROC_PRIMARY);
+ MLX5_ASSERT(rte_eal_process_type() == RTE_PROC_PRIMARY);
if (server_socket)
return 0;
/*
xstats_ctrl->info[idx] = mlx5_counters_init[i];
}
}
- assert(xstats_ctrl->mlx5_stats_n <= MLX5_MAX_XSTATS);
+ MLX5_ASSERT(xstats_ctrl->mlx5_stats_n <= MLX5_MAX_XSTATS);
xstats_ctrl->stats_n = dev_stats_n;
/* Copy to base at first time. */
ret = mlx5_read_dev_counters(dev, xstats_ctrl->base);
*/
#include <stddef.h>
-#include <assert.h>
#include <errno.h>
#include <string.h>
#include <stdint.h>
while (elts_tail != elts_head) {
struct rte_mbuf *elt = (*elts)[elts_tail & elts_m];
- assert(elt != NULL);
+ MLX5_ASSERT(elt != NULL);
rte_pktmbuf_free_seg(elt);
#ifdef RTE_LIBRTE_MLX5_DEBUG
/* Poisoning. */
if (txq_ctrl->type != MLX5_TXQ_TYPE_STANDARD)
return;
- assert(rte_eal_process_type() == RTE_PROC_PRIMARY);
- assert(ppriv);
+ MLX5_ASSERT(rte_eal_process_type() == RTE_PROC_PRIMARY);
+ MLX5_ASSERT(ppriv);
ppriv->uar_table[txq_ctrl->txq.idx] = txq_ctrl->bf_reg;
txq_uar_ncattr_init(txq_ctrl, page_size);
#ifndef RTE_ARCH_64
if (txq_ctrl->type != MLX5_TXQ_TYPE_STANDARD)
return 0;
- assert(ppriv);
+ MLX5_ASSERT(ppriv);
/*
* As in rdma-core, UARs are mapped at the OS page
* size. Refer to the libmlx5 function mlx5_init_context()
unsigned int i;
int ret;
- assert(rte_eal_process_type() == RTE_PROC_SECONDARY);
+ MLX5_ASSERT(rte_eal_process_type() == RTE_PROC_SECONDARY);
for (i = 0; i != priv->txqs_n; ++i) {
if (!(*priv->txqs)[i])
continue;
txq_ctrl = container_of(txq, struct mlx5_txq_ctrl, txq);
if (txq_ctrl->type != MLX5_TXQ_TYPE_STANDARD)
continue;
- assert(txq->idx == (uint16_t)i);
+ MLX5_ASSERT(txq->idx == (uint16_t)i);
ret = txq_uar_init_secondary(txq_ctrl, fd);
if (ret)
goto error;
struct mlx5_txq_obj *tmpl = NULL;
int ret = 0;
- assert(txq_data);
- assert(!txq_ctrl->obj);
+ MLX5_ASSERT(txq_data);
+ MLX5_ASSERT(!txq_ctrl->obj);
tmpl = rte_calloc_socket(__func__, 1, sizeof(*tmpl), 0,
txq_ctrl->socket);
if (!tmpl) {
if (priv->config.devx && !priv->sh->tdn)
qp.comp_mask |= MLX5DV_QP_MASK_RAW_QP_HANDLES;
#endif
- assert(txq_data);
+ MLX5_ASSERT(txq_data);
priv->verbs_alloc_ctx.type = MLX5_VERBS_ALLOC_TYPE_TX_QUEUE;
priv->verbs_alloc_ctx.obj = txq_ctrl;
if (mlx5_getenv_int("MLX5_ENABLE_CQE_COMPRESSION")) {
int
mlx5_txq_obj_release(struct mlx5_txq_obj *txq_obj)
{
- assert(txq_obj);
+ MLX5_ASSERT(txq_obj);
if (rte_atomic32_dec_and_test(&txq_obj->refcnt)) {
if (txq_obj->type == MLX5_TXQ_OBJ_TYPE_DEVX_HAIRPIN) {
if (txq_obj->tis)
* beginning of inlining buffer in Ethernet
* Segment.
*/
- assert(inlen_send >= MLX5_ESEG_MIN_INLINE_SIZE);
- assert(inlen_send <= MLX5_WQE_SIZE_MAX +
- MLX5_ESEG_MIN_INLINE_SIZE -
- MLX5_WQE_CSEG_SIZE -
- MLX5_WQE_ESEG_SIZE -
- MLX5_WQE_DSEG_SIZE * 2);
+ MLX5_ASSERT(inlen_send >= MLX5_ESEG_MIN_INLINE_SIZE);
+ MLX5_ASSERT(inlen_send <= MLX5_WQE_SIZE_MAX +
+ MLX5_ESEG_MIN_INLINE_SIZE -
+ MLX5_WQE_CSEG_SIZE -
+ MLX5_WQE_ESEG_SIZE -
+ MLX5_WQE_DSEG_SIZE * 2);
} else if (inlen_mode) {
/*
* If minimal inlining is requested we must
PORT_ID(priv), inlen_empw, temp);
inlen_empw = temp;
}
- assert(inlen_empw >= MLX5_ESEG_MIN_INLINE_SIZE);
- assert(inlen_empw <= MLX5_WQE_SIZE_MAX +
- MLX5_DSEG_MIN_INLINE_SIZE -
- MLX5_WQE_CSEG_SIZE -
- MLX5_WQE_ESEG_SIZE -
- MLX5_WQE_DSEG_SIZE);
+ MLX5_ASSERT(inlen_empw >= MLX5_ESEG_MIN_INLINE_SIZE);
+ MLX5_ASSERT(inlen_empw <= MLX5_WQE_SIZE_MAX +
+ MLX5_DSEG_MIN_INLINE_SIZE -
+ MLX5_WQE_CSEG_SIZE -
+ MLX5_WQE_ESEG_SIZE -
+ MLX5_WQE_DSEG_SIZE);
txq_ctrl->txq.inlen_empw = inlen_empw;
}
txq_ctrl->max_inline_data = RTE_MAX(inlen_send, inlen_empw);
}
txq_ctrl->max_inline_data = RTE_MAX(txq_ctrl->txq.inlen_send,
txq_ctrl->txq.inlen_empw);
- assert(txq_ctrl->max_inline_data <= max_inline);
- assert(txq_ctrl->txq.inlen_mode <= max_inline);
- assert(txq_ctrl->txq.inlen_mode <= txq_ctrl->txq.inlen_send);
- assert(txq_ctrl->txq.inlen_mode <= txq_ctrl->txq.inlen_empw ||
- !txq_ctrl->txq.inlen_empw);
+ MLX5_ASSERT(txq_ctrl->max_inline_data <= max_inline);
+ MLX5_ASSERT(txq_ctrl->txq.inlen_mode <= max_inline);
+ MLX5_ASSERT(txq_ctrl->txq.inlen_mode <= txq_ctrl->txq.inlen_send);
+ MLX5_ASSERT(txq_ctrl->txq.inlen_mode <= txq_ctrl->txq.inlen_empw ||
+ !txq_ctrl->txq.inlen_empw);
return 0;
error:
rte_errno = ENOMEM;
}
/* Save pointer of global generation number to check memory event. */
tmpl->txq.mr_ctrl.dev_gen_ptr = &priv->sh->mr.dev_gen;
- assert(desc > MLX5_TX_COMP_THRESH);
+ MLX5_ASSERT(desc > MLX5_TX_COMP_THRESH);
tmpl->txq.offloads = conf->offloads |
dev->data->dev_conf.txmode.offloads;
tmpl->priv = priv;
struct mlx5_hlist_head *first;
struct mlx5_hlist_entry *node;
- assert(h);
+ MLX5_ASSERT(h);
idx = rte_hash_crc_8byte(key, 0) & h->mask;
first = &h->heads[idx];
LIST_FOREACH(node, first, next) {
struct mlx5_hlist_head *first;
struct mlx5_hlist_entry *node;
- assert(h && entry);
+ MLX5_ASSERT(h && entry);
idx = rte_hash_crc_8byte(entry->key, 0) & h->mask;
first = &h->heads[idx];
/* No need to reuse the lookup function. */
mlx5_hlist_remove(struct mlx5_hlist *h __rte_unused,
struct mlx5_hlist_entry *entry)
{
- assert(entry && entry->next.le_prev);
+ MLX5_ASSERT(entry && entry->next.le_prev);
LIST_REMOVE(entry, next);
/* Set to NULL to get rid of removing action for more than once. */
entry->next.le_prev = NULL;
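
mlx5_hlist above buckets entries by rte_hash_crc_8byte(key, 0) & h->mask over an array of LIST heads. A self-contained sketch of the same bucket-list layout using <sys/queue.h>, with a toy 64-bit mixer standing in for the CRC (everything here is illustrative):

#include <stdint.h>
#include <stdio.h>
#include <sys/queue.h>

struct entry {
        LIST_ENTRY(entry) next;
        uint64_t key;
};
LIST_HEAD(bucket, entry);

#define TABLE_SZ 16u                    /* power of two */
#define TABLE_MASK (TABLE_SZ - 1u)

/* Toy 64-bit mixer used only for this example. */
static uint32_t
hash64(uint64_t key)
{
        key ^= key >> 33;
        key *= 0xff51afd7ed558ccdULL;
        key ^= key >> 33;
        return (uint32_t)key;
}

static struct entry *
table_lookup(struct bucket *heads, uint64_t key)
{
        struct entry *e;

        LIST_FOREACH(e, &heads[hash64(key) & TABLE_MASK], next)
                if (e->key == key)
                        return e;
        return NULL;
}

int
main(void)
{
        struct bucket heads[TABLE_SZ];
        struct entry e = { .key = 42 };
        unsigned int i;

        for (i = 0; i < TABLE_SZ; i++)
                LIST_INIT(&heads[i]);
        LIST_INSERT_HEAD(&heads[hash64(e.key) & TABLE_MASK], &e, next);
        printf("found: %d\n", table_lookup(heads, 42) != NULL);
        return 0;
}
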
uint32_t idx;
struct mlx5_hlist_entry *entry;
- assert(h);
+ MLX5_ASSERT(h);
for (idx = 0; idx < h->table_sz; ++idx) {
/* no LIST_FOREACH_SAFE, using while instead */
while (!LIST_EMPTY(&h->heads[idx])) {
#include <stdint.h>
#include <stdio.h>
#include <limits.h>
-#include <assert.h>
#include <errno.h>
#include <mlx5_common.h>
#define bool _Bool
#endif
-/* Bit-field manipulation. */
-#define BITFIELD_DECLARE(bf, type, size) \
- type bf[(((size_t)(size) / (sizeof(type) * CHAR_BIT)) + \
- !!((size_t)(size) % (sizeof(type) * CHAR_BIT)))]
-#define BITFIELD_DEFINE(bf, type, size) \
- BITFIELD_DECLARE((bf), type, (size)) = { 0 }
-#define BITFIELD_SET(bf, b) \
- (assert((size_t)(b) < (sizeof(bf) * CHAR_BIT)), \
- (void)((bf)[((b) / (sizeof((bf)[0]) * CHAR_BIT))] |= \
- ((size_t)1 << ((b) % (sizeof((bf)[0]) * CHAR_BIT)))))
-#define BITFIELD_RESET(bf, b) \
- (assert((size_t)(b) < (sizeof(bf) * CHAR_BIT)), \
- (void)((bf)[((b) / (sizeof((bf)[0]) * CHAR_BIT))] &= \
- ~((size_t)1 << ((b) % (sizeof((bf)[0]) * CHAR_BIT)))))
-#define BITFIELD_ISSET(bf, b) \
- (assert((size_t)(b) < (sizeof(bf) * CHAR_BIT)), \
- !!(((bf)[((b) / (sizeof((bf)[0]) * CHAR_BIT))] & \
- ((size_t)1 << ((b) % (sizeof((bf)[0]) * CHAR_BIT))))))
-
/* Convert a bit number to the corresponding 64-bit mask */
#define MLX5_BITSHIFT(v) (UINT64_C(1) << (v))
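
The removed BITFIELD_* helpers implement a word-array bitmap addressed word-by-word and bit-by-bit. For reference, a minimal standalone version of the same idea with fixed 64-bit words (names invented, not the driver macros):

#include <stdint.h>
#include <stdio.h>

#define BITS_PER_WORD 64u

/* Set bit b in a bitmap backed by 64-bit words. */
static void
bitmap_set(uint64_t *bm, unsigned int b)
{
        bm[b / BITS_PER_WORD] |= UINT64_C(1) << (b % BITS_PER_WORD);
}

/* Test bit b in the same bitmap. */
static int
bitmap_isset(const uint64_t *bm, unsigned int b)
{
        return !!(bm[b / BITS_PER_WORD] &
                  (UINT64_C(1) << (b % BITS_PER_WORD)));
}

int
main(void)
{
        uint64_t bm[2] = { 0, 0 };      /* room for 128 bits */

        bitmap_set(bm, 70);
        printf("bit 70: %d, bit 71: %d\n",
               bitmap_isset(bm, 70), bitmap_isset(bm, 71)); /* 1, 0 */
        return 0;
}
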
#include <stddef.h>
#include <errno.h>
-#include <assert.h>
#include <stdint.h>
#include <unistd.h>
DRV_LOG(DEBUG, "port %u %s VLAN filter ID %" PRIu16,
dev->data->port_id, (on ? "enable" : "disable"), vlan_id);
- assert(priv->vlan_filter_n <= RTE_DIM(priv->vlan_filter));
+ MLX5_ASSERT(priv->vlan_filter_n <= RTE_DIM(priv->vlan_filter));
for (i = 0; (i != priv->vlan_filter_n); ++i)
if (priv->vlan_filter[i] == vlan_id)
break;
return -rte_errno;
}
if (i < priv->vlan_filter_n) {
- assert(priv->vlan_filter_n != 0);
+ MLX5_ASSERT(priv->vlan_filter_n != 0);
/* Enabling an existing VLAN filter has no effect. */
if (on)
goto out;
(priv->vlan_filter_n - i));
priv->vlan_filter[priv->vlan_filter_n] = 0;
} else {
- assert(i == priv->vlan_filter_n);
+ MLX5_ASSERT(i == priv->vlan_filter_n);
/* Disabling an unknown VLAN filter has no effect. */
if (!on)
goto out;
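
The VLAN filter hunks above manage a small dense array: enabling an already-present ID is a no-op, disabling compacts the tail over the removed slot. A standalone sketch of that add/remove logic (array size and names invented):

#include <stdint.h>
#include <stdio.h>

#define MAX_FILTERS 64

static uint16_t filters[MAX_FILTERS];
static unsigned int filters_n;

/* Enable (on != 0) or disable a VLAN ID in a compact array;
 * enabling an existing entry or disabling an unknown one is a no-op. */
static int
vlan_filter_set(uint16_t vlan_id, int on)
{
        unsigned int i;

        for (i = 0; i < filters_n; i++)
                if (filters[i] == vlan_id)
                        break;
        if (i < filters_n) {
                if (on)
                        return 0;
                /* Shift the tail down to keep the array dense. */
                for (; i + 1 < filters_n; i++)
                        filters[i] = filters[i + 1];
                filters[--filters_n] = 0;
                return 0;
        }
        if (!on)
                return 0;
        if (filters_n == MAX_FILTERS)
                return -1;
        filters[filters_n++] = vlan_id;
        return 0;
}

int
main(void)
{
        vlan_filter_set(100, 1);
        vlan_filter_set(200, 1);
        vlan_filter_set(100, 0);
        printf("filters_n=%u first=%u\n",
               filters_n, (unsigned int)filters[0]);    /* 1, 200 */
        return 0;
}
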
struct mlx5_nl_vlan_vmwa_context *vmwa = priv->vmwa_context;
struct mlx5_nl_vlan_dev *vlan_dev = &vmwa->vlan_dev[0];
- assert(vlan->created);
- assert(priv->vmwa_context);
+ MLX5_ASSERT(vlan->created);
+ MLX5_ASSERT(priv->vmwa_context);
if (!vlan->created || !vmwa)
return;
vlan->created = 0;
- assert(vlan_dev[vlan->tag].refcnt);
+ MLX5_ASSERT(vlan_dev[vlan->tag].refcnt);
if (--vlan_dev[vlan->tag].refcnt == 0 &&
vlan_dev[vlan->tag].ifindex) {
mlx5_nl_vlan_vmwa_delete(vmwa, vlan_dev[vlan->tag].ifindex);
struct mlx5_nl_vlan_vmwa_context *vmwa = priv->vmwa_context;
struct mlx5_nl_vlan_dev *vlan_dev = &vmwa->vlan_dev[0];
- assert(!vlan->created);
- assert(priv->vmwa_context);
+ MLX5_ASSERT(!vlan->created);
+ MLX5_ASSERT(priv->vmwa_context);
if (vlan->created || !vmwa)
return;
if (vlan_dev[vlan->tag].refcnt == 0) {
- assert(!vlan_dev[vlan->tag].ifindex);
+ MLX5_ASSERT(!vlan_dev[vlan->tag].ifindex);
vlan_dev[vlan->tag].ifindex =
mlx5_nl_vlan_vmwa_create(vmwa, vmwa->vf_ifindex,
vlan->tag);