common/mlx5: improve assert control
author Alexander Kozyrev <akozyrev@mellanox.com>
Thu, 30 Jan 2020 16:14:40 +0000 (18:14 +0200)
committer Ferruh Yigit <ferruh.yigit@intel.com>
Wed, 5 Feb 2020 08:51:21 +0000 (09:51 +0100)
Use the MLX5_ASSERT macro instead of the standard assert() clause.
Its behavior depends on the RTE_LIBRTE_MLX5_DEBUG configuration option.
If RTE_LIBRTE_MLX5_DEBUG is enabled, MLX5_ASSERT expands to RTE_VERIFY,
so the check is always active and bypasses the global
CONFIG_RTE_ENABLE_ASSERT option.
If RTE_LIBRTE_MLX5_DEBUG is disabled, MLX5_ASSERT expands to RTE_ASSERT,
so the global CONFIG_RTE_ENABLE_ASSERT option can still activate the
check, since RTE_ASSERT calls RTE_VERIFY when assertions are enabled.
The assert clauses embedded in the BITFIELD_* macros are dropped; their
bounds checks move to the call sites in mlx5_nl.c.
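
In short, the new macro resolves as follows (a condensed view of the
mlx5_common.h change below):

    #ifdef RTE_LIBRTE_MLX5_DEBUG
    /* Always checked, regardless of CONFIG_RTE_ENABLE_ASSERT. */
    #define MLX5_ASSERT(exp) RTE_VERIFY(exp)
    #else
    /* Checked only when CONFIG_RTE_ENABLE_ASSERT is set. */
    #define MLX5_ASSERT(exp) RTE_ASSERT(exp)
    #endif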

Signed-off-by: Alexander Kozyrev <akozyrev@mellanox.com>
Acked-by: Viacheslav Ovsiienko <viacheslavo@mellanox.com>
28 files changed:
drivers/common/mlx5/mlx5_common.c
drivers/common/mlx5/mlx5_common.h
drivers/common/mlx5/mlx5_devx_cmds.c
drivers/common/mlx5/mlx5_nl.c
drivers/common/mlx5/mlx5_prm.h
drivers/net/mlx5/mlx5.c
drivers/net/mlx5/mlx5_ethdev.c
drivers/net/mlx5/mlx5_flow.c
drivers/net/mlx5/mlx5_flow_dv.c
drivers/net/mlx5/mlx5_flow_meter.c
drivers/net/mlx5/mlx5_flow_verbs.c
drivers/net/mlx5/mlx5_mac.c
drivers/net/mlx5/mlx5_mp.c
drivers/net/mlx5/mlx5_mr.c
drivers/net/mlx5/mlx5_rss.c
drivers/net/mlx5/mlx5_rxq.c
drivers/net/mlx5/mlx5_rxtx.c
drivers/net/mlx5/mlx5_rxtx_vec.c
drivers/net/mlx5/mlx5_rxtx_vec.h
drivers/net/mlx5/mlx5_rxtx_vec_altivec.h
drivers/net/mlx5/mlx5_rxtx_vec_neon.h
drivers/net/mlx5/mlx5_rxtx_vec_sse.h
drivers/net/mlx5/mlx5_socket.c
drivers/net/mlx5/mlx5_stats.c
drivers/net/mlx5/mlx5_txq.c
drivers/net/mlx5/mlx5_utils.c
drivers/net/mlx5/mlx5_utils.h
drivers/net/mlx5/mlx5_vlan.c

drivers/common/mlx5/mlx5_common.c
index 7849d86..610fb48 100644
@@ -308,7 +308,7 @@ RTE_INIT_PRIO(mlx5_glue_init, CLASS)
                unsigned int i;
 
                for (i = 0; i != sizeof(*mlx5_glue) / sizeof(void *); ++i)
-                       assert(((const void *const *)mlx5_glue)[i]);
+                       MLX5_ASSERT(((const void *const *)mlx5_glue)[i]);
        }
 #endif
        if (strcmp(mlx5_glue->version, MLX5_GLUE_VERSION)) {
drivers/common/mlx5/mlx5_common.h
index 884ec02..bc200e2 100644
@@ -5,7 +5,6 @@
 #ifndef RTE_PMD_MLX5_COMMON_H_
 #define RTE_PMD_MLX5_COMMON_H_
 
-#include <assert.h>
 #include <stdio.h>
 
 #include <rte_pci.h>
 /* Bit-field manipulation. */
 #define BITFIELD_DECLARE(bf, type, size) \
        type bf[(((size_t)(size) / (sizeof(type) * CHAR_BIT)) + \
-                !!((size_t)(size) % (sizeof(type) * CHAR_BIT)))]
+               !!((size_t)(size) % (sizeof(type) * CHAR_BIT)))]
 #define BITFIELD_DEFINE(bf, type, size) \
        BITFIELD_DECLARE((bf), type, (size)) = { 0 }
 #define BITFIELD_SET(bf, b) \
-       (assert((size_t)(b) < (sizeof(bf) * CHAR_BIT)), \
-        (void)((bf)[((b) / (sizeof((bf)[0]) * CHAR_BIT))] |= \
-               ((size_t)1 << ((b) % (sizeof((bf)[0]) * CHAR_BIT)))))
+       (void)((bf)[((b) / (sizeof((bf)[0]) * CHAR_BIT))] |= \
+               ((size_t)1 << ((b) % (sizeof((bf)[0]) * CHAR_BIT))))
 #define BITFIELD_RESET(bf, b) \
-       (assert((size_t)(b) < (sizeof(bf) * CHAR_BIT)), \
-        (void)((bf)[((b) / (sizeof((bf)[0]) * CHAR_BIT))] &= \
-               ~((size_t)1 << ((b) % (sizeof((bf)[0]) * CHAR_BIT)))))
+       (void)((bf)[((b) / (sizeof((bf)[0]) * CHAR_BIT))] &= \
+               ~((size_t)1 << ((b) % (sizeof((bf)[0]) * CHAR_BIT))))
 #define BITFIELD_ISSET(bf, b) \
-       (assert((size_t)(b) < (sizeof(bf) * CHAR_BIT)), \
-        !!(((bf)[((b) / (sizeof((bf)[0]) * CHAR_BIT))] & \
-            ((size_t)1 << ((b) % (sizeof((bf)[0]) * CHAR_BIT))))))
+       !!(((bf)[((b) / (sizeof((bf)[0]) * CHAR_BIT))] & \
+               ((size_t)1 << ((b) % (sizeof((bf)[0]) * CHAR_BIT)))))
 
 /*
  * Helper macros to work around __VA_ARGS__ limitations in a C99 compliant
@@ -102,12 +98,14 @@ pmd_drv_log_basename(const char *s)
 #ifdef RTE_LIBRTE_MLX5_DEBUG
 
 #define DEBUG(...) DRV_LOG(DEBUG, __VA_ARGS__)
-#define claim_zero(...) assert((__VA_ARGS__) == 0)
-#define claim_nonzero(...) assert((__VA_ARGS__) != 0)
+#define MLX5_ASSERT(exp) RTE_VERIFY(exp)
+#define claim_zero(...) MLX5_ASSERT((__VA_ARGS__) == 0)
+#define claim_nonzero(...) MLX5_ASSERT((__VA_ARGS__) != 0)
 
 #else /* RTE_LIBRTE_MLX5_DEBUG */
 
 #define DEBUG(...) (void)0
+#define MLX5_ASSERT(exp) RTE_ASSERT(exp)
 #define claim_zero(...) (__VA_ARGS__)
 #define claim_nonzero(...) (__VA_ARGS__)
 
drivers/common/mlx5/mlx5_devx_cmds.c
index b0803ac..d960bc9 100644
@@ -1126,11 +1126,11 @@ mlx5_devx_cmd_flow_dump(void *fdb_domain __rte_unused,
                if (ret)
                        return ret;
        }
-       assert(rx_domain);
+       MLX5_ASSERT(rx_domain);
        ret = mlx5_glue->dr_dump_domain(file, rx_domain);
        if (ret)
                return ret;
-       assert(tx_domain);
+       MLX5_ASSERT(tx_domain);
        ret = mlx5_glue->dr_dump_domain(file, tx_domain);
 #else
        ret = ENOTSUP;
@@ -1400,7 +1400,7 @@ mlx5_devx_cmd_create_qp(struct ibv_context *ctx,
                MLX5_SET(qpc, qpc, log_page_size, attr->log_page_size -
                         MLX5_ADAPTER_PAGE_SHIFT);
                if (attr->sq_size) {
-                       RTE_ASSERT(RTE_IS_POWER_OF_2(attr->sq_size));
+                       MLX5_ASSERT(RTE_IS_POWER_OF_2(attr->sq_size));
                        MLX5_SET(qpc, qpc, cqn_snd, attr->cqn);
                        MLX5_SET(qpc, qpc, log_sq_size,
                                 rte_log2_u32(attr->sq_size));
@@ -1408,7 +1408,7 @@ mlx5_devx_cmd_create_qp(struct ibv_context *ctx,
                        MLX5_SET(qpc, qpc, no_sq, 1);
                }
                if (attr->rq_size) {
-                       RTE_ASSERT(RTE_IS_POWER_OF_2(attr->rq_size));
+                       MLX5_ASSERT(RTE_IS_POWER_OF_2(attr->rq_size));
                        MLX5_SET(qpc, qpc, cqn_rcv, attr->cqn);
                        MLX5_SET(qpc, qpc, log_rq_stride, attr->log_rq_stride -
                                 MLX5_LOG_RQ_STRIDE_SHIFT);
drivers/common/mlx5/mlx5_nl.c
index 6eb91be..549e787 100644
@@ -670,8 +670,10 @@ mlx5_nl_mac_addr_add(int nlsk_fd, unsigned int iface_idx,
        int ret;
 
        ret = mlx5_nl_mac_addr_modify(nlsk_fd, iface_idx, mac, 1);
-       if (!ret)
+       if (!ret) {
+               MLX5_ASSERT((size_t)(index) < sizeof(mac_own) * CHAR_BIT);
                BITFIELD_SET(mac_own, index);
+       }
        if (ret == -EEXIST)
                return 0;
        return ret;
@@ -698,6 +700,7 @@ int
 mlx5_nl_mac_addr_remove(int nlsk_fd, unsigned int iface_idx, uint64_t *mac_own,
                        struct rte_ether_addr *mac, uint32_t index)
 {
+       MLX5_ASSERT((size_t)(index) < sizeof(mac_own) * CHAR_BIT);
        BITFIELD_RESET(mac_own, index);
        return mlx5_nl_mac_addr_modify(nlsk_fd, iface_idx, mac, 0);
 }
@@ -769,6 +772,7 @@ mlx5_nl_mac_addr_flush(int nlsk_fd, unsigned int iface_idx,
        for (i = n - 1; i >= 0; --i) {
                struct rte_ether_addr *m = &mac_addrs[i];
 
+               MLX5_ASSERT((size_t)(i) < sizeof(mac_own) * CHAR_BIT);
                if (BITFIELD_ISSET(mac_own, i))
                        mlx5_nl_mac_addr_remove(nlsk_fd, iface_idx, mac_own, m,
                                                i);
@@ -812,7 +816,7 @@ mlx5_nl_device_flags(int nlsk_fd, unsigned int iface_idx, uint32_t flags,
        uint32_t sn = MLX5_NL_SN_GENERATE;
        int ret;
 
-       assert(!(flags & ~(IFF_PROMISC | IFF_ALLMULTI)));
+       MLX5_ASSERT(!(flags & ~(IFF_PROMISC | IFF_ALLMULTI)));
        if (nlsk_fd < 0)
                return 0;
        ret = mlx5_nl_send(nlsk_fd, &req.hdr, sn);
@@ -1182,7 +1186,7 @@ mlx5_nl_switch_info_cb(struct nlmsghdr *nh, void *arg)
                /* We have some E-Switch configuration. */
                mlx5_nl_check_switch_info(num_vf_set, &info);
        }
-       assert(!(info.master && info.representor));
+       MLX5_ASSERT(!(info.master && info.representor));
        memcpy(arg, &info, sizeof(info));
        return 0;
 error:
@@ -1375,7 +1379,7 @@ mlx5_nl_vlan_vmwa_create(struct mlx5_nl_vlan_vmwa_context *vmwa,
        nl_attr_put(nlh, IFLA_VLAN_ID, &tag, sizeof(tag));
        nl_attr_nest_end(nlh, na_vlan);
        nl_attr_nest_end(nlh, na_info);
-       assert(sizeof(buf) >= nlh->nlmsg_len);
+       MLX5_ASSERT(sizeof(buf) >= nlh->nlmsg_len);
        ret = mlx5_nl_send(vmwa->nl_socket, nlh, sn);
        if (ret >= 0)
                ret = mlx5_nl_recv(vmwa->nl_socket, sn, NULL, NULL);
drivers/common/mlx5/mlx5_prm.h
index b48cd0a..15940c4 100644
@@ -6,7 +6,6 @@
 #ifndef RTE_PMD_MLX5_PRM_H_
 #define RTE_PMD_MLX5_PRM_H_
 
-#include <assert.h>
 /* Verbs header. */
 /* ISO C doesn't support unnamed structs/unions, disabling -pedantic. */
 #ifdef PEDANTIC
@@ -548,7 +547,7 @@ typedef uint8_t u8;
 
 #define MLX5_SET64(typ, p, fld, v) \
        do { \
-               assert(__mlx5_bit_sz(typ, fld) == 64); \
+               MLX5_ASSERT(__mlx5_bit_sz(typ, fld) == 64); \
                *((__be64 *)(p) + __mlx5_64_off(typ, fld)) = \
                        rte_cpu_to_be_64(v); \
        } while (0)
drivers/net/mlx5/mlx5.c
index 33c0c82..f80e403 100644
@@ -6,7 +6,6 @@
 #include <stddef.h>
 #include <unistd.h>
 #include <string.h>
-#include <assert.h>
 #include <stdint.h>
 #include <stdlib.h>
 #include <errno.h>
@@ -297,7 +296,7 @@ mlx5_flow_id_release(struct mlx5_flow_id_pool *pool, uint32_t id)
        if (pool->curr == pool->last) {
                size = pool->curr - pool->free_arr;
                size2 = size * MLX5_ID_GENERATION_ARRAY_FACTOR;
-               assert(size2 > size);
+               MLX5_ASSERT(size2 > size);
                mem = rte_malloc("", size2 * sizeof(uint32_t), 0);
                if (!mem) {
                        DRV_LOG(ERR, "can't allocate mem for id pool");
@@ -443,7 +442,7 @@ mlx5_config_doorbell_mapping_env(const struct mlx5_dev_config *config)
        char *env;
        int value;
 
-       assert(rte_eal_process_type() == RTE_PROC_PRIMARY);
+       MLX5_ASSERT(rte_eal_process_type() == RTE_PROC_PRIMARY);
        /* Get environment variable to store. */
        env = getenv(MLX5_SHUT_UP_BF);
        value = env ? !!strcmp(env, "0") : MLX5_ARG_UNSET;
@@ -458,7 +457,7 @@ mlx5_config_doorbell_mapping_env(const struct mlx5_dev_config *config)
 static void
 mlx5_restore_doorbell_mapping_env(int value)
 {
-       assert(rte_eal_process_type() == RTE_PROC_PRIMARY);
+       MLX5_ASSERT(rte_eal_process_type() == RTE_PROC_PRIMARY);
        /* Restore the original environment variable state. */
        if (value == MLX5_ARG_UNSET)
                unsetenv(MLX5_SHUT_UP_BF);
@@ -498,9 +497,9 @@ mlx5_alloc_shared_ibctx(const struct mlx5_dev_spawn_data *spawn,
        struct mlx5_devx_tis_attr tis_attr = { 0 };
 #endif
 
-       assert(spawn);
+       MLX5_ASSERT(spawn);
        /* Secondary process should not create the shared context. */
-       assert(rte_eal_process_type() == RTE_PROC_PRIMARY);
+       MLX5_ASSERT(rte_eal_process_type() == RTE_PROC_PRIMARY);
        pthread_mutex_lock(&mlx5_ibv_list_mutex);
        /* Search for IB context by device name. */
        LIST_FOREACH(sh, &mlx5_ibv_list, next) {
@@ -510,7 +509,7 @@ mlx5_alloc_shared_ibctx(const struct mlx5_dev_spawn_data *spawn,
                }
        }
        /* No device found, we have to create new shared context. */
-       assert(spawn->max_port);
+       MLX5_ASSERT(spawn->max_port);
        sh = rte_zmalloc("ethdev shared ib context",
                         sizeof(struct mlx5_ibv_shared) +
                         spawn->max_port *
@@ -633,7 +632,7 @@ exit:
        return sh;
 error:
        pthread_mutex_unlock(&mlx5_ibv_list_mutex);
-       assert(sh);
+       MLX5_ASSERT(sh);
        if (sh->tis)
                claim_zero(mlx5_devx_cmd_destroy(sh->tis));
        if (sh->td)
@@ -645,7 +644,7 @@ error:
        if (sh->flow_id_pool)
                mlx5_flow_id_pool_release(sh->flow_id_pool);
        rte_free(sh);
-       assert(err > 0);
+       MLX5_ASSERT(err > 0);
        rte_errno = err;
        return NULL;
 }
@@ -668,16 +667,16 @@ mlx5_free_shared_ibctx(struct mlx5_ibv_shared *sh)
        LIST_FOREACH(lctx, &mlx5_ibv_list, next)
                if (lctx == sh)
                        break;
-       assert(lctx);
+       MLX5_ASSERT(lctx);
        if (lctx != sh) {
                DRV_LOG(ERR, "Freeing non-existing shared IB context");
                goto exit;
        }
 #endif
-       assert(sh);
-       assert(sh->refcnt);
+       MLX5_ASSERT(sh);
+       MLX5_ASSERT(sh->refcnt);
        /* Secondary process should not free the shared context. */
-       assert(rte_eal_process_type() == RTE_PROC_PRIMARY);
+       MLX5_ASSERT(rte_eal_process_type() == RTE_PROC_PRIMARY);
        if (--sh->refcnt)
                goto exit;
        /* Release created Memory Regions. */
@@ -693,7 +692,7 @@ mlx5_free_shared_ibctx(struct mlx5_ibv_shared *sh)
         *  Only primary process handles async device events.
         **/
        mlx5_flow_counters_mng_close(sh);
-       assert(!sh->intr_cnt);
+       MLX5_ASSERT(!sh->intr_cnt);
        if (sh->intr_cnt)
                mlx5_intr_callback_unregister
                        (&sh->intr_handle, mlx5_dev_interrupt_handler, sh);
@@ -749,7 +748,7 @@ mlx5_free_table_hash_list(struct mlx5_priv *priv)
        if (pos) {
                tbl_data = container_of(pos, struct mlx5_flow_tbl_data_entry,
                                        entry);
-               assert(tbl_data);
+               MLX5_ASSERT(tbl_data);
                mlx5_hlist_remove(sh->flow_tbls, pos);
                rte_free(tbl_data);
        }
@@ -758,7 +757,7 @@ mlx5_free_table_hash_list(struct mlx5_priv *priv)
        if (pos) {
                tbl_data = container_of(pos, struct mlx5_flow_tbl_data_entry,
                                        entry);
-               assert(tbl_data);
+               MLX5_ASSERT(tbl_data);
                mlx5_hlist_remove(sh->flow_tbls, pos);
                rte_free(tbl_data);
        }
@@ -768,7 +767,7 @@ mlx5_free_table_hash_list(struct mlx5_priv *priv)
        if (pos) {
                tbl_data = container_of(pos, struct mlx5_flow_tbl_data_entry,
                                        entry);
-               assert(tbl_data);
+               MLX5_ASSERT(tbl_data);
                mlx5_hlist_remove(sh->flow_tbls, pos);
                rte_free(tbl_data);
        }
@@ -792,7 +791,7 @@ mlx5_alloc_table_hash_list(struct mlx5_priv *priv)
        char s[MLX5_HLIST_NAMESIZE];
        int err = 0;
 
-       assert(sh);
+       MLX5_ASSERT(sh);
        snprintf(s, sizeof(s), "%s_flow_table", priv->sh->ibdev_name);
        sh->flow_tbls = mlx5_hlist_create(s, MLX5_FLOW_TABLE_HLIST_ARRAY_SIZE);
        if (!sh->flow_tbls) {
@@ -983,9 +982,9 @@ mlx5_free_shared_dr(struct mlx5_priv *priv)
                return;
        priv->dr_shared = 0;
        sh = priv->sh;
-       assert(sh);
+       MLX5_ASSERT(sh);
 #ifdef HAVE_MLX5DV_DR
-       assert(sh->dv_refcnt);
+       MLX5_ASSERT(sh->dv_refcnt);
        if (sh->dv_refcnt && --sh->dv_refcnt)
                return;
        if (sh->rx_domain) {
@@ -1120,7 +1119,7 @@ mlx5_alloc_verbs_buf(size_t size, void *data)
 
                socket = ctrl->socket;
        }
-       assert(data != NULL);
+       MLX5_ASSERT(data != NULL);
        ret = rte_malloc_socket(__func__, size, alignment, socket);
        if (!ret && size)
                rte_errno = ENOMEM;
@@ -1138,7 +1137,7 @@ mlx5_alloc_verbs_buf(size_t size, void *data)
 static void
 mlx5_free_verbs_buf(void *ptr, void *data __rte_unused)
 {
-       assert(data != NULL);
+       MLX5_ASSERT(data != NULL);
        rte_free(ptr);
 }
 
@@ -1157,7 +1156,7 @@ int
 mlx5_udp_tunnel_port_add(struct rte_eth_dev *dev __rte_unused,
                         struct rte_eth_udp_tunnel *udp_tunnel)
 {
-       assert(udp_tunnel != NULL);
+       MLX5_ASSERT(udp_tunnel != NULL);
        if (udp_tunnel->prot_type == RTE_TUNNEL_TYPE_VXLAN &&
            udp_tunnel->udp_port == 4789)
                return 0;
@@ -1674,7 +1673,7 @@ mlx5_init_once(void)
        if (mlx5_init_shared_data())
                return -rte_errno;
        sd = mlx5_shared_data;
-       assert(sd);
+       MLX5_ASSERT(sd);
        rte_spinlock_lock(&sd->lock);
        switch (rte_eal_process_type()) {
        case RTE_PROC_PRIMARY:
@@ -1856,7 +1855,7 @@ mlx5_set_metadata_mask(struct rte_eth_dev *dev)
        default:
                meta = 0;
                mark = 0;
-               assert(false);
+               MLX5_ASSERT(false);
                break;
        }
        if (sh->dv_mark_mask && sh->dv_mark_mask != mark)
@@ -1949,7 +1948,7 @@ mlx5_get_dbr(struct rte_eth_dev *dev, struct mlx5_devx_dbr_page **dbr_page)
                ; /* Empty. */
        /* Find the first clear bit. */
        j = rte_bsf64(~page->dbr_bitmap[i]);
-       assert(i < (MLX5_DBR_PER_PAGE / 64));
+       MLX5_ASSERT(i < (MLX5_DBR_PER_PAGE / 64));
        page->dbr_bitmap[i] |= (1 << j);
        page->dbr_count++;
        *dbr_page = page;
@@ -2042,7 +2041,7 @@ mlx5_dev_check_sibling_config(struct mlx5_priv *priv,
        struct mlx5_dev_config *sh_conf = NULL;
        uint16_t port_id;
 
-       assert(sh);
+       MLX5_ASSERT(sh);
        /* Nothing to compare for the single/first device. */
        if (sh->refcnt == 1)
                return 0;
@@ -2623,7 +2622,7 @@ mlx5_dev_spawn(struct rte_device *dpdk_dev,
         * is permanent throughout the lifetime of device. So, we may store
         * the ifindex here and use the cached value further.
         */
-       assert(spawn->ifindex);
+       MLX5_ASSERT(spawn->ifindex);
        priv->if_index = spawn->ifindex;
        eth_dev->data->dev_private = priv;
        priv->dev_data = eth_dev->data;
@@ -2806,7 +2805,7 @@ error:
        }
        if (sh)
                mlx5_free_shared_ibctx(sh);
-       assert(err > 0);
+       MLX5_ASSERT(err > 0);
        rte_errno = err;
        return NULL;
 }
@@ -2909,7 +2908,7 @@ mlx5_device_bond_pci_match(const struct ibv_device *ibv_dev,
        if (!file)
                return -1;
        /* Use safe format to check maximal buffer length. */
-       assert(atol(RTE_STR(IF_NAMESIZE)) == IF_NAMESIZE);
+       MLX5_ASSERT(atol(RTE_STR(IF_NAMESIZE)) == IF_NAMESIZE);
        while (fscanf(file, "%" RTE_STR(IF_NAMESIZE) "s", ifname) == 1) {
                char tmp_str[IF_NAMESIZE + 32];
                struct rte_pci_addr pci_addr;
@@ -3007,7 +3006,7 @@ mlx5_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
                        strerror(rte_errno));
                return -rte_errno;
        }
-       assert(pci_drv == &mlx5_driver);
+       MLX5_ASSERT(pci_drv == &mlx5_driver);
        errno = 0;
        ibv_list = mlx5_glue->get_device_list(&ret);
        if (!ibv_list) {
@@ -3128,10 +3127,10 @@ mlx5_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
                 * it may be E-Switch master device and representors.
                 * We have to perform identification trough the ports.
                 */
-               assert(nl_rdma >= 0);
-               assert(ns == 0);
-               assert(nd == 1);
-               assert(np);
+               MLX5_ASSERT(nl_rdma >= 0);
+               MLX5_ASSERT(ns == 0);
+               MLX5_ASSERT(nd == 1);
+               MLX5_ASSERT(np);
                for (i = 1; i <= np; ++i) {
                        list[ns].max_port = np;
                        list[ns].ibv_port = i;
@@ -3306,7 +3305,7 @@ mlx5_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
                        goto exit;
                }
        }
-       assert(ns);
+       MLX5_ASSERT(ns);
        /*
         * Sort list to probe devices in natural order for users convenience
         * (i.e. master first, then representors from lowest to highest ID).
@@ -3401,7 +3400,7 @@ exit:
                close(nl_route);
        if (list)
                rte_free(list);
-       assert(ibv_list);
+       MLX5_ASSERT(ibv_list);
        mlx5_glue->free_device_list(ibv_list);
        return ret;
 }
drivers/net/mlx5/mlx5_ethdev.c
index b765636..86923aa 100644
@@ -4,7 +4,6 @@
  */
 
 #include <stddef.h>
-#include <assert.h>
 #include <inttypes.h>
 #include <unistd.h>
 #include <stdbool.h>
@@ -141,7 +140,7 @@ mlx5_get_master_ifname(const char *ibdev_path, char (*ifname)[IF_NAMESIZE])
        unsigned int dev_port_prev = ~0u;
        char match[IF_NAMESIZE] = "";
 
-       assert(ibdev_path);
+       MLX5_ASSERT(ibdev_path);
        {
                MKSTR(path, "%s/device/net", ibdev_path);
 
@@ -226,8 +225,8 @@ mlx5_get_ifname(const struct rte_eth_dev *dev, char (*ifname)[IF_NAMESIZE])
        struct mlx5_priv *priv = dev->data->dev_private;
        unsigned int ifindex;
 
-       assert(priv);
-       assert(priv->sh);
+       MLX5_ASSERT(priv);
+       MLX5_ASSERT(priv->sh);
        ifindex = mlx5_ifindex(dev);
        if (!ifindex) {
                if (!priv->representor)
@@ -257,8 +256,8 @@ mlx5_ifindex(const struct rte_eth_dev *dev)
        struct mlx5_priv *priv = dev->data->dev_private;
        unsigned int ifindex;
 
-       assert(priv);
-       assert(priv->if_index);
+       MLX5_ASSERT(priv);
+       MLX5_ASSERT(priv->if_index);
        ifindex = priv->if_index;
        if (!ifindex)
                rte_errno = ENXIO;
@@ -578,7 +577,7 @@ mlx5_set_txlimit_params(struct rte_eth_dev *dev, struct rte_eth_dev_info *info)
        inlen = (config->txq_inline_max == MLX5_ARG_UNSET) ?
                MLX5_SEND_DEF_INLINE_LEN :
                (unsigned int)config->txq_inline_max;
-       assert(config->txq_inline_min >= 0);
+       MLX5_ASSERT(config->txq_inline_min >= 0);
        inlen = RTE_MAX(inlen, (unsigned int)config->txq_inline_min);
        inlen = RTE_MIN(inlen, MLX5_WQE_SIZE_MAX +
                               MLX5_ESEG_MIN_INLINE_SIZE -
@@ -657,7 +656,7 @@ mlx5_dev_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *info)
                            priv->pf_bond > MLX5_PORT_ID_BONDING_PF_MASK) {
                                DRV_LOG(ERR, "can't update switch port ID"
                                             " for bonding device");
-                               assert(false);
+                               MLX5_ASSERT(false);
                                return -ENODEV;
                        }
                        info->switch_info.port_id |=
@@ -795,7 +794,7 @@ mlx5_find_master_dev(struct rte_eth_dev *dev)
 
        priv = dev->data->dev_private;
        domain_id = priv->domain_id;
-       assert(priv->representor);
+       MLX5_ASSERT(priv->representor);
        MLX5_ETH_FOREACH_DEV(port_id, priv->pci_dev) {
                struct mlx5_priv *opriv =
                        rte_eth_devices[port_id].data->dev_private;
@@ -1234,7 +1233,7 @@ mlx5_dev_interrupt_device_fatal(struct mlx5_ibv_shared *sh)
                        continue;
                }
                dev = &rte_eth_devices[sh->port[i].ih_port_id];
-               assert(dev);
+               MLX5_ASSERT(dev);
                if (dev->data->dev_conf.intr_conf.rmv)
                        _rte_eth_dev_callback_process
                                (dev, RTE_ETH_EVENT_INTR_RMV, NULL);
@@ -1273,7 +1272,7 @@ mlx5_dev_interrupt_handler(void *cb_arg)
                        mlx5_dev_interrupt_device_fatal(sh);
                        continue;
                }
-               assert(tmp && (tmp <= sh->max_port));
+               MLX5_ASSERT(tmp && (tmp <= sh->max_port));
                if (!tmp) {
                        /* Unsupported devive level event. */
                        mlx5_glue->ack_async_event(&event);
@@ -1303,7 +1302,7 @@ mlx5_dev_interrupt_handler(void *cb_arg)
                /* Retrieve ethernet device descriptor. */
                tmp = sh->port[tmp - 1].ih_port_id;
                dev = &rte_eth_devices[tmp];
-               assert(dev);
+               MLX5_ASSERT(dev);
                if ((event.event_type == IBV_EVENT_PORT_ACTIVE ||
                     event.event_type == IBV_EVENT_PORT_ERR) &&
                        dev->data->dev_conf.intr_conf.lsc) {
@@ -1358,7 +1357,7 @@ mlx5_intr_callback_unregister(const struct rte_intr_handle *handle,
                if (ret != -EAGAIN) {
                        DRV_LOG(INFO, "failed to unregister interrupt"
                                      " handler (error: %d)", ret);
-                       assert(false);
+                       MLX5_ASSERT(false);
                        return;
                }
                if (twait) {
@@ -1379,7 +1378,7 @@ mlx5_intr_callback_unregister(const struct rte_intr_handle *handle,
                         * on first iteration.
                         */
                        twait = rte_get_timer_hz();
-                       assert(twait);
+                       MLX5_ASSERT(twait);
                }
                /*
                 * Timeout elapsed, show message (once a second) and retry.
@@ -1443,14 +1442,14 @@ mlx5_dev_shared_handler_uninstall(struct rte_eth_dev *dev)
        if (rte_eal_process_type() != RTE_PROC_PRIMARY)
                return;
        pthread_mutex_lock(&sh->intr_mutex);
-       assert(priv->ibv_port);
-       assert(priv->ibv_port <= sh->max_port);
-       assert(dev->data->port_id < RTE_MAX_ETHPORTS);
+       MLX5_ASSERT(priv->ibv_port);
+       MLX5_ASSERT(priv->ibv_port <= sh->max_port);
+       MLX5_ASSERT(dev->data->port_id < RTE_MAX_ETHPORTS);
        if (sh->port[priv->ibv_port - 1].ih_port_id >= RTE_MAX_ETHPORTS)
                goto exit;
-       assert(sh->port[priv->ibv_port - 1].ih_port_id ==
+       MLX5_ASSERT(sh->port[priv->ibv_port - 1].ih_port_id ==
                                        (uint32_t)dev->data->port_id);
-       assert(sh->intr_cnt);
+       MLX5_ASSERT(sh->intr_cnt);
        sh->port[priv->ibv_port - 1].ih_port_id = RTE_MAX_ETHPORTS;
        if (!sh->intr_cnt || --sh->intr_cnt)
                goto exit;
@@ -1479,13 +1478,13 @@ mlx5_dev_shared_handler_devx_uninstall(struct rte_eth_dev *dev)
        if (rte_eal_process_type() != RTE_PROC_PRIMARY)
                return;
        pthread_mutex_lock(&sh->intr_mutex);
-       assert(priv->ibv_port);
-       assert(priv->ibv_port <= sh->max_port);
-       assert(dev->data->port_id < RTE_MAX_ETHPORTS);
+       MLX5_ASSERT(priv->ibv_port);
+       MLX5_ASSERT(priv->ibv_port <= sh->max_port);
+       MLX5_ASSERT(dev->data->port_id < RTE_MAX_ETHPORTS);
        if (sh->port[priv->ibv_port - 1].devx_ih_port_id >= RTE_MAX_ETHPORTS)
                goto exit;
-       assert(sh->port[priv->ibv_port - 1].devx_ih_port_id ==
-                                       (uint32_t)dev->data->port_id);
+       MLX5_ASSERT(sh->port[priv->ibv_port - 1].devx_ih_port_id ==
+                   (uint32_t)dev->data->port_id);
        sh->port[priv->ibv_port - 1].devx_ih_port_id = RTE_MAX_ETHPORTS;
        if (!sh->devx_intr_cnt || --sh->devx_intr_cnt)
                goto exit;
@@ -1523,12 +1522,12 @@ mlx5_dev_shared_handler_install(struct rte_eth_dev *dev)
        if (rte_eal_process_type() != RTE_PROC_PRIMARY)
                return;
        pthread_mutex_lock(&sh->intr_mutex);
-       assert(priv->ibv_port);
-       assert(priv->ibv_port <= sh->max_port);
-       assert(dev->data->port_id < RTE_MAX_ETHPORTS);
+       MLX5_ASSERT(priv->ibv_port);
+       MLX5_ASSERT(priv->ibv_port <= sh->max_port);
+       MLX5_ASSERT(dev->data->port_id < RTE_MAX_ETHPORTS);
        if (sh->port[priv->ibv_port - 1].ih_port_id < RTE_MAX_ETHPORTS) {
                /* The handler is already installed for this port. */
-               assert(sh->intr_cnt);
+               MLX5_ASSERT(sh->intr_cnt);
                goto exit;
        }
        if (sh->intr_cnt) {
@@ -1538,7 +1537,7 @@ mlx5_dev_shared_handler_install(struct rte_eth_dev *dev)
                goto exit;
        }
        /* No shared handler installed. */
-       assert(sh->ctx->async_fd > 0);
+       MLX5_ASSERT(sh->ctx->async_fd > 0);
        flags = fcntl(sh->ctx->async_fd, F_GETFL);
        ret = fcntl(sh->ctx->async_fd, F_SETFL, flags | O_NONBLOCK);
        if (ret) {
@@ -1577,12 +1576,12 @@ mlx5_dev_shared_handler_devx_install(struct rte_eth_dev *dev)
        if (rte_eal_process_type() != RTE_PROC_PRIMARY)
                return;
        pthread_mutex_lock(&sh->intr_mutex);
-       assert(priv->ibv_port);
-       assert(priv->ibv_port <= sh->max_port);
-       assert(dev->data->port_id < RTE_MAX_ETHPORTS);
+       MLX5_ASSERT(priv->ibv_port);
+       MLX5_ASSERT(priv->ibv_port <= sh->max_port);
+       MLX5_ASSERT(dev->data->port_id < RTE_MAX_ETHPORTS);
        if (sh->port[priv->ibv_port - 1].devx_ih_port_id < RTE_MAX_ETHPORTS) {
                /* The handler is already installed for this port. */
-               assert(sh->devx_intr_cnt);
+               MLX5_ASSERT(sh->devx_intr_cnt);
                goto exit;
        }
        if (sh->devx_intr_cnt) {
@@ -1713,7 +1712,7 @@ mlx5_select_rx_function(struct rte_eth_dev *dev)
 {
        eth_rx_burst_t rx_pkt_burst = mlx5_rx_burst;
 
-       assert(dev != NULL);
+       MLX5_ASSERT(dev != NULL);
        if (mlx5_check_vec_rx_support(dev) > 0) {
                rx_pkt_burst = mlx5_rx_burst_vec;
                DRV_LOG(DEBUG, "port %u selected Rx vectorized function",
@@ -1880,7 +1879,7 @@ mlx5_sysfs_switch_info(unsigned int ifindex, struct mlx5_switch_info *info)
                mlx5_sysfs_check_switch_info(device_dir, &data);
        }
        *info = data;
-       assert(!(data.master && data.representor));
+       MLX5_ASSERT(!(data.master && data.representor));
        if (data.master && data.representor) {
                DRV_LOG(ERR, "ifindex %u device is recognized as master"
                             " and as representor", ifindex);
drivers/net/mlx5/mlx5_flow.c
index adba168..144e07c 100644
@@ -402,7 +402,7 @@ mlx5_flow_get_reg_id(struct rte_eth_dev *dev,
                        return priv->mtr_color_reg != REG_C_2 ? REG_C_2 :
                               REG_C_3;
        case MLX5_MTR_COLOR:
-               RTE_ASSERT(priv->mtr_color_reg != REG_NONE);
+               MLX5_ASSERT(priv->mtr_color_reg != REG_NONE);
                return priv->mtr_color_reg;
        case MLX5_COPY_MARK:
                /*
@@ -447,7 +447,7 @@ mlx5_flow_get_reg_id(struct rte_eth_dev *dev,
                }
                return config->flow_mreg_c[id + start_reg - REG_C_0];
        }
-       assert(false);
+       MLX5_ASSERT(false);
        return rte_flow_error_set(error, EINVAL,
                                  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
                                  NULL, "invalid feature name");
@@ -606,7 +606,7 @@ mlx5_flow_item_acceptable(const struct rte_flow_item *item,
 {
        unsigned int i;
 
-       assert(nic_mask);
+       MLX5_ASSERT(nic_mask);
        for (i = 0; i < size; ++i)
                if ((nic_mask[i] | mask[i]) != nic_mask[i])
                        return rte_flow_error_set(error, ENOTSUP,
@@ -795,7 +795,7 @@ flow_drv_rxq_flags_trim(struct rte_eth_dev *dev, struct mlx5_flow *dev_flow)
        const int tunnel = !!(dev_flow->layers & MLX5_FLOW_LAYER_TUNNEL);
        unsigned int i;
 
-       assert(dev->data->dev_started);
+       MLX5_ASSERT(dev->data->dev_started);
        for (i = 0; i != flow->rss.queue_num; ++i) {
                int idx = (*flow->rss.queue)[i];
                struct mlx5_rxq_ctrl *rxq_ctrl =
@@ -1783,7 +1783,7 @@ mlx5_flow_validate_item_tcp(const struct rte_flow_item *item,
                                      MLX5_FLOW_LAYER_OUTER_L4;
        int ret;
 
-       assert(flow_mask);
+       MLX5_ASSERT(flow_mask);
        if (target_protocol != 0xff && target_protocol != IPPROTO_TCP)
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ITEM, item,
@@ -2314,7 +2314,7 @@ flow_qrss_get_id(struct rte_eth_dev *dev)
        ret = mlx5_flow_id_get(priv->qrss_id_pool, &qrss_id);
        if (ret)
                return 0;
-       assert(qrss_id);
+       MLX5_ASSERT(qrss_id);
        return qrss_id;
 }
 
@@ -2522,7 +2522,7 @@ flow_drv_prepare(const struct rte_flow *flow,
        const struct mlx5_flow_driver_ops *fops;
        enum mlx5_flow_drv_type type = flow->drv_type;
 
-       assert(type > MLX5_FLOW_TYPE_MIN && type < MLX5_FLOW_TYPE_MAX);
+       MLX5_ASSERT(type > MLX5_FLOW_TYPE_MIN && type < MLX5_FLOW_TYPE_MAX);
        fops = flow_get_drv_ops(type);
        return fops->prepare(attr, items, actions, error);
 }
@@ -2566,7 +2566,7 @@ flow_drv_translate(struct rte_eth_dev *dev, struct mlx5_flow *dev_flow,
        const struct mlx5_flow_driver_ops *fops;
        enum mlx5_flow_drv_type type = dev_flow->flow->drv_type;
 
-       assert(type > MLX5_FLOW_TYPE_MIN && type < MLX5_FLOW_TYPE_MAX);
+       MLX5_ASSERT(type > MLX5_FLOW_TYPE_MIN && type < MLX5_FLOW_TYPE_MAX);
        fops = flow_get_drv_ops(type);
        return fops->translate(dev, dev_flow, attr, items, actions, error);
 }
@@ -2593,7 +2593,7 @@ flow_drv_apply(struct rte_eth_dev *dev, struct rte_flow *flow,
        const struct mlx5_flow_driver_ops *fops;
        enum mlx5_flow_drv_type type = flow->drv_type;
 
-       assert(type > MLX5_FLOW_TYPE_MIN && type < MLX5_FLOW_TYPE_MAX);
+       MLX5_ASSERT(type > MLX5_FLOW_TYPE_MIN && type < MLX5_FLOW_TYPE_MAX);
        fops = flow_get_drv_ops(type);
        return fops->apply(dev, flow, error);
 }
@@ -2615,7 +2615,7 @@ flow_drv_remove(struct rte_eth_dev *dev, struct rte_flow *flow)
        const struct mlx5_flow_driver_ops *fops;
        enum mlx5_flow_drv_type type = flow->drv_type;
 
-       assert(type > MLX5_FLOW_TYPE_MIN && type < MLX5_FLOW_TYPE_MAX);
+       MLX5_ASSERT(type > MLX5_FLOW_TYPE_MIN && type < MLX5_FLOW_TYPE_MAX);
        fops = flow_get_drv_ops(type);
        fops->remove(dev, flow);
 }
@@ -2637,7 +2637,7 @@ flow_drv_destroy(struct rte_eth_dev *dev, struct rte_flow *flow)
        enum mlx5_flow_drv_type type = flow->drv_type;
 
        flow_mreg_split_qrss_release(dev, flow);
-       assert(type > MLX5_FLOW_TYPE_MIN && type < MLX5_FLOW_TYPE_MAX);
+       MLX5_ASSERT(type > MLX5_FLOW_TYPE_MIN && type < MLX5_FLOW_TYPE_MAX);
        fops = flow_get_drv_ops(type);
        fops->destroy(dev, flow);
 }
@@ -2675,7 +2675,7 @@ mlx5_flow_validate(struct rte_eth_dev *dev,
 static const struct rte_flow_item *
 find_port_id_item(const struct rte_flow_item *item)
 {
-       assert(item);
+       MLX5_ASSERT(item);
        for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
                if (item->type == RTE_FLOW_ITEM_TYPE_PORT_ID)
                        return item;
@@ -2777,7 +2777,7 @@ flow_check_meter_action(const struct rte_flow_action actions[], uint32_t *mtr)
 {
        int actions_n = 0;
 
-       assert(mtr);
+       MLX5_ASSERT(mtr);
        *mtr = 0;
        for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
                switch (actions->type) {
@@ -2947,13 +2947,14 @@ flow_mreg_add_copy_action(struct rte_eth_dev *dev, uint32_t mark_id,
                return NULL;
        cp_mreg.src = ret;
        /* Check if already registered. */
-       assert(priv->mreg_cp_tbl);
+       MLX5_ASSERT(priv->mreg_cp_tbl);
        mcp_res = (void *)mlx5_hlist_lookup(priv->mreg_cp_tbl, mark_id);
        if (mcp_res) {
                /* For non-default rule. */
                if (mark_id != MLX5_DEFAULT_COPY_ID)
                        mcp_res->refcnt++;
-               assert(mark_id != MLX5_DEFAULT_COPY_ID || mcp_res->refcnt == 1);
+               MLX5_ASSERT(mark_id != MLX5_DEFAULT_COPY_ID ||
+                           mcp_res->refcnt == 1);
                return mcp_res;
        }
        /* Provide the full width of FLAG specific value. */
@@ -3021,7 +3022,7 @@ flow_mreg_add_copy_action(struct rte_eth_dev *dev, uint32_t mark_id,
        mcp_res->hlist_ent.key = mark_id;
        ret = mlx5_hlist_insert(priv->mreg_cp_tbl,
                                &mcp_res->hlist_ent);
-       assert(!ret);
+       MLX5_ASSERT(!ret);
        if (ret)
                goto error;
        return mcp_res;
@@ -3050,7 +3051,7 @@ flow_mreg_del_copy_action(struct rte_eth_dev *dev,
        if (!mcp_res || !priv->mreg_cp_tbl)
                return;
        if (flow->copy_applied) {
-               assert(mcp_res->appcnt);
+               MLX5_ASSERT(mcp_res->appcnt);
                flow->copy_applied = 0;
                --mcp_res->appcnt;
                if (!mcp_res->appcnt)
@@ -3062,7 +3063,7 @@ flow_mreg_del_copy_action(struct rte_eth_dev *dev,
         */
        if (--mcp_res->refcnt)
                return;
-       assert(mcp_res->flow);
+       MLX5_ASSERT(mcp_res->flow);
        flow_list_destroy(dev, NULL, mcp_res->flow);
        mlx5_hlist_remove(priv->mreg_cp_tbl, &mcp_res->hlist_ent);
        rte_free(mcp_res);
@@ -3115,7 +3116,7 @@ flow_mreg_stop_copy_action(struct rte_eth_dev *dev,
 
        if (!mcp_res || !flow->copy_applied)
                return;
-       assert(mcp_res->appcnt);
+       MLX5_ASSERT(mcp_res->appcnt);
        --mcp_res->appcnt;
        flow->copy_applied = 0;
        if (!mcp_res->appcnt)
@@ -3141,7 +3142,7 @@ flow_mreg_del_default_copy_action(struct rte_eth_dev *dev)
                                            MLX5_DEFAULT_COPY_ID);
        if (!mcp_res)
                return;
-       assert(mcp_res->flow);
+       MLX5_ASSERT(mcp_res->flow);
        flow_list_destroy(dev, NULL, mcp_res->flow);
        mlx5_hlist_remove(priv->mreg_cp_tbl, &mcp_res->hlist_ent);
        rte_free(mcp_res);
@@ -3370,7 +3371,7 @@ flow_hairpin_split(struct rte_eth_dev *dev,
        actions_rx++;
        set_tag = (void *)actions_rx;
        set_tag->id = mlx5_flow_get_reg_id(dev, MLX5_HAIRPIN_RX, 0, NULL);
-       assert(set_tag->id > REG_NONE);
+       MLX5_ASSERT(set_tag->id > REG_NONE);
        set_tag->data = *flow_id;
        tag_action->conf = set_tag;
        /* Create Tx item list. */
@@ -3381,7 +3382,7 @@ flow_hairpin_split(struct rte_eth_dev *dev,
        tag_item = (void *)addr;
        tag_item->data = *flow_id;
        tag_item->id = mlx5_flow_get_reg_id(dev, MLX5_HAIRPIN_TX, 0, NULL);
-       assert(set_tag->id > REG_NONE);
+       MLX5_ASSERT(set_tag->id > REG_NONE);
        item->spec = tag_item;
        addr += sizeof(struct mlx5_rte_flow_item_tag);
        tag_item = (void *)addr;
@@ -3849,7 +3850,7 @@ flow_create_split_metadata(struct rte_eth_dev *dev,
                                      external, error);
        if (ret < 0)
                goto exit;
-       assert(dev_flow);
+       MLX5_ASSERT(dev_flow);
        if (qrss) {
                const struct rte_flow_attr q_attr = {
                        .group = MLX5_FLOW_MREG_ACT_TABLE_GROUP,
@@ -3889,7 +3890,7 @@ flow_create_split_metadata(struct rte_eth_dev *dev,
                 */
                if (qrss_id) {
                        /* Not meter subflow. */
-                       assert(!mtr_sfx);
+                       MLX5_ASSERT(!mtr_sfx);
                        /*
                         * Put unique id in prefix flow due to it is destroyed
                         * after suffix flow and id will be freed after there
@@ -3913,7 +3914,7 @@ flow_create_split_metadata(struct rte_eth_dev *dev,
                                              external, error);
                if (ret < 0)
                        goto exit;
-               assert(dev_flow);
+               MLX5_ASSERT(dev_flow);
                dev_flow->hash_fields = hash_fields;
        }
 
@@ -4096,7 +4097,7 @@ flow_create_split_outer(struct rte_eth_dev *dev,
 
        ret = flow_create_split_meter(dev, flow, attr, items,
                                         actions, external, error);
-       assert(ret <= 0);
+       MLX5_ASSERT(ret <= 0);
        return ret;
 }
 
@@ -4190,8 +4191,8 @@ flow_list_create(struct rte_eth_dev *dev, struct mlx5_flows *list,
        flow->drv_type = flow_get_drv_type(dev, attr);
        if (hairpin_id != 0)
                flow->hairpin_flow_id = hairpin_id;
-       assert(flow->drv_type > MLX5_FLOW_TYPE_MIN &&
-              flow->drv_type < MLX5_FLOW_TYPE_MAX);
+       MLX5_ASSERT(flow->drv_type > MLX5_FLOW_TYPE_MIN &&
+                   flow->drv_type < MLX5_FLOW_TYPE_MAX);
        flow->rss.queue = (void *)(flow + 1);
        if (rss) {
                /*
@@ -4211,7 +4212,7 @@ flow_list_create(struct rte_eth_dev *dev, struct mlx5_flows *list,
                                          items, rss->types,
                                          mlx5_support_expansion,
                                          graph_root);
-               assert(ret > 0 &&
+               MLX5_ASSERT(ret > 0 &&
                       (unsigned int)ret < sizeof(expand_buffer.buffer));
        } else {
                buf->entries = 1;
@@ -4279,13 +4280,13 @@ error_before_flow:
                                     hairpin_id);
        return NULL;
 error:
-       assert(flow);
+       MLX5_ASSERT(flow);
        flow_mreg_del_copy_action(dev, flow);
        ret = rte_errno; /* Save rte_errno before cleanup. */
        if (flow->hairpin_flow_id)
                mlx5_flow_id_release(priv->sh->flow_id_pool,
                                     flow->hairpin_flow_id);
-       assert(flow);
+       MLX5_ASSERT(flow);
        flow_drv_destroy(dev, flow);
        rte_free(flow);
        rte_errno = ret; /* Restore rte_errno. */
@@ -4737,7 +4738,7 @@ flow_drv_query(struct rte_eth_dev *dev,
        const struct mlx5_flow_driver_ops *fops;
        enum mlx5_flow_drv_type ftype = flow->drv_type;
 
-       assert(ftype > MLX5_FLOW_TYPE_MIN && ftype < MLX5_FLOW_TYPE_MAX);
+       MLX5_ASSERT(ftype > MLX5_FLOW_TYPE_MIN && ftype < MLX5_FLOW_TYPE_MAX);
        fops = flow_get_drv_ops(ftype);
 
        return fops->query(dev, flow, actions, data, error);
@@ -5002,7 +5003,7 @@ flow_fdir_filter_lookup(struct rte_eth_dev *dev, struct mlx5_fdir *fdir_flow)
        struct mlx5_priv *priv = dev->data->dev_private;
        struct rte_flow *flow = NULL;
 
-       assert(fdir_flow);
+       MLX5_ASSERT(fdir_flow);
        TAILQ_FOREACH(flow, &priv->flows, next) {
                if (flow->fdir && !flow_fdir_cmp(flow->fdir, fdir_flow)) {
                        DRV_LOG(DEBUG, "port %u found FDIR flow %p",
@@ -5051,7 +5052,7 @@ flow_fdir_filter_add(struct rte_eth_dev *dev,
                                NULL);
        if (!flow)
                goto error;
-       assert(!flow->fdir);
+       MLX5_ASSERT(!flow->fdir);
        flow->fdir = fdir_flow;
        DRV_LOG(DEBUG, "port %u created FDIR flow %p",
                dev->data->port_id, (void *)flow);
drivers/net/mlx5/mlx5_flow_dv.c
index 17d6d7c..d51d17d 100644
@@ -200,8 +200,8 @@ mlx5_flow_tunnel_ip_check(const struct rte_flow_item *item __rte_unused,
                          uint8_t next_protocol, uint64_t *item_flags,
                          int *tunnel)
 {
-       assert(item->type == RTE_FLOW_ITEM_TYPE_IPV4 ||
-              item->type == RTE_FLOW_ITEM_TYPE_IPV6);
+       MLX5_ASSERT(item->type == RTE_FLOW_ITEM_TYPE_IPV4 ||
+                   item->type == RTE_FLOW_ITEM_TYPE_IPV6);
        if (next_protocol == IPPROTO_IPIP) {
                *item_flags |= MLX5_FLOW_LAYER_IPIP;
                *tunnel = 1;
@@ -231,7 +231,7 @@ flow_dv_shared_lock(struct rte_eth_dev *dev)
                int ret;
 
                ret = pthread_mutex_lock(&sh->dv_mutex);
-               assert(!ret);
+               MLX5_ASSERT(!ret);
                (void)ret;
        }
 }
@@ -246,7 +246,7 @@ flow_dv_shared_unlock(struct rte_eth_dev *dev)
                int ret;
 
                ret = pthread_mutex_unlock(&sh->dv_mutex);
-               assert(!ret);
+               MLX5_ASSERT(!ret);
                (void)ret;
        }
 }
@@ -310,7 +310,7 @@ flow_dv_fetch_field(const uint8_t *data, uint32_t size)
                ret = rte_be_to_cpu_32(*(const unaligned_uint32_t *)data);
                break;
        default:
-               assert(false);
+               MLX5_ASSERT(false);
                ret = 0;
                break;
        }
@@ -360,8 +360,8 @@ flow_dv_convert_modify_action(struct rte_flow_item *item,
         * The fields should be presented as in big-endian format either.
         * Mask must be always present, it defines the actual field width.
         */
-       assert(item->mask);
-       assert(field->size);
+       MLX5_ASSERT(item->mask);
+       MLX5_ASSERT(field->size);
        do {
                unsigned int size_b;
                unsigned int off_b;
@@ -383,7 +383,7 @@ flow_dv_convert_modify_action(struct rte_flow_item *item,
                off_b = rte_bsf32(mask);
                size_b = sizeof(uint32_t) * CHAR_BIT -
                         off_b - __builtin_clz(mask);
-               assert(size_b);
+               MLX5_ASSERT(size_b);
                size_b = size_b == sizeof(uint32_t) * CHAR_BIT ? 0 : size_b;
                actions[i].action_type = type;
                actions[i].field = field->id;
@@ -392,14 +392,14 @@ flow_dv_convert_modify_action(struct rte_flow_item *item,
                /* Convert entire record to expected big-endian format. */
                actions[i].data0 = rte_cpu_to_be_32(actions[i].data0);
                if (type == MLX5_MODIFICATION_TYPE_COPY) {
-                       assert(dcopy);
+                       MLX5_ASSERT(dcopy);
                        actions[i].dst_field = dcopy->id;
                        actions[i].dst_offset =
                                (int)dcopy->offset < 0 ? off_b : dcopy->offset;
                        /* Convert entire record to big-endian format. */
                        actions[i].data1 = rte_cpu_to_be_32(actions[i].data1);
                } else {
-                       assert(item->spec);
+                       MLX5_ASSERT(item->spec);
                        data = flow_dv_fetch_field((const uint8_t *)item->spec +
                                                   field->offset, field->size);
                        /* Shift out the trailing masked bits from data. */
@@ -911,8 +911,8 @@ flow_dv_convert_action_set_reg
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ACTION, NULL,
                                          "too many items to modify");
-       assert(conf->id != REG_NONE);
-       assert(conf->id < RTE_DIM(reg_to_field));
+       MLX5_ASSERT(conf->id != REG_NONE);
+       MLX5_ASSERT(conf->id < RTE_DIM(reg_to_field));
        actions[i].action_type = MLX5_MODIFICATION_TYPE_SET;
        actions[i].field = reg_to_field[conf->id];
        actions[i].data0 = rte_cpu_to_be_32(actions[i].data0);
@@ -959,10 +959,10 @@ flow_dv_convert_action_set_tag
        ret = mlx5_flow_get_reg_id(dev, MLX5_APP_TAG, conf->index, error);
        if (ret < 0)
                return ret;
-       assert(ret != REG_NONE);
-       assert((unsigned int)ret < RTE_DIM(reg_to_field));
+       MLX5_ASSERT(ret != REG_NONE);
+       MLX5_ASSERT((unsigned int)ret < RTE_DIM(reg_to_field));
        reg_type = reg_to_field[ret];
-       assert(reg_type > 0);
+       MLX5_ASSERT(reg_type > 0);
        reg_c_x[0] = (struct field_modify_info){4, 0, reg_type};
        return flow_dv_convert_modify_action(&item, reg_c_x, NULL, resource,
                                             MLX5_MODIFICATION_TYPE_SET, error);
@@ -1008,8 +1008,8 @@ flow_dv_convert_action_copy_mreg(struct rte_eth_dev *dev,
                struct mlx5_priv *priv = dev->data->dev_private;
                uint32_t reg_c0 = priv->sh->dv_regc0_mask;
 
-               assert(reg_c0);
-               assert(priv->config.dv_xmeta_en != MLX5_XMETA_MODE_LEGACY);
+               MLX5_ASSERT(reg_c0);
+               MLX5_ASSERT(priv->config.dv_xmeta_en != MLX5_XMETA_MODE_LEGACY);
                if (conf->dst == REG_C_0) {
                        /* Copy to reg_c[0], within mask only. */
                        reg_dst.offset = rte_bsf32(reg_c0);
@@ -1088,7 +1088,7 @@ flow_dv_convert_action_mark(struct rte_eth_dev *dev,
        reg = mlx5_flow_get_reg_id(dev, MLX5_FLOW_MARK, 0, error);
        if (reg < 0)
                return reg;
-       assert(reg > 0);
+       MLX5_ASSERT(reg > 0);
        if (reg == REG_C_0) {
                uint32_t msk_c0 = priv->sh->dv_regc0_mask;
                uint32_t shl_c0 = rte_bsf32(msk_c0);
@@ -1183,7 +1183,7 @@ flow_dv_convert_action_set_meta
                uint32_t msk_c0 = priv->sh->dv_regc0_mask;
                uint32_t shl_c0;
 
-               assert(msk_c0);
+               MLX5_ASSERT(msk_c0);
 #if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
                shl_c0 = rte_bsf32(msk_c0);
 #else
@@ -1191,7 +1191,7 @@ flow_dv_convert_action_set_meta
 #endif
                mask <<= shl_c0;
                data <<= shl_c0;
-               assert(!(~msk_c0 & rte_cpu_to_be_32(mask)));
+               MLX5_ASSERT(!(~msk_c0 & rte_cpu_to_be_32(mask)));
        }
        reg_c_x[0] = (struct field_modify_info){4, 0, reg_to_field[reg]};
        /* The routine expects parameters in memory as big-endian ones. */
@@ -1465,7 +1465,7 @@ flow_dv_validate_item_tag(struct rte_eth_dev *dev,
        ret = mlx5_flow_get_reg_id(dev, MLX5_APP_TAG, spec->index, error);
        if (ret < 0)
                return ret;
-       assert(ret != REG_NONE);
+       MLX5_ASSERT(ret != REG_NONE);
        return 0;
 }
 
@@ -1898,7 +1898,7 @@ flow_dv_validate_action_flag(struct rte_eth_dev *dev,
        ret = mlx5_flow_get_reg_id(dev, MLX5_FLOW_MARK, 0, error);
        if (ret < 0)
                return ret;
-       assert(ret > 0);
+       MLX5_ASSERT(ret > 0);
        if (action_flags & MLX5_FLOW_ACTION_MARK)
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ACTION, NULL,
@@ -1958,7 +1958,7 @@ flow_dv_validate_action_mark(struct rte_eth_dev *dev,
        ret = mlx5_flow_get_reg_id(dev, MLX5_FLOW_MARK, 0, error);
        if (ret < 0)
                return ret;
-       assert(ret > 0);
+       MLX5_ASSERT(ret > 0);
        if (!mark)
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ACTION, action,
@@ -2407,7 +2407,7 @@ flow_dv_jump_tbl_resource_register
                container_of(tbl, struct mlx5_flow_tbl_data_entry, tbl);
        int cnt;
 
-       assert(tbl);
+       MLX5_ASSERT(tbl);
        cnt = rte_atomic32_read(&tbl_data->jump.refcnt);
        if (!cnt) {
                tbl_data->jump.action =
@@ -2420,7 +2420,7 @@ flow_dv_jump_tbl_resource_register
                DRV_LOG(DEBUG, "new jump table resource %p: refcnt %d++",
                        (void *)&tbl_data->jump, cnt);
        } else {
-               assert(tbl_data->jump.action);
+               MLX5_ASSERT(tbl_data->jump.action);
                DRV_LOG(DEBUG, "existed jump table resource %p: refcnt %d++",
                        (void *)&tbl_data->jump, cnt);
        }
@@ -6017,7 +6017,7 @@ flow_dv_match_meta_reg(void *matcher, void *key,
                MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_7, data);
                break;
        default:
-               assert(false);
+               MLX5_ASSERT(false);
                break;
        }
 }
@@ -6048,14 +6048,14 @@ flow_dv_translate_item_mark(struct rte_eth_dev *dev,
                            &rte_flow_item_mark_mask;
        mask = mark->id & priv->sh->dv_mark_mask;
        mark = (const void *)item->spec;
-       assert(mark);
+       MLX5_ASSERT(mark);
        value = mark->id & priv->sh->dv_mark_mask & mask;
        if (mask) {
                enum modify_reg reg;
 
                /* Get the metadata register index for the mark. */
                reg = mlx5_flow_get_reg_id(dev, MLX5_FLOW_MARK, 0, NULL);
-               assert(reg > 0);
+               MLX5_ASSERT(reg > 0);
                if (reg == REG_C_0) {
                        struct mlx5_priv *priv = dev->data->dev_private;
                        uint32_t msk_c0 = priv->sh->dv_regc0_mask;
@@ -6123,8 +6123,8 @@ flow_dv_translate_item_meta(struct rte_eth_dev *dev,
 #endif
                        value <<= shl_c0;
                        mask <<= shl_c0;
-                       assert(msk_c0);
-                       assert(!(~msk_c0 & mask));
+                       MLX5_ASSERT(msk_c0);
+                       MLX5_ASSERT(!(~msk_c0 & mask));
                }
                flow_dv_match_meta_reg(matcher, key, reg, value, mask);
        }
@@ -6168,7 +6168,7 @@ flow_dv_translate_mlx5_item_tag(struct rte_eth_dev *dev,
        const struct mlx5_rte_flow_item_tag *tag_m = item->mask;
        uint32_t mask, value;
 
-       assert(tag_v);
+       MLX5_ASSERT(tag_v);
        value = tag_v->data;
        mask = tag_m ? tag_m->data : UINT32_MAX;
        if (tag_v->id == REG_C_0) {
@@ -6204,11 +6204,11 @@ flow_dv_translate_item_tag(struct rte_eth_dev *dev,
        const struct rte_flow_item_tag *tag_m = item->mask;
        enum modify_reg reg;
 
-       assert(tag_v);
+       MLX5_ASSERT(tag_v);
        tag_m = tag_m ? tag_m : &rte_flow_item_tag_mask;
        /* Get the metadata register index for the tag. */
        reg = mlx5_flow_get_reg_id(dev, MLX5_APP_TAG, tag_v->index, NULL);
-       assert(reg > 0);
+       MLX5_ASSERT(reg > 0);
        flow_dv_match_meta_reg(matcher, key, reg, tag_v->data, tag_m->data);
 }
 
@@ -6775,7 +6775,7 @@ flow_dv_tag_release(struct rte_eth_dev *dev,
        struct mlx5_priv *priv = dev->data->dev_private;
        struct mlx5_ibv_shared *sh = priv->sh;
 
-       assert(tag);
+       MLX5_ASSERT(tag);
        DRV_LOG(DEBUG, "port %u tag %p: refcnt %d--",
                dev->data->port_id, (void *)tag,
                rte_atomic32_read(&tag->refcnt));
@@ -7125,14 +7125,14 @@ __flow_dv_translate(struct rte_eth_dev *dev,
                        action_flags |= MLX5_FLOW_ACTION_DROP;
                        break;
                case RTE_FLOW_ACTION_TYPE_QUEUE:
-                       assert(flow->rss.queue);
+                       MLX5_ASSERT(flow->rss.queue);
                        queue = actions->conf;
                        flow->rss.queue_num = 1;
                        (*flow->rss.queue)[0] = queue->index;
                        action_flags |= MLX5_FLOW_ACTION_QUEUE;
                        break;
                case RTE_FLOW_ACTION_TYPE_RSS:
-                       assert(flow->rss.queue);
+                       MLX5_ASSERT(flow->rss.queue);
                        rss = actions->conf;
                        if (flow->rss.queue)
                                memcpy((*flow->rss.queue), rss->queue,
@@ -7206,7 +7206,8 @@ cnt_err:
                        break;
                case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP:
                        /* of_vlan_push action handled this action */
-                       assert(action_flags & MLX5_FLOW_ACTION_OF_PUSH_VLAN);
+                       MLX5_ASSERT(action_flags &
+                                   MLX5_FLOW_ACTION_OF_PUSH_VLAN);
                        break;
                case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID:
                        if (action_flags & MLX5_FLOW_ACTION_OF_PUSH_VLAN)
@@ -7631,8 +7632,10 @@ cnt_err:
                                                   match_value, NULL))
                        return -rte_errno;
        }
-       assert(!flow_dv_check_valid_spec(matcher.mask.buf,
-                                        dev_flow->dv.value.buf));
+#ifdef RTE_LIBRTE_MLX5_DEBUG
+       MLX5_ASSERT(!flow_dv_check_valid_spec(matcher.mask.buf,
+                                             dev_flow->dv.value.buf));
+#endif
        dev_flow->layers = item_flags;
        if (action_flags & MLX5_FLOW_ACTION_RSS)
                flow_dv_hashfields_set(dev_flow);
@@ -7696,7 +7699,7 @@ __flow_dv_apply(struct rte_eth_dev *dev, struct rte_flow *flow,
                           (MLX5_FLOW_ACTION_QUEUE | MLX5_FLOW_ACTION_RSS)) {
                        struct mlx5_hrxq *hrxq;
 
-                       assert(flow->rss.queue);
+                       MLX5_ASSERT(flow->rss.queue);
                        hrxq = mlx5_hrxq_get(dev, flow->rss.key,
                                             MLX5_RSS_HASH_KEY_LEN,
                                             dev_flow->hash_fields,
@@ -7782,7 +7785,7 @@ flow_dv_matcher_release(struct rte_eth_dev *dev,
 {
        struct mlx5_flow_dv_matcher *matcher = flow->dv.matcher;
 
-       assert(matcher->matcher_object);
+       MLX5_ASSERT(matcher->matcher_object);
        DRV_LOG(DEBUG, "port %u matcher %p: refcnt %d--",
                dev->data->port_id, (void *)matcher,
                rte_atomic32_read(&matcher->refcnt));
@@ -7815,7 +7818,7 @@ flow_dv_encap_decap_resource_release(struct mlx5_flow *flow)
        struct mlx5_flow_dv_encap_decap_resource *cache_resource =
                                                flow->dv.encap_decap;
 
-       assert(cache_resource->verbs_action);
+       MLX5_ASSERT(cache_resource->verbs_action);
        DRV_LOG(DEBUG, "encap/decap resource %p: refcnt %d--",
                (void *)cache_resource,
                rte_atomic32_read(&cache_resource->refcnt));
@@ -7851,7 +7854,7 @@ flow_dv_jump_tbl_resource_release(struct rte_eth_dev *dev,
                        container_of(cache_resource,
                                     struct mlx5_flow_tbl_data_entry, jump);
 
-       assert(cache_resource->action);
+       MLX5_ASSERT(cache_resource->action);
        DRV_LOG(DEBUG, "jump table resource %p: refcnt %d--",
                (void *)cache_resource,
                rte_atomic32_read(&cache_resource->refcnt));
@@ -7882,7 +7885,7 @@ flow_dv_modify_hdr_resource_release(struct mlx5_flow *flow)
        struct mlx5_flow_dv_modify_hdr_resource *cache_resource =
                                                flow->dv.modify_hdr;
 
-       assert(cache_resource->verbs_action);
+       MLX5_ASSERT(cache_resource->verbs_action);
        DRV_LOG(DEBUG, "modify-header resource %p: refcnt %d--",
                (void *)cache_resource,
                rte_atomic32_read(&cache_resource->refcnt));
@@ -7913,7 +7916,7 @@ flow_dv_port_id_action_resource_release(struct mlx5_flow *flow)
        struct mlx5_flow_dv_port_id_action_resource *cache_resource =
                flow->dv.port_id_action;
 
-       assert(cache_resource->action);
+       MLX5_ASSERT(cache_resource->action);
        DRV_LOG(DEBUG, "port ID action resource %p: refcnt %d--",
                (void *)cache_resource,
                rte_atomic32_read(&cache_resource->refcnt));
@@ -7944,7 +7947,7 @@ flow_dv_push_vlan_action_resource_release(struct mlx5_flow *flow)
        struct mlx5_flow_dv_push_vlan_action_resource *cache_resource =
                flow->dv.push_vlan_res;
 
-       assert(cache_resource->action);
+       MLX5_ASSERT(cache_resource->action);
        DRV_LOG(DEBUG, "push VLAN action resource %p: refcnt %d--",
                (void *)cache_resource,
                rte_atomic32_read(&cache_resource->refcnt));
index 32d51c0..aa0fd7a 100644 (file)
@@ -734,7 +734,7 @@ mlx5_flow_meter_destroy(struct rte_eth_dev *dev, uint32_t meter_id,
                                          NULL, "Meter object is being used.");
        /* Get the meter profile. */
        fmp = fm->profile;
-       RTE_ASSERT(fmp);
+       MLX5_ASSERT(fmp);
        /* Update dependencies. */
        fmp->ref_cnt--;
        /* Remove from the flow meter list. */
@@ -1179,7 +1179,7 @@ mlx5_flow_meter_attach(struct mlx5_priv *priv, uint32_t meter_id,
                goto error;
        }
        if (!fm->ref_cnt++) {
-               RTE_ASSERT(!fm->mfts->meter_action);
+               MLX5_ASSERT(!fm->mfts->meter_action);
                fm->attr = *attr;
                /* This also creates the meter object. */
                fm->mfts->meter_action = mlx5_flow_meter_action_create(priv,
@@ -1187,7 +1187,7 @@ mlx5_flow_meter_attach(struct mlx5_priv *priv, uint32_t meter_id,
                if (!fm->mfts->meter_action)
                        goto error_detach;
        } else {
-               RTE_ASSERT(fm->mfts->meter_action);
+               MLX5_ASSERT(fm->mfts->meter_action);
                if (attr->transfer != fm->attr.transfer ||
                    attr->ingress != fm->attr.ingress ||
                    attr->egress != fm->attr.egress) {
@@ -1217,7 +1217,7 @@ mlx5_flow_meter_detach(struct mlx5_flow_meter *fm)
 {
        const struct rte_flow_attr attr = { 0 };
 
-       RTE_ASSERT(fm->ref_cnt);
+       MLX5_ASSERT(fm->ref_cnt);
        if (--fm->ref_cnt)
                return;
        if (fm->mfts->meter_action)
@@ -1255,7 +1255,7 @@ mlx5_flow_meter_flush(struct rte_eth_dev *dev, struct rte_mtr_error *error)
 
        TAILQ_FOREACH_SAFE(fm, fms, next, tmp) {
                /* Meter object must not have any owner. */
-               RTE_ASSERT(!fm->ref_cnt);
+               MLX5_ASSERT(!fm->ref_cnt);
                /* Get meter profile. */
                fmp = fm->profile;
                if (fmp == NULL)
@@ -1278,7 +1278,7 @@ mlx5_flow_meter_flush(struct rte_eth_dev *dev, struct rte_mtr_error *error)
        }
        TAILQ_FOREACH_SAFE(fmp, fmps, next, tmp) {
                /* Check unused. */
-               RTE_ASSERT(!fmp->ref_cnt);
+               MLX5_ASSERT(!fmp->ref_cnt);
                /* Remove from list. */
                TAILQ_REMOVE(&priv->flow_meter_profiles, fmp, next);
                rte_free(fmp);
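
The flow-meter hunks above all protect one reference-count discipline: the first attach creates the meter action, later attaches merely share it, and the last detach destroys it. A standalone sketch of that discipline (hypothetical: create_action()/destroy_action() stand in for mlx5_flow_meter_action_create() and its teardown, and MLX5_ASSERT is assumed in scope):

    #include <stdint.h>

    void *create_action(void);          /* stand-in helpers */
    void destroy_action(void *action);

    struct meter { uint32_t ref_cnt; void *action; };

    static void
    meter_attach(struct meter *m)
    {
            if (!m->ref_cnt++) {
                    MLX5_ASSERT(!m->action); /* first user creates it */
                    m->action = create_action();
            } else {
                    MLX5_ASSERT(m->action);  /* later users share it */
            }
    }

    static void
    meter_detach(struct meter *m)
    {
            MLX5_ASSERT(m->ref_cnt);         /* detach pairs an attach */
            if (--m->ref_cnt)
                    return;                  /* still referenced */
            destroy_action(m->action);
            m->action = NULL;
    }
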
index 9231451..04845f8 100644 (file)
@@ -259,7 +259,7 @@ flow_verbs_spec_add(struct mlx5_flow_verbs *verbs, void *src, unsigned int size)
 
        if (!verbs)
                return;
-       assert(verbs->specs);
+       MLX5_ASSERT(verbs->specs);
        dst = (void *)(verbs->specs + verbs->size);
        memcpy(dst, src, size);
        ++verbs->attr->num_of_specs;
@@ -1709,7 +1709,7 @@ flow_verbs_apply(struct rte_eth_dev *dev, struct rte_flow *flow,
                } else {
                        struct mlx5_hrxq *hrxq;
 
-                       assert(flow->rss.queue);
+                       MLX5_ASSERT(flow->rss.queue);
                        hrxq = mlx5_hrxq_get(dev, flow->rss.key,
                                             MLX5_RSS_HASH_KEY_LEN,
                                             dev_flow->hash_fields,
index 0ab2a0e..291f772 100644 (file)
@@ -4,7 +4,6 @@
  */
 
 #include <stddef.h>
-#include <assert.h>
 #include <stdint.h>
 #include <string.h>
 #include <inttypes.h>
@@ -70,7 +69,7 @@ mlx5_internal_mac_addr_remove(struct rte_eth_dev *dev, uint32_t index)
        struct mlx5_priv *priv = dev->data->dev_private;
        const int vf = priv->config.vf;
 
-       assert(index < MLX5_MAX_MAC_ADDRESSES);
+       MLX5_ASSERT(index < MLX5_MAX_MAC_ADDRESSES);
        if (rte_is_zero_ether_addr(&dev->data->mac_addrs[index]))
                return;
        if (vf)
@@ -101,7 +100,7 @@ mlx5_internal_mac_addr_add(struct rte_eth_dev *dev, struct rte_ether_addr *mac,
        const int vf = priv->config.vf;
        unsigned int i;
 
-       assert(index < MLX5_MAX_MAC_ADDRESSES);
+       MLX5_ASSERT(index < MLX5_MAX_MAC_ADDRESSES);
        if (rte_is_zero_ether_addr(mac)) {
                rte_errno = EINVAL;
                return -rte_errno;
index 2a031e2..55d408f 100644 (file)
@@ -3,7 +3,6 @@
  * Copyright 2019 Mellanox Technologies, Ltd
  */
 
-#include <assert.h>
 #include <stdio.h>
 #include <time.h>
 
@@ -62,7 +61,7 @@ mp_primary_handle(const struct rte_mp_msg *mp_msg, const void *peer)
        uint32_t lkey;
        int ret;
 
-       assert(rte_eal_process_type() == RTE_PROC_PRIMARY);
+       MLX5_ASSERT(rte_eal_process_type() == RTE_PROC_PRIMARY);
        if (!rte_eth_dev_is_valid_port(param->port_id)) {
                rte_errno = ENODEV;
                DRV_LOG(ERR, "port %u invalid port ID", param->port_id);
@@ -121,7 +120,7 @@ mp_secondary_handle(const struct rte_mp_msg *mp_msg, const void *peer)
        struct rte_eth_dev *dev;
        int ret;
 
-       assert(rte_eal_process_type() == RTE_PROC_SECONDARY);
+       MLX5_ASSERT(rte_eal_process_type() == RTE_PROC_SECONDARY);
        if (!rte_eth_dev_is_valid_port(param->port_id)) {
                rte_errno = ENODEV;
                DRV_LOG(ERR, "port %u invalid port ID", param->port_id);
@@ -175,7 +174,7 @@ mp_req_on_rxtx(struct rte_eth_dev *dev, enum mlx5_mp_req_type type)
        int ret;
        int i;
 
-       assert(rte_eal_process_type() == RTE_PROC_PRIMARY);
+       MLX5_ASSERT(rte_eal_process_type() == RTE_PROC_PRIMARY);
        if (!mlx5_shared_data->secondary_cnt)
                return;
        if (type != MLX5_MP_REQ_START_RXTX && type != MLX5_MP_REQ_STOP_RXTX) {
@@ -258,7 +257,7 @@ mlx5_mp_req_mr_create(struct rte_eth_dev *dev, uintptr_t addr)
        struct timespec ts = {.tv_sec = MLX5_MP_REQ_TIMEOUT_SEC, .tv_nsec = 0};
        int ret;
 
-       assert(rte_eal_process_type() == RTE_PROC_SECONDARY);
+       MLX5_ASSERT(rte_eal_process_type() == RTE_PROC_SECONDARY);
        mp_init_msg(dev, &mp_req, MLX5_MP_REQ_CREATE_MR);
        req->args.addr = addr;
        ret = rte_mp_request_sync(&mp_req, &mp_rep, &ts);
@@ -267,7 +266,7 @@ mlx5_mp_req_mr_create(struct rte_eth_dev *dev, uintptr_t addr)
                        dev->data->port_id);
                return -rte_errno;
        }
-       assert(mp_rep.nb_received == 1);
+       MLX5_ASSERT(mp_rep.nb_received == 1);
        mp_res = &mp_rep.msgs[0];
        res = (struct mlx5_mp_param *)mp_res->param;
        ret = res->result;
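
All of the mlx5_mp.c hunks touch the same request/reply pattern. Condensed into one illustrative function (mp_init_msg(), struct mlx5_mp_param and the MLX5_MP_* names are taken from the surrounding context; treat the wiring as a sketch, not verbatim driver code):

    static int
    mp_request_example(struct rte_eth_dev *dev, uintptr_t addr)
    {
            struct rte_mp_msg mp_req;
            struct rte_mp_msg *mp_res;
            struct rte_mp_reply mp_rep;
            struct mlx5_mp_param *req = (struct mlx5_mp_param *)mp_req.param;
            struct mlx5_mp_param *res;
            struct timespec ts = {.tv_sec = MLX5_MP_REQ_TIMEOUT_SEC, .tv_nsec = 0};
            int ret;

            MLX5_ASSERT(rte_eal_process_type() == RTE_PROC_SECONDARY);
            mp_init_msg(dev, &mp_req, MLX5_MP_REQ_CREATE_MR);
            req->args.addr = addr;
            ret = rte_mp_request_sync(&mp_req, &mp_rep, &ts);
            if (ret)
                    return -rte_errno;      /* request was not delivered */
            /* Exactly one reply is expected from the single primary. */
            MLX5_ASSERT(mp_rep.nb_received == 1);
            mp_res = &mp_rep.msgs[0];
            res = (struct mlx5_mp_param *)mp_res->param;
            ret = res->result;
            free(mp_rep.msgs);              /* reply array is malloc'ed by EAL */
            return ret;
    }
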
@@ -300,7 +299,7 @@ mlx5_mp_req_queue_state_modify(struct rte_eth_dev *dev,
        struct timespec ts = {.tv_sec = MLX5_MP_REQ_TIMEOUT_SEC, .tv_nsec = 0};
        int ret;
 
-       assert(rte_eal_process_type() == RTE_PROC_SECONDARY);
+       MLX5_ASSERT(rte_eal_process_type() == RTE_PROC_SECONDARY);
        mp_init_msg(dev, &mp_req, MLX5_MP_REQ_QUEUE_STATE_MODIFY);
        req->args.state_modify = *sm;
        ret = rte_mp_request_sync(&mp_req, &mp_rep, &ts);
@@ -309,7 +308,7 @@ mlx5_mp_req_queue_state_modify(struct rte_eth_dev *dev,
                        dev->data->port_id);
                return -rte_errno;
        }
-       assert(mp_rep.nb_received == 1);
+       MLX5_ASSERT(mp_rep.nb_received == 1);
        mp_res = &mp_rep.msgs[0];
        res = (struct mlx5_mp_param *)mp_res->param;
        ret = res->result;
@@ -336,7 +335,7 @@ mlx5_mp_req_verbs_cmd_fd(struct rte_eth_dev *dev)
        struct timespec ts = {.tv_sec = MLX5_MP_REQ_TIMEOUT_SEC, .tv_nsec = 0};
        int ret;
 
-       assert(rte_eal_process_type() == RTE_PROC_SECONDARY);
+       MLX5_ASSERT(rte_eal_process_type() == RTE_PROC_SECONDARY);
        mp_init_msg(dev, &mp_req, MLX5_MP_REQ_VERBS_CMD_FD);
        ret = rte_mp_request_sync(&mp_req, &mp_rep, &ts);
        if (ret) {
@@ -344,7 +343,7 @@ mlx5_mp_req_verbs_cmd_fd(struct rte_eth_dev *dev)
                        dev->data->port_id);
                return -rte_errno;
        }
-       assert(mp_rep.nb_received == 1);
+       MLX5_ASSERT(mp_rep.nb_received == 1);
        mp_res = &mp_rep.msgs[0];
        res = (struct mlx5_mp_param *)mp_res->param;
        if (res->result) {
@@ -355,7 +354,7 @@ mlx5_mp_req_verbs_cmd_fd(struct rte_eth_dev *dev)
                ret = -rte_errno;
                goto exit;
        }
-       assert(mp_res->num_fds == 1);
+       MLX5_ASSERT(mp_res->num_fds == 1);
        ret = mp_res->fds[0];
        DRV_LOG(DEBUG, "port %u command FD from primary is %d",
                dev->data->port_id, ret);
@@ -372,7 +371,7 @@ mlx5_mp_init_primary(void)
 {
        int ret;
 
-       assert(rte_eal_process_type() == RTE_PROC_PRIMARY);
+       MLX5_ASSERT(rte_eal_process_type() == RTE_PROC_PRIMARY);
 
        /* primary is allowed to not support IPC */
        ret = rte_mp_action_register(MLX5_MP_NAME, mp_primary_handle);
@@ -387,7 +386,7 @@ mlx5_mp_init_primary(void)
 void
 mlx5_mp_uninit_primary(void)
 {
-       assert(rte_eal_process_type() == RTE_PROC_PRIMARY);
+       MLX5_ASSERT(rte_eal_process_type() == RTE_PROC_PRIMARY);
        rte_mp_action_unregister(MLX5_MP_NAME);
 }
 
@@ -397,7 +396,7 @@ mlx5_mp_uninit_primary(void)
 int
 mlx5_mp_init_secondary(void)
 {
-       assert(rte_eal_process_type() == RTE_PROC_SECONDARY);
+       MLX5_ASSERT(rte_eal_process_type() == RTE_PROC_SECONDARY);
        return rte_mp_action_register(MLX5_MP_NAME, mp_secondary_handle);
 }
 
@@ -407,6 +406,6 @@ mlx5_mp_init_secondary(void)
 void
 mlx5_mp_uninit_secondary(void)
 {
-       assert(rte_eal_process_type() == RTE_PROC_SECONDARY);
+       MLX5_ASSERT(rte_eal_process_type() == RTE_PROC_SECONDARY);
        rte_mp_action_unregister(MLX5_MP_NAME);
 }
index 764a741..cb97c87 100644 (file)
@@ -99,12 +99,12 @@ mr_btree_lookup(struct mlx5_mr_btree *bt, uint16_t *idx, uintptr_t addr)
        uint16_t n;
        uint16_t base = 0;
 
-       assert(bt != NULL);
+       MLX5_ASSERT(bt != NULL);
        lkp_tbl = *bt->table;
        n = bt->len;
        /* First entry must be NULL for comparison. */
-       assert(bt->len > 0 || (lkp_tbl[0].start == 0 &&
-                              lkp_tbl[0].lkey == UINT32_MAX));
+       MLX5_ASSERT(bt->len > 0 || (lkp_tbl[0].start == 0 &&
+                                   lkp_tbl[0].lkey == UINT32_MAX));
        /* Binary search. */
        do {
                register uint16_t delta = n >> 1;
@@ -116,7 +116,7 @@ mr_btree_lookup(struct mlx5_mr_btree *bt, uint16_t *idx, uintptr_t addr)
                        n -= delta;
                }
        } while (n > 1);
-       assert(addr >= lkp_tbl[base].start);
+       MLX5_ASSERT(addr >= lkp_tbl[base].start);
        *idx = base;
        if (addr < lkp_tbl[base].end)
                return lkp_tbl[base].lkey;
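
The lookup asserted above is a plain binary search over a sorted array whose first entry is a NULL sentinel ([0, 0) with lkey UINT32_MAX), which is why the loop needs no emptiness check. A self-contained sketch with illustrative names:

    #include <stdint.h>

    struct cache_entry {
            uintptr_t start;        /* first byte covered */
            uintptr_t end;          /* one past the last byte covered */
            uint32_t lkey;          /* cached key, UINT32_MAX if none */
    };

    /* Return the lkey covering 'addr', or UINT32_MAX on a miss.
     * tbl[0] must be the sentinel entry; len must be >= 1. */
    static uint32_t
    cache_lookup(const struct cache_entry *tbl, uint16_t len, uintptr_t addr)
    {
            uint16_t n = len;
            uint16_t base = 0;

            do {
                    uint16_t delta = n >> 1;

                    if (addr < tbl[base + delta].start) {
                            n = delta;      /* descend into left half */
                    } else {
                            base += delta;  /* move right */
                            n -= delta;
                    }
            } while (n > 1);
            return addr < tbl[base].end ? tbl[base].lkey : UINT32_MAX;
    }
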
@@ -142,9 +142,9 @@ mr_btree_insert(struct mlx5_mr_btree *bt, struct mlx5_mr_cache *entry)
        uint16_t idx = 0;
        size_t shift;
 
-       assert(bt != NULL);
-       assert(bt->len <= bt->size);
-       assert(bt->len > 0);
+       MLX5_ASSERT(bt != NULL);
+       MLX5_ASSERT(bt->len <= bt->size);
+       MLX5_ASSERT(bt->len > 0);
        lkp_tbl = *bt->table;
        /* Find out the slot for insertion. */
        if (mr_btree_lookup(bt, &idx, entry->start) != UINT32_MAX) {
@@ -194,7 +194,7 @@ mlx5_mr_btree_init(struct mlx5_mr_btree *bt, int n, int socket)
                rte_errno = EINVAL;
                return -rte_errno;
        }
-       assert(!bt->table && !bt->size);
+       MLX5_ASSERT(!bt->table && !bt->size);
        memset(bt, 0, sizeof(*bt));
        bt->table = rte_calloc_socket("B-tree table",
                                      n, sizeof(struct mlx5_mr_cache),
@@ -284,9 +284,9 @@ mr_find_next_chunk(struct mlx5_mr *mr, struct mlx5_mr_cache *entry,
        if (mr->msl == NULL) {
                struct ibv_mr *ibv_mr = mr->ibv_mr;
 
-               assert(mr->ms_bmp_n == 1);
-               assert(mr->ms_n == 1);
-               assert(base_idx == 0);
+               MLX5_ASSERT(mr->ms_bmp_n == 1);
+               MLX5_ASSERT(mr->ms_n == 1);
+               MLX5_ASSERT(base_idx == 0);
                /*
                 * Can't search it from memseg list but get it directly from
                 * verbs MR as there's only one chunk.
@@ -305,7 +305,7 @@ mr_find_next_chunk(struct mlx5_mr *mr, struct mlx5_mr_cache *entry,
                        msl = mr->msl;
                        ms = rte_fbarray_get(&msl->memseg_arr,
                                             mr->ms_base_idx + idx);
-                       assert(msl->page_sz == ms->hugepage_sz);
+                       MLX5_ASSERT(msl->page_sz == ms->hugepage_sz);
                        if (!start)
                                start = ms->addr_64;
                        end = ms->addr_64 + ms->hugepage_sz;
@@ -439,8 +439,8 @@ mr_lookup_dev(struct mlx5_ibv_shared *sh, struct mlx5_mr_cache *entry,
                if (mr != NULL)
                        lkey = entry->lkey;
        }
-       assert(lkey == UINT32_MAX || (addr >= entry->start &&
-                                     addr < entry->end));
+       MLX5_ASSERT(lkey == UINT32_MAX || (addr >= entry->start &&
+                                          addr < entry->end));
        return lkey;
 }
 
@@ -477,7 +477,7 @@ mlx5_mr_garbage_collect(struct mlx5_ibv_shared *sh)
        struct mlx5_mr_list free_list = LIST_HEAD_INITIALIZER(free_list);
 
        /* Must be called from the primary process. */
-       assert(rte_eal_process_type() == RTE_PROC_PRIMARY);
+       MLX5_ASSERT(rte_eal_process_type() == RTE_PROC_PRIMARY);
        /*
         * MR can't be freed with holding the lock because rte_free() could call
         * memory free callback function. This will be a deadlock situation.
@@ -550,7 +550,7 @@ mlx5_mr_create_secondary(struct rte_eth_dev *dev, struct mlx5_mr_cache *entry,
        /* Fill in output data. */
        mr_lookup_dev(priv->sh, entry, addr);
        /* Lookup can't fail. */
-       assert(entry->lkey != UINT32_MAX);
+       MLX5_ASSERT(entry->lkey != UINT32_MAX);
        rte_rwlock_read_unlock(&priv->sh->mr.rwlock);
        DEBUG("port %u MR CREATED by primary process for %p:\n"
              "  [0x%" PRIxPTR ", 0x%" PRIxPTR "), lkey=0x%x",
@@ -635,12 +635,12 @@ mlx5_mr_create_primary(struct rte_eth_dev *dev, struct mlx5_mr_cache *entry,
        }
 alloc_resources:
        /* Addresses must be page-aligned. */
-       assert(rte_is_aligned((void *)data.start, data.msl->page_sz));
-       assert(rte_is_aligned((void *)data.end, data.msl->page_sz));
+       MLX5_ASSERT(rte_is_aligned((void *)data.start, data.msl->page_sz));
+       MLX5_ASSERT(rte_is_aligned((void *)data.end, data.msl->page_sz));
        msl = data.msl;
        ms = rte_mem_virt2memseg((void *)data.start, msl);
        len = data.end - data.start;
-       assert(msl->page_sz == ms->hugepage_sz);
+       MLX5_ASSERT(msl->page_sz == ms->hugepage_sz);
        /* Number of memsegs in the range. */
        ms_n = len / msl->page_sz;
        DEBUG("port %u extending %p to [0x%" PRIxPTR ", 0x%" PRIxPTR "),"
@@ -707,7 +707,7 @@ alloc_resources:
                mr_free(mr);
                goto alloc_resources;
        }
-       assert(data.msl == data_re.msl);
+       MLX5_ASSERT(data.msl == data_re.msl);
        rte_rwlock_write_lock(&sh->mr.rwlock);
        /*
         * Check the address is really missing. If other thread already created
@@ -760,7 +760,7 @@ alloc_resources:
        }
        len = data.end - data.start;
        mr->ms_bmp_n = len / msl->page_sz;
-       assert(ms_idx_shift + mr->ms_bmp_n <= ms_n);
+       MLX5_ASSERT(ms_idx_shift + mr->ms_bmp_n <= ms_n);
        /*
         * Finally create a verbs MR for the memory chunk. ibv_reg_mr() can be
         * called with holding the memory lock because it doesn't use
@@ -775,8 +775,8 @@ alloc_resources:
                rte_errno = EINVAL;
                goto err_mrlock;
        }
-       assert((uintptr_t)mr->ibv_mr->addr == data.start);
-       assert(mr->ibv_mr->length == len);
+       MLX5_ASSERT((uintptr_t)mr->ibv_mr->addr == data.start);
+       MLX5_ASSERT(mr->ibv_mr->length == len);
        LIST_INSERT_HEAD(&sh->mr.mr_list, mr, mr);
        DEBUG("port %u MR CREATED (%p) for %p:\n"
              "  [0x%" PRIxPTR ", 0x%" PRIxPTR "),"
@@ -789,7 +789,7 @@ alloc_resources:
        /* Fill in output data. */
        mr_lookup_dev(sh, entry, addr);
        /* Lookup can't fail. */
-       assert(entry->lkey != UINT32_MAX);
+       MLX5_ASSERT(entry->lkey != UINT32_MAX);
        rte_rwlock_write_unlock(&sh->mr.rwlock);
        rte_mcfg_mem_read_unlock();
        return entry->lkey;
@@ -895,8 +895,9 @@ mlx5_mr_mem_event_free_cb(struct mlx5_ibv_shared *sh,
              sh->ibdev_name, addr, len);
        msl = rte_mem_virt2memseg_list(addr);
        /* addr and len must be page-aligned. */
-       assert((uintptr_t)addr == RTE_ALIGN((uintptr_t)addr, msl->page_sz));
-       assert(len == RTE_ALIGN(len, msl->page_sz));
+       MLX5_ASSERT((uintptr_t)addr ==
+                   RTE_ALIGN((uintptr_t)addr, msl->page_sz));
+       MLX5_ASSERT(len == RTE_ALIGN(len, msl->page_sz));
        ms_n = len / msl->page_sz;
        rte_rwlock_write_lock(&sh->mr.rwlock);
        /* Clear bits of freed memsegs from MR. */
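
A quick worked example of the page-alignment invariant checked above (illustrative numbers, assuming a 2 MiB page size):

    /* RTE_ALIGN() rounds up to the next multiple of page_sz, so an
     * address is aligned exactly when rounding changes nothing:
     *   RTE_ALIGN(0x200000, 0x200000) == 0x200000  aligned, assert holds
     *   RTE_ALIGN(0x200001, 0x200000) == 0x400000  misaligned, it fires
     */
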
@@ -912,14 +913,14 @@ mlx5_mr_mem_event_free_cb(struct mlx5_ibv_shared *sh,
                mr = mr_lookup_dev_list(sh, &entry, start);
                if (mr == NULL)
                        continue;
-               assert(mr->msl); /* Can't be external memory. */
+               MLX5_ASSERT(mr->msl); /* Can't be external memory. */
                ms = rte_mem_virt2memseg((void *)start, msl);
-               assert(ms != NULL);
-               assert(msl->page_sz == ms->hugepage_sz);
+               MLX5_ASSERT(ms != NULL);
+               MLX5_ASSERT(msl->page_sz == ms->hugepage_sz);
                ms_idx = rte_fbarray_find_idx(&msl->memseg_arr, ms);
                pos = ms_idx - mr->ms_base_idx;
-               assert(rte_bitmap_get(mr->ms_bmp, pos));
-               assert(pos < mr->ms_bmp_n);
+               MLX5_ASSERT(rte_bitmap_get(mr->ms_bmp, pos));
+               MLX5_ASSERT(pos < mr->ms_bmp_n);
                DEBUG("device %s MR(%p): clear bitmap[%u] for addr %p",
                      sh->ibdev_name, (void *)mr, pos, (void *)start);
                rte_bitmap_clear(mr->ms_bmp, pos);
@@ -973,7 +974,7 @@ mlx5_mr_mem_event_cb(enum rte_mem_event event_type, const void *addr,
        struct mlx5_dev_list *dev_list = &mlx5_shared_data->mem_event_cb_list;
 
        /* Must be called from the primary process. */
-       assert(rte_eal_process_type() == RTE_PROC_PRIMARY);
+       MLX5_ASSERT(rte_eal_process_type() == RTE_PROC_PRIMARY);
        switch (event_type) {
        case RTE_MEM_EVENT_FREE:
                rte_rwlock_write_lock(&mlx5_shared_data->mem_event_rwlock);
@@ -1267,7 +1268,7 @@ mlx5_mr_update_ext_mp_cb(struct rte_mempool *mp, void *opaque,
        struct mlx5_mr_cache entry;
        uint32_t lkey;
 
-       assert(rte_eal_process_type() == RTE_PROC_PRIMARY);
+       MLX5_ASSERT(rte_eal_process_type() == RTE_PROC_PRIMARY);
        /* If already registered, it should return. */
        rte_rwlock_read_lock(&sh->mr.rwlock);
        lkey = mr_lookup_dev(sh, &entry, addr);
index 345ce3a..653b069 100644 (file)
@@ -7,7 +7,6 @@
 #include <stdint.h>
 #include <errno.h>
 #include <string.h>
-#include <assert.h>
 
 /* Verbs header. */
 /* ISO C doesn't support unnamed structs/unions, disabling -pedantic. */
@@ -218,7 +217,7 @@ mlx5_dev_rss_reta_update(struct rte_eth_dev *dev,
                pos = i % RTE_RETA_GROUP_SIZE;
                if (((reta_conf[idx].mask >> i) & 0x1) == 0)
                        continue;
-               assert(reta_conf[idx].reta[pos] < priv->rxqs_n);
+               MLX5_ASSERT(reta_conf[idx].reta[pos] < priv->rxqs_n);
                (*priv->reta_idx)[i] = reta_conf[idx].reta[pos];
        }
        if (dev->data->dev_started) {
index e01cbfd..dc0fd82 100644 (file)
@@ -4,7 +4,6 @@
  */
 
 #include <stddef.h>
-#include <assert.h>
 #include <errno.h>
 #include <string.h>
 #include <stdint.h>
@@ -127,7 +126,7 @@ mlx5_mprq_enabled(struct rte_eth_dev *dev)
                        ++n;
        }
        /* Multi-Packet RQ can't be partially configured. */
-       assert(n == 0 || n == n_ibv);
+       MLX5_ASSERT(n == 0 || n == n_ibv);
        return n == n_ibv;
 }
 
@@ -210,11 +209,11 @@ rxq_alloc_elts_sprq(struct mlx5_rxq_ctrl *rxq_ctrl)
                        goto error;
                }
                /* Headroom is reserved by rte_pktmbuf_alloc(). */
-               assert(DATA_OFF(buf) == RTE_PKTMBUF_HEADROOM);
+               MLX5_ASSERT(DATA_OFF(buf) == RTE_PKTMBUF_HEADROOM);
                /* Buffer is supposed to be empty. */
-               assert(rte_pktmbuf_data_len(buf) == 0);
-               assert(rte_pktmbuf_pkt_len(buf) == 0);
-               assert(!buf->next);
+               MLX5_ASSERT(rte_pktmbuf_data_len(buf) == 0);
+               MLX5_ASSERT(rte_pktmbuf_pkt_len(buf) == 0);
+               MLX5_ASSERT(!buf->next);
                /* Only the first segment keeps headroom. */
                if (i % sges_n)
                        SET_DATA_OFF(buf, 0);
@@ -303,7 +302,7 @@ rxq_free_elts_mprq(struct mlx5_rxq_ctrl *rxq_ctrl)
                rxq->port_id, rxq->idx);
        if (rxq->mprq_bufs == NULL)
                return;
-       assert(mlx5_rxq_check_vec_support(rxq) < 0);
+       MLX5_ASSERT(mlx5_rxq_check_vec_support(rxq) < 0);
        for (i = 0; (i != (1u << rxq->elts_n)); ++i) {
                if ((*rxq->mprq_bufs)[i] != NULL)
                        mlx5_mprq_buf_free((*rxq->mprq_bufs)[i]);
@@ -660,7 +659,7 @@ rxq_obj_hairpin_release(struct mlx5_rxq_obj *rxq_obj)
 {
        struct mlx5_devx_modify_rq_attr rq_attr = { 0 };
 
-       assert(rxq_obj);
+       MLX5_ASSERT(rxq_obj);
        rq_attr.state = MLX5_RQC_STATE_RST;
        rq_attr.rq_state = MLX5_RQC_STATE_RDY;
        mlx5_devx_cmd_modify_rq(rxq_obj->rq, &rq_attr);
@@ -679,26 +678,26 @@ rxq_obj_hairpin_release(struct mlx5_rxq_obj *rxq_obj)
 static int
 mlx5_rxq_obj_release(struct mlx5_rxq_obj *rxq_obj)
 {
-       assert(rxq_obj);
+       MLX5_ASSERT(rxq_obj);
        if (rte_atomic32_dec_and_test(&rxq_obj->refcnt)) {
                switch (rxq_obj->type) {
                case MLX5_RXQ_OBJ_TYPE_IBV:
-                       assert(rxq_obj->wq);
-                       assert(rxq_obj->cq);
+                       MLX5_ASSERT(rxq_obj->wq);
+                       MLX5_ASSERT(rxq_obj->cq);
                        rxq_free_elts(rxq_obj->rxq_ctrl);
                        claim_zero(mlx5_glue->destroy_wq(rxq_obj->wq));
                        claim_zero(mlx5_glue->destroy_cq(rxq_obj->cq));
                        break;
                case MLX5_RXQ_OBJ_TYPE_DEVX_RQ:
-                       assert(rxq_obj->cq);
-                       assert(rxq_obj->rq);
+                       MLX5_ASSERT(rxq_obj->cq);
+                       MLX5_ASSERT(rxq_obj->rq);
                        rxq_free_elts(rxq_obj->rxq_ctrl);
                        claim_zero(mlx5_devx_cmd_destroy(rxq_obj->rq));
                        rxq_release_rq_resources(rxq_obj->rxq_ctrl);
                        claim_zero(mlx5_glue->destroy_cq(rxq_obj->cq));
                        break;
                case MLX5_RXQ_OBJ_TYPE_DEVX_HAIRPIN:
-                       assert(rxq_obj->rq);
+                       MLX5_ASSERT(rxq_obj->rq);
                        rxq_obj_hairpin_release(rxq_obj);
                        break;
                }
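
The release paths above share one shape: drop a reference, and only the last owner tears the object down, with each type asserting the handles it must still hold. In miniature (illustrative struct and teardown):

    #include <sys/queue.h>
    #include <rte_atomic.h>
    #include <rte_malloc.h>

    struct obj {
            rte_atomic32_t refcnt;
            LIST_ENTRY(obj) next;
    };

    /* Return 0 once the object is released, 1 while still referenced. */
    static int
    obj_release(struct obj *o)
    {
            MLX5_ASSERT(o);
            if (rte_atomic32_dec_and_test(&o->refcnt)) {
                    /* type-specific teardown would go here */
                    LIST_REMOVE(o, next);
                    rte_free(o);
                    return 0;
            }
            return 1;
    }
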
@@ -1270,8 +1269,8 @@ mlx5_rxq_obj_hairpin_new(struct rte_eth_dev *dev, uint16_t idx)
        struct mlx5_rxq_obj *tmpl = NULL;
        int ret = 0;
 
-       assert(rxq_data);
-       assert(!rxq_ctrl->obj);
+       MLX5_ASSERT(rxq_data);
+       MLX5_ASSERT(!rxq_ctrl->obj);
        tmpl = rte_calloc_socket(__func__, 1, sizeof(*tmpl), 0,
                                 rxq_ctrl->socket);
        if (!tmpl) {
@@ -1342,8 +1341,8 @@ mlx5_rxq_obj_new(struct rte_eth_dev *dev, uint16_t idx,
        int ret = 0;
        struct mlx5dv_obj obj;
 
-       assert(rxq_data);
-       assert(!rxq_ctrl->obj);
+       MLX5_ASSERT(rxq_data);
+       MLX5_ASSERT(!rxq_ctrl->obj);
        if (type == MLX5_RXQ_OBJ_TYPE_DEVX_HAIRPIN)
                return mlx5_rxq_obj_hairpin_new(dev, idx);
        priv->verbs_alloc_ctx.type = MLX5_VERBS_ALLOC_TYPE_RX_QUEUE;
@@ -1637,7 +1636,7 @@ mlx5_mprq_alloc_mp(struct rte_eth_dev *dev)
                if (strd_sz_n < rxq->strd_sz_n)
                        strd_sz_n = rxq->strd_sz_n;
        }
-       assert(strd_num_n && strd_sz_n);
+       MLX5_ASSERT(strd_num_n && strd_sz_n);
        buf_len = (1 << strd_num_n) * (1 << strd_sz_n);
        obj_size = sizeof(struct mlx5_mprq_buf) + buf_len + (1 << strd_num_n) *
                sizeof(struct rte_mbuf_ext_shared_info) + RTE_PKTMBUF_HEADROOM;
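
A worked sizing example for the computation above (hypothetical stride parameters):

    /* With strd_num_n = 12 and strd_sz_n = 11, one MPRQ buffer holds
     * (1 << 12) strides of (1 << 11) bytes:
     *   buf_len  = 4096 * 2048 = 8 MiB of packet data
     *   obj_size = sizeof(struct mlx5_mprq_buf) + buf_len
     *              + 4096 * sizeof(struct rte_mbuf_ext_shared_info)
     *              + RTE_PKTMBUF_HEADROOM
     */
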
@@ -1742,7 +1741,7 @@ mlx5_max_lro_msg_size_adjust(struct rte_eth_dev *dev, uint16_t idx,
            MLX5_MAX_TCP_HDR_OFFSET)
                max_lro_size -= MLX5_MAX_TCP_HDR_OFFSET;
        max_lro_size = RTE_MIN(max_lro_size, MLX5_MAX_LRO_SIZE);
-       assert(max_lro_size >= MLX5_LRO_SEG_CHUNK_SIZE);
+       MLX5_ASSERT(max_lro_size >= MLX5_LRO_SEG_CHUNK_SIZE);
        max_lro_size /= MLX5_LRO_SEG_CHUNK_SIZE;
        if (priv->max_lro_msg_size)
                priv->max_lro_msg_size =
@@ -2075,7 +2074,7 @@ mlx5_rxq_release(struct rte_eth_dev *dev, uint16_t idx)
        if (!(*priv->rxqs)[idx])
                return 0;
        rxq_ctrl = container_of((*priv->rxqs)[idx], struct mlx5_rxq_ctrl, rxq);
-       assert(rxq_ctrl->priv);
+       MLX5_ASSERT(rxq_ctrl->priv);
        if (rxq_ctrl->obj && !mlx5_rxq_obj_release(rxq_ctrl->obj))
                rxq_ctrl->obj = NULL;
        if (rte_atomic32_dec_and_test(&rxq_ctrl->refcnt)) {
index 0b94ddc..371f96c 100644 (file)
@@ -3,7 +3,6 @@
  * Copyright 2015-2019 Mellanox Technologies, Ltd
  */
 
-#include <assert.h>
 #include <stdint.h>
 #include <string.h>
 #include <stdlib.h>
@@ -784,7 +783,7 @@ mlx5_rxq_initialize(struct mlx5_rxq_data *rxq)
                        byte_count = DATA_LEN(buf);
                }
                /* scat->addr must be able to store a pointer. */
-               assert(sizeof(scat->addr) >= sizeof(uintptr_t));
+               MLX5_ASSERT(sizeof(scat->addr) >= sizeof(uintptr_t));
                *scat = (struct mlx5_wqe_data_seg){
                        .addr = rte_cpu_to_be_64(addr),
                        .byte_count = rte_cpu_to_be_32(byte_count),
@@ -1325,7 +1324,7 @@ mlx5_rx_burst(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n)
                                break;
                        }
                        while (pkt != seg) {
-                               assert(pkt != (*rxq->elts)[idx]);
+                               MLX5_ASSERT(pkt != (*rxq->elts)[idx]);
                                rep = NEXT(pkt);
                                NEXT(pkt) = NULL;
                                NB_SEGS(pkt) = 1;
@@ -1342,7 +1341,7 @@ mlx5_rx_burst(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n)
                                break;
                        }
                        pkt = seg;
-                       assert(len >= (rxq->crc_present << 2));
+                       MLX5_ASSERT(len >= (rxq->crc_present << 2));
                        pkt->ol_flags &= EXT_ATTACHED_MBUF;
                        /* If compressed, take hash result from mini-CQE. */
                        rss_hash_res = rte_be_to_cpu_32(mcqe == NULL ?
@@ -1533,7 +1532,7 @@ mprq_buf_replace(struct mlx5_rxq_data *rxq, uint16_t rq_idx,
                &((volatile struct mlx5_wqe_mprq *)rxq->wqes)[rq_idx].dseg;
        void *addr;
 
-       assert(rep != NULL);
+       MLX5_ASSERT(rep != NULL);
        /* Replace MPRQ buf. */
        (*rxq->mprq_bufs)[rq_idx] = rep;
        /* Replace WQE. */
@@ -1623,7 +1622,7 @@ mlx5_rx_burst_mprq(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n)
                byte_cnt = ret;
                strd_cnt = (byte_cnt & MLX5_MPRQ_STRIDE_NUM_MASK) >>
                           MLX5_MPRQ_STRIDE_NUM_SHIFT;
-               assert(strd_cnt);
+               MLX5_ASSERT(strd_cnt);
                consumed_strd += strd_cnt;
                if (byte_cnt & MLX5_MPRQ_FILLER_MASK)
                        continue;
@@ -1634,8 +1633,9 @@ mlx5_rx_burst_mprq(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n)
                        /* mini-CQE for MPRQ doesn't have hash result. */
                        strd_idx = rte_be_to_cpu_16(mcqe->stride_idx);
                }
-               assert(strd_idx < strd_n);
-               assert(!((rte_be_to_cpu_16(cqe->wqe_id) ^ rq_ci) & wq_mask));
+               MLX5_ASSERT(strd_idx < strd_n);
+               MLX5_ASSERT(!((rte_be_to_cpu_16(cqe->wqe_id) ^ rq_ci) &
+                           wq_mask));
                lro_num_seg = cqe->lro_num_seg;
                /*
                 * Currently configured to receive a packet per a stride. But if
@@ -1654,7 +1654,7 @@ mlx5_rx_burst_mprq(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n)
                        break;
                }
                len = (byte_cnt & MLX5_MPRQ_LEN_MASK) >> MLX5_MPRQ_LEN_SHIFT;
-               assert((int)len >= (rxq->crc_present << 2));
+               MLX5_ASSERT((int)len >= (rxq->crc_present << 2));
                if (rxq->crc_present)
                        len -= RTE_ETHER_CRC_LEN;
                offset = strd_idx * strd_sz + strd_shift;
@@ -1684,8 +1684,8 @@ mlx5_rx_burst_mprq(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n)
 
                        /* Increment the refcnt of the whole chunk. */
                        rte_atomic16_add_return(&buf->refcnt, 1);
-                       assert((uint16_t)rte_atomic16_read(&buf->refcnt) <=
-                              strd_n + 1);
+                       MLX5_ASSERT((uint16_t)rte_atomic16_read(&buf->refcnt) <=
+                                   strd_n + 1);
                        buf_addr = RTE_PTR_SUB(addr, headroom_sz);
                        /*
                         * MLX5 device doesn't use iova but it is necessary in a
@@ -1706,7 +1706,7 @@ mlx5_rx_burst_mprq(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n)
                                                  buf_len, shinfo);
                        /* Set mbuf head-room. */
                        pkt->data_off = headroom_sz;
-                       assert(pkt->ol_flags == EXT_ATTACHED_MBUF);
+                       MLX5_ASSERT(pkt->ol_flags == EXT_ATTACHED_MBUF);
                        /*
                         * Prevent potential overflow due to MTU change through
                         * kernel interface.
@@ -1872,8 +1872,8 @@ mlx5_tx_free_mbuf(struct rte_mbuf **restrict pkts,
         * copying pointers to temporary array
         * for rte_mempool_put_bulk() calls.
         */
-       assert(pkts);
-       assert(pkts_n);
+       MLX5_ASSERT(pkts);
+       MLX5_ASSERT(pkts_n);
        for (;;) {
                for (;;) {
                        /*
@@ -1882,7 +1882,7 @@ mlx5_tx_free_mbuf(struct rte_mbuf **restrict pkts,
                         */
                        mbuf = rte_pktmbuf_prefree_seg(*pkts);
                        if (likely(mbuf != NULL)) {
-                               assert(mbuf == *pkts);
+                               MLX5_ASSERT(mbuf == *pkts);
                                if (likely(n_free != 0)) {
                                        if (unlikely(pool != mbuf->pool))
                                                /* From different pool. */
@@ -1919,9 +1919,9 @@ mlx5_tx_free_mbuf(struct rte_mbuf **restrict pkts,
                         * This loop is implemented to avoid multiple
                         * inlining of rte_mempool_put_bulk().
                         */
-                       assert(pool);
-                       assert(p_free);
-                       assert(n_free);
+                       MLX5_ASSERT(pool);
+                       MLX5_ASSERT(p_free);
+                       MLX5_ASSERT(n_free);
                        /*
                         * Free the array of pre-freed mbufs
                         * belonging to the same memory pool.
@@ -1969,8 +1969,8 @@ mlx5_tx_free_elts(struct mlx5_txq_data *restrict txq,
 {
        uint16_t n_elts = tail - txq->elts_tail;
 
-       assert(n_elts);
-       assert(n_elts <= txq->elts_s);
+       MLX5_ASSERT(n_elts);
+       MLX5_ASSERT(n_elts <= txq->elts_s);
        /*
         * Implement a loop to support ring buffer wraparound
         * with single inlining of mlx5_tx_free_mbuf().
@@ -1980,8 +1980,8 @@ mlx5_tx_free_elts(struct mlx5_txq_data *restrict txq,
 
                part = txq->elts_s - (txq->elts_tail & txq->elts_m);
                part = RTE_MIN(part, n_elts);
-               assert(part);
-               assert(part <= txq->elts_s);
+               MLX5_ASSERT(part);
+               MLX5_ASSERT(part <= txq->elts_s);
                mlx5_tx_free_mbuf(&txq->elts[txq->elts_tail & txq->elts_m],
                                  part, olx);
                txq->elts_tail += part;
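
mlx5_tx_free_elts() above hands the completed mbufs to mlx5_tx_free_mbuf(), whose per-pool batching (described in the earlier hunk's comment) condenses to the following illustrative form (fixed batch size, none of the ring specifics):

    #include <rte_mbuf.h>

    static void
    free_mbufs_batched(struct rte_mbuf **pkts, unsigned int n)
    {
            struct rte_mbuf *batch[64];     /* illustrative batch size */
            struct rte_mempool *pool = NULL;
            unsigned int n_free = 0;
            unsigned int i;

            for (i = 0; i < n; ++i) {
                    struct rte_mbuf *m = rte_pktmbuf_prefree_seg(pkts[i]);

                    if (m == NULL)
                            continue;       /* still referenced elsewhere */
                    if (pool != m->pool || n_free == RTE_DIM(batch)) {
                            if (n_free)     /* flush the previous run */
                                    rte_mempool_put_bulk(pool,
                                            (void **)batch, n_free);
                            pool = m->pool;
                            n_free = 0;
                    }
                    batch[n_free++] = m;
            }
            if (n_free)
                    rte_mempool_put_bulk(pool, (void **)batch, n_free);
    }
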
@@ -2012,11 +2012,11 @@ mlx5_tx_copy_elts(struct mlx5_txq_data *restrict txq,
        unsigned int part;
        struct rte_mbuf **elts = (struct rte_mbuf **)txq->elts;
 
-       assert(pkts);
-       assert(pkts_n);
+       MLX5_ASSERT(pkts);
+       MLX5_ASSERT(pkts_n);
        part = txq->elts_s - (txq->elts_head & txq->elts_m);
-       assert(part);
-       assert(part <= txq->elts_s);
+       MLX5_ASSERT(part);
+       MLX5_ASSERT(part <= txq->elts_s);
        /* This code is a good candidate for vectorizing with SIMD. */
        rte_memcpy((void *)(elts + (txq->elts_head & txq->elts_m)),
                   (void *)pkts,
@@ -2052,7 +2052,7 @@ mlx5_tx_comp_flush(struct mlx5_txq_data *restrict txq,
                tail = txq->fcqs[(txq->cq_ci - 1) & txq->cqe_m];
                if (likely(tail != txq->elts_tail)) {
                        mlx5_tx_free_elts(txq, tail, olx);
-                       assert(tail == txq->elts_tail);
+                       MLX5_ASSERT(tail == txq->elts_tail);
                }
        }
 }
@@ -2090,7 +2090,7 @@ mlx5_tx_handle_completion(struct mlx5_txq_data *restrict txq,
                if (unlikely(ret != MLX5_CQE_STATUS_SW_OWN)) {
                        if (likely(ret != MLX5_CQE_STATUS_ERR)) {
                                /* No new CQEs in completion queue. */
-                               assert(ret == MLX5_CQE_STATUS_HW_OWN);
+                               MLX5_ASSERT(ret == MLX5_CQE_STATUS_HW_OWN);
                                break;
                        }
                        /*
@@ -2122,8 +2122,9 @@ mlx5_tx_handle_completion(struct mlx5_txq_data *restrict txq,
                        continue;
                }
                /* Normal transmit completion. */
-               assert(ci != txq->cq_pi);
-               assert((txq->fcqs[ci & txq->cqe_m] >> 16) == cqe->wqe_counter);
+               MLX5_ASSERT(ci != txq->cq_pi);
+               MLX5_ASSERT((txq->fcqs[ci & txq->cqe_m] >> 16) ==
+                           cqe->wqe_counter);
                ++ci;
                last_cqe = cqe;
                /*
@@ -2192,7 +2193,7 @@ mlx5_tx_request_completion(struct mlx5_txq_data *restrict txq,
                txq->fcqs[txq->cq_pi++ & txq->cqe_m] = head;
 #endif
                /* A CQE slot must always be available. */
-               assert((txq->cq_pi - txq->cq_ci) <= txq->cqe_s);
+               MLX5_ASSERT((txq->cq_pi - txq->cq_ci) <= txq->cqe_s);
        }
 }
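
The bound asserted above leans on unsigned wraparound arithmetic; a small illustration with hypothetical values:

    #include <stdint.h>

    static void
    wrap_example(void)
    {
            uint16_t cq_pi = 3;         /* producer, wrapped past 65535 */
            uint16_t cq_ci = 65533;     /* consumer, not yet wrapped */
            uint16_t cqe_s = 256;       /* completion queue size */
            uint16_t in_flight = cq_pi - cq_ci; /* (3 - 65533) mod 2^16 == 6 */

            MLX5_ASSERT(in_flight <= cqe_s);    /* holds: 6 <= 256 */
    }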
 
@@ -2306,7 +2307,7 @@ mlx5_tx_eseg_none(struct mlx5_txq_data *restrict txq __rte_unused,
                 * We should get here only if the device supports
                 * this feature correctly.
                 */
-               assert(txq->vlan_en);
+               MLX5_ASSERT(txq->vlan_en);
                es->inline_hdr = rte_cpu_to_be_32(MLX5_ETH_WQE_VLAN_INSERT |
                                                  loc->mbuf->vlan_tci);
        } else {
@@ -2384,7 +2385,7 @@ mlx5_tx_eseg_dmin(struct mlx5_txq_data *restrict txq __rte_unused,
                                                 loc->mbuf->vlan_tci);
                pdst += sizeof(struct rte_vlan_hdr);
                /* Copy the rest two bytes from packet data. */
-               assert(pdst == RTE_PTR_ALIGN(pdst, sizeof(uint16_t)));
+               MLX5_ASSERT(pdst == RTE_PTR_ALIGN(pdst, sizeof(uint16_t)));
                *(uint16_t *)pdst = *(unaligned_uint16_t *)psrc;
        } else {
                /* Fill the gap in the title WQEBB with inline data. */
@@ -2477,7 +2478,7 @@ mlx5_tx_eseg_data(struct mlx5_txq_data *restrict txq,
                                                 loc->mbuf->vlan_tci);
                pdst += sizeof(struct rte_vlan_hdr);
                /* Copy the rest two bytes from packet data. */
-               assert(pdst == RTE_PTR_ALIGN(pdst, sizeof(uint16_t)));
+               MLX5_ASSERT(pdst == RTE_PTR_ALIGN(pdst, sizeof(uint16_t)));
                *(uint16_t *)pdst = *(unaligned_uint16_t *)psrc;
                psrc += sizeof(uint16_t);
        } else {
@@ -2486,11 +2487,11 @@ mlx5_tx_eseg_data(struct mlx5_txq_data *restrict txq,
                psrc += sizeof(rte_v128u32_t);
        }
        pdst = (uint8_t *)(es + 2);
-       assert(inlen >= MLX5_ESEG_MIN_INLINE_SIZE);
-       assert(pdst < (uint8_t *)txq->wqes_end);
+       MLX5_ASSERT(inlen >= MLX5_ESEG_MIN_INLINE_SIZE);
+       MLX5_ASSERT(pdst < (uint8_t *)txq->wqes_end);
        inlen -= MLX5_ESEG_MIN_INLINE_SIZE;
        if (!inlen) {
-               assert(pdst == RTE_PTR_ALIGN(pdst, MLX5_WSEG_SIZE));
+               MLX5_ASSERT(pdst == RTE_PTR_ALIGN(pdst, MLX5_WSEG_SIZE));
                return (struct mlx5_wqe_dseg *)pdst;
        }
        /*
@@ -2553,8 +2554,8 @@ mlx5_tx_mseg_memcpy(uint8_t *pdst,
        unsigned int part, dlen, copy = 0;
        uint8_t *psrc;
 
-       assert(len);
-       assert(must <= len);
+       MLX5_ASSERT(len);
+       MLX5_ASSERT(must <= len);
        do {
                /* Allow zero length packets, must check first. */
                dlen = rte_pktmbuf_data_len(loc->mbuf);
@@ -2564,8 +2565,8 @@ mlx5_tx_mseg_memcpy(uint8_t *pdst,
                        loc->mbuf = mbuf->next;
                        rte_pktmbuf_free_seg(mbuf);
                        loc->mbuf_off = 0;
-                       assert(loc->mbuf_nseg > 1);
-                       assert(loc->mbuf);
+                       MLX5_ASSERT(loc->mbuf_nseg > 1);
+                       MLX5_ASSERT(loc->mbuf);
                        --loc->mbuf_nseg;
                        if (loc->mbuf->ol_flags & PKT_TX_DYNF_NOINLINE) {
                                unsigned int diff;
@@ -2604,7 +2605,7 @@ mlx5_tx_mseg_memcpy(uint8_t *pdst,
                                loc->mbuf = mbuf->next;
                                rte_pktmbuf_free_seg(mbuf);
                                loc->mbuf_off = 0;
-                               assert(loc->mbuf_nseg >= 1);
+                               MLX5_ASSERT(loc->mbuf_nseg >= 1);
                                --loc->mbuf_nseg;
                        }
                        return copy;
@@ -2684,7 +2685,7 @@ mlx5_tx_eseg_mdat(struct mlx5_txq_data *restrict txq,
                                 sizeof(struct rte_vlan_hdr) +
                                 2 * RTE_ETHER_ADDR_LEN),
                      "invalid Ethernet Segment data size");
-       assert(inlen >= MLX5_ESEG_MIN_INLINE_SIZE);
+       MLX5_ASSERT(inlen >= MLX5_ESEG_MIN_INLINE_SIZE);
        pdst = (uint8_t *)&es->inline_data;
        if (MLX5_TXOFF_CONFIG(VLAN) && vlan) {
                /* Implement VLAN tag insertion as part inline data. */
@@ -2698,14 +2699,14 @@ mlx5_tx_eseg_mdat(struct mlx5_txq_data *restrict txq,
                pdst += sizeof(struct rte_vlan_hdr);
                tlen += 2 * RTE_ETHER_ADDR_LEN + sizeof(struct rte_vlan_hdr);
        }
-       assert(pdst < (uint8_t *)txq->wqes_end);
+       MLX5_ASSERT(pdst < (uint8_t *)txq->wqes_end);
        /*
         * The WQEBB space availability is checked by caller.
         * Here we should be aware of WQE ring buffer wraparound only.
         */
        part = (uint8_t *)txq->wqes_end - pdst;
        part = RTE_MIN(part, inlen - tlen);
-       assert(part);
+       MLX5_ASSERT(part);
        do {
                unsigned int copy;
 
@@ -2753,7 +2754,7 @@ mlx5_tx_dseg_ptr(struct mlx5_txq_data *restrict txq,
                 unsigned int olx __rte_unused)
 
 {
-       assert(len);
+       MLX5_ASSERT(len);
        dseg->bcount = rte_cpu_to_be_32(len);
        dseg->lkey = mlx5_tx_mb2mr(txq, loc->mbuf);
        dseg->pbuf = rte_cpu_to_be_64((uintptr_t)buf);
@@ -2789,7 +2790,7 @@ mlx5_tx_dseg_iptr(struct mlx5_txq_data *restrict txq,
 {
        uintptr_t dst, src;
 
-       assert(len);
+       MLX5_ASSERT(len);
        if (len > MLX5_DSEG_MIN_INLINE_SIZE) {
                dseg->bcount = rte_cpu_to_be_32(len);
                dseg->lkey = mlx5_tx_mb2mr(txq, loc->mbuf);
@@ -2803,7 +2804,7 @@ mlx5_tx_dseg_iptr(struct mlx5_txq_data *restrict txq,
        src = (uintptr_t)buf;
        if (len & 0x08) {
 #ifdef RTE_ARCH_STRICT_ALIGN
-               assert(dst == RTE_PTR_ALIGN(dst, sizeof(uint32_t)));
+               MLX5_ASSERT(dst == RTE_PTR_ALIGN(dst, sizeof(uint32_t)));
                *(uint32_t *)dst = *(unaligned_uint32_t *)src;
                dst += sizeof(uint32_t);
                src += sizeof(uint32_t);
@@ -2922,7 +2923,7 @@ mlx5_tx_dseg_vlan(struct mlx5_txq_data *restrict txq,
        unsigned int part;
        uint8_t *pdst;
 
-       assert(len > MLX5_ESEG_MIN_INLINE_SIZE);
+       MLX5_ASSERT(len > MLX5_ESEG_MIN_INLINE_SIZE);
        static_assert(MLX5_DSEG_MIN_INLINE_SIZE ==
                                 (2 * RTE_ETHER_ADDR_LEN),
                      "invalid Data Segment data size");
@@ -2934,7 +2935,7 @@ mlx5_tx_dseg_vlan(struct mlx5_txq_data *restrict txq,
        pdst += MLX5_DSEG_MIN_INLINE_SIZE;
        len -= MLX5_DSEG_MIN_INLINE_SIZE;
        /* Insert VLAN ethertype + VLAN tag. Pointer is aligned. */
-       assert(pdst == RTE_PTR_ALIGN(pdst, MLX5_WSEG_SIZE));
+       MLX5_ASSERT(pdst == RTE_PTR_ALIGN(pdst, MLX5_WSEG_SIZE));
        if (unlikely(pdst >= (uint8_t *)txq->wqes_end))
                pdst = (uint8_t *)txq->wqes;
        *(uint32_t *)pdst = rte_cpu_to_be_32((RTE_ETHER_TYPE_VLAN << 16) |
@@ -3002,7 +3003,7 @@ mlx5_tx_mseg_build(struct mlx5_txq_data *restrict txq,
        struct mlx5_wqe_dseg *restrict dseg;
        unsigned int ds;
 
-       assert((rte_pktmbuf_pkt_len(loc->mbuf) + vlan) >= inlen);
+       MLX5_ASSERT((rte_pktmbuf_pkt_len(loc->mbuf) + vlan) >= inlen);
        loc->mbuf_nseg = NB_SEGS(loc->mbuf);
        loc->mbuf_off = 0;
 
@@ -3023,8 +3024,8 @@ mlx5_tx_mseg_build(struct mlx5_txq_data *restrict txq,
                 * A non-zero offset means some data remains
                 * in the packet.
                 */
-               assert(loc->mbuf_off < rte_pktmbuf_data_len(loc->mbuf));
-               assert(rte_pktmbuf_data_len(loc->mbuf));
+               MLX5_ASSERT(loc->mbuf_off < rte_pktmbuf_data_len(loc->mbuf));
+               MLX5_ASSERT(rte_pktmbuf_data_len(loc->mbuf));
                dptr = rte_pktmbuf_mtod_offset(loc->mbuf, uint8_t *,
                                               loc->mbuf_off);
                dlen = rte_pktmbuf_data_len(loc->mbuf) - loc->mbuf_off;
@@ -3036,7 +3037,7 @@ mlx5_tx_mseg_build(struct mlx5_txq_data *restrict txq,
                        dseg = (struct mlx5_wqe_dseg *)txq->wqes;
                mlx5_tx_dseg_iptr(txq, loc, dseg, dptr, dlen, olx);
                /* Store the mbuf to be freed on completion. */
-               assert(loc->elts_free);
+               MLX5_ASSERT(loc->elts_free);
                txq->elts[txq->elts_head++ & txq->elts_m] = loc->mbuf;
                --loc->elts_free;
                ++dseg;
@@ -3062,7 +3063,7 @@ mlx5_tx_mseg_build(struct mlx5_txq_data *restrict txq,
                                (txq, loc, dseg,
                                 rte_pktmbuf_mtod(loc->mbuf, uint8_t *),
                                 rte_pktmbuf_data_len(loc->mbuf), olx);
-                       assert(loc->elts_free);
+                       MLX5_ASSERT(loc->elts_free);
                        txq->elts[txq->elts_head++ & txq->elts_m] = loc->mbuf;
                        --loc->elts_free;
                        ++dseg;
@@ -3129,7 +3130,7 @@ mlx5_tx_packet_multi_tso(struct mlx5_txq_data *restrict txq,
                     inlen <= MLX5_ESEG_MIN_INLINE_SIZE ||
                     inlen > (dlen + vlan)))
                return MLX5_TXCMP_CODE_ERROR;
-       assert(inlen >= txq->inlen_mode);
+       MLX5_ASSERT(inlen >= txq->inlen_mode);
        /*
         * Check whether there are enough free WQEBBs:
         * - Control Segment
@@ -3201,7 +3202,7 @@ mlx5_tx_packet_multi_send(struct mlx5_txq_data *restrict txq,
        struct mlx5_wqe *restrict wqe;
        unsigned int ds, nseg;
 
-       assert(NB_SEGS(loc->mbuf) > 1);
+       MLX5_ASSERT(NB_SEGS(loc->mbuf) > 1);
        /*
         * No inline at all, it means the CPU cycles saving
         * is prioritized at configuration, we should not
@@ -3308,8 +3309,8 @@ mlx5_tx_packet_multi_inline(struct mlx5_txq_data *restrict txq,
        struct mlx5_wqe *restrict wqe;
        unsigned int ds, inlen, dlen, vlan = 0;
 
-       assert(MLX5_TXOFF_CONFIG(INLINE));
-       assert(NB_SEGS(loc->mbuf) > 1);
+       MLX5_ASSERT(MLX5_TXOFF_CONFIG(INLINE));
+       MLX5_ASSERT(NB_SEGS(loc->mbuf) > 1);
        /*
         * First calculate data length to be inlined
         * to estimate the required space for WQE.
@@ -3321,7 +3322,7 @@ mlx5_tx_packet_multi_inline(struct mlx5_txq_data *restrict txq,
        /* Check against minimal length. */
        if (inlen <= MLX5_ESEG_MIN_INLINE_SIZE)
                return MLX5_TXCMP_CODE_ERROR;
-       assert(txq->inlen_send >= MLX5_ESEG_MIN_INLINE_SIZE);
+       MLX5_ASSERT(txq->inlen_send >= MLX5_ESEG_MIN_INLINE_SIZE);
        if (inlen > txq->inlen_send ||
            loc->mbuf->ol_flags & PKT_TX_DYNF_NOINLINE) {
                struct rte_mbuf *mbuf;
@@ -3334,8 +3335,9 @@ mlx5_tx_packet_multi_inline(struct mlx5_txq_data *restrict txq,
                 * inlining is required.
                 */
                if (txq->inlen_mode) {
-                       assert(txq->inlen_mode >= MLX5_ESEG_MIN_INLINE_SIZE);
-                       assert(txq->inlen_mode <= txq->inlen_send);
+                       MLX5_ASSERT(txq->inlen_mode >=
+                                   MLX5_ESEG_MIN_INLINE_SIZE);
+                       MLX5_ASSERT(txq->inlen_mode <= txq->inlen_send);
                        inlen = txq->inlen_mode;
                } else {
                        if (loc->mbuf->ol_flags & PKT_TX_DYNF_NOINLINE ||
@@ -3368,7 +3370,7 @@ mlx5_tx_packet_multi_inline(struct mlx5_txq_data *restrict txq,
                                do {
                                        smlen = nxlen;
                                        mbuf = NEXT(mbuf);
-                                       assert(mbuf);
+                                       MLX5_ASSERT(mbuf);
                                        nxlen = rte_pktmbuf_data_len(mbuf);
                                        nxlen += smlen;
                                } while (unlikely(nxlen < inlen));
@@ -3384,7 +3386,7 @@ mlx5_tx_packet_multi_inline(struct mlx5_txq_data *restrict txq,
                                inlen = nxlen;
                                mbuf = NEXT(mbuf);
                                 /* This must not be the end of the packet. */
-                               assert(mbuf);
+                               MLX5_ASSERT(mbuf);
                                nxlen = inlen + rte_pktmbuf_data_len(mbuf);
                        } while (unlikely(nxlen < txq->inlen_send));
                }
@@ -3412,7 +3414,7 @@ do_align:
         * Estimate the number of Data Segments conservatively,
         * assuming no mbufs are freed during inlining.
         */
-       assert(inlen <= txq->inlen_send);
+       MLX5_ASSERT(inlen <= txq->inlen_send);
        ds = NB_SEGS(loc->mbuf) + 2 + (inlen -
                                       MLX5_ESEG_MIN_INLINE_SIZE +
                                       MLX5_WSEG_SIZE +
@@ -3471,14 +3473,14 @@ mlx5_tx_burst_mseg(struct mlx5_txq_data *restrict txq,
                   struct mlx5_txq_local *restrict loc,
                   unsigned int olx)
 {
-       assert(loc->elts_free && loc->wqe_free);
-       assert(pkts_n > loc->pkts_sent);
+       MLX5_ASSERT(loc->elts_free && loc->wqe_free);
+       MLX5_ASSERT(pkts_n > loc->pkts_sent);
        pkts += loc->pkts_sent + 1;
        pkts_n -= loc->pkts_sent;
        for (;;) {
                enum mlx5_txcmp_code ret;
 
-               assert(NB_SEGS(loc->mbuf) > 1);
+               MLX5_ASSERT(NB_SEGS(loc->mbuf) > 1);
                /*
                 * Estimate the number of free elts quickly but
                 * conservatively. Some segment may be fully inlined
@@ -3518,7 +3520,7 @@ mlx5_tx_burst_mseg(struct mlx5_txq_data *restrict txq,
                        return MLX5_TXCMP_CODE_TSO;
                return MLX5_TXCMP_CODE_SINGLE;
        }
-       assert(false);
+       MLX5_ASSERT(false);
 }
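
The trailing MLX5_ASSERT(false) converted above marks a point that is unreachable by construction, since every path out of the burst loop returns a completion code; the pattern in miniature:

    static int
    dispatch_loop(unsigned int budget)
    {
            for (;;) {
                    if (budget == 0)
                            return 0;   /* every exit is a return */
                    --budget;
            }
            /* Unreachable: the loop is only left via return above.
             * A debug build traps here if that invariant breaks. */
            MLX5_ASSERT(false);
    }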
 
 /**
@@ -3560,8 +3562,8 @@ mlx5_tx_burst_tso(struct mlx5_txq_data *restrict txq,
                  struct mlx5_txq_local *restrict loc,
                  unsigned int olx)
 {
-       assert(loc->elts_free && loc->wqe_free);
-       assert(pkts_n > loc->pkts_sent);
+       MLX5_ASSERT(loc->elts_free && loc->wqe_free);
+       MLX5_ASSERT(pkts_n > loc->pkts_sent);
        pkts += loc->pkts_sent + 1;
        pkts_n -= loc->pkts_sent;
        for (;;) {
@@ -3570,7 +3572,7 @@ mlx5_tx_burst_tso(struct mlx5_txq_data *restrict txq,
                unsigned int ds, dlen, hlen, ntcp, vlan = 0;
                uint8_t *dptr;
 
-               assert(NB_SEGS(loc->mbuf) == 1);
+               MLX5_ASSERT(NB_SEGS(loc->mbuf) == 1);
                dlen = rte_pktmbuf_data_len(loc->mbuf);
                if (MLX5_TXOFF_CONFIG(VLAN) &&
                    loc->mbuf->ol_flags & PKT_TX_VLAN_PKT) {
@@ -3654,7 +3656,7 @@ mlx5_tx_burst_tso(struct mlx5_txq_data *restrict txq,
                        return MLX5_TXCMP_CODE_SINGLE;
                /* Continue with the next TSO packet. */
        }
-       assert(false);
+       MLX5_ASSERT(false);
 }
 
 /**
@@ -3761,7 +3763,7 @@ mlx5_tx_match_empw(struct mlx5_txq_data *restrict txq __rte_unused,
                return false;
        /* There must be no VLAN packets in eMPW loop. */
        if (MLX5_TXOFF_CONFIG(VLAN))
-               assert(!(loc->mbuf->ol_flags & PKT_TX_VLAN_PKT));
+               MLX5_ASSERT(!(loc->mbuf->ol_flags & PKT_TX_VLAN_PKT));
        return true;
 }
 
@@ -3793,7 +3795,7 @@ mlx5_tx_sdone_empw(struct mlx5_txq_data *restrict txq,
                   unsigned int slen,
                   unsigned int olx __rte_unused)
 {
-       assert(!MLX5_TXOFF_CONFIG(INLINE));
+       MLX5_ASSERT(!MLX5_TXOFF_CONFIG(INLINE));
 #ifdef MLX5_PMD_SOFT_COUNTERS
        /* Update sent data bytes counter. */
         txq->stats.obytes += slen;
@@ -3836,8 +3838,8 @@ mlx5_tx_idone_empw(struct mlx5_txq_data *restrict txq,
                   unsigned int slen,
                   unsigned int olx __rte_unused)
 {
-       assert(MLX5_TXOFF_CONFIG(INLINE));
-       assert((len % MLX5_WSEG_SIZE) == 0);
+       MLX5_ASSERT(MLX5_TXOFF_CONFIG(INLINE));
+       MLX5_ASSERT((len % MLX5_WSEG_SIZE) == 0);
 #ifdef MLX5_PMD_SOFT_COUNTERS
        /* Update sent data bytes counter. */
         txq->stats.obytes += slen;
@@ -3909,10 +3911,10 @@ mlx5_tx_burst_empw_simple(struct mlx5_txq_data *restrict txq,
         * and sends single-segment packet with eMPW opcode
         * without data inlining.
         */
-       assert(!MLX5_TXOFF_CONFIG(INLINE));
-       assert(MLX5_TXOFF_CONFIG(EMPW));
-       assert(loc->elts_free && loc->wqe_free);
-       assert(pkts_n > loc->pkts_sent);
+       MLX5_ASSERT(!MLX5_TXOFF_CONFIG(INLINE));
+       MLX5_ASSERT(MLX5_TXOFF_CONFIG(EMPW));
+       MLX5_ASSERT(loc->elts_free && loc->wqe_free);
+       MLX5_ASSERT(pkts_n > loc->pkts_sent);
        static_assert(MLX5_EMPW_MIN_PACKETS >= 2, "invalid min size");
        pkts += loc->pkts_sent + 1;
        pkts_n -= loc->pkts_sent;
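
Two assertion flavors meet here: static_assert() fails the build if a compile-time constant is mis-tuned, while MLX5_ASSERT() is a run-time check active only in debug builds. Side by side (illustrative constant; MLX5_ASSERT assumed in scope):

    #include <assert.h>                 /* C11 static_assert */

    #define EMPW_MIN_PACKETS 2          /* illustrative constant */

    /* Rejected by the compiler if the constant is ever mis-tuned. */
    static_assert(EMPW_MIN_PACKETS >= 2, "invalid min size");

    static void
    check_budget(unsigned int elts_free, unsigned int wqe_free)
    {
            /* Checked at run time, and only in debug builds. */
            MLX5_ASSERT(elts_free && wqe_free);
    }
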
@@ -3924,7 +3926,7 @@ mlx5_tx_burst_empw_simple(struct mlx5_txq_data *restrict txq,
                unsigned int slen = 0;
 
 next_empw:
-               assert(NB_SEGS(loc->mbuf) == 1);
+               MLX5_ASSERT(NB_SEGS(loc->mbuf) == 1);
                part = RTE_MIN(pkts_n, MLX5_TXOFF_CONFIG(MPW) ?
                                       MLX5_MPW_MAX_PACKETS :
                                       MLX5_EMPW_MAX_PACKETS);
@@ -3990,7 +3992,7 @@ next_empw:
                                        return MLX5_TXCMP_CODE_EXIT;
                                return MLX5_TXCMP_CODE_MULTI;
                        }
-                       assert(NB_SEGS(loc->mbuf) == 1);
+                       MLX5_ASSERT(NB_SEGS(loc->mbuf) == 1);
                        if (ret == MLX5_TXCMP_CODE_TSO) {
                                part -= loop;
                                mlx5_tx_sdone_empw(txq, loc, part, slen, olx);
@@ -4008,7 +4010,7 @@ next_empw:
                                return MLX5_TXCMP_CODE_SINGLE;
                        }
                        if (ret != MLX5_TXCMP_CODE_EMPW) {
-                               assert(false);
+                               MLX5_ASSERT(false);
                                part -= loop;
                                mlx5_tx_sdone_empw(txq, loc, part, slen, olx);
                                return MLX5_TXCMP_CODE_ERROR;
@@ -4022,7 +4024,7 @@ next_empw:
                         * - packets length (legacy MPW only)
                         */
                        if (!mlx5_tx_match_empw(txq, eseg, loc, dlen, olx)) {
-                               assert(loop);
+                               MLX5_ASSERT(loop);
                                part -= loop;
                                mlx5_tx_sdone_empw(txq, loc, part, slen, olx);
                                if (unlikely(!loc->elts_free ||
@@ -4037,8 +4039,8 @@ next_empw:
                                dseg = (struct mlx5_wqe_dseg *)txq->wqes;
                }
                /* eMPW is built successfully, update loop parameters. */
-               assert(!loop);
-               assert(pkts_n >= part);
+               MLX5_ASSERT(!loop);
+               MLX5_ASSERT(pkts_n >= part);
 #ifdef MLX5_PMD_SOFT_COUNTERS
                /* Update sent data bytes counter. */
                txq->stats.obytes += slen;
@@ -4056,7 +4058,7 @@ next_empw:
                        return ret;
                /* Continue sending eMPW batches. */
        }
-       assert(false);
+       MLX5_ASSERT(false);
 }
 
 /**
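
A note on the MLX5_ASSERT(false) lines above: the send loops are written as for (;;) with a return on every real outcome, so control can never fall out of the loop. A small self-contained sketch of the idiom, with plain assert standing in for MLX5_ASSERT:

    #include <assert.h>

    static int
    drain(int n)
    {
            /* Every iteration either makes progress or returns. */
            for (;;) {
                    if (n <= 0)
                            return 0;
                    --n;
            }
            /* Not reached; documents the invariant in debug builds. */
            assert(0);
    }

    int
    main(void)
    {
            return drain(3);
    }
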
@@ -4075,10 +4077,10 @@ mlx5_tx_burst_empw_inline(struct mlx5_txq_data *restrict txq,
         * and sends single-segment packets with eMPW opcode
         * with data inlining.
         */
-       assert(MLX5_TXOFF_CONFIG(INLINE));
-       assert(MLX5_TXOFF_CONFIG(EMPW));
-       assert(loc->elts_free && loc->wqe_free);
-       assert(pkts_n > loc->pkts_sent);
+       MLX5_ASSERT(MLX5_TXOFF_CONFIG(INLINE));
+       MLX5_ASSERT(MLX5_TXOFF_CONFIG(EMPW));
+       MLX5_ASSERT(loc->elts_free && loc->wqe_free);
+       MLX5_ASSERT(pkts_n > loc->pkts_sent);
        static_assert(MLX5_EMPW_MIN_PACKETS >= 2, "invalid min size");
        pkts += loc->pkts_sent + 1;
        pkts_n -= loc->pkts_sent;
@@ -4089,7 +4091,7 @@ mlx5_tx_burst_empw_inline(struct mlx5_txq_data *restrict txq,
                unsigned int room, part, nlim;
                unsigned int slen = 0;
 
-               assert(NB_SEGS(loc->mbuf) == 1);
+               MLX5_ASSERT(NB_SEGS(loc->mbuf) == 1);
                /*
                 * Limit the number of packets in one WQE
                 * to improve CQE generation latency.
@@ -4130,9 +4132,9 @@ mlx5_tx_burst_empw_inline(struct mlx5_txq_data *restrict txq,
                        uint8_t *dptr = rte_pktmbuf_mtod(loc->mbuf, uint8_t *);
                        unsigned int tlen;
 
-                       assert(room >= MLX5_WQE_DSEG_SIZE);
-                       assert((room % MLX5_WQE_DSEG_SIZE) == 0);
-                       assert((uintptr_t)dseg < (uintptr_t)txq->wqes_end);
+                       MLX5_ASSERT(room >= MLX5_WQE_DSEG_SIZE);
+                       MLX5_ASSERT((room % MLX5_WQE_DSEG_SIZE) == 0);
+                       MLX5_ASSERT((uintptr_t)dseg < (uintptr_t)txq->wqes_end);
                        /*
                         * Some Tx offloads may cause an error if the
                         * packet is not long enough; check against the
@@ -4162,8 +4164,9 @@ mlx5_tx_burst_empw_inline(struct mlx5_txq_data *restrict txq,
                                 * mlx5_tx_able_to_empw() and the packet
                                 * is guaranteed to fit into the inline length.
                                 */
-                               assert((dlen + sizeof(struct rte_vlan_hdr)) <=
-                                       txq->inlen_empw);
+                               MLX5_ASSERT((dlen +
+                                            sizeof(struct rte_vlan_hdr)) <=
+                                           txq->inlen_empw);
                                tlen += sizeof(struct rte_vlan_hdr);
                                if (room < tlen)
                                        break;
@@ -4180,7 +4183,7 @@ mlx5_tx_burst_empw_inline(struct mlx5_txq_data *restrict txq,
                                                         dptr, dlen, olx);
                        }
                        tlen = RTE_ALIGN(tlen, MLX5_WSEG_SIZE);
-                       assert(room >= tlen);
+                       MLX5_ASSERT(room >= tlen);
                        room -= tlen;
                        /*
                         * Packet data are completely inlined,
@@ -4193,10 +4196,10 @@ pointer_empw:
                         * Not inlinable VLAN packets are
                         * processed outside of this routine.
                         */
-                       assert(room >= MLX5_WQE_DSEG_SIZE);
+                       MLX5_ASSERT(room >= MLX5_WQE_DSEG_SIZE);
                        if (MLX5_TXOFF_CONFIG(VLAN))
-                               assert(!(loc->mbuf->ol_flags &
-                                        PKT_TX_VLAN_PKT));
+                               MLX5_ASSERT(!(loc->mbuf->ol_flags &
+                                           PKT_TX_VLAN_PKT));
                        mlx5_tx_dseg_ptr(txq, loc, dseg, dptr, dlen, olx);
                        /* We have to store mbuf in elts. */
                        txq->elts[txq->elts_head++ & txq->elts_m] = loc->mbuf;
@@ -4237,7 +4240,7 @@ next_mbuf:
                                        return MLX5_TXCMP_CODE_EXIT;
                                return MLX5_TXCMP_CODE_MULTI;
                        }
-                       assert(NB_SEGS(loc->mbuf) == 1);
+                       MLX5_ASSERT(NB_SEGS(loc->mbuf) == 1);
                        if (ret == MLX5_TXCMP_CODE_TSO) {
                                part -= room;
                                mlx5_tx_idone_empw(txq, loc, part, slen, olx);
@@ -4255,7 +4258,7 @@ next_mbuf:
                                return MLX5_TXCMP_CODE_SINGLE;
                        }
                        if (ret != MLX5_TXCMP_CODE_EMPW) {
-                               assert(false);
+                               MLX5_ASSERT(false);
                                part -= room;
                                mlx5_tx_idone_empw(txq, loc, part, slen, olx);
                                return MLX5_TXCMP_CODE_ERROR;
@@ -4282,7 +4285,7 @@ next_mbuf:
                 * We get here to close an existing eMPW
                 * session and start the new one.
                 */
-               assert(pkts_n);
+               MLX5_ASSERT(pkts_n);
                part -= room;
                if (unlikely(!part))
                        return MLX5_TXCMP_CODE_EXIT;
@@ -4292,7 +4295,7 @@ next_mbuf:
                        return MLX5_TXCMP_CODE_EXIT;
                /* Continue the loop with new eMPW session. */
        }
-       assert(false);
+       MLX5_ASSERT(false);
 }
 
 /**
@@ -4310,15 +4313,15 @@ mlx5_tx_burst_single_send(struct mlx5_txq_data *restrict txq,
         * Subroutine is the part of mlx5_tx_burst_single()
         * and sends single-segment packets with SEND opcode.
         */
-       assert(loc->elts_free && loc->wqe_free);
-       assert(pkts_n > loc->pkts_sent);
+       MLX5_ASSERT(loc->elts_free && loc->wqe_free);
+       MLX5_ASSERT(pkts_n > loc->pkts_sent);
        pkts += loc->pkts_sent + 1;
        pkts_n -= loc->pkts_sent;
        for (;;) {
                struct mlx5_wqe *restrict wqe;
                enum mlx5_txcmp_code ret;
 
-               assert(NB_SEGS(loc->mbuf) == 1);
+               MLX5_ASSERT(NB_SEGS(loc->mbuf) == 1);
                if (MLX5_TXOFF_CONFIG(INLINE)) {
                        unsigned int inlen, vlan = 0;
 
@@ -4338,7 +4341,8 @@ mlx5_tx_burst_single_send(struct mlx5_txq_data *restrict txq,
                         * Otherwise we would do an extra check on the data
                         * size to avoid crashes due to length overflow.
                         */
-                       assert(txq->inlen_send >= MLX5_ESEG_MIN_INLINE_SIZE);
+                       MLX5_ASSERT(txq->inlen_send >=
+                                   MLX5_ESEG_MIN_INLINE_SIZE);
                        if (inlen <= txq->inlen_send) {
                                unsigned int seg_n, wqe_n;
 
@@ -4424,10 +4428,10 @@ mlx5_tx_burst_single_send(struct mlx5_txq_data *restrict txq,
                                 * WQE ring buffer to inline partially.
                                 */
 single_min_inline:
-                               assert(txq->inlen_send >= txq->inlen_mode);
-                               assert(inlen > txq->inlen_mode);
-                               assert(txq->inlen_mode >=
-                                               MLX5_ESEG_MIN_INLINE_SIZE);
+                               MLX5_ASSERT(txq->inlen_send >= txq->inlen_mode);
+                               MLX5_ASSERT(inlen > txq->inlen_mode);
+                               MLX5_ASSERT(txq->inlen_mode >=
+                                           MLX5_ESEG_MIN_INLINE_SIZE);
                                /*
                                 * Check whether there are enough free WQEBBs:
                                 * - Control Segment
@@ -4470,7 +4474,7 @@ single_min_inline:
                                txq->wqe_ci += (ds + 3) / 4;
                                loc->wqe_free -= (ds + 3) / 4;
                                /* We have to store mbuf in elts. */
-                               assert(MLX5_TXOFF_CONFIG(INLINE));
+                               MLX5_ASSERT(MLX5_TXOFF_CONFIG(INLINE));
                                txq->elts[txq->elts_head++ & txq->elts_m] =
                                                loc->mbuf;
                                --loc->elts_free;
@@ -4504,14 +4508,14 @@ single_part_inline:
                                 * comparing with txq->inlen_send. We should
                                 * not get overflow here.
                                 */
-                               assert(inlen > MLX5_ESEG_MIN_INLINE_SIZE);
+                               MLX5_ASSERT(inlen > MLX5_ESEG_MIN_INLINE_SIZE);
                                dlen = inlen - MLX5_ESEG_MIN_INLINE_SIZE;
                                mlx5_tx_dseg_ptr(txq, loc, &wqe->dseg[1],
                                                 dptr, dlen, olx);
                                ++txq->wqe_ci;
                                --loc->wqe_free;
                                /* We have to store mbuf in elts. */
-                               assert(MLX5_TXOFF_CONFIG(INLINE));
+                               MLX5_ASSERT(MLX5_TXOFF_CONFIG(INLINE));
                                txq->elts[txq->elts_head++ & txq->elts_m] =
                                                loc->mbuf;
                                --loc->elts_free;
@@ -4549,7 +4553,7 @@ single_no_inline:
                         * if no inlining is configured, this is done
                         * by the calling routine in a batch copy.
                         */
-                       assert(!MLX5_TXOFF_CONFIG(INLINE));
+                       MLX5_ASSERT(!MLX5_TXOFF_CONFIG(INLINE));
                        --loc->elts_free;
 #ifdef MLX5_PMD_SOFT_COUNTERS
                        /* Update sent data bytes counter. */
@@ -4571,7 +4575,7 @@ single_no_inline:
                if (unlikely(ret != MLX5_TXCMP_CODE_SINGLE))
                        return ret;
        }
-       assert(false);
+       MLX5_ASSERT(false);
 }
 
 static __rte_always_inline enum mlx5_txcmp_code
@@ -4586,7 +4590,7 @@ mlx5_tx_burst_single(struct mlx5_txq_data *restrict txq,
        ret = mlx5_tx_able_to_empw(txq, loc, olx, false);
        if (ret == MLX5_TXCMP_CODE_SINGLE)
                goto ordinary_send;
-       assert(ret == MLX5_TXCMP_CODE_EMPW);
+       MLX5_ASSERT(ret == MLX5_TXCMP_CODE_EMPW);
        for (;;) {
                /* Optimize for inline/no inline eMPW send. */
                ret = (MLX5_TXOFF_CONFIG(INLINE)) ?
@@ -4597,14 +4601,14 @@ mlx5_tx_burst_single(struct mlx5_txq_data *restrict txq,
                if (ret != MLX5_TXCMP_CODE_SINGLE)
                        return ret;
                /* The resources to send one packet should remain. */
-               assert(loc->elts_free && loc->wqe_free);
+               MLX5_ASSERT(loc->elts_free && loc->wqe_free);
 ordinary_send:
                ret = mlx5_tx_burst_single_send(txq, pkts, pkts_n, loc, olx);
-               assert(ret != MLX5_TXCMP_CODE_SINGLE);
+               MLX5_ASSERT(ret != MLX5_TXCMP_CODE_SINGLE);
                if (ret != MLX5_TXCMP_CODE_EMPW)
                        return ret;
                /* The resources to send one packet should remain. */
-               assert(loc->elts_free && loc->wqe_free);
+               MLX5_ASSERT(loc->elts_free && loc->wqe_free);
        }
 }
 
@@ -4638,8 +4642,8 @@ mlx5_tx_burst_tmpl(struct mlx5_txq_data *restrict txq,
        enum mlx5_txcmp_code ret;
        unsigned int part;
 
-       assert(txq->elts_s >= (uint16_t)(txq->elts_head - txq->elts_tail));
-       assert(txq->wqe_s >= (uint16_t)(txq->wqe_ci - txq->wqe_pi));
+       MLX5_ASSERT(txq->elts_s >= (uint16_t)(txq->elts_head - txq->elts_tail));
+       MLX5_ASSERT(txq->wqe_s >= (uint16_t)(txq->wqe_ci - txq->wqe_pi));
        if (unlikely(!pkts_n))
                return 0;
        loc.pkts_sent = 0;
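
The two assertions above encode the ring invariant used throughout this file: indices are free-running 16-bit counters, and unsigned wrap-around subtraction yields the in-flight count. A self-contained illustration, with arbitrary values:

    #include <assert.h>
    #include <stdint.h>

    int
    main(void)
    {
            const uint16_t elts_s = 256;   /* ring size */
            uint16_t head = 10;            /* already wrapped past 65535 */
            uint16_t tail = 65526;         /* still below the wrap point */
            /* 16-bit subtraction stays correct across the wrap:
             * (10 - 65526) mod 65536 equals 20 entries in flight. */
            uint16_t used = (uint16_t)(head - tail);

            assert(used == 20);
            assert(elts_s >= used); /* the invariant asserted above */
            return 0;
    }
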
@@ -4665,10 +4669,10 @@ send_loop:
         * - data inlining into WQEs, one packet may require multiple
         *   WQEBBs, so the WQEs become the limiting factor.
         */
-       assert(txq->elts_s >= (uint16_t)(txq->elts_head - txq->elts_tail));
+       MLX5_ASSERT(txq->elts_s >= (uint16_t)(txq->elts_head - txq->elts_tail));
        loc.elts_free = txq->elts_s -
                                (uint16_t)(txq->elts_head - txq->elts_tail);
-       assert(txq->wqe_s >= (uint16_t)(txq->wqe_ci - txq->wqe_pi));
+       MLX5_ASSERT(txq->wqe_s >= (uint16_t)(txq->wqe_ci - txq->wqe_pi));
        loc.wqe_free = txq->wqe_s -
                                (uint16_t)(txq->wqe_ci - txq->wqe_pi);
        if (unlikely(!loc.elts_free || !loc.wqe_free))
@@ -4690,7 +4694,7 @@ send_loop:
                         * per WQE, do it in a dedicated routine.
                         */
 enter_send_multi:
-                       assert(loc.pkts_sent >= loc.pkts_copy);
+                       MLX5_ASSERT(loc.pkts_sent >= loc.pkts_copy);
                        part = loc.pkts_sent - loc.pkts_copy;
                        if (!MLX5_TXOFF_CONFIG(INLINE) && part) {
                                /*
@@ -4704,7 +4708,7 @@ enter_send_multi:
                                                  part, olx);
                                loc.pkts_copy = loc.pkts_sent;
                        }
-                       assert(pkts_n > loc.pkts_sent);
+                       MLX5_ASSERT(pkts_n > loc.pkts_sent);
                        ret = mlx5_tx_burst_mseg(txq, pkts, pkts_n, &loc, olx);
                        if (!MLX5_TXOFF_CONFIG(INLINE))
                                loc.pkts_copy = loc.pkts_sent;
@@ -4746,7 +4750,7 @@ enter_send_multi:
                                goto enter_send_tso;
                        }
                        /* We must not get here. Something went wrong. */
-                       assert(false);
+                       MLX5_ASSERT(false);
                        txq->stats.oerrors++;
                        break;
                }
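
When assertions are compiled out, the MLX5_ASSERT(false) above vanishes and the fallback path still counts the packet in oerrors and leaves the loop. A standalone rendering of this debug-or-degrade pattern; the macro name here is illustrative:

    #include <assert.h>
    #include <stdio.h>

    #ifdef DEBUG_DATAPATH
    #define DP_ASSERT(exp) assert(exp)
    #else
    #define DP_ASSERT(exp) ((void)0)
    #endif

    int
    main(void)
    {
            unsigned long oerrors = 0;
            int ret = -1; /* an unexpected completion code */

            switch (ret) {
            case 0:
                    break;
            default:
                    DP_ASSERT(0); /* debug build: abort loudly */
                    oerrors++;    /* release build: record and move on */
                    break;
            }
            printf("oerrors=%lu\n", oerrors);
            return 0;
    }
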
@@ -4760,8 +4764,8 @@ enter_send_multi:
                         * in dedicated branch.
                         */
 enter_send_tso:
-                       assert(NB_SEGS(loc.mbuf) == 1);
-                       assert(pkts_n > loc.pkts_sent);
+                       MLX5_ASSERT(NB_SEGS(loc.mbuf) == 1);
+                       MLX5_ASSERT(pkts_n > loc.pkts_sent);
                        ret = mlx5_tx_burst_tso(txq, pkts, pkts_n, &loc, olx);
                        /*
                         * These return code checks are supposed
@@ -4784,7 +4788,7 @@ enter_send_tso:
                                goto enter_send_multi;
                        }
                        /* We must not get here. Something went wrong. */
-                       assert(false);
+                       MLX5_ASSERT(false);
                        txq->stats.oerrors++;
                        break;
                }
@@ -4797,7 +4801,7 @@ enter_send_tso:
                 * offloads are requested at SQ configuration time).
                 */
 enter_send_single:
-               assert(pkts_n > loc.pkts_sent);
+               MLX5_ASSERT(pkts_n > loc.pkts_sent);
                ret = mlx5_tx_burst_single(txq, pkts, pkts_n, &loc, olx);
                /*
                 * These return code checks are supposed
@@ -4826,7 +4830,7 @@ enter_send_single:
                        goto enter_send_tso;
                }
                /* We must not get here. Something went wrong. */
-               assert(false);
+               MLX5_ASSERT(false);
                txq->stats.oerrors++;
                break;
        }
@@ -4836,7 +4840,8 @@ enter_send_single:
         * - doorbell the hardware
         * - copy the rest of mbufs to elts (if any)
         */
-       assert(MLX5_TXOFF_CONFIG(INLINE) || loc.pkts_sent >= loc.pkts_copy);
+       MLX5_ASSERT(MLX5_TXOFF_CONFIG(INLINE) ||
+                   loc.pkts_sent >= loc.pkts_copy);
        /* Take a shortcut if nothing is sent. */
        if (unlikely(loc.pkts_sent == loc.pkts_loop))
                goto burst_exit;
@@ -4889,8 +4894,8 @@ enter_send_single:
                mlx5_tx_copy_elts(txq, pkts + loc.pkts_copy, part, olx);
                loc.pkts_copy = loc.pkts_sent;
        }
-       assert(txq->elts_s >= (uint16_t)(txq->elts_head - txq->elts_tail));
-       assert(txq->wqe_s >= (uint16_t)(txq->wqe_ci - txq->wqe_pi));
+       MLX5_ASSERT(txq->elts_s >= (uint16_t)(txq->elts_head - txq->elts_tail));
+       MLX5_ASSERT(txq->wqe_s >= (uint16_t)(txq->wqe_ci - txq->wqe_pi));
        if (pkts_n > loc.pkts_sent) {
                /*
                 * If the burst size is large there might not be enough CQE
@@ -5261,7 +5266,7 @@ mlx5_select_tx_function(struct rte_eth_dev *dev)
                      "invalid WQE Data Segment size");
        static_assert(MLX5_WQE_SIZE == 4 * MLX5_WSEG_SIZE,
                      "invalid WQE size");
-       assert(priv);
+       MLX5_ASSERT(priv);
        if (tx_offloads & DEV_TX_OFFLOAD_MULTI_SEGS) {
                /* We should support Multi-Segment Packets. */
                olx |= MLX5_TXOFF_CONFIG_MULTI;
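
Note the split above: structural properties the compiler can prove stay as static_assert, while properties of runtime state (such as priv being non-NULL) use the runtime macro. A small standalone contrast, with illustrative sizes rather than the driver's constants:

    #include <assert.h> /* static_assert (C11) and assert */

    struct wseg { char bytes[16]; };
    struct wqe { struct wseg segs[4]; };

    /* Compile-time: checked by the compiler, zero runtime cost. */
    static_assert(sizeof(struct wqe) == 4 * sizeof(struct wseg),
                  "invalid WQE size");

    int
    main(void)
    {
            struct wqe storage;
            struct wqe *w = &storage;

            /* Runtime: pointer validity is program state, so it needs
             * a runtime check analogous to MLX5_ASSERT(priv). */
            assert(w != NULL);
            return 0;
    }
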
index 5505762..9d7a4ce 100644 (file)
@@ -3,7 +3,6 @@
  * Copyright 2017 Mellanox Technologies, Ltd
  */
 
-#include <assert.h>
 #include <stdint.h>
 #include <string.h>
 #include <stdlib.h>
index 82f77e5..ea92515 100644 (file)
@@ -85,9 +85,10 @@ mlx5_rx_replenish_bulk_mbuf(struct mlx5_rxq_data *rxq, uint16_t n)
                &((volatile struct mlx5_wqe_data_seg *)rxq->wqes)[elts_idx];
        unsigned int i;
 
-       assert(n >= MLX5_VPMD_RXQ_RPLNSH_THRESH(q_n));
-       assert(n <= (uint16_t)(q_n - (rxq->rq_ci - rxq->rq_pi)));
-       assert(MLX5_VPMD_RXQ_RPLNSH_THRESH(q_n) > MLX5_VPMD_DESCS_PER_LOOP);
+       MLX5_ASSERT(n >= MLX5_VPMD_RXQ_RPLNSH_THRESH(q_n));
+       MLX5_ASSERT(n <= (uint16_t)(q_n - (rxq->rq_ci - rxq->rq_pi)));
+       MLX5_ASSERT(MLX5_VPMD_RXQ_RPLNSH_THRESH(q_n) >
+                   MLX5_VPMD_DESCS_PER_LOOP);
        /* Not to cross queue end. */
        n = RTE_MIN(n - MLX5_VPMD_DESCS_PER_LOOP, q_n - elts_idx);
        if (rte_mempool_get_bulk(rxq->mp, (void *)elts, n) < 0) {
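
The clamp right above the bulk get keeps the refill from crossing the ring end while reserving a few spare descriptors for the vector loops. A self-contained sketch of that arithmetic; the constant and values are examples, not the driver's:

    #include <stdint.h>
    #include <stdio.h>

    #define DESCS_PER_LOOP 4

    static uint16_t
    clamp_replenish(uint16_t n, uint16_t q_n, uint16_t elts_idx)
    {
            /* Refill at most up to the ring end, minus the spares. */
            return n - DESCS_PER_LOOP < q_n - elts_idx ?
                   n - DESCS_PER_LOOP : q_n - elts_idx;
    }

    int
    main(void)
    {
            /* 60 requested near the end of a 512-entry ring, index 500. */
            printf("%u\n", clamp_replenish(60, 512, 500)); /* -> 12 */
            return 0;
    }
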
index 1467a42..aa43cab 100644 (file)
@@ -6,7 +6,6 @@
 #ifndef RTE_PMD_MLX5_RXTX_VEC_ALTIVEC_H_
 #define RTE_PMD_MLX5_RXTX_VEC_ALTIVEC_H_
 
-#include <assert.h>
 #include <stdint.h>
 #include <string.h>
 #include <stdlib.h>
@@ -616,8 +615,8 @@ rxq_burst_v(struct mlx5_rxq_data *rxq, struct rte_mbuf **pkts, uint16_t pkts_n,
        const vector unsigned short cqe_sel_mask2 =
                (vector unsigned short){0, 0, 0xffff, 0, 0, 0, 0, 0};
 
-       assert(rxq->sges_n == 0);
-       assert(rxq->cqe_n == rxq->elts_n);
+       MLX5_ASSERT(rxq->sges_n == 0);
+       MLX5_ASSERT(rxq->cqe_n == rxq->elts_n);
        cq = &(*rxq->cqes)[cq_idx];
        rte_prefetch0(cq);
        rte_prefetch0(cq + 1);
@@ -647,7 +646,7 @@ rxq_burst_v(struct mlx5_rxq_data *rxq, struct rte_mbuf **pkts, uint16_t pkts_n,
        if (!pkts_n)
                return rcvd_pkt;
        /* At this point, there shouldn't be any remaining packets. */
-       assert(rxq->decompressed == 0);
+       MLX5_ASSERT(rxq->decompressed == 0);
 
        /*
         * A. load first Qword (8 bytes) in one loop.
@@ -1063,7 +1062,7 @@ rxq_burst_v(struct mlx5_rxq_data *rxq, struct rte_mbuf **pkts, uint16_t pkts_n,
        if (unlikely(!nocmp_n && comp_idx == MLX5_VPMD_DESCS_PER_LOOP))
                return rcvd_pkt;
        /* Update the consumer indexes for non-compressed CQEs. */
-       assert(nocmp_n <= pkts_n);
+       MLX5_ASSERT(nocmp_n <= pkts_n);
        rxq->cq_ci += nocmp_n;
        rxq->rq_pi += nocmp_n;
        rcvd_pkt += nocmp_n;
@@ -1073,7 +1072,7 @@ rxq_burst_v(struct mlx5_rxq_data *rxq, struct rte_mbuf **pkts, uint16_t pkts_n,
 #endif
        /* Decompress the last CQE if compressed. */
        if (comp_idx < MLX5_VPMD_DESCS_PER_LOOP && comp_idx == n) {
-               assert(comp_idx == (nocmp_n % MLX5_VPMD_DESCS_PER_LOOP));
+               MLX5_ASSERT(comp_idx == (nocmp_n % MLX5_VPMD_DESCS_PER_LOOP));
                rxq->decompressed =
                        rxq_cq_decompress_v(rxq, &cq[nocmp_n], &elts[nocmp_n]);
                /* Return more packets if needed. */
index 5b846c1..6d952df 100644 (file)
@@ -6,7 +6,6 @@
 #ifndef RTE_PMD_MLX5_RXTX_VEC_NEON_H_
 #define RTE_PMD_MLX5_RXTX_VEC_NEON_H_
 
-#include <assert.h>
 #include <stdint.h>
 #include <string.h>
 #include <stdlib.h>
@@ -440,8 +439,8 @@ rxq_burst_v(struct mlx5_rxq_data *rxq, struct rte_mbuf **pkts, uint16_t pkts_n,
        };
        const uint32x4_t flow_mark_adj = { 0, 0, 0, rxq->mark * (-1) };
 
-       assert(rxq->sges_n == 0);
-       assert(rxq->cqe_n == rxq->elts_n);
+       MLX5_ASSERT(rxq->sges_n == 0);
+       MLX5_ASSERT(rxq->cqe_n == rxq->elts_n);
        cq = &(*rxq->cqes)[cq_idx];
        rte_prefetch_non_temporal(cq);
        rte_prefetch_non_temporal(cq + 1);
@@ -470,7 +469,7 @@ rxq_burst_v(struct mlx5_rxq_data *rxq, struct rte_mbuf **pkts, uint16_t pkts_n,
        if (!pkts_n)
                return rcvd_pkt;
        /* At this point, there shouldn't be any remaining packets. */
-       assert(rxq->decompressed == 0);
+       MLX5_ASSERT(rxq->decompressed == 0);
        /*
         * Note that vectors have reverse order - {v3, v2, v1, v0}, because
         * there's no instruction to count trailing zeros. __builtin_clzl() is
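
A self-contained illustration of the idea in the comment above (not the driver's code): with four 16-bit lanes packed in reverse order {v3, v2, v1, v0}, lane 0 occupies the most significant bits, so a leading-zero count finds the first set lane without any trailing-zero instruction. __builtin_clzll() is a GCC/Clang builtin and is undefined for zero, hence the guard.

    #include <stdint.h>
    #include <stdio.h>

    static unsigned int
    first_set_lane(uint64_t mask)
    {
            /* 16 bits per lane; 4 means "no lane set". */
            return mask ? (unsigned int)__builtin_clzll(mask) / 16 : 4;
    }

    int
    main(void)
    {
            uint64_t lane1_set = 0x0000ffff00000000ULL; /* v1 matched */

            printf("%u\n", first_set_lane(lane1_set)); /* -> 1 */
            return 0;
    }
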
@@ -728,7 +727,7 @@ rxq_burst_v(struct mlx5_rxq_data *rxq, struct rte_mbuf **pkts, uint16_t pkts_n,
        if (unlikely(!nocmp_n && comp_idx == MLX5_VPMD_DESCS_PER_LOOP))
                return rcvd_pkt;
        /* Update the consumer indexes for non-compressed CQEs. */
-       assert(nocmp_n <= pkts_n);
+       MLX5_ASSERT(nocmp_n <= pkts_n);
        rxq->cq_ci += nocmp_n;
        rxq->rq_pi += nocmp_n;
        rcvd_pkt += nocmp_n;
@@ -738,7 +737,7 @@ rxq_burst_v(struct mlx5_rxq_data *rxq, struct rte_mbuf **pkts, uint16_t pkts_n,
 #endif
        /* Decompress the last CQE if compressed. */
        if (comp_idx < MLX5_VPMD_DESCS_PER_LOOP && comp_idx == n) {
-               assert(comp_idx == (nocmp_n % MLX5_VPMD_DESCS_PER_LOOP));
+               MLX5_ASSERT(comp_idx == (nocmp_n % MLX5_VPMD_DESCS_PER_LOOP));
                rxq->decompressed = rxq_cq_decompress_v(rxq, &cq[nocmp_n],
                                                        &elts[nocmp_n]);
                /* Return more packets if needed. */
index 6e1b967..406f23f 100644 (file)
@@ -6,7 +6,6 @@
 #ifndef RTE_PMD_MLX5_RXTX_VEC_SSE_H_
 #define RTE_PMD_MLX5_RXTX_VEC_SSE_H_
 
-#include <assert.h>
 #include <stdint.h>
 #include <string.h>
 #include <stdlib.h>
@@ -427,8 +426,8 @@ rxq_burst_v(struct mlx5_rxq_data *rxq, struct rte_mbuf **pkts, uint16_t pkts_n,
                              rxq->crc_present * RTE_ETHER_CRC_LEN);
        const __m128i flow_mark_adj = _mm_set_epi32(rxq->mark * (-1), 0, 0, 0);
 
-       assert(rxq->sges_n == 0);
-       assert(rxq->cqe_n == rxq->elts_n);
+       MLX5_ASSERT(rxq->sges_n == 0);
+       MLX5_ASSERT(rxq->cqe_n == rxq->elts_n);
        cq = &(*rxq->cqes)[cq_idx];
        rte_prefetch0(cq);
        rte_prefetch0(cq + 1);
@@ -457,7 +456,7 @@ rxq_burst_v(struct mlx5_rxq_data *rxq, struct rte_mbuf **pkts, uint16_t pkts_n,
        if (!pkts_n)
                return rcvd_pkt;
        /* At this point, there shouldn't be any remaining packets. */
-       assert(rxq->decompressed == 0);
+       MLX5_ASSERT(rxq->decompressed == 0);
        /*
         * A. load first Qword (8 bytes) in one loop.
         * B. copy 4 mbuf pointers from elts ring to returning pkts.
@@ -678,7 +677,7 @@ rxq_burst_v(struct mlx5_rxq_data *rxq, struct rte_mbuf **pkts, uint16_t pkts_n,
        if (unlikely(!nocmp_n && comp_idx == MLX5_VPMD_DESCS_PER_LOOP))
                return rcvd_pkt;
        /* Update the consumer indexes for non-compressed CQEs. */
-       assert(nocmp_n <= pkts_n);
+       MLX5_ASSERT(nocmp_n <= pkts_n);
        rxq->cq_ci += nocmp_n;
        rxq->rq_pi += nocmp_n;
        rcvd_pkt += nocmp_n;
@@ -688,7 +687,7 @@ rxq_burst_v(struct mlx5_rxq_data *rxq, struct rte_mbuf **pkts, uint16_t pkts_n,
 #endif
        /* Decompress the last CQE if compressed. */
        if (comp_idx < MLX5_VPMD_DESCS_PER_LOOP && comp_idx == n) {
-               assert(comp_idx == (nocmp_n % MLX5_VPMD_DESCS_PER_LOOP));
+               MLX5_ASSERT(comp_idx == (nocmp_n % MLX5_VPMD_DESCS_PER_LOOP));
                rxq->decompressed = rxq_cq_decompress_v(rxq, &cq[nocmp_n],
                                                        &elts[nocmp_n]);
                /* Return more packets if needed. */
index b037f77..cf2b433 100644 (file)
@@ -126,7 +126,7 @@ error:
 static int
 mlx5_pmd_interrupt_handler_install(void)
 {
-       assert(server_socket);
+       MLX5_ASSERT(server_socket);
        server_intr_handle.fd = server_socket;
        server_intr_handle.type = RTE_INTR_HANDLE_EXT;
        return rte_intr_callback_register(&server_intr_handle,
@@ -166,7 +166,7 @@ mlx5_pmd_socket_init(void)
        int ret = -1;
        int flags;
 
-       assert(rte_eal_process_type() == RTE_PROC_PRIMARY);
+       MLX5_ASSERT(rte_eal_process_type() == RTE_PROC_PRIMARY);
        if (server_socket)
                return 0;
        /*
index 4c69e77..7603502 100644 (file)
@@ -306,7 +306,7 @@ mlx5_stats_init(struct rte_eth_dev *dev)
                        xstats_ctrl->info[idx] = mlx5_counters_init[i];
                }
        }
-       assert(xstats_ctrl->mlx5_stats_n <= MLX5_MAX_XSTATS);
+       MLX5_ASSERT(xstats_ctrl->mlx5_stats_n <= MLX5_MAX_XSTATS);
        xstats_ctrl->stats_n = dev_stats_n;
        /* Copy to base at first time. */
        ret = mlx5_read_dev_counters(dev, xstats_ctrl->base);
index 5c91adf..bc13abf 100644 (file)
@@ -4,7 +4,6 @@
  */
 
 #include <stddef.h>
-#include <assert.h>
 #include <errno.h>
 #include <string.h>
 #include <stdint.h>
@@ -83,7 +82,7 @@ txq_free_elts(struct mlx5_txq_ctrl *txq_ctrl)
        while (elts_tail != elts_head) {
                struct rte_mbuf *elt = (*elts)[elts_tail & elts_m];
 
-               assert(elt != NULL);
+               MLX5_ASSERT(elt != NULL);
                rte_pktmbuf_free_seg(elt);
 #ifdef RTE_LIBRTE_MLX5_DEBUG
                /* Poisoning. */
@@ -347,8 +346,8 @@ txq_uar_init(struct mlx5_txq_ctrl *txq_ctrl)
 
        if (txq_ctrl->type != MLX5_TXQ_TYPE_STANDARD)
                return;
-       assert(rte_eal_process_type() == RTE_PROC_PRIMARY);
-       assert(ppriv);
+       MLX5_ASSERT(rte_eal_process_type() == RTE_PROC_PRIMARY);
+       MLX5_ASSERT(ppriv);
        ppriv->uar_table[txq_ctrl->txq.idx] = txq_ctrl->bf_reg;
        txq_uar_ncattr_init(txq_ctrl, page_size);
 #ifndef RTE_ARCH_64
@@ -386,7 +385,7 @@ txq_uar_init_secondary(struct mlx5_txq_ctrl *txq_ctrl, int fd)
 
        if (txq_ctrl->type != MLX5_TXQ_TYPE_STANDARD)
                return 0;
-       assert(ppriv);
+       MLX5_ASSERT(ppriv);
        /*
         * As in rdma-core, UARs are mapped at OS page size
         * granularity. Refer to the libmlx5 function mlx5_init_context()
@@ -447,7 +446,7 @@ mlx5_tx_uar_init_secondary(struct rte_eth_dev *dev, int fd)
        unsigned int i;
        int ret;
 
-       assert(rte_eal_process_type() == RTE_PROC_SECONDARY);
+       MLX5_ASSERT(rte_eal_process_type() == RTE_PROC_SECONDARY);
        for (i = 0; i != priv->txqs_n; ++i) {
                if (!(*priv->txqs)[i])
                        continue;
@@ -455,7 +454,7 @@ mlx5_tx_uar_init_secondary(struct rte_eth_dev *dev, int fd)
                txq_ctrl = container_of(txq, struct mlx5_txq_ctrl, txq);
                if (txq_ctrl->type != MLX5_TXQ_TYPE_STANDARD)
                        continue;
-               assert(txq->idx == (uint16_t)i);
+               MLX5_ASSERT(txq->idx == (uint16_t)i);
                ret = txq_uar_init_secondary(txq_ctrl, fd);
                if (ret)
                        goto error;
@@ -495,8 +494,8 @@ mlx5_txq_obj_hairpin_new(struct rte_eth_dev *dev, uint16_t idx)
        struct mlx5_txq_obj *tmpl = NULL;
        int ret = 0;
 
-       assert(txq_data);
-       assert(!txq_ctrl->obj);
+       MLX5_ASSERT(txq_data);
+       MLX5_ASSERT(!txq_ctrl->obj);
        tmpl = rte_calloc_socket(__func__, 1, sizeof(*tmpl), 0,
                                 txq_ctrl->socket);
        if (!tmpl) {
@@ -581,7 +580,7 @@ mlx5_txq_obj_new(struct rte_eth_dev *dev, uint16_t idx,
        if (priv->config.devx && !priv->sh->tdn)
                qp.comp_mask |= MLX5DV_QP_MASK_RAW_QP_HANDLES;
 #endif
-       assert(txq_data);
+       MLX5_ASSERT(txq_data);
        priv->verbs_alloc_ctx.type = MLX5_VERBS_ALLOC_TYPE_TX_QUEUE;
        priv->verbs_alloc_ctx.obj = txq_ctrl;
        if (mlx5_getenv_int("MLX5_ENABLE_CQE_COMPRESSION")) {
@@ -832,7 +831,7 @@ mlx5_txq_obj_get(struct rte_eth_dev *dev, uint16_t idx)
 int
 mlx5_txq_obj_release(struct mlx5_txq_obj *txq_obj)
 {
-       assert(txq_obj);
+       MLX5_ASSERT(txq_obj);
        if (rte_atomic32_dec_and_test(&txq_obj->refcnt)) {
                if (txq_obj->type == MLX5_TXQ_OBJ_TYPE_DEVX_HAIRPIN) {
                        if (txq_obj->tis)
@@ -1050,12 +1049,12 @@ txq_set_params(struct mlx5_txq_ctrl *txq_ctrl)
                 * beginning of inlining buffer in Ethernet
                 * Segment.
                 */
-               assert(inlen_send >= MLX5_ESEG_MIN_INLINE_SIZE);
-               assert(inlen_send <= MLX5_WQE_SIZE_MAX +
-                                    MLX5_ESEG_MIN_INLINE_SIZE -
-                                    MLX5_WQE_CSEG_SIZE -
-                                    MLX5_WQE_ESEG_SIZE -
-                                    MLX5_WQE_DSEG_SIZE * 2);
+               MLX5_ASSERT(inlen_send >= MLX5_ESEG_MIN_INLINE_SIZE);
+               MLX5_ASSERT(inlen_send <= MLX5_WQE_SIZE_MAX +
+                                         MLX5_ESEG_MIN_INLINE_SIZE -
+                                         MLX5_WQE_CSEG_SIZE -
+                                         MLX5_WQE_ESEG_SIZE -
+                                         MLX5_WQE_DSEG_SIZE * 2);
        } else if (inlen_mode) {
                /*
                 * If minimal inlining is requested we must
@@ -1105,12 +1104,12 @@ txq_set_params(struct mlx5_txq_ctrl *txq_ctrl)
                                PORT_ID(priv), inlen_empw, temp);
                        inlen_empw = temp;
                }
-               assert(inlen_empw >= MLX5_ESEG_MIN_INLINE_SIZE);
-               assert(inlen_empw <= MLX5_WQE_SIZE_MAX +
-                                    MLX5_DSEG_MIN_INLINE_SIZE -
-                                    MLX5_WQE_CSEG_SIZE -
-                                    MLX5_WQE_ESEG_SIZE -
-                                    MLX5_WQE_DSEG_SIZE);
+               MLX5_ASSERT(inlen_empw >= MLX5_ESEG_MIN_INLINE_SIZE);
+               MLX5_ASSERT(inlen_empw <= MLX5_WQE_SIZE_MAX +
+                                         MLX5_DSEG_MIN_INLINE_SIZE -
+                                         MLX5_WQE_CSEG_SIZE -
+                                         MLX5_WQE_ESEG_SIZE -
+                                         MLX5_WQE_DSEG_SIZE);
                txq_ctrl->txq.inlen_empw = inlen_empw;
        }
        txq_ctrl->max_inline_data = RTE_MAX(inlen_send, inlen_empw);
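
The inlen_send and inlen_empw assertions above bound the inline data by whatever remains of the largest WQE once the mandatory segments are carved out. A back-of-the-envelope check of that budget; the constants here are illustrative assumptions, the real MLX5_* values live in mlx5_prm.h and may differ:

    #include <stdio.h>

    #define WQE_SIZE_MAX            1024 /* largest WQE, bytes */
    #define WQE_CSEG_SIZE           16   /* Control Segment */
    #define WQE_ESEG_SIZE           16   /* Ethernet Segment */
    #define WQE_DSEG_SIZE           16   /* Data Segment */
    #define ESEG_MIN_INLINE_SIZE    18   /* min inline part in ESEG */

    int
    main(void)
    {
            /* Space left for inlined data once one Control, one
             * Ethernet and two Data segments are accounted for. */
            int bound = WQE_SIZE_MAX + ESEG_MIN_INLINE_SIZE -
                        WQE_CSEG_SIZE - WQE_ESEG_SIZE -
                        2 * WQE_DSEG_SIZE;

            printf("inlen_send upper bound: %d bytes\n", bound);
            return 0;
    }
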
@@ -1225,11 +1224,11 @@ txq_adjust_params(struct mlx5_txq_ctrl *txq_ctrl)
        }
        txq_ctrl->max_inline_data = RTE_MAX(txq_ctrl->txq.inlen_send,
                                            txq_ctrl->txq.inlen_empw);
-       assert(txq_ctrl->max_inline_data <= max_inline);
-       assert(txq_ctrl->txq.inlen_mode <= max_inline);
-       assert(txq_ctrl->txq.inlen_mode <= txq_ctrl->txq.inlen_send);
-       assert(txq_ctrl->txq.inlen_mode <= txq_ctrl->txq.inlen_empw ||
-              !txq_ctrl->txq.inlen_empw);
+       MLX5_ASSERT(txq_ctrl->max_inline_data <= max_inline);
+       MLX5_ASSERT(txq_ctrl->txq.inlen_mode <= max_inline);
+       MLX5_ASSERT(txq_ctrl->txq.inlen_mode <= txq_ctrl->txq.inlen_send);
+       MLX5_ASSERT(txq_ctrl->txq.inlen_mode <= txq_ctrl->txq.inlen_empw ||
+                   !txq_ctrl->txq.inlen_empw);
        return 0;
 error:
        rte_errno = ENOMEM;
@@ -1275,7 +1274,7 @@ mlx5_txq_new(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
        }
        /* Save pointer of global generation number to check memory event. */
        tmpl->txq.mr_ctrl.dev_gen_ptr = &priv->sh->mr.dev_gen;
-       assert(desc > MLX5_TX_COMP_THRESH);
+       MLX5_ASSERT(desc > MLX5_TX_COMP_THRESH);
        tmpl->txq.offloads = conf->offloads |
                             dev->data->dev_conf.txmode.offloads;
        tmpl->priv = priv;
index 5d86615..4b4fc3c 100644 (file)
@@ -49,7 +49,7 @@ mlx5_hlist_lookup(struct mlx5_hlist *h, uint64_t key)
        struct mlx5_hlist_head *first;
        struct mlx5_hlist_entry *node;
 
-       assert(h);
+       MLX5_ASSERT(h);
        idx = rte_hash_crc_8byte(key, 0) & h->mask;
        first = &h->heads[idx];
        LIST_FOREACH(node, first, next) {
@@ -66,7 +66,7 @@ mlx5_hlist_insert(struct mlx5_hlist *h, struct mlx5_hlist_entry *entry)
        struct mlx5_hlist_head *first;
        struct mlx5_hlist_entry *node;
 
-       assert(h && entry);
+       MLX5_ASSERT(h && entry);
        idx = rte_hash_crc_8byte(entry->key, 0) & h->mask;
        first = &h->heads[idx];
        /* No need to reuse the lookup function. */
@@ -82,7 +82,7 @@ void
 mlx5_hlist_remove(struct mlx5_hlist *h __rte_unused,
                  struct mlx5_hlist_entry *entry)
 {
-       assert(entry && entry->next.le_prev);
+       MLX5_ASSERT(entry && entry->next.le_prev);
        LIST_REMOVE(entry, next);
        /* Set to NULL to prevent the entry from being removed more than once. */
        entry->next.le_prev = NULL;
@@ -95,7 +95,7 @@ mlx5_hlist_destroy(struct mlx5_hlist *h,
        uint32_t idx;
        struct mlx5_hlist_entry *entry;
 
-       assert(h);
+       MLX5_ASSERT(h);
        for (idx = 0; idx < h->table_sz; ++idx) {
                /* no LIST_FOREACH_SAFE, using while instead */
                while (!LIST_EMPTY(&h->heads[idx])) {
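
The hash-list helpers above all select a bucket by hashing the 64-bit key and masking it down to a power-of-two table. A sketch of that idiom; the driver uses rte_hash_crc_8byte(), for which a trivial multiplicative mix stands in here:

    #include <stdint.h>
    #include <stdio.h>

    static uint32_t
    bucket_of(uint64_t key, uint32_t mask)
    {
            /* Stand-in hash; any well-mixing 64-bit hash works. */
            uint64_t h = key * 0x9e3779b97f4a7c15ULL;

            return (uint32_t)(h >> 32) & mask;
    }

    int
    main(void)
    {
            /* 256 buckets -> mask 0xff; the same key always lands in
             * the same bucket, which is what lookup relies on. */
            printf("%u\n", bucket_of(42, 0xff));
            return 0;
    }
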
index c868aee..8f305c3 100644 (file)
@@ -10,7 +10,6 @@
 #include <stdint.h>
 #include <stdio.h>
 #include <limits.h>
-#include <assert.h>
 #include <errno.h>
 
 #include <mlx5_common.h>
 #define bool _Bool
 #endif
 
-/* Bit-field manipulation. */
-#define BITFIELD_DECLARE(bf, type, size) \
-       type bf[(((size_t)(size) / (sizeof(type) * CHAR_BIT)) + \
-                !!((size_t)(size) % (sizeof(type) * CHAR_BIT)))]
-#define BITFIELD_DEFINE(bf, type, size) \
-       BITFIELD_DECLARE((bf), type, (size)) = { 0 }
-#define BITFIELD_SET(bf, b) \
-       (assert((size_t)(b) < (sizeof(bf) * CHAR_BIT)), \
-        (void)((bf)[((b) / (sizeof((bf)[0]) * CHAR_BIT))] |= \
-               ((size_t)1 << ((b) % (sizeof((bf)[0]) * CHAR_BIT)))))
-#define BITFIELD_RESET(bf, b) \
-       (assert((size_t)(b) < (sizeof(bf) * CHAR_BIT)), \
-        (void)((bf)[((b) / (sizeof((bf)[0]) * CHAR_BIT))] &= \
-               ~((size_t)1 << ((b) % (sizeof((bf)[0]) * CHAR_BIT)))))
-#define BITFIELD_ISSET(bf, b) \
-       (assert((size_t)(b) < (sizeof(bf) * CHAR_BIT)), \
-        !!(((bf)[((b) / (sizeof((bf)[0]) * CHAR_BIT))] & \
-            ((size_t)1 << ((b) % (sizeof((bf)[0]) * CHAR_BIT))))))
-
 /* Convert a bit number to the corresponding 64-bit mask */
 #define MLX5_BITSHIFT(v) (UINT64_C(1) << (v))
 
index 8e63b67..f65e416 100644 (file)
@@ -5,7 +5,6 @@
 
 #include <stddef.h>
 #include <errno.h>
-#include <assert.h>
 #include <stdint.h>
 #include <unistd.h>
 
@@ -61,7 +60,7 @@ mlx5_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
 
        DRV_LOG(DEBUG, "port %u %s VLAN filter ID %" PRIu16,
                dev->data->port_id, (on ? "enable" : "disable"), vlan_id);
-       assert(priv->vlan_filter_n <= RTE_DIM(priv->vlan_filter));
+       MLX5_ASSERT(priv->vlan_filter_n <= RTE_DIM(priv->vlan_filter));
        for (i = 0; (i != priv->vlan_filter_n); ++i)
                if (priv->vlan_filter[i] == vlan_id)
                        break;
@@ -71,7 +70,7 @@ mlx5_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
                return -rte_errno;
        }
        if (i < priv->vlan_filter_n) {
-               assert(priv->vlan_filter_n != 0);
+               MLX5_ASSERT(priv->vlan_filter_n != 0);
                /* Enabling an existing VLAN filter has no effect. */
                if (on)
                        goto out;
@@ -83,7 +82,7 @@ mlx5_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
                        (priv->vlan_filter_n - i));
                priv->vlan_filter[priv->vlan_filter_n] = 0;
        } else {
-               assert(i == priv->vlan_filter_n);
+               MLX5_ASSERT(i == priv->vlan_filter_n);
                /* Disabling an unknown VLAN filter has no effect. */
                if (!on)
                        goto out;
@@ -214,12 +213,12 @@ void mlx5_vlan_vmwa_release(struct rte_eth_dev *dev,
        struct mlx5_nl_vlan_vmwa_context *vmwa = priv->vmwa_context;
        struct mlx5_nl_vlan_dev *vlan_dev = &vmwa->vlan_dev[0];
 
-       assert(vlan->created);
-       assert(priv->vmwa_context);
+       MLX5_ASSERT(vlan->created);
+       MLX5_ASSERT(priv->vmwa_context);
        if (!vlan->created || !vmwa)
                return;
        vlan->created = 0;
-       assert(vlan_dev[vlan->tag].refcnt);
+       MLX5_ASSERT(vlan_dev[vlan->tag].refcnt);
        if (--vlan_dev[vlan->tag].refcnt == 0 &&
            vlan_dev[vlan->tag].ifindex) {
                mlx5_nl_vlan_vmwa_delete(vmwa, vlan_dev[vlan->tag].ifindex);
@@ -242,12 +241,12 @@ void mlx5_vlan_vmwa_acquire(struct rte_eth_dev *dev,
        struct mlx5_nl_vlan_vmwa_context *vmwa = priv->vmwa_context;
        struct mlx5_nl_vlan_dev *vlan_dev = &vmwa->vlan_dev[0];
 
-       assert(!vlan->created);
-       assert(priv->vmwa_context);
+       MLX5_ASSERT(!vlan->created);
+       MLX5_ASSERT(priv->vmwa_context);
        if (vlan->created || !vmwa)
                return;
        if (vlan_dev[vlan->tag].refcnt == 0) {
-               assert(!vlan_dev[vlan->tag].ifindex);
+               MLX5_ASSERT(!vlan_dev[vlan->tag].ifindex);
                vlan_dev[vlan->tag].ifindex =
                        mlx5_nl_vlan_vmwa_create(vmwa, vmwa->vf_ifindex,
                                                 vlan->tag);
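
A simplified model of the per-VLAN-tag refcounting asserted in the two hunks above, reduced to the counting logic only: the interface is created on the 0 to 1 transition and destroyed on the 1 to 0 transition, and the asserts guard exactly those edges.

    #include <assert.h>
    #include <stdint.h>

    struct vlan_slot {
            uint32_t refcnt;
            uint32_t ifindex; /* nonzero while the VLAN interface exists */
    };

    static void
    slot_acquire(struct vlan_slot *s)
    {
            if (s->refcnt == 0) {
                    assert(!s->ifindex);
                    s->ifindex = 1; /* stands in for netlink creation */
            }
            s->refcnt++;
    }

    static void
    slot_release(struct vlan_slot *s)
    {
            assert(s->refcnt);
            if (--s->refcnt == 0 && s->ifindex)
                    s->ifindex = 0; /* stands in for interface deletion */
    }

    int
    main(void)
    {
            struct vlan_slot s = { 0, 0 };

            slot_acquire(&s);
            slot_acquire(&s);
            slot_release(&s);
            assert(s.ifindex);  /* still referenced */
            slot_release(&s);
            assert(!s.ifindex); /* last reference dropped */
            return 0;
    }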