The macro RTE_VERIFY always checks its condition, and the check is now
optimized with an "unlikely" branch hint.
While this macro is well suited to test applications, libraries and
examples should preferably enable such checks only in debug mode.
That's why the macro RTE_ASSERT is introduced: it calls RTE_VERIFY only
if built with debug logs enabled.
Many assert macros were duplicated across the tree, each enabled by its
own specific flag. Removing these #ifdefs makes those code branches
easier to test and avoids dead-code pitfalls.
ENA_ASSERT is kept (in debug mode only) because it takes more
parameters to log.
Signed-off-by: Thomas Monjalon <thomas.monjalon@6wind.com>
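For illustration, a minimal sketch of the intended split between the two
macros (the "ring" pointer is a hypothetical example, not code from this
patch):

    /* Always-on sanity check: panics in every build type. */
    RTE_VERIFY(ring != NULL);

    /* Debug-only check: compiles to nothing unless the build has
     * RTE_LOG_LEVEL >= RTE_LOG_DEBUG, per the definition added below. */
    RTE_ASSERT(ring != NULL);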
/* Remove the Ethernet header from the input packet */
iphdr = (struct ipv4_hdr *)rte_pktmbuf_adj(m, sizeof(struct ether_hdr));
- RTE_MBUF_ASSERT(iphdr != NULL);
+ RTE_ASSERT(iphdr != NULL);
dest_addr = rte_be_to_cpu_32(iphdr->dst_addr);
Then, the packet is checked to see if it has a multicast destination address and
ethdr = (struct ether_hdr *)rte_pktmbuf_prepend(pkt, (uint16_t) sizeof(*ethdr));
- RTE_MBUF_ASSERT(ethdr != NULL);
+ RTE_ASSERT(ethdr != NULL);
ether_addr_copy(dest_addr, &ethdr->d_addr);
ether_addr_copy(&ports_eth_addr[port], &ethdr->s_addr);
if (!ACTOR_STATE(port, SYNCHRONIZATION)) {
/* attach mux to aggregator */
- RTE_VERIFY((port->actor_state & (STATE_COLLECTING |
+ RTE_ASSERT((port->actor_state & (STATE_COLLECTING |
STATE_DISTRIBUTING)) == 0);
ACTOR_STATE_SET(port, SYNCHRONIZATION);
struct lacpdu_header *lacp;
lacp = rte_pktmbuf_mtod(lacp_pkt, struct lacpdu_header *);
- RTE_VERIFY(lacp->lacpdu.subtype == SLOW_SUBTYPE_LACP);
+ RTE_ASSERT(lacp->lacpdu.subtype == SLOW_SUBTYPE_LACP);
/* This is LACP frame so pass it to rx_machine */
rx_machine(internals, slave_id, &lacp->lacpdu);
uint16_t q_id;
/* Given slave must not be in active list */
- RTE_VERIFY(find_slave_by_id(internals->active_slaves,
+ RTE_ASSERT(find_slave_by_id(internals->active_slaves,
internals->active_slave_count, slave_id) == internals->active_slave_count);
+ RTE_SET_USED(internals); /* used only for assert when enabled */
memcpy(&port->actor, &initial, sizeof(struct port_params));
/* Standard requires that port ID must be greater than 0.
if (port->mbuf_pool != NULL)
return;
- RTE_VERIFY(port->rx_ring == NULL);
- RTE_VERIFY(port->tx_ring == NULL);
+ RTE_ASSERT(port->rx_ring == NULL);
+ RTE_ASSERT(port->tx_ring == NULL);
socket_id = rte_eth_devices[slave_id].data->numa_node;
element_size = sizeof(struct slow_protocol_frame) + sizeof(struct rte_mbuf)
uint8_t i;
/* Given slave must be in active list */
- RTE_VERIFY(find_slave_by_id(internals->active_slaves,
+ RTE_ASSERT(find_slave_by_id(internals->active_slaves,
internals->active_slave_count, slave_id) < internals->active_slave_count);
/* Exclude slave from transmit policy. If this slave is an aggregator
internals->tlb_slaves_order[active_count] = port_id;
}
- RTE_VERIFY(internals->active_slave_count <
+ RTE_ASSERT(internals->active_slave_count <
(RTE_DIM(internals->active_slaves) - 1));
internals->active_slaves[internals->active_slave_count] = port_id;
sizeof(internals->active_slaves[0]));
}
- RTE_VERIFY(active_count < RTE_DIM(internals->active_slaves));
+ RTE_ASSERT(active_count < RTE_DIM(internals->active_slaves));
internals->active_slave_count = active_count;
if (eth_dev->data->dev_started) {
for (i = 0; i < internals->active_slave_count; i++) {
port = &mode_8023ad_ports[internals->active_slaves[i]];
- RTE_VERIFY(port->rx_ring != NULL);
+ RTE_ASSERT(port->rx_ring != NULL);
while (rte_ring_dequeue(port->rx_ring, &pkt) != -ENOENT)
rte_pktmbuf_free(pkt);
- RTE_VERIFY(port->tx_ring != NULL);
+ RTE_ASSERT(port->tx_ring != NULL);
while (rte_ring_dequeue(port->tx_ring, &pkt) != -ENOENT)
rte_pktmbuf_free(pkt);
}
#define ENA_GET_SYSTEM_USECS() \
(rte_get_timer_cycles() * US_PER_S / rte_get_timer_hz())
+#if RTE_LOG_LEVEL >= RTE_LOG_DEBUG
#define ENA_ASSERT(cond, format, arg...) \
do { \
if (unlikely(!(cond))) { \
- printf("Assertion failed on %s:%s:%d: " format, \
- __FILE__, __func__, __LINE__, ##arg); \
- rte_exit(EXIT_FAILURE, "ASSERTION FAILED\n"); \
+ RTE_LOG(ERR, PMD, format, ##arg); \
+ rte_panic("line %d\tassert \"" #cond "\"" \
+ "failed\n", __LINE__); \
} \
} while (0)
+#else
+#define ENA_ASSERT(cond, format, arg...) do {} while (0)
+#endif
#define ENA_MAX32(x, y) RTE_MAX((x), (y))
#define ENA_MAX16(x, y) RTE_MAX((x), (y))
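For illustration, a hedged sketch of an ENA_ASSERT call site; the names
req_id and ring_size are hypothetical, chosen only to show why the extra
logging parameters justify keeping this driver-local macro:

    ENA_ASSERT(req_id < ring_size,
               "req_id %u out of range (ring size %u)\n",
               req_id, ring_size);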
return (struct enic *)eth_dev->data->dev_private;
}
-#define RTE_LIBRTE_ENIC_ASSERT_ENABLE
-#ifdef RTE_LIBRTE_ENIC_ASSERT_ENABLE
-#define ASSERT(x) do { \
- if (!(x)) \
- rte_panic("ENIC: x"); \
-} while (0)
-#else
-#define ASSERT(x)
-#endif
-
extern void enic_fdir_stats_get(struct enic *enic,
struct rte_eth_fdir_stats *stats);
extern int enic_fdir_add_fltr(struct enic *enic,
enic_ring_add(uint32_t n_descriptors, uint32_t i0, uint32_t i1)
{
uint32_t d = i0 + i1;
- ASSERT(i0 < n_descriptors);
- ASSERT(i1 < n_descriptors);
+ RTE_ASSERT(i0 < n_descriptors);
+ RTE_ASSERT(i1 < n_descriptors);
d -= (d >= n_descriptors) ? n_descriptors : 0;
return d;
}
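A worked example of the branch-free wrap above, with assumed values:

    /* With a 16-entry ring, indices 10 and 9 sum to 19, which the
     * conditional subtraction wraps to 3 without a modulo operation;
     * the asserts guard the precondition that both inputs are in range. */
    uint32_t idx = enic_ring_add(16, 10, 9);    /* idx == 3 */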
#ifndef _VMXNET3_ETHDEV_H_
#define _VMXNET3_ETHDEV_H_
-#ifdef RTE_LIBRTE_VMXNET3_DEBUG_DRIVER
-#define VMXNET3_ASSERT(x) do { \
- if (!(x)) rte_panic("VMXNET3: %s\n", #x); \
-} while(0)
-#else
-#define VMXNET3_ASSERT(x) do { (void)(x); } while (0)
-#endif
-
#define VMXNET3_MAX_MAC_ADDRS 1
/* UPT feature to negotiate */
struct rte_mbuf *mbuf;
/* Release cmd_ring descriptor and free mbuf */
- VMXNET3_ASSERT(txq->cmd_ring.base[eop_idx].txd.eop == 1);
+ RTE_ASSERT(txq->cmd_ring.base[eop_idx].txd.eop == 1);
mbuf = txq->cmd_ring.buf_info[eop_idx].m;
if (mbuf == NULL)
while (txq->cmd_ring.next2comp != eop_idx) {
/* no out-of-order completion */
- VMXNET3_ASSERT(txq->cmd_ring.base[txq->cmd_ring.next2comp].txd.cq == 0);
+ RTE_ASSERT(txq->cmd_ring.base[txq->cmd_ring.next2comp].txd.cq == 0);
vmxnet3_cmd_ring_adv_next2comp(&txq->cmd_ring);
completed++;
}
if (tso) {
uint16_t mss = txm->tso_segsz;
- VMXNET3_ASSERT(mss > 0);
+ RTE_ASSERT(mss > 0);
gdesc->txd.hlen = txm->l2_len + txm->l3_len + txm->l4_len;
gdesc->txd.om = VMXNET3_OM_TSO;
idx = rcd->rxdIdx;
ring_idx = (uint8_t)((rcd->rqID == rxq->qid1) ? 0 : 1);
rxd = (Vmxnet3_RxDesc *)rxq->cmd_ring[ring_idx].base + idx;
+ RTE_SET_USED(rxd); /* used only for assert when enabled */
rbi = rxq->cmd_ring[ring_idx].buf_info + idx;
PMD_RX_LOG(DEBUG, "rxd idx: %d ring idx: %d.", idx, ring_idx);
- VMXNET3_ASSERT(rcd->len <= rxd->len);
- VMXNET3_ASSERT(rbi->m);
+ RTE_ASSERT(rcd->len <= rxd->len);
+ RTE_ASSERT(rbi->m);
/* Get the packet buffer pointer from buf_info */
rxm = rbi->m;
* the last mbuf of the current packet.
*/
if (rcd->sop) {
- VMXNET3_ASSERT(rxd->btype == VMXNET3_RXD_BTYPE_HEAD);
+ RTE_ASSERT(rxd->btype == VMXNET3_RXD_BTYPE_HEAD);
if (unlikely(rcd->len == 0)) {
- VMXNET3_ASSERT(rcd->eop);
+ RTE_ASSERT(rcd->eop);
PMD_RX_LOG(DEBUG,
"Rx buf was skipped. rxring[%d][%d])",
} else {
struct rte_mbuf *start = rxq->start_seg;
- VMXNET3_ASSERT(rxd->btype == VMXNET3_RXD_BTYPE_BODY);
+ RTE_ASSERT(rxd->btype == VMXNET3_RXD_BTYPE_BODY);
start->pkt_len += rxm->data_len;
start->nb_segs++;
obj_init, obj_init_arg,
socket_id, flags, va, pa_arr, rpg_num, pg_shift);
- RTE_VERIFY(elt_num == mp->size);
+ RTE_ASSERT(elt_num == mp->size);
}
mgi.mp = mp;
mgi.pg_num = rpg_num;
unsigned left = n & 0x7;
unsigned i;
- IPSEC_ASSERT((n & 0x3) == 0);
+ RTE_ASSERT((n & 0x3) == 0);
for (i = 0; i < (n >> 3); i++)
buf[i] = rte_rand();
int32_t payload_len;
struct rte_crypto_sym_op *sym_cop;
- IPSEC_ASSERT(m != NULL);
- IPSEC_ASSERT(sa != NULL);
- IPSEC_ASSERT(cop != NULL);
+ RTE_ASSERT(m != NULL);
+ RTE_ASSERT(sa != NULL);
+ RTE_ASSERT(cop != NULL);
payload_len = rte_pktmbuf_pkt_len(m) - IP_ESP_HDR_SZ - sa->iv_len -
sa->digest_len;
uint8_t *padding;
uint16_t i;
- IPSEC_ASSERT(m != NULL);
- IPSEC_ASSERT(sa != NULL);
- IPSEC_ASSERT(cop != NULL);
+ RTE_ASSERT(m != NULL);
+ RTE_ASSERT(sa != NULL);
+ RTE_ASSERT(cop != NULL);
if (cop->status != RTE_CRYPTO_OP_STATUS_SUCCESS) {
RTE_LOG(ERR, IPSEC_ESP, "Failed crypto op\n");
char *padding;
struct rte_crypto_sym_op *sym_cop;
- IPSEC_ASSERT(m != NULL);
- IPSEC_ASSERT(sa != NULL);
- IPSEC_ASSERT(cop != NULL);
+ RTE_ASSERT(m != NULL);
+ RTE_ASSERT(sa != NULL);
+ RTE_ASSERT(cop != NULL);
/* Payload length */
pad_payload_len = RTE_ALIGN_CEIL(rte_pktmbuf_pkt_len(m) + 2,
padding = rte_pktmbuf_append(m, pad_len + sa->digest_len);
- IPSEC_ASSERT(padding != NULL);
+ RTE_ASSERT(padding != NULL);
ip = ip4ip_outbound(m, sizeof(struct esp_hdr) + sa->iv_len,
sa->src, sa->dst);
struct ipsec_sa *sa __rte_unused,
struct rte_crypto_op *cop)
{
- IPSEC_ASSERT(m != NULL);
- IPSEC_ASSERT(sa != NULL);
- IPSEC_ASSERT(cop != NULL);
+ RTE_ASSERT(m != NULL);
+ RTE_ASSERT(sa != NULL);
+ RTE_ASSERT(cop != NULL);
if (cop->status != RTE_CRYPTO_OP_STATUS_SUCCESS) {
RTE_LOG(ERR, IPSEC_ESP, "Failed crypto op\n");
inip = rte_pktmbuf_mtod(m, struct ip*);
- IPSEC_ASSERT(inip->ip_v == IPVERSION || inip->ip_v == IPV6_VERSION);
+ RTE_ASSERT(inip->ip_v == IPVERSION || inip->ip_v == IPV6_VERSION);
offset += sizeof(struct ip);
outip = (struct ip *)rte_pktmbuf_prepend(m, offset);
- IPSEC_ASSERT(outip != NULL);
+ RTE_ASSERT(outip != NULL);
/* Per RFC4301 5.1.2.1 */
outip->ip_v = IPVERSION;
outip = rte_pktmbuf_mtod(m, struct ip*);
- IPSEC_ASSERT(outip->ip_v == IPVERSION);
+ RTE_ASSERT(outip->ip_v == IPVERSION);
offset += sizeof(struct ip);
inip = (struct ip *)rte_pktmbuf_adj(m, offset);
- IPSEC_ASSERT(inip->ip_v == IPVERSION || inip->ip_v == IPV6_VERSION);
+ RTE_ASSERT(inip->ip_v == IPVERSION || inip->ip_v == IPV6_VERSION);
/* Check packet is still bigger than IP header (inner) */
- IPSEC_ASSERT(rte_pktmbuf_pkt_len(m) > sizeof(struct ip));
+ RTE_ASSERT(rte_pktmbuf_pkt_len(m) > sizeof(struct ip));
/* RFC4301 5.1.2.1 Note 6 */
if ((inip->ip_tos & htons(IPTOS_ECN_ECT0 | IPTOS_ECN_ECT1)) &&
sa = sas[i];
priv->sa = sa;
- IPSEC_ASSERT(sa != NULL);
+ RTE_ASSERT(sa != NULL);
priv->cop.type = RTE_CRYPTO_OP_TYPE_SYMMETRIC;
continue;
}
- IPSEC_ASSERT(sa->cdev_id_qp < ipsec_ctx->nb_qps);
+ RTE_ASSERT(sa->cdev_id_qp < ipsec_ctx->nb_qps);
enqueue_cop(&ipsec_ctx->tbl[sa->cdev_id_qp], &priv->cop);
}
priv = get_priv(pkt);
sa = priv->sa;
- IPSEC_ASSERT(sa != NULL);
+ RTE_ASSERT(sa != NULL);
ret = sa->post_crypto(pkt, sa, cops[j]);
if (unlikely(ret))
#define MAX_PKT_BURST 32
#define MAX_QP_PER_LCORE 256
-#ifdef IPSEC_DEBUG
-#define IPSEC_ASSERT(exp) \
-if (!(exp)) { \
- rte_panic("line%d\tassert \"" #exp "\" failed\n", __LINE__); \
-}
-#else
-#define IPSEC_ASSERT(exp) do {} while (0)
-#endif /* IPSEC_DEBUG */
-
#define MAX_DIGEST_SIZE 32 /* Bytes -- 256 bits */
#define uint32_t_to_char(ip, a, b, c, d) do {\
/* Construct Ethernet header. */
ethdr = (struct ether_hdr *)rte_pktmbuf_prepend(pkt, (uint16_t)sizeof(*ethdr));
- RTE_MBUF_ASSERT(ethdr != NULL);
+ RTE_ASSERT(ethdr != NULL);
ether_addr_copy(dest_addr, &ethdr->d_addr);
ether_addr_copy(&ports_eth_addr[port], &ethdr->s_addr);
/* Remove the Ethernet header from the input packet */
iphdr = (struct ipv4_hdr *)rte_pktmbuf_adj(m, (uint16_t)sizeof(struct ether_hdr));
- RTE_MBUF_ASSERT(iphdr != NULL);
+ RTE_ASSERT(iphdr != NULL);
dest_addr = rte_be_to_cpu_32(iphdr->dst_addr);
struct lthread_stack *s;
s = _lthread_objcache_alloc((THIS_SCHED)->stack_cache);
- LTHREAD_ASSERT(s != NULL);
+ RTE_ASSERT(s != NULL);
s->root_sched = THIS_SCHED;
s->stack_size = LTHREAD_MAX_STACK_SIZE;
uint64_t diag_ref; /* ref to user diag data */
} __rte_cache_aligned;
-/*
- * Assert
- */
-#if LTHREAD_DIAG
-#define LTHREAD_ASSERT(expr) do { \
- if (!(expr)) \
- rte_panic("line%d\tassert \"" #expr "\" failed\n", __LINE__);\
-} while (0)
-#else
-#define LTHREAD_ASSERT(expr) do {} while (0)
-#endif
-
#endif /* LTHREAD_INT_H */
_suspend();
/* resumed, must loop and compete for the lock again */
}
- LTHREAD_ASSERT(0);
return 0;
}
if (unblocked != NULL) {
rte_atomic64_dec(&m->count);
DIAG_EVENT(m, LT_DIAG_MUTEX_UNLOCKED, m, unblocked);
- LTHREAD_ASSERT(unblocked->sched != NULL);
+ RTE_ASSERT(unblocked->sched != NULL);
_ready_queue_insert((struct lthread_sched *)
unblocked->sched, unblocked);
break;
RTE_CACHE_LINE_SIZE,
rte_socket_id());
- LTHREAD_ASSERT(p);
+ RTE_ASSERT(p);
p->stub = rte_malloc_socket(NULL,
sizeof(struct qnode),
RTE_CACHE_LINE_SIZE,
rte_socket_id());
- LTHREAD_ASSERT(p->stub);
+ RTE_ASSERT(p->stub);
if (name != NULL)
strncpy(p->name, name, LT_MAX_NAME_SIZE);
/* allocated stub node */
stub = _qnode_alloc();
- LTHREAD_ASSERT(stub);
+ RTE_ASSERT(stub);
if (name != NULL)
strncpy(new_queue->name, name, sizeof(new_queue->name));
struct lthread_sched *new_sched;
unsigned lcoreid = rte_lcore_id();
- LTHREAD_ASSERT(stack_size <= LTHREAD_MAX_STACK_SIZE);
+ RTE_ASSERT(stack_size <= LTHREAD_MAX_STACK_SIZE);
if (stack_size == 0)
stack_size = LTHREAD_MAX_STACK_SIZE;
pool = rte_ring_create(name,
LTHREAD_MAX_KEYS, 0, 0);
- LTHREAD_ASSERT(pool);
+ RTE_ASSERT(pool);
int i;
tls = _lthread_objcache_alloc((THIS_SCHED)->tls_cache);
- LTHREAD_ASSERT(tls != NULL);
+ RTE_ASSERT(tls != NULL);
tls->root_sched = (THIS_SCHED);
lt->tls = tls;
* the implementation is architecture-specific.
*/
+#include "rte_log.h"
+
#ifdef __cplusplus
extern "C" {
#endif
#define rte_panic(...) rte_panic_(__func__, __VA_ARGS__, "dummy")
#define rte_panic_(func, format, ...) __rte_panic(func, format "%.0s", __VA_ARGS__)
+#if RTE_LOG_LEVEL >= RTE_LOG_DEBUG
+#define RTE_ASSERT(exp) RTE_VERIFY(exp)
+#else
+#define RTE_ASSERT(exp) do {} while (0)
+#endif
#define RTE_VERIFY(exp) do { \
- if (!(exp)) \
+ if (unlikely(!(exp))) \
rte_panic("line %d\tassert \"" #exp "\" failed\n", __LINE__); \
} while (0)
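Because RTE_ASSERT compiles to nothing in non-debug builds, a variable
that is read only by an assert would trigger an unused-variable warning;
this is what the RTE_SET_USED additions above guard against. A minimal
sketch of the pattern:

    RTE_SET_USED(rxd);   /* silence the warning when RTE_ASSERT is empty */
    RTE_ASSERT(rcd->len <= rxd->len);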
/* logging macros. */
#ifdef RTE_LIBRTE_IP_FRAG_DEBUG
-
#define IP_FRAG_LOG(lvl, fmt, args...) RTE_LOG(lvl, USER1, fmt, ##args)
-
-#define IP_FRAG_ASSERT(exp) \
-if (!(exp)) { \
- rte_panic("function %s, line%d\tassert \"" #exp "\" failed\n", \
- __func__, __LINE__); \
-}
#else
#define IP_FRAG_LOG(lvl, fmt, args...) do {} while(0)
-#define IP_FRAG_ASSERT(exp) do {} while (0)
#endif /* IP_FRAG_DEBUG */
#define IPV4_KEYLEN 1
frag_size = (uint16_t)(mtu_size - sizeof(struct ipv4_hdr));
/* Fragment size should be a multiple of 8. */
- IP_FRAG_ASSERT((frag_size & IPV4_HDR_FO_MASK) == 0);
+ RTE_ASSERT((frag_size & IPV4_HDR_FO_MASK) == 0);
in_hdr = rte_pktmbuf_mtod(pkt_in, struct ipv4_hdr *);
flag_offset = rte_cpu_to_be_16(in_hdr->fragment_offset);
frag_size = (uint16_t)(mtu_size - sizeof(struct ipv6_hdr));
/* Fragment size should be a multiple of 8. */
- IP_FRAG_ASSERT((frag_size & ~RTE_IPV6_EHDR_FO_MASK) == 0);
+ RTE_ASSERT((frag_size & ~RTE_IPV6_EHDR_FO_MASK) == 0);
/* Check that pkts_out is big enough to hold all fragments */
if (unlikely (frag_size * nb_pkts_out <
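The alignment requirement follows from the IPv4 header format: fragment
offsets are carried in units of 8 bytes, so every non-final fragment
payload must be 8-byte aligned. A worked example with assumed values:

    /* A 1500-byte MTU minus the 20-byte IPv4 header leaves 1480 bytes
     * of payload per fragment; 1480 % 8 == 0, so the assert holds. */
    uint16_t frag_size = 1500 - 20;    /* 1480, a multiple of 8 */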
struct rte_pktmbuf_pool_private default_mbp_priv;
uint16_t roomsz;
- RTE_MBUF_ASSERT(mp->elt_size >= sizeof(struct rte_mbuf));
+ RTE_ASSERT(mp->elt_size >= sizeof(struct rte_mbuf));
/* if no structure is provided, assume no mbuf private area */
user_mbp_priv = opaque_arg;
user_mbp_priv = &default_mbp_priv;
}
- RTE_MBUF_ASSERT(mp->elt_size >= sizeof(struct rte_mbuf) +
+ RTE_ASSERT(mp->elt_size >= sizeof(struct rte_mbuf) +
user_mbp_priv->mbuf_data_room_size +
user_mbp_priv->mbuf_priv_size);
mbuf_size = sizeof(struct rte_mbuf) + priv_size;
buf_len = rte_pktmbuf_data_room_size(mp);
- RTE_MBUF_ASSERT(RTE_ALIGN(priv_size, RTE_MBUF_PRIV_ALIGN) == priv_size);
- RTE_MBUF_ASSERT(mp->elt_size >= mbuf_size);
- RTE_MBUF_ASSERT(buf_len <= UINT16_MAX);
+ RTE_ASSERT(RTE_ALIGN(priv_size, RTE_MBUF_PRIV_ALIGN) == priv_size);
+ RTE_ASSERT(mp->elt_size >= mbuf_size);
+ RTE_ASSERT(buf_len <= UINT16_MAX);
memset(m, 0, mp->elt_size);
rte_mbuf_sanity_check(m, is_h); \
} while (0)
-/** MBUF asserts in debug mode */
-#define RTE_MBUF_ASSERT(exp) \
-if (!(exp)) { \
- rte_panic("line%d\tassert \"" #exp "\" failed\n", __LINE__); \
-}
-
#else /* RTE_LIBRTE_MBUF_DEBUG */
/** check mbuf type in debug mode */
/** check mbuf type in debug mode if mbuf pointer is not null */
#define __rte_mbuf_sanity_check_raw(m, is_h) do { } while (0)
-/** MBUF asserts in debug mode */
-#define RTE_MBUF_ASSERT(exp) do { } while (0)
-
#endif /* RTE_LIBRTE_MBUF_DEBUG */
#ifdef RTE_MBUF_REFCNT_ATOMIC
if (rte_mempool_get(mp, &mb) < 0)
return NULL;
m = (struct rte_mbuf *)mb;
- RTE_MBUF_ASSERT(rte_mbuf_refcnt_read(m) == 0);
+ RTE_ASSERT(rte_mbuf_refcnt_read(m) == 0);
rte_mbuf_refcnt_set(m, 1);
return m;
}
static inline void __attribute__((always_inline))
__rte_mbuf_raw_free(struct rte_mbuf *m)
{
- RTE_MBUF_ASSERT(rte_mbuf_refcnt_read(m) == 0);
+ RTE_ASSERT(rte_mbuf_refcnt_read(m) == 0);
rte_mempool_put(m->pool, m);
}
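Note the invariant these two asserts encode: mbufs resting in the pool
keep a reference count of zero, so allocation checks for zero before
setting the count to one, and the raw free path checks for zero again
before returning the buffer to the pool.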
switch (count % 4) {
case 0:
while (idx != count) {
- RTE_MBUF_ASSERT(rte_mbuf_refcnt_read(mbufs[idx]) == 0);
+ RTE_ASSERT(rte_mbuf_refcnt_read(mbufs[idx]) == 0);
rte_mbuf_refcnt_set(mbufs[idx], 1);
rte_pktmbuf_reset(mbufs[idx]);
idx++;
case 3:
- RTE_MBUF_ASSERT(rte_mbuf_refcnt_read(mbufs[idx]) == 0);
+ RTE_ASSERT(rte_mbuf_refcnt_read(mbufs[idx]) == 0);
rte_mbuf_refcnt_set(mbufs[idx], 1);
rte_pktmbuf_reset(mbufs[idx]);
idx++;
case 2:
- RTE_MBUF_ASSERT(rte_mbuf_refcnt_read(mbufs[idx]) == 0);
+ RTE_ASSERT(rte_mbuf_refcnt_read(mbufs[idx]) == 0);
rte_mbuf_refcnt_set(mbufs[idx], 1);
rte_pktmbuf_reset(mbufs[idx]);
idx++;
case 1:
- RTE_MBUF_ASSERT(rte_mbuf_refcnt_read(mbufs[idx]) == 0);
+ RTE_ASSERT(rte_mbuf_refcnt_read(mbufs[idx]) == 0);
rte_mbuf_refcnt_set(mbufs[idx], 1);
rte_pktmbuf_reset(mbufs[idx]);
idx++;
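This switch falls through deliberately: it is a Duff's-device-style
four-way unroll of the bulk-allocation loop, which is why the same
assert/reset sequence repeats in every case and must be converted
consistently.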
{
struct rte_mbuf *md;
- RTE_MBUF_ASSERT(RTE_MBUF_DIRECT(mi) &&
+ RTE_ASSERT(RTE_MBUF_DIRECT(mi) &&
rte_mbuf_refcnt_read(mi) == 1);
/* if m is not direct, get the mbuf that embeds the data */
unsigned common_count;
unsigned cache_count;
- RTE_VERIFY(f != NULL);
- RTE_VERIFY(mp != NULL);
+ RTE_ASSERT(f != NULL);
+ RTE_ASSERT(mp != NULL);
fprintf(f, "mempool <%s>@%p\n", mp->name, mp);
fprintf(f, " flags=%x\n", mp->flags);
scale = 1024.0;
- RTE_RED_ASSERT(RTE_RED_WQ_LOG2_NUM == RTE_DIM(rte_red_log2_1_minus_Wq));
+ RTE_ASSERT(RTE_RED_WQ_LOG2_NUM == RTE_DIM(rte_red_log2_1_minus_Wq));
for (i = RTE_RED_WQ_LOG2_MIN; i <= RTE_RED_WQ_LOG2_MAX; i++) {
double n = (double)i;
#define RTE_RED_INT16_NBITS (sizeof(uint16_t) * CHAR_BIT)
#define RTE_RED_WQ_LOG2_NUM (RTE_RED_WQ_LOG2_MAX - RTE_RED_WQ_LOG2_MIN + 1)
-#ifdef RTE_RED_DEBUG
-
-#define RTE_RED_ASSERT(exp) \
-if (!(exp)) { \
- rte_panic("line%d\tassert \"" #exp "\" failed\n", __LINE__); \
-}
-
-#else
-
-#define RTE_RED_ASSERT(exp) do { } while(0)
-
-#endif /* RTE_RED_DEBUG */
-
/**
* Externs
*
{
uint64_t time_diff = 0, m = 0;
- RTE_RED_ASSERT(red_cfg != NULL);
- RTE_RED_ASSERT(red != NULL);
+ RTE_ASSERT(red_cfg != NULL);
+ RTE_ASSERT(red != NULL);
red->count ++;
struct rte_red *red,
const unsigned q)
{
- RTE_RED_ASSERT(red_cfg != NULL);
- RTE_RED_ASSERT(red != NULL);
+ RTE_ASSERT(red_cfg != NULL);
+ RTE_ASSERT(red != NULL);
/**
* EWMA filter (Sally Floyd and Van Jacobson):
const unsigned q,
const uint64_t time)
{
- RTE_RED_ASSERT(red_cfg != NULL);
- RTE_RED_ASSERT(red != NULL);
+ RTE_ASSERT(red_cfg != NULL);
+ RTE_ASSERT(red != NULL);
if (q != 0) {
return rte_red_enqueue_nonempty(red_cfg, red, q);
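For reference, the EWMA mentioned in the comment above updates the
average queue size as (a sketch of the standard RED filter, not
necessarily the exact fixed-point arithmetic used here):

    avg = (1 - wq) * avg + wq * q,    with wq = 2^(-wq_log2)

which is why wq_log2 is bounded by RTE_RED_WQ_LOG2_MIN..MAX and the
rte_red_log2_1_minus_Wq table checked above has RTE_RED_WQ_LOG2_NUM
entries.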