/* SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 2016-2018 Solarflare Communications Inc.
 *
 * This software was jointly developed between OKTET Labs (under contract
 * for Solarflare) and Solarflare Communications, Inc.
 */

#ifndef _SFC_COMMON_EFSYS_H
#define _SFC_COMMON_EFSYS_H

#include <stdbool.h>
#include <string.h>

#include <rte_spinlock.h>
#include <rte_byteorder.h>
#include <rte_debug.h>
#include <rte_memzone.h>
#include <rte_memory.h>
#include <rte_memcpy.h>
#include <rte_cycles.h>
#include <rte_prefetch.h>
#include <rte_common.h>
#include <rte_malloc.h>
#include <rte_atomic.h>
/* rte_io.h provides the relaxed MMIO accessors used by the BAR macros below */
#include <rte_io.h>

#include "sfc_debug.h"

#define EFSYS_HAS_UINT64 1
#define EFSYS_USE_UINT64 1
#define EFSYS_HAS_SSE2_M128 1

#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
#define EFSYS_IS_BIG_ENDIAN 1
#define EFSYS_IS_LITTLE_ENDIAN 0
#elif RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
#define EFSYS_IS_BIG_ENDIAN 0
#define EFSYS_IS_LITTLE_ENDIAN 1
#else
#error "Cannot determine system endianness"
#endif

#include "efx_types.h"

typedef bool boolean_t;

/* Boolean values and the lint annotation used by the statement macros below */
#ifndef B_FALSE
#define B_FALSE false
#endif
#ifndef B_TRUE
#define B_TRUE true
#endif
#ifndef _NOTE
#define _NOTE(_annotation)
#endif

/*
 * RTE_MAX() and RTE_MIN() cannot be used since a braced-group within an
 * expression is allowed only inside a function, but MAX() is used to give
 * the number of elements in an array.
 */
#ifndef MAX
#define MAX(v1, v2) ((v1) > (v2) ? (v1) : (v2))
#endif
#ifndef MIN
#define MIN(v1, v2) ((v1) < (v2) ? (v1) : (v2))
#endif
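
/*
 * Illustrative sketch, not part of the original header: MAX() must remain a
 * constant expression so that it can size an array at file scope, which a
 * braced-group macro such as RTE_MAX() cannot. The names below are
 * hypothetical:
 *
 *	#define SFC_EXAMPLE_A 4
 *	#define SFC_EXAMPLE_B 6
 *	static int sfc_example_tbl[MAX(SFC_EXAMPLE_A, SFC_EXAMPLE_B)];
 */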

/*
 * DPDK has macros for alignment, but a proper correspondence would have to
 * be established here before they could be re-used.
 */

#define IS_P2ALIGNED(v, a) ((((uintptr_t)(v)) & ((uintptr_t)(a) - 1)) == 0)

#define P2ROUNDUP(x, align) (-(-(x) & -(align)))

#define P2ALIGN(_x, _a) ((_x) & -(_a))

#define ISP2(x) rte_is_power_of_2(x)
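
/*
 * Illustrative arithmetic, not part of the original header, assuming two's
 * complement integers and power-of-2 alignments:
 *
 *	IS_P2ALIGNED(16, 8) -> 1	(16 & 7 == 0)
 *	IS_P2ALIGNED(13, 8) -> 0	(13 & 7 == 5)
 *	P2ROUNDUP(13, 8)    -> 16	(-(-13 & -8) == -(-16))
 *	P2ALIGN(13, 8)      -> 8	(13 & -8 == 8)
 *	ISP2(64)            -> true	ISP2(48) -> false
 */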

#define ENOTACTIVE ENOTCONN

static inline void
prefetch_read_many(const volatile void *addr)
{
	rte_prefetch0(addr);
}

static inline void
prefetch_read_once(const volatile void *addr)
{
	rte_prefetch_non_temporal(addr);
}

/* Modifiers used for Windows builds */
#define __in
#define __in_opt
#define __in_ecount(_n)
#define __in_ecount_opt(_n)
#define __in_bcount(_n)
#define __in_bcount_opt(_n)

#define __out
#define __out_opt
#define __out_ecount(_n)
#define __out_ecount_opt(_n)
#define __out_bcount(_n)
#define __out_bcount_opt(_n)
#define __out_bcount_part(_n, _l)
#define __out_bcount_part_opt(_n, _l)

#define __inout
#define __inout_opt
#define __inout_ecount(_n)
#define __inout_ecount_opt(_n)
#define __inout_bcount(_n)
#define __inout_bcount_opt(_n)
#define __inout_bcount_full_opt(_n)

#define __deref_out_bcount_opt(n)

#define __checkReturn
#define __success(_x)

#define __drv_when(_p, _c)

/* Code inclusion options */

#define EFSYS_OPT_NAMES 1

/* Disable SFN5xxx/SFN6xxx since they require specific support in the PMD */
#define EFSYS_OPT_SIENA 0
/* Enable SFN7xxx support */
#define EFSYS_OPT_HUNTINGTON 1
/* Enable SFN8xxx support */
#define EFSYS_OPT_MEDFORD 1
/* Enable X2xxx support */
#define EFSYS_OPT_MEDFORD2 1
#ifdef RTE_LIBRTE_SFC_EFX_DEBUG
#define EFSYS_OPT_CHECK_REG 1
#else
#define EFSYS_OPT_CHECK_REG 0
#endif

/* MCDI is required for SFN7xxx and SFN8xxx */
#define EFSYS_OPT_MCDI 1
#define EFSYS_OPT_MCDI_LOGGING 1
#define EFSYS_OPT_MCDI_PROXY_AUTH 1

#define EFSYS_OPT_MAC_STATS 1

#define EFSYS_OPT_LOOPBACK 1

#define EFSYS_OPT_MON_MCDI 0
#define EFSYS_OPT_MON_STATS 0

#define EFSYS_OPT_PHY_STATS 0
#define EFSYS_OPT_BIST 0
#define EFSYS_OPT_PHY_LED_CONTROL 0
#define EFSYS_OPT_PHY_FLAGS 0

#define EFSYS_OPT_VPD 0
#define EFSYS_OPT_NVRAM 0
#define EFSYS_OPT_BOOTCFG 0
#define EFSYS_OPT_IMAGE_LAYOUT 0

#define EFSYS_OPT_DIAG 0
#define EFSYS_OPT_RX_SCALE 1
#define EFSYS_OPT_QSTATS 0
/* Filters support is required for SFN7xxx and SFN8xxx */
#define EFSYS_OPT_FILTER 1
#define EFSYS_OPT_RX_SCATTER 0

#define EFSYS_OPT_EV_PREFETCH 0

#define EFSYS_OPT_DECODE_INTR_FATAL 0

#define EFSYS_OPT_LICENSING 0

#define EFSYS_OPT_ALLOW_UNCONFIGURED_NIC 0

#define EFSYS_OPT_RX_PACKED_STREAM 0

#define EFSYS_OPT_TUNNEL 1

#define EFSYS_OPT_FW_SUBVARIANT_AWARE 1

typedef struct __efsys_identifier_s efsys_identifier_t;

#define EFSYS_PROBE(_name) \
	do { } while (0)

#define EFSYS_PROBE1(_name, _type1, _arg1) \
	do { } while (0)

#define EFSYS_PROBE2(_name, _type1, _arg1, _type2, _arg2) \
	do { } while (0)

#define EFSYS_PROBE3(_name, _type1, _arg1, _type2, _arg2, \
		     _type3, _arg3) \
	do { } while (0)

#define EFSYS_PROBE4(_name, _type1, _arg1, _type2, _arg2, \
		     _type3, _arg3, _type4, _arg4) \
	do { } while (0)

#define EFSYS_PROBE5(_name, _type1, _arg1, _type2, _arg2, \
		     _type3, _arg3, _type4, _arg4, _type5, _arg5) \
	do { } while (0)

#define EFSYS_PROBE6(_name, _type1, _arg1, _type2, _arg2, \
		     _type3, _arg3, _type4, _arg4, _type5, _arg5, \
		     _type6, _arg6) \
	do { } while (0)

#define EFSYS_PROBE7(_name, _type1, _arg1, _type2, _arg2, \
		     _type3, _arg3, _type4, _arg4, _type5, _arg5, \
		     _type6, _arg6, _type7, _arg7) \
	do { } while (0)

typedef rte_iova_t efsys_dma_addr_t;

typedef struct efsys_mem_s {
	const struct rte_memzone	*esm_mz;
	/*
	 * Ideally it should have a volatile qualifier to denote that
	 * the memory may be updated by someone else. However, that adds
	 * qualifier discard warnings when the pointer or its derivative
	 * is passed to memset() or rte_mov16().
	 * So, skip the qualifier here, but make sure that it is added
	 * below in the access macros.
	 */
	void				*esm_base;
	efsys_dma_addr_t		esm_addr;
} efsys_mem_t;

#define EFSYS_MEM_ZERO(_esmp, _size) \
	do { \
		(void)memset((void *)(_esmp)->esm_base, 0, (_size)); \
		_NOTE(CONSTANTCONDITION); \
	} while (B_FALSE)

#define EFSYS_MEM_READD(_esmp, _offset, _edp) \
	do { \
		volatile uint8_t *_base = (_esmp)->esm_base; \
		volatile uint32_t *_addr; \
		_NOTE(CONSTANTCONDITION); \
		SFC_ASSERT(IS_P2ALIGNED(_offset, sizeof(efx_dword_t))); \
		_addr = (volatile uint32_t *)(_base + (_offset)); \
		(_edp)->ed_u32[0] = _addr[0]; \
		EFSYS_PROBE2(mem_readl, unsigned int, (_offset), \
		    uint32_t, (_edp)->ed_u32[0]); \
		_NOTE(CONSTANTCONDITION); \
	} while (B_FALSE)

#define EFSYS_MEM_READQ(_esmp, _offset, _eqp) \
	do { \
		volatile uint8_t *_base = (_esmp)->esm_base; \
		volatile uint64_t *_addr; \
		_NOTE(CONSTANTCONDITION); \
		SFC_ASSERT(IS_P2ALIGNED(_offset, sizeof(efx_qword_t))); \
		_addr = (volatile uint64_t *)(_base + (_offset)); \
		(_eqp)->eq_u64[0] = _addr[0]; \
		EFSYS_PROBE3(mem_readq, unsigned int, (_offset), \
		    uint32_t, (_eqp)->eq_u32[1], \
		    uint32_t, (_eqp)->eq_u32[0]); \
		_NOTE(CONSTANTCONDITION); \
	} while (B_FALSE)

#define EFSYS_MEM_READO(_esmp, _offset, _eop) \
	do { \
		volatile uint8_t *_base = (_esmp)->esm_base; \
		volatile __m128i *_addr; \
		_NOTE(CONSTANTCONDITION); \
		SFC_ASSERT(IS_P2ALIGNED(_offset, sizeof(efx_oword_t))); \
		_addr = (volatile __m128i *)(_base + (_offset)); \
		(_eop)->eo_u128[0] = _addr[0]; \
		EFSYS_PROBE5(mem_reado, unsigned int, (_offset), \
		    uint32_t, (_eop)->eo_u32[3], \
		    uint32_t, (_eop)->eo_u32[2], \
		    uint32_t, (_eop)->eo_u32[1], \
		    uint32_t, (_eop)->eo_u32[0]); \
		_NOTE(CONSTANTCONDITION); \
	} while (B_FALSE)

#define EFSYS_MEM_WRITED(_esmp, _offset, _edp) \
	do { \
		volatile uint8_t *_base = (_esmp)->esm_base; \
		volatile uint32_t *_addr; \
		_NOTE(CONSTANTCONDITION); \
		SFC_ASSERT(IS_P2ALIGNED(_offset, sizeof(efx_dword_t))); \
		EFSYS_PROBE2(mem_writed, unsigned int, (_offset), \
		    uint32_t, (_edp)->ed_u32[0]); \
		_addr = (volatile uint32_t *)(_base + (_offset)); \
		_addr[0] = (_edp)->ed_u32[0]; \
		_NOTE(CONSTANTCONDITION); \
	} while (B_FALSE)

#define EFSYS_MEM_WRITEQ(_esmp, _offset, _eqp) \
	do { \
		volatile uint8_t *_base = (_esmp)->esm_base; \
		volatile uint64_t *_addr; \
		_NOTE(CONSTANTCONDITION); \
		SFC_ASSERT(IS_P2ALIGNED(_offset, sizeof(efx_qword_t))); \
		EFSYS_PROBE3(mem_writeq, unsigned int, (_offset), \
		    uint32_t, (_eqp)->eq_u32[1], \
		    uint32_t, (_eqp)->eq_u32[0]); \
		_addr = (volatile uint64_t *)(_base + (_offset)); \
		_addr[0] = (_eqp)->eq_u64[0]; \
		_NOTE(CONSTANTCONDITION); \
	} while (B_FALSE)

#define EFSYS_MEM_WRITEO(_esmp, _offset, _eop) \
	do { \
		volatile uint8_t *_base = (_esmp)->esm_base; \
		volatile __m128i *_addr; \
		_NOTE(CONSTANTCONDITION); \
		SFC_ASSERT(IS_P2ALIGNED(_offset, sizeof(efx_oword_t))); \
		EFSYS_PROBE5(mem_writeo, unsigned int, (_offset), \
		    uint32_t, (_eop)->eo_u32[3], \
		    uint32_t, (_eop)->eo_u32[2], \
		    uint32_t, (_eop)->eo_u32[1], \
		    uint32_t, (_eop)->eo_u32[0]); \
		_addr = (volatile __m128i *)(_base + (_offset)); \
		_addr[0] = (_eop)->eo_u128[0]; \
		_NOTE(CONSTANTCONDITION); \
	} while (B_FALSE)

#define EFSYS_MEM_SIZE(_esmp) \
	((_esmp)->esm_mz->len)

#define EFSYS_MEM_ADDR(_esmp) \
	((_esmp)->esm_addr)

#define EFSYS_MEM_IS_NULL(_esmp) \
	((_esmp)->esm_base == NULL)

#define EFSYS_MEM_PREFETCH(_esmp, _offset) \
	do { \
		volatile uint8_t *_base = (_esmp)->esm_base; \
		rte_prefetch0(_base + (_offset)); \
	} while (B_FALSE)

typedef struct efsys_bar_s {
	rte_spinlock_t		esb_lock;
	struct rte_pci_device	*esb_dev;
	/*
	 * Ideally it should have a volatile qualifier to denote that
	 * the memory may be updated by someone else. However, that adds
	 * qualifier discard warnings when the pointer or its derivative
	 * is passed to memset() or rte_mov16().
	 * So, skip the qualifier here, but make sure that it is added
	 * below in the access macros.
	 */
	void			*esb_base;
} efsys_bar_t;

#define SFC_BAR_LOCK_INIT(_esbp, _ifname) \
	do { \
		rte_spinlock_init(&(_esbp)->esb_lock); \
		_NOTE(CONSTANTCONDITION); \
	} while (B_FALSE)

#define SFC_BAR_LOCK_DESTROY(_esbp) ((void)0)
#define SFC_BAR_LOCK(_esbp) rte_spinlock_lock(&(_esbp)->esb_lock)
#define SFC_BAR_UNLOCK(_esbp) rte_spinlock_unlock(&(_esbp)->esb_lock)

#define EFSYS_BAR_READD(_esbp, _offset, _edp, _lock) \
	do { \
		volatile uint8_t *_base = (_esbp)->esb_base; \
		volatile uint32_t *_addr; \
		_NOTE(CONSTANTCONDITION); \
		SFC_ASSERT(IS_P2ALIGNED(_offset, sizeof(efx_dword_t))); \
		_NOTE(CONSTANTCONDITION); \
		if (_lock) \
			SFC_BAR_LOCK(_esbp); \
		_addr = (volatile uint32_t *)(_base + (_offset)); \
		(_edp)->ed_u32[0] = rte_read32_relaxed(_addr); \
		EFSYS_PROBE2(bar_readd, unsigned int, (_offset), \
		    uint32_t, (_edp)->ed_u32[0]); \
		_NOTE(CONSTANTCONDITION); \
		if (_lock) \
			SFC_BAR_UNLOCK(_esbp); \
		_NOTE(CONSTANTCONDITION); \
	} while (B_FALSE)

#define EFSYS_BAR_READQ(_esbp, _offset, _eqp) \
	do { \
		volatile uint8_t *_base = (_esbp)->esb_base; \
		volatile uint64_t *_addr; \
		_NOTE(CONSTANTCONDITION); \
		SFC_ASSERT(IS_P2ALIGNED(_offset, sizeof(efx_qword_t))); \
		SFC_BAR_LOCK(_esbp); \
		_addr = (volatile uint64_t *)(_base + (_offset)); \
		(_eqp)->eq_u64[0] = rte_read64_relaxed(_addr); \
		EFSYS_PROBE3(bar_readq, unsigned int, (_offset), \
		    uint32_t, (_eqp)->eq_u32[1], \
		    uint32_t, (_eqp)->eq_u32[0]); \
		SFC_BAR_UNLOCK(_esbp); \
		_NOTE(CONSTANTCONDITION); \
	} while (B_FALSE)

#define EFSYS_BAR_READO(_esbp, _offset, _eop, _lock) \
	do { \
		volatile uint8_t *_base = (_esbp)->esb_base; \
		volatile __m128i *_addr; \
		_NOTE(CONSTANTCONDITION); \
		SFC_ASSERT(IS_P2ALIGNED(_offset, sizeof(efx_oword_t))); \
		_NOTE(CONSTANTCONDITION); \
		if (_lock) \
			SFC_BAR_LOCK(_esbp); \
		_addr = (volatile __m128i *)(_base + (_offset)); \
		/* There is no rte_read128_relaxed() yet */ \
		(_eop)->eo_u128[0] = _addr[0]; \
		EFSYS_PROBE5(bar_reado, unsigned int, (_offset), \
		    uint32_t, (_eop)->eo_u32[3], \
		    uint32_t, (_eop)->eo_u32[2], \
		    uint32_t, (_eop)->eo_u32[1], \
		    uint32_t, (_eop)->eo_u32[0]); \
		_NOTE(CONSTANTCONDITION); \
		if (_lock) \
			SFC_BAR_UNLOCK(_esbp); \
		_NOTE(CONSTANTCONDITION); \
	} while (B_FALSE)

#define EFSYS_BAR_WRITED(_esbp, _offset, _edp, _lock) \
	do { \
		volatile uint8_t *_base = (_esbp)->esb_base; \
		volatile uint32_t *_addr; \
		_NOTE(CONSTANTCONDITION); \
		SFC_ASSERT(IS_P2ALIGNED(_offset, sizeof(efx_dword_t))); \
		_NOTE(CONSTANTCONDITION); \
		if (_lock) \
			SFC_BAR_LOCK(_esbp); \
		EFSYS_PROBE2(bar_writed, unsigned int, (_offset), \
		    uint32_t, (_edp)->ed_u32[0]); \
		_addr = (volatile uint32_t *)(_base + (_offset)); \
		rte_write32_relaxed((_edp)->ed_u32[0], _addr); \
		_NOTE(CONSTANTCONDITION); \
		if (_lock) \
			SFC_BAR_UNLOCK(_esbp); \
		_NOTE(CONSTANTCONDITION); \
	} while (B_FALSE)

#define EFSYS_BAR_WRITEQ(_esbp, _offset, _eqp) \
	do { \
		volatile uint8_t *_base = (_esbp)->esb_base; \
		volatile uint64_t *_addr; \
		_NOTE(CONSTANTCONDITION); \
		SFC_ASSERT(IS_P2ALIGNED(_offset, sizeof(efx_qword_t))); \
		SFC_BAR_LOCK(_esbp); \
		EFSYS_PROBE3(bar_writeq, unsigned int, (_offset), \
		    uint32_t, (_eqp)->eq_u32[1], \
		    uint32_t, (_eqp)->eq_u32[0]); \
		_addr = (volatile uint64_t *)(_base + (_offset)); \
		rte_write64_relaxed((_eqp)->eq_u64[0], _addr); \
		SFC_BAR_UNLOCK(_esbp); \
		_NOTE(CONSTANTCONDITION); \
	} while (B_FALSE)

/*
 * Guarantees 64bit aligned 64bit writes to write combined BAR mapping
 * (required by PIO hardware).
 *
 * Neither VFIO, nor UIO, nor NIC UIO (on FreeBSD) supports write-combined
 * memory mapped to user-land, so just abort if used.
 */
#define EFSYS_BAR_WC_WRITEQ(_esbp, _offset, _eqp) \
	do { \
		rte_panic("Write-combined BAR access not supported"); \
	} while (B_FALSE)

#define EFSYS_BAR_WRITEO(_esbp, _offset, _eop, _lock) \
	do { \
		volatile uint8_t *_base = (_esbp)->esb_base; \
		volatile __m128i *_addr; \
		_NOTE(CONSTANTCONDITION); \
		SFC_ASSERT(IS_P2ALIGNED(_offset, sizeof(efx_oword_t))); \
		_NOTE(CONSTANTCONDITION); \
		if (_lock) \
			SFC_BAR_LOCK(_esbp); \
		EFSYS_PROBE5(bar_writeo, unsigned int, (_offset), \
		    uint32_t, (_eop)->eo_u32[3], \
		    uint32_t, (_eop)->eo_u32[2], \
		    uint32_t, (_eop)->eo_u32[1], \
		    uint32_t, (_eop)->eo_u32[0]); \
		_addr = (volatile __m128i *)(_base + (_offset)); \
		/* There is no rte_write128_relaxed() yet */ \
		_addr[0] = (_eop)->eo_u128[0]; \
		_NOTE(CONSTANTCONDITION); \
		if (_lock) \
			SFC_BAR_UNLOCK(_esbp); \
		_NOTE(CONSTANTCONDITION); \
	} while (B_FALSE)

/* Use the standard octo-word write for doorbell writes */
#define EFSYS_BAR_DOORBELL_WRITEO(_esbp, _offset, _eop) \
	do { \
		EFSYS_BAR_WRITEO((_esbp), (_offset), (_eop), B_FALSE); \
		_NOTE(CONSTANTCONDITION); \
	} while (B_FALSE)

#define EFSYS_SPIN(_us) \
	do { \
		rte_delay_us(_us); \
		_NOTE(CONSTANTCONDITION); \
	} while (B_FALSE)

#define EFSYS_SLEEP EFSYS_SPIN

#define EFSYS_MEM_READ_BARRIER() rte_rmb()
#define EFSYS_PIO_WRITE_BARRIER() rte_io_wmb()

/*
 * DPDK does not provide any DMA syncing API, and no PMD does any explicit
 * DMA syncing either. DMA mappings are assumed to be coherent.
 */
#define EFSYS_DMA_SYNC_FOR_KERNEL(_esmp, _offset, _size) ((void)0)

/* Just prevent store and compiler (implicit) reordering */
#define EFSYS_DMA_SYNC_FOR_DEVICE(_esmp, _offset, _size) rte_wmb()
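
/*
 * Illustrative sketch, not part of the original header, of the intended
 * ordering: descriptors are filled in coherent DMA memory first, the write
 * memory barrier behind EFSYS_DMA_SYNC_FOR_DEVICE() then makes those stores
 * visible before the doorbell MMIO write tells the NIC to fetch them. The
 * arguments below are placeholders:
 *
 *	EFSYS_MEM_WRITEQ(esmp, desc_offset, &desc);
 *	EFSYS_DMA_SYNC_FOR_DEVICE(esmp, desc_offset, sizeof(desc));
 *	EFSYS_BAR_DOORBELL_WRITEO(esbp, doorbell_offset, &doorbell);
 */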

typedef uint64_t efsys_timestamp_t;

#define EFSYS_TIMESTAMP(_usp) \
	do { \
		*(_usp) = rte_get_timer_cycles() * 1000000 / \
			rte_get_timer_hz(); \
		_NOTE(CONSTANTCONDITION); \
	} while (B_FALSE)
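
/*
 * Illustrative arithmetic, not part of the original header: with a 2.5 GHz
 * timer (rte_get_timer_hz() == 2500000000) and 5000000000 elapsed cycles,
 * the expression yields 5000000000 * 1000000 / 2500000000 == 2000000 us,
 * i.e. two seconds. Note that the multiplication is performed before the
 * division, so the 64-bit intermediate can overflow for very long uptimes.
 */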

#define EFSYS_KMEM_ALLOC(_esip, _size, _p) \
	do { \
		(void)(_esip); \
		(_p) = rte_zmalloc("sfc", (_size), 0); \
		_NOTE(CONSTANTCONDITION); \
	} while (B_FALSE)

#define EFSYS_KMEM_FREE(_esip, _size, _p) \
	do { \
		(void)(_esip); \
		(void)(_size); \
		rte_free((_p)); \
		_NOTE(CONSTANTCONDITION); \
	} while (B_FALSE)

typedef rte_spinlock_t efsys_lock_t;

#define SFC_EFSYS_LOCK_INIT(_eslp, _ifname, _label) \
	rte_spinlock_init((_eslp))
#define SFC_EFSYS_LOCK_DESTROY(_eslp) ((void)0)
#define SFC_EFSYS_LOCK(_eslp) \
	rte_spinlock_lock((_eslp))
#define SFC_EFSYS_UNLOCK(_eslp) \
	rte_spinlock_unlock((_eslp))
#define SFC_EFSYS_LOCK_ASSERT_OWNED(_eslp) \
	SFC_ASSERT(rte_spinlock_is_locked((_eslp)))

typedef int efsys_lock_state_t;

#define EFSYS_LOCK_MAGIC 0x000010c4

#define EFSYS_LOCK(_lockp, _state) \
	do { \
		SFC_EFSYS_LOCK(_lockp); \
		(_state) = EFSYS_LOCK_MAGIC; \
		_NOTE(CONSTANTCONDITION); \
	} while (B_FALSE)

#define EFSYS_UNLOCK(_lockp, _state) \
	do { \
		SFC_ASSERT((_state) == EFSYS_LOCK_MAGIC); \
		SFC_EFSYS_UNLOCK(_lockp); \
		_NOTE(CONSTANTCONDITION); \
	} while (B_FALSE)
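
/*
 * Illustrative sketch, not part of the original header, of how the lock pair
 * is meant to be used: the state value set by EFSYS_LOCK() is handed back to
 * EFSYS_UNLOCK(), which asserts that it still holds the magic value. The
 * variable names are placeholders:
 *
 *	efsys_lock_t lock;		// e.g. embedded in the adapter state
 *	efsys_lock_state_t state;
 *
 *	EFSYS_LOCK(&lock, state);
 *	// ... critical section ...
 *	EFSYS_UNLOCK(&lock, state);
 */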

typedef uint64_t efsys_stat_t;

#define EFSYS_STAT_INCR(_knp, _delta) \
	do { \
		*(_knp) += (_delta); \
		_NOTE(CONSTANTCONDITION); \
	} while (B_FALSE)

#define EFSYS_STAT_DECR(_knp, _delta) \
	do { \
		*(_knp) -= (_delta); \
		_NOTE(CONSTANTCONDITION); \
	} while (B_FALSE)

#define EFSYS_STAT_SET(_knp, _val) \
	do { \
		*(_knp) = (_val); \
		_NOTE(CONSTANTCONDITION); \
	} while (B_FALSE)

#define EFSYS_STAT_SET_QWORD(_knp, _valp) \
	do { \
		*(_knp) = rte_le_to_cpu_64((_valp)->eq_u64[0]); \
		_NOTE(CONSTANTCONDITION); \
	} while (B_FALSE)

#define EFSYS_STAT_SET_DWORD(_knp, _valp) \
	do { \
		*(_knp) = rte_le_to_cpu_32((_valp)->ed_u32[0]); \
		_NOTE(CONSTANTCONDITION); \
	} while (B_FALSE)

#define EFSYS_STAT_INCR_QWORD(_knp, _valp) \
	do { \
		*(_knp) += rte_le_to_cpu_64((_valp)->eq_u64[0]); \
		_NOTE(CONSTANTCONDITION); \
	} while (B_FALSE)

#define EFSYS_STAT_SUBR_QWORD(_knp, _valp) \
	do { \
		*(_knp) -= rte_le_to_cpu_64((_valp)->eq_u64[0]); \
		_NOTE(CONSTANTCONDITION); \
	} while (B_FALSE)

#if EFSYS_OPT_DECODE_INTR_FATAL
#define EFSYS_ERR(_esip, _code, _dword0, _dword1) \
	do { \
		(void)(_esip); \
		SFC_GENERIC_LOG(ERR, "FATAL ERROR #%u (0x%08x%08x)", \
			(_code), (_dword0), (_dword1)); \
		_NOTE(CONSTANTCONDITION); \
	} while (B_FALSE)
#endif

/*
 * RTE_VERIFY() from DPDK treats expressions containing the % operator
 * incorrectly, so we re-implement it here.
 */
#ifdef RTE_LIBRTE_SFC_EFX_DEBUG
#define EFSYS_ASSERT(_exp) \
	do { \
		if (unlikely(!(_exp))) \
			rte_panic("line %d\tassert \"%s\" failed\n", \
				  __LINE__, (#_exp)); \
	} while (B_FALSE)
#else
#define EFSYS_ASSERT(_exp) (void)(_exp)
#endif

#define EFSYS_ASSERT3(_x, _op, _y, _t) EFSYS_ASSERT((_t)(_x) _op (_t)(_y))

#define EFSYS_ASSERT3U(_x, _op, _y) EFSYS_ASSERT3(_x, _op, _y, uint64_t)
#define EFSYS_ASSERT3S(_x, _op, _y) EFSYS_ASSERT3(_x, _op, _y, int64_t)
#define EFSYS_ASSERT3P(_x, _op, _y) EFSYS_ASSERT3(_x, _op, _y, uintptr_t)
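
/*
 * Illustrative sketch, not part of the original header: the typed variants
 * cast both operands before comparing, so mixed signed/unsigned values are
 * compared consistently. The values are placeholders:
 *
 *	EFSYS_ASSERT3U(nb_entries, <=, max_entries);
 *	EFSYS_ASSERT3P(esmp->esm_base, !=, NULL);
 */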

#define EFSYS_HAS_ROTL_DWORD 0

#endif /* _SFC_COMMON_EFSYS_H */