1 /* SPDX-License-Identifier: BSD-3-Clause
3 * Copyright(c) 2019-2020 Xilinx, Inc.
4 * Copyright(c) 2016-2019 Solarflare Communications Inc.
6 * This software was jointly developed between OKTET Labs (under contract
7 * for Solarflare) and Solarflare Communications, Inc.
10 #ifndef _SFC_COMMON_EFSYS_H
11 #define _SFC_COMMON_EFSYS_H
15 #include <rte_spinlock.h>
16 #include <rte_byteorder.h>
17 #include <rte_debug.h>
18 #include <rte_memzone.h>
19 #include <rte_memory.h>
20 #include <rte_memcpy.h>
21 #include <rte_cycles.h>
22 #include <rte_prefetch.h>
23 #include <rte_common.h>
24 #include <rte_malloc.h>
28 #include "sfc_efx_debug.h"
29 #include "sfc_efx_log.h"
/* libefx functions are exported as part of the DPDK-internal ABI */
#define LIBEFX_API __rte_internal

/* No specific decorations required since functions are local by default */
#define LIBEFX_INTERNAL

/* The platform has native 64bit types and libefx should use them */
#define EFSYS_HAS_UINT64 1
#define EFSYS_USE_UINT64 1
/* SSE2 __m128i may be used for 128bit (oword) accesses */
#define EFSYS_HAS_SSE2_M128 1
/*
 * Map DPDK's compile-time byte order onto the libefx endianness flags.
 * Exactly one of the two macros is 1. The conditional was missing its
 * #else/#endif, leaving the #if unterminated — restored here.
 */
#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
#define EFSYS_IS_BIG_ENDIAN 1
#define EFSYS_IS_LITTLE_ENDIAN 0
#elif RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
#define EFSYS_IS_BIG_ENDIAN 0
#define EFSYS_IS_LITTLE_ENDIAN 1
#else
#error "Cannot determine system endianness"
#endif
53 #include "efx_types.h"
56 typedef bool boolean_t;
/*
 * RTE_MAX() and RTE_MIN() cannot be used since braced-group within
 * expression allowed only inside a function, but MAX() is used as
 * a number of elements in array.
 */
/*
 * Plain conditional-expression MAX()/MIN() usable in constant contexts
 * (e.g. array sizes). Note: arguments may be evaluated twice.
 */
#define MAX(v1, v2) ((v1) > (v2) ? (v1) : (v2))
#define MIN(v1, v2) ((v1) < (v2) ? (v1) : (v2))

/* Power-of-2 predicate, delegated to DPDK */
#define ISP2(x) rte_is_power_of_2(x)

/* libefx uses ENOTACTIVE; map it to the closest standard errno value */
#define ENOTACTIVE ENOTCONN
/*
 * Prefetch data that will be read multiple times: keep it in cache.
 * NOTE(review): signature/body lines were lost in this copy; restored
 * as static inline wrappers over DPDK prefetch — confirm vs upstream.
 */
static inline void
prefetch_read_many(const volatile void *addr)
{
	rte_prefetch0(addr);
}

/* Prefetch data that will be read once: use the non-temporal hint */
static inline void
prefetch_read_once(const volatile void *addr)
{
	rte_prefetch_non_temporal(addr);
}
95 /* Code inclusion options */
98 #define EFSYS_OPT_NAMES 1
100 /* Disable SFN5xxx/SFN6xxx since it requires specific support in the PMD */
101 #define EFSYS_OPT_SIENA 0
102 /* Enable SFN7xxx support */
103 #define EFSYS_OPT_HUNTINGTON 1
104 /* Enable SFN8xxx support */
105 #define EFSYS_OPT_MEDFORD 1
/* Enable X2xxx (Medford2) support */
107 #define EFSYS_OPT_MEDFORD2 1
/*
 * Register writes are checked only in debug builds; the two conflicting
 * definitions lacked their #else/#endif — restored.
 */
#ifdef RTE_LIBRTE_SFC_EFX_DEBUG
#define EFSYS_OPT_CHECK_REG 1
#else
#define EFSYS_OPT_CHECK_REG 0
#endif
/* MCDI is required for SFN7xxx and SFN8xxx */
115 #define EFSYS_OPT_MCDI 1
116 #define EFSYS_OPT_MCDI_LOGGING 1
117 #define EFSYS_OPT_MCDI_PROXY_AUTH 1
119 #define EFSYS_OPT_MAC_STATS 1
121 #define EFSYS_OPT_LOOPBACK 1
123 #define EFSYS_OPT_MON_MCDI 0
124 #define EFSYS_OPT_MON_STATS 0
126 #define EFSYS_OPT_PHY_STATS 0
127 #define EFSYS_OPT_BIST 0
128 #define EFSYS_OPT_PHY_LED_CONTROL 0
129 #define EFSYS_OPT_PHY_FLAGS 0
131 #define EFSYS_OPT_VPD 0
132 #define EFSYS_OPT_NVRAM 0
133 #define EFSYS_OPT_BOOTCFG 0
134 #define EFSYS_OPT_IMAGE_LAYOUT 0
136 #define EFSYS_OPT_DIAG 0
137 #define EFSYS_OPT_RX_SCALE 1
138 #define EFSYS_OPT_QSTATS 0
/* Filters support is required for SFN7xxx and SFN8xxx */
140 #define EFSYS_OPT_FILTER 1
141 #define EFSYS_OPT_RX_SCATTER 0
143 #define EFSYS_OPT_EV_PREFETCH 0
145 #define EFSYS_OPT_DECODE_INTR_FATAL 0
147 #define EFSYS_OPT_LICENSING 0
149 #define EFSYS_OPT_ALLOW_UNCONFIGURED_NIC 0
151 #define EFSYS_OPT_RX_PACKED_STREAM 0
153 #define EFSYS_OPT_RX_ES_SUPER_BUFFER 1
155 #define EFSYS_OPT_TUNNEL 1
157 #define EFSYS_OPT_FW_SUBVARIANT_AWARE 1
159 #define EFSYS_OPT_EVB 0
161 #define EFSYS_OPT_MCDI_PROXY_AUTH_SERVER 0
/* Opaque per-NIC identifier passed through to common-code callbacks */
typedef struct __efsys_identifier_s efsys_identifier_t;

/*
 * DTrace-style probe points. There is no equivalent tracing facility
 * here, so every probe expands to an empty statement and the type/arg
 * pairs are discarded. Bodies restored as empty do/while(0).
 */
#define EFSYS_PROBE(_name) \
	do { } while (0)

#define EFSYS_PROBE1(_name, _type1, _arg1) \
	do { } while (0)

#define EFSYS_PROBE2(_name, _type1, _arg1, _type2, _arg2) \
	do { } while (0)

#define EFSYS_PROBE3(_name, _type1, _arg1, _type2, _arg2, \
	_type3, _arg3) \
	do { } while (0)

#define EFSYS_PROBE4(_name, _type1, _arg1, _type2, _arg2, \
	_type3, _arg3, _type4, _arg4) \
	do { } while (0)

#define EFSYS_PROBE5(_name, _type1, _arg1, _type2, _arg2, \
	_type3, _arg3, _type4, _arg4, _type5, _arg5) \
	do { } while (0)

#define EFSYS_PROBE6(_name, _type1, _arg1, _type2, _arg2, \
	_type3, _arg3, _type4, _arg4, _type5, _arg5, \
	_type6, _arg6) \
	do { } while (0)

#define EFSYS_PROBE7(_name, _type1, _arg1, _type2, _arg2, \
	_type3, _arg3, _type4, _arg4, _type5, _arg5, \
	_type6, _arg6, _type7, _arg7) \
	do { } while (0)
202 typedef rte_iova_t efsys_dma_addr_t;
204 typedef struct efsys_mem_s {
205 const struct rte_memzone *esm_mz;
207 * Ideally it should have volatile qualifier to denote that
208 * the memory may be updated by someone else. However, it adds
209 * qualifier discard warnings when the pointer or its derivative
210 * is passed to memset() or rte_mov16().
211 * So, skip the qualifier here, but make sure that it is added
212 * below in access macros.
215 efsys_dma_addr_t esm_addr;
/* Zero the first _size bytes of a DMA-mapped region */
#define EFSYS_MEM_ZERO(_esmp, _size) \
	do { \
		(void)memset((void *)(_esmp)->esm_base, 0, (_size)); \
		_NOTE(CONSTANTCONDITION); \
	} while (B_FALSE)
/*
 * Read a dword (32bit) from DMA memory at a dword-aligned byte offset.
 * Volatile access prevents the compiler from caching the value.
 */
#define EFSYS_MEM_READD(_esmp, _offset, _edp) \
	do { \
		volatile uint8_t *_base = (_esmp)->esm_base; \
		volatile uint32_t *_addr; \
		_NOTE(CONSTANTCONDITION); \
		SFC_EFX_ASSERT(EFX_IS_P2ALIGNED(size_t, _offset, \
						sizeof(efx_dword_t))); \
		_addr = (volatile uint32_t *)(_base + (_offset)); \
		(_edp)->ed_u32[0] = _addr[0]; \
		EFSYS_PROBE2(mem_readl, unsigned int, (_offset), \
			     uint32_t, (_edp)->ed_u32[0]); \
		_NOTE(CONSTANTCONDITION); \
	} while (B_FALSE)
/* Read a qword (64bit) from DMA memory at a qword-aligned offset */
#define EFSYS_MEM_READQ(_esmp, _offset, _eqp) \
	do { \
		volatile uint8_t *_base = (_esmp)->esm_base; \
		volatile uint64_t *_addr; \
		_NOTE(CONSTANTCONDITION); \
		SFC_EFX_ASSERT(EFX_IS_P2ALIGNED(size_t, _offset, \
						sizeof(efx_qword_t))); \
		_addr = (volatile uint64_t *)(_base + (_offset)); \
		(_eqp)->eq_u64[0] = _addr[0]; \
		EFSYS_PROBE3(mem_readq, unsigned int, (_offset), \
			     uint32_t, (_eqp)->eq_u32[1], \
			     uint32_t, (_eqp)->eq_u32[0]); \
		_NOTE(CONSTANTCONDITION); \
	} while (B_FALSE)
/* Read an oword (128bit, SSE2 __m128i) from DMA memory, oword-aligned */
#define EFSYS_MEM_READO(_esmp, _offset, _eop) \
	do { \
		volatile uint8_t *_base = (_esmp)->esm_base; \
		volatile __m128i *_addr; \
		_NOTE(CONSTANTCONDITION); \
		SFC_EFX_ASSERT(EFX_IS_P2ALIGNED(size_t, _offset, \
						sizeof(efx_oword_t))); \
		_addr = (volatile __m128i *)(_base + (_offset)); \
		(_eop)->eo_u128[0] = _addr[0]; \
		EFSYS_PROBE5(mem_reado, unsigned int, (_offset), \
			     uint32_t, (_eop)->eo_u32[3], \
			     uint32_t, (_eop)->eo_u32[2], \
			     uint32_t, (_eop)->eo_u32[1], \
			     uint32_t, (_eop)->eo_u32[0]); \
		_NOTE(CONSTANTCONDITION); \
	} while (B_FALSE)
/* Write a dword (32bit) to DMA memory at a dword-aligned offset */
#define EFSYS_MEM_WRITED(_esmp, _offset, _edp) \
	do { \
		volatile uint8_t *_base = (_esmp)->esm_base; \
		volatile uint32_t *_addr; \
		_NOTE(CONSTANTCONDITION); \
		SFC_EFX_ASSERT(EFX_IS_P2ALIGNED(size_t, _offset, \
						sizeof(efx_dword_t))); \
		EFSYS_PROBE2(mem_writed, unsigned int, (_offset), \
			     uint32_t, (_edp)->ed_u32[0]); \
		_addr = (volatile uint32_t *)(_base + (_offset)); \
		_addr[0] = (_edp)->ed_u32[0]; \
		_NOTE(CONSTANTCONDITION); \
	} while (B_FALSE)
/* Write a qword (64bit) to DMA memory at a qword-aligned offset */
#define EFSYS_MEM_WRITEQ(_esmp, _offset, _eqp) \
	do { \
		volatile uint8_t *_base = (_esmp)->esm_base; \
		volatile uint64_t *_addr; \
		_NOTE(CONSTANTCONDITION); \
		SFC_EFX_ASSERT(EFX_IS_P2ALIGNED(size_t, _offset, \
						sizeof(efx_qword_t))); \
		EFSYS_PROBE3(mem_writeq, unsigned int, (_offset), \
			     uint32_t, (_eqp)->eq_u32[1], \
			     uint32_t, (_eqp)->eq_u32[0]); \
		_addr = (volatile uint64_t *)(_base + (_offset)); \
		_addr[0] = (_eqp)->eq_u64[0]; \
		_NOTE(CONSTANTCONDITION); \
	} while (B_FALSE)
/* Write an oword (128bit, SSE2 __m128i) to DMA memory, oword-aligned */
#define EFSYS_MEM_WRITEO(_esmp, _offset, _eop) \
	do { \
		volatile uint8_t *_base = (_esmp)->esm_base; \
		volatile __m128i *_addr; \
		_NOTE(CONSTANTCONDITION); \
		SFC_EFX_ASSERT(EFX_IS_P2ALIGNED(size_t, _offset, \
						sizeof(efx_oword_t))); \
		EFSYS_PROBE5(mem_writeo, unsigned int, (_offset), \
			     uint32_t, (_eop)->eo_u32[3], \
			     uint32_t, (_eop)->eo_u32[2], \
			     uint32_t, (_eop)->eo_u32[1], \
			     uint32_t, (_eop)->eo_u32[0]); \
		_addr = (volatile __m128i *)(_base + (_offset)); \
		_addr[0] = (_eop)->eo_u128[0]; \
		_NOTE(CONSTANTCONDITION); \
	} while (B_FALSE)
/* Total size of the backing memzone */
#define EFSYS_MEM_SIZE(_esmp) \
	((_esmp)->esm_mz->len)

/*
 * DMA (IOVA) address of the region.
 * NOTE(review): body line was lost in this copy; restored from the
 * esm_addr member — confirm against upstream.
 */
#define EFSYS_MEM_ADDR(_esmp) \
	((_esmp)->esm_addr)

/* True if the region has not been allocated/mapped */
#define EFSYS_MEM_IS_NULL(_esmp) \
	((_esmp)->esm_base == NULL)

/* Prefetch a cache line of the DMA region into cache */
#define EFSYS_MEM_PREFETCH(_esmp, _offset) \
	do { \
		volatile uint8_t *_base = (_esmp)->esm_base; \
		rte_prefetch0(_base + (_offset)); \
	} while (B_FALSE)
364 typedef struct efsys_bar_s {
365 rte_spinlock_t esb_lock;
367 struct rte_pci_device *esb_dev;
369 * Ideally it should have volatile qualifier to denote that
370 * the memory may be updated by someone else. However, it adds
371 * qualifier discard warnings when the pointer or its derivative
372 * is passed to memset() or rte_mov16().
373 * So, skip the qualifier here, but make sure that it is added
374 * below in access macros.
/* Initialize the BAR spinlock; _ifname is unused on this platform */
#define SFC_BAR_LOCK_INIT(_esbp, _ifname) \
	do { \
		rte_spinlock_init(&(_esbp)->esb_lock); \
		_NOTE(CONSTANTCONDITION); \
	} while (B_FALSE)

/* Spinlocks need no teardown */
#define SFC_BAR_LOCK_DESTROY(_esbp) ((void)0)
#define SFC_BAR_LOCK(_esbp) rte_spinlock_lock(&(_esbp)->esb_lock)
#define SFC_BAR_UNLOCK(_esbp) rte_spinlock_unlock(&(_esbp)->esb_lock)
/*
 * Read a dword from the BAR. _lock selects whether the BAR spinlock is
 * taken around the access. NOTE(review): the "if (_lock)" guards were
 * lost in this copy and restored — confirm against upstream.
 */
#define EFSYS_BAR_READD(_esbp, _offset, _edp, _lock) \
	do { \
		volatile uint8_t *_base = (_esbp)->esb_base; \
		volatile uint32_t *_addr; \
		_NOTE(CONSTANTCONDITION); \
		SFC_EFX_ASSERT(EFX_IS_P2ALIGNED(size_t, _offset, \
						sizeof(efx_dword_t))); \
		_NOTE(CONSTANTCONDITION); \
		if (_lock) \
			SFC_BAR_LOCK(_esbp); \
		_addr = (volatile uint32_t *)(_base + (_offset)); \
		(_edp)->ed_u32[0] = rte_read32_relaxed(_addr); \
		EFSYS_PROBE2(bar_readd, unsigned int, (_offset), \
			     uint32_t, (_edp)->ed_u32[0]); \
		_NOTE(CONSTANTCONDITION); \
		if (_lock) \
			SFC_BAR_UNLOCK(_esbp); \
		_NOTE(CONSTANTCONDITION); \
	} while (B_FALSE)
/* Read a qword from the BAR; always takes the BAR spinlock */
#define EFSYS_BAR_READQ(_esbp, _offset, _eqp) \
	do { \
		volatile uint8_t *_base = (_esbp)->esb_base; \
		volatile uint64_t *_addr; \
		_NOTE(CONSTANTCONDITION); \
		SFC_EFX_ASSERT(EFX_IS_P2ALIGNED(size_t, _offset, \
						sizeof(efx_qword_t))); \
		SFC_BAR_LOCK(_esbp); \
		_addr = (volatile uint64_t *)(_base + (_offset)); \
		(_eqp)->eq_u64[0] = rte_read64_relaxed(_addr); \
		EFSYS_PROBE3(bar_readq, unsigned int, (_offset), \
			     uint32_t, (_eqp)->eq_u32[1], \
			     uint32_t, (_eqp)->eq_u32[0]); \
		SFC_BAR_UNLOCK(_esbp); \
		_NOTE(CONSTANTCONDITION); \
	} while (B_FALSE)
/*
 * Read an oword from the BAR. _lock selects locking.
 * NOTE(review): "if (_lock)" guards restored — confirm vs upstream.
 */
#define EFSYS_BAR_READO(_esbp, _offset, _eop, _lock) \
	do { \
		volatile uint8_t *_base = (_esbp)->esb_base; \
		volatile __m128i *_addr; \
		_NOTE(CONSTANTCONDITION); \
		SFC_EFX_ASSERT(EFX_IS_P2ALIGNED(size_t, _offset, \
						sizeof(efx_oword_t))); \
		_NOTE(CONSTANTCONDITION); \
		if (_lock) \
			SFC_BAR_LOCK(_esbp); \
		_addr = (volatile __m128i *)(_base + (_offset)); \
		/* There is no rte_read128_relaxed() yet */ \
		(_eop)->eo_u128[0] = _addr[0]; \
		EFSYS_PROBE5(bar_reado, unsigned int, (_offset), \
			     uint32_t, (_eop)->eo_u32[3], \
			     uint32_t, (_eop)->eo_u32[2], \
			     uint32_t, (_eop)->eo_u32[1], \
			     uint32_t, (_eop)->eo_u32[0]); \
		_NOTE(CONSTANTCONDITION); \
		if (_lock) \
			SFC_BAR_UNLOCK(_esbp); \
		_NOTE(CONSTANTCONDITION); \
	} while (B_FALSE)
/*
 * Write a dword to the BAR. _lock selects locking.
 * NOTE(review): "if (_lock)" guards restored — confirm vs upstream.
 */
#define EFSYS_BAR_WRITED(_esbp, _offset, _edp, _lock) \
	do { \
		volatile uint8_t *_base = (_esbp)->esb_base; \
		volatile uint32_t *_addr; \
		_NOTE(CONSTANTCONDITION); \
		SFC_EFX_ASSERT(EFX_IS_P2ALIGNED(size_t, _offset, \
						sizeof(efx_dword_t))); \
		_NOTE(CONSTANTCONDITION); \
		if (_lock) \
			SFC_BAR_LOCK(_esbp); \
		EFSYS_PROBE2(bar_writed, unsigned int, (_offset), \
			     uint32_t, (_edp)->ed_u32[0]); \
		_addr = (volatile uint32_t *)(_base + (_offset)); \
		rte_write32_relaxed((_edp)->ed_u32[0], _addr); \
		_NOTE(CONSTANTCONDITION); \
		if (_lock) \
			SFC_BAR_UNLOCK(_esbp); \
		_NOTE(CONSTANTCONDITION); \
	} while (B_FALSE)
/* Write a qword to the BAR; always takes the BAR spinlock */
#define EFSYS_BAR_WRITEQ(_esbp, _offset, _eqp) \
	do { \
		volatile uint8_t *_base = (_esbp)->esb_base; \
		volatile uint64_t *_addr; \
		_NOTE(CONSTANTCONDITION); \
		SFC_EFX_ASSERT(EFX_IS_P2ALIGNED(size_t, _offset, \
						sizeof(efx_qword_t))); \
		SFC_BAR_LOCK(_esbp); \
		EFSYS_PROBE3(bar_writeq, unsigned int, (_offset), \
			     uint32_t, (_eqp)->eq_u32[1], \
			     uint32_t, (_eqp)->eq_u32[0]); \
		_addr = (volatile uint64_t *)(_base + (_offset)); \
		rte_write64_relaxed((_eqp)->eq_u64[0], _addr); \
		SFC_BAR_UNLOCK(_esbp); \
		_NOTE(CONSTANTCONDITION); \
	} while (B_FALSE)
/*
 * Guarantees 64bit aligned 64bit writes to write combined BAR mapping
 * (required by PIO hardware).
 *
 * Neither VFIO, nor UIO, nor NIC UIO (on FreeBSD) support
 * write-combined memory mapped to user-land, so just abort if used.
 */
#define EFSYS_BAR_WC_WRITEQ(_esbp, _offset, _eqp) \
	do { \
		rte_panic("Write-combined BAR access not supported"); \
	} while (B_FALSE)
/*
 * Write an oword to the BAR. _lock selects locking.
 * NOTE(review): "if (_lock)" guards restored — confirm vs upstream.
 */
#define EFSYS_BAR_WRITEO(_esbp, _offset, _eop, _lock) \
	do { \
		volatile uint8_t *_base = (_esbp)->esb_base; \
		volatile __m128i *_addr; \
		_NOTE(CONSTANTCONDITION); \
		SFC_EFX_ASSERT(EFX_IS_P2ALIGNED(size_t, _offset, \
						sizeof(efx_oword_t))); \
		_NOTE(CONSTANTCONDITION); \
		if (_lock) \
			SFC_BAR_LOCK(_esbp); \
		EFSYS_PROBE5(bar_writeo, unsigned int, (_offset), \
			     uint32_t, (_eop)->eo_u32[3], \
			     uint32_t, (_eop)->eo_u32[2], \
			     uint32_t, (_eop)->eo_u32[1], \
			     uint32_t, (_eop)->eo_u32[0]); \
		_addr = (volatile __m128i *)(_base + (_offset)); \
		/* There is no rte_write128_relaxed() yet */ \
		_addr[0] = (_eop)->eo_u128[0]; \
		_NOTE(CONSTANTCONDITION); \
		if (_lock) \
			SFC_BAR_UNLOCK(_esbp); \
		_NOTE(CONSTANTCONDITION); \
	} while (B_FALSE)
/* Use the standard octo-word write for doorbell writes (no locking) */
#define EFSYS_BAR_DOORBELL_WRITEO(_esbp, _offset, _eop) \
	do { \
		EFSYS_BAR_WRITEO((_esbp), (_offset), (_eop), B_FALSE); \
		_NOTE(CONSTANTCONDITION); \
	} while (B_FALSE)
/*
 * Busy-wait for _us microseconds.
 * NOTE(review): delay call was lost in this copy; restored as
 * rte_delay_us() (declared via rte_cycles.h) — confirm vs upstream.
 */
#define EFSYS_SPIN(_us) \
	do { \
		rte_delay_us(_us); \
		_NOTE(CONSTANTCONDITION); \
	} while (B_FALSE)

/* There is no true sleep primitive here; fall back to spinning */
#define EFSYS_SLEEP EFSYS_SPIN
577 #define EFSYS_MEM_READ_BARRIER() rte_rmb()
578 #define EFSYS_PIO_WRITE_BARRIER() rte_io_wmb()
/*
 * DPDK does not provide any DMA syncing API, and no PMD drivers
 * have any traces of explicit DMA syncing.
 * DMA mapping is assumed to be coherent.
 */
588 #define EFSYS_DMA_SYNC_FOR_KERNEL(_esmp, _offset, _size) ((void)0)
/* Just avoid store and (implicit) compiler reordering */
591 #define EFSYS_DMA_SYNC_FOR_DEVICE(_esmp, _offset, _size) rte_wmb()
/* Timestamps are microseconds since an arbitrary epoch */
typedef uint64_t efsys_timestamp_t;

/*
 * Sample the timer and convert cycles to microseconds.
 * NOTE(review): cycles * 1000000 may overflow uint64_t for very large
 * uptimes/high timer rates — acceptable upstream, flagged for review.
 */
#define EFSYS_TIMESTAMP(_usp) \
	do { \
		*(_usp) = rte_get_timer_cycles() * 1000000 / \
			rte_get_timer_hz(); \
		_NOTE(CONSTANTCONDITION); \
	} while (B_FALSE)
/*
 * Allocate zeroed memory; _esip is unused on this platform. Callers
 * are expected to check (_p) for NULL (libefx contract).
 */
#define EFSYS_KMEM_ALLOC(_esip, _size, _p) \
	do { \
		(void)(_esip); \
		(_p) = rte_zmalloc("sfc", (_size), 0); \
		_NOTE(CONSTANTCONDITION); \
	} while (B_FALSE)

/*
 * Free memory allocated by EFSYS_KMEM_ALLOC.
 * NOTE(review): rte_free() call was lost in this copy; restored —
 * confirm against upstream.
 */
#define EFSYS_KMEM_FREE(_esip, _size, _p) \
	do { \
		(void)(_esip); \
		(void)(_size); \
		rte_free((void *)(_p)); \
		_NOTE(CONSTANTCONDITION); \
	} while (B_FALSE)
623 typedef rte_spinlock_t efsys_lock_t;
625 #define SFC_EFSYS_LOCK_INIT(_eslp, _ifname, _label) \
626 rte_spinlock_init((_eslp))
627 #define SFC_EFSYS_LOCK_DESTROY(_eslp) ((void)0)
628 #define SFC_EFSYS_LOCK(_eslp) \
629 rte_spinlock_lock((_eslp))
630 #define SFC_EFSYS_UNLOCK(_eslp) \
631 rte_spinlock_unlock((_eslp))
632 #define SFC_EFSYS_LOCK_ASSERT_OWNED(_eslp) \
633 SFC_EFX_ASSERT(rte_spinlock_is_locked((_eslp)))
/*
 * Spinlocks carry no saved interrupt state; the state word is a magic
 * value used only to catch mismatched EFSYS_LOCK/EFSYS_UNLOCK pairs.
 */
typedef int efsys_lock_state_t;

#define EFSYS_LOCK_MAGIC 0x000010c4

#define EFSYS_LOCK(_lockp, _state) \
	do { \
		SFC_EFSYS_LOCK(_lockp); \
		(_state) = EFSYS_LOCK_MAGIC; \
		_NOTE(CONSTANTCONDITION); \
	} while (B_FALSE)

#define EFSYS_UNLOCK(_lockp, _state) \
	do { \
		SFC_EFX_ASSERT((_state) == EFSYS_LOCK_MAGIC); \
		SFC_EFSYS_UNLOCK(_lockp); \
		_NOTE(CONSTANTCONDITION); \
	} while (B_FALSE)
/* Statistics counters are plain 64bit values */
typedef uint64_t efsys_stat_t;

#define EFSYS_STAT_INCR(_knp, _delta) \
	do { \
		*(_knp) += (_delta); \
		_NOTE(CONSTANTCONDITION); \
	} while (B_FALSE)

#define EFSYS_STAT_DECR(_knp, _delta) \
	do { \
		*(_knp) -= (_delta); \
		_NOTE(CONSTANTCONDITION); \
	} while (B_FALSE)

/* NOTE(review): assignment body restored — confirm against upstream */
#define EFSYS_STAT_SET(_knp, _val) \
	do { \
		*(_knp) = (_val); \
		_NOTE(CONSTANTCONDITION); \
	} while (B_FALSE)

/* Hardware reports values little-endian; convert to host byte order */
#define EFSYS_STAT_SET_QWORD(_knp, _valp) \
	do { \
		*(_knp) = rte_le_to_cpu_64((_valp)->eq_u64[0]); \
		_NOTE(CONSTANTCONDITION); \
	} while (B_FALSE)

#define EFSYS_STAT_SET_DWORD(_knp, _valp) \
	do { \
		*(_knp) = rte_le_to_cpu_32((_valp)->ed_u32[0]); \
		_NOTE(CONSTANTCONDITION); \
	} while (B_FALSE)

#define EFSYS_STAT_INCR_QWORD(_knp, _valp) \
	do { \
		*(_knp) += rte_le_to_cpu_64((_valp)->eq_u64[0]); \
		_NOTE(CONSTANTCONDITION); \
	} while (B_FALSE)

#define EFSYS_STAT_SUBR_QWORD(_knp, _valp) \
	do { \
		*(_knp) -= rte_le_to_cpu_64((_valp)->eq_u64[0]); \
		_NOTE(CONSTANTCONDITION); \
	} while (B_FALSE)
/*
 * Fatal-interrupt error reporting; only compiled in when fatal
 * interrupt decoding is enabled (currently 0). Missing #endif and
 * macro wrapper restored.
 */
#if EFSYS_OPT_DECODE_INTR_FATAL
#define EFSYS_ERR(_esip, _code, _dword0, _dword1) \
	do { \
		(void)(_esip); \
		SFC_EFX_LOG(ERR, "FATAL ERROR #%u (0x%08x%08x)", \
			(_code), (_dword0), (_dword1)); \
		_NOTE(CONSTANTCONDITION); \
	} while (B_FALSE)
#endif
/*
 * RTE_VERIFY from DPDK treats expressions with the % operator
 * incorrectly, so we re-implement it here.
 */
/*
 * In debug builds a failed assertion panics with file position and the
 * stringized expression; in release builds the expression is evaluated
 * (side effects preserved) and its value discarded. Missing do/while
 * wrapper and #else/#endif restored.
 */
#ifdef RTE_LIBRTE_SFC_EFX_DEBUG
#define EFSYS_ASSERT(_exp) \
	do { \
		if (unlikely(!(_exp))) \
			rte_panic("line %d\tassert \"%s\" failed\n", \
				  __LINE__, (#_exp)); \
	} while (0)
#else
#define EFSYS_ASSERT(_exp) (void)(_exp)
#endif

/* Three-operand asserts: compare after casting both sides to _t */
#define EFSYS_ASSERT3(_x, _op, _y, _t) EFSYS_ASSERT((_t)(_x) _op (_t)(_y))

#define EFSYS_ASSERT3U(_x, _op, _y) EFSYS_ASSERT3(_x, _op, _y, uint64_t)
#define EFSYS_ASSERT3S(_x, _op, _y) EFSYS_ASSERT3(_x, _op, _y, int64_t)
#define EFSYS_ASSERT3P(_x, _op, _y) EFSYS_ASSERT3(_x, _op, _y, uintptr_t)
735 #define EFSYS_HAS_ROTL_DWORD 0
741 #endif /* _SFC_COMMON_EFSYS_H */