/* SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 2016-2018 Solarflare Communications Inc.
 *
 * This software was jointly developed between OKTET Labs (under contract
 * for Solarflare) and Solarflare Communications, Inc.
 */
10 #ifndef _SFC_COMMON_EFSYS_H
11 #define _SFC_COMMON_EFSYS_H
#include <stdbool.h>

#include <rte_spinlock.h>
#include <rte_byteorder.h>
#include <rte_debug.h>
#include <rte_memzone.h>
#include <rte_memory.h>
#include <rte_memcpy.h>
#include <rte_cycles.h>
#include <rte_prefetch.h>
#include <rte_common.h>
#include <rte_malloc.h>

#include "sfc_debug.h"
/* Basic integer/vector capabilities advertised to libefx */
#define EFSYS_HAS_UINT64 1
#define EFSYS_USE_UINT64 1
/* SSE2 128-bit loads/stores are used for oword (16-byte) accesses below */
#define EFSYS_HAS_SSE2_M128 1
/* Map DPDK byte-order detection onto the libefx endianness flags */
#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
#define EFSYS_IS_BIG_ENDIAN 1
#define EFSYS_IS_LITTLE_ENDIAN 0
#elif RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
#define EFSYS_IS_BIG_ENDIAN 0
#define EFSYS_IS_LITTLE_ENDIAN 1
#else
#error "Cannot determine system endianness"
#endif
47 #include "efx_types.h"
/* libefx uses Solaris-style boolean_t / B_FALSE / B_TRUE */
typedef bool boolean_t;

#ifndef B_FALSE
#define B_FALSE false
#endif
#ifndef B_TRUE
#define B_TRUE true
#endif
/*
 * RTE_MAX() and RTE_MIN() cannot be used since braced-group within
 * expression allowed only inside a function, but MAX() is used as
 * a number of elements in array.
 */
#ifndef MAX
#define MAX(v1, v2) ((v1) > (v2) ? (v1) : (v2))
#endif
#ifndef MIN
#define MIN(v1, v2) ((v1) < (v2) ? (v1) : (v2))
#endif
/* There are macros for alignment in DPDK, but we need to make a proper
 * correspondence here, if we want to re-use them at all
 */
/* True if v is aligned to a (a must be a power of 2) */
#define IS_P2ALIGNED(v, a) ((((uintptr_t)(v)) & ((uintptr_t)(a) - 1)) == 0)

/* Round x up to the next multiple of align (align must be a power of 2) */
#define P2ROUNDUP(x, align) (-(-(x) & -(align)))

/* Round _x down to a multiple of _a (_a must be a power of 2) */
#define P2ALIGN(_x, _a) ((_x) & -(_a))

/* True if x is a power of 2 */
#define ISP2(x) rte_is_power_of_2(x)
/* ENOTACTIVE is not a POSIX errno; map it to the closest available code */
#define ENOTACTIVE ENOTCONN
/* Prefetch data that will be read several times: keep it in all caches */
static inline void
prefetch_read_many(const volatile void *addr)
{
	rte_prefetch0(addr);
}

/* Prefetch data read only once: non-temporal hint avoids cache pollution */
static inline void
prefetch_read_once(const volatile void *addr)
{
	rte_prefetch_non_temporal(addr);
}
/* Modifiers used for Windows builds (MS SAL annotations): expand to nothing */
#define __in
#define __in_opt
#define __in_ecount(_n)
#define __in_ecount_opt(_n)
#define __in_bcount(_n)
#define __in_bcount_opt(_n)

#define __out
#define __out_opt
#define __out_ecount(_n)
#define __out_ecount_opt(_n)
#define __out_bcount(_n)
#define __out_bcount_opt(_n)
#define __out_bcount_part(_n, _l)
#define __out_bcount_part_opt(_n, _l)

#define __deref_out

#define __inout
#define __inout_opt
#define __inout_ecount(_n)
#define __inout_ecount_opt(_n)
#define __inout_bcount(_n)
#define __inout_bcount_opt(_n)
#define __inout_bcount_full_opt(_n)

#define __deref_out_bcount_opt(n)

#define __checkReturn
#define __success(_x)

#define __drv_when(_p, _c)
/* Code inclusion options */
/* Export human-readable names for registers/statistics */
#define EFSYS_OPT_NAMES 1
/* Disable SFN5xxx/SFN6xxx since it requires specific support in the PMD */
#define EFSYS_OPT_SIENA 0
/* Enable SFN7xxx support */
#define EFSYS_OPT_HUNTINGTON 1
/* Enable SFN8xxx support */
#define EFSYS_OPT_MEDFORD 1
/* Disable Medford2 (X2xxx) support (not supported yet) */
#define EFSYS_OPT_MEDFORD2 0
/* Verify register writes in debug builds only (costly on the fast path) */
#ifdef RTE_LIBRTE_SFC_EFX_DEBUG
#define EFSYS_OPT_CHECK_REG 1
#else
#define EFSYS_OPT_CHECK_REG 0
#endif
/* MCDI is required for SFN7xxx and SFN8xxx */
#define EFSYS_OPT_MCDI 1
#define EFSYS_OPT_MCDI_LOGGING 1
#define EFSYS_OPT_MCDI_PROXY_AUTH 1

#define EFSYS_OPT_MAC_STATS 1

#define EFSYS_OPT_LOOPBACK 0

#define EFSYS_OPT_MON_MCDI 0
#define EFSYS_OPT_MON_STATS 0

#define EFSYS_OPT_PHY_STATS 0
#define EFSYS_OPT_BIST 0
#define EFSYS_OPT_PHY_LED_CONTROL 0
#define EFSYS_OPT_PHY_FLAGS 0

#define EFSYS_OPT_VPD 0
#define EFSYS_OPT_NVRAM 0
#define EFSYS_OPT_BOOTCFG 0

#define EFSYS_OPT_DIAG 0
#define EFSYS_OPT_RX_SCALE 1
#define EFSYS_OPT_QSTATS 0
/* Filters support is required for SFN7xxx and SFN8xxx */
#define EFSYS_OPT_FILTER 1
#define EFSYS_OPT_RX_SCATTER 0

#define EFSYS_OPT_EV_PREFETCH 0

#define EFSYS_OPT_DECODE_INTR_FATAL 0

#define EFSYS_OPT_LICENSING 0

#define EFSYS_OPT_ALLOW_UNCONFIGURED_NIC 0

#define EFSYS_OPT_RX_PACKED_STREAM 0

#define EFSYS_OPT_TUNNEL 1

/* Opaque client identifier passed through libefx callbacks
 * (presumably the sfc adapter context — confirm against sfc.h)
 */
typedef struct __efsys_identifier_s efsys_identifier_t;
/* Tracing probes are not wired up in the DPDK port: expand to nothing */
#define EFSYS_PROBE(_name) \
	do { } while (B_FALSE)

#define EFSYS_PROBE1(_name, _type1, _arg1) \
	do { } while (B_FALSE)

#define EFSYS_PROBE2(_name, _type1, _arg1, _type2, _arg2) \
	do { } while (B_FALSE)

#define EFSYS_PROBE3(_name, _type1, _arg1, _type2, _arg2, \
		     _type3, _arg3) \
	do { } while (B_FALSE)

#define EFSYS_PROBE4(_name, _type1, _arg1, _type2, _arg2, \
		     _type3, _arg3, _type4, _arg4) \
	do { } while (B_FALSE)

#define EFSYS_PROBE5(_name, _type1, _arg1, _type2, _arg2, \
		     _type3, _arg3, _type4, _arg4, _type5, _arg5) \
	do { } while (B_FALSE)

#define EFSYS_PROBE6(_name, _type1, _arg1, _type2, _arg2, \
		     _type3, _arg3, _type4, _arg4, _type5, _arg5, \
		     _type6, _arg6) \
	do { } while (B_FALSE)

#define EFSYS_PROBE7(_name, _type1, _arg1, _type2, _arg2, \
		     _type3, _arg3, _type4, _arg4, _type5, _arg5, \
		     _type6, _arg6, _type7, _arg7) \
	do { } while (B_FALSE)
240 typedef rte_iova_t efsys_dma_addr_t;
242 typedef struct efsys_mem_s {
243 const struct rte_memzone *esm_mz;
245 * Ideally it should have volatile qualifier to denote that
246 * the memory may be updated by someone else. However, it adds
247 * qualifier discard warnings when the pointer or its derivative
248 * is passed to memset() or rte_mov16().
249 * So, skip the qualifier here, but make sure that it is added
250 * below in access macros.
253 efsys_dma_addr_t esm_addr;
/* Zero-fill the start of a DMA region */
#define EFSYS_MEM_ZERO(_esmp, _size) \
	do { \
		(void)memset((void *)(_esmp)->esm_base, 0, (_size)); \
		_NOTE(CONSTANTCONDITION); \
	} while (B_FALSE)
/* Read a naturally-aligned dword (32 bits) from DMA memory */
#define EFSYS_MEM_READD(_esmp, _offset, _edp) \
	do { \
		volatile uint8_t *_base = (_esmp)->esm_base; \
		volatile uint32_t *_addr; \
		_NOTE(CONSTANTCONDITION); \
		SFC_ASSERT(IS_P2ALIGNED(_offset, sizeof(efx_dword_t))); \
		_addr = (volatile uint32_t *)(_base + (_offset)); \
		(_edp)->ed_u32[0] = _addr[0]; \
		EFSYS_PROBE2(mem_readl, unsigned int, (_offset), \
			     uint32_t, (_edp)->ed_u32[0]); \
		_NOTE(CONSTANTCONDITION); \
	} while (B_FALSE)
/* Read a naturally-aligned qword (64 bits) from DMA memory */
#define EFSYS_MEM_READQ(_esmp, _offset, _eqp) \
	do { \
		volatile uint8_t *_base = (_esmp)->esm_base; \
		volatile uint64_t *_addr; \
		_NOTE(CONSTANTCONDITION); \
		SFC_ASSERT(IS_P2ALIGNED(_offset, sizeof(efx_qword_t))); \
		_addr = (volatile uint64_t *)(_base + (_offset)); \
		(_eqp)->eq_u64[0] = _addr[0]; \
		EFSYS_PROBE3(mem_readq, unsigned int, (_offset), \
			     uint32_t, (_eqp)->eq_u32[1], \
			     uint32_t, (_eqp)->eq_u32[0]); \
		_NOTE(CONSTANTCONDITION); \
	} while (B_FALSE)
/* Read a naturally-aligned oword (128 bits, via SSE2) from DMA memory */
#define EFSYS_MEM_READO(_esmp, _offset, _eop) \
	do { \
		volatile uint8_t *_base = (_esmp)->esm_base; \
		volatile __m128i *_addr; \
		_NOTE(CONSTANTCONDITION); \
		SFC_ASSERT(IS_P2ALIGNED(_offset, sizeof(efx_oword_t))); \
		_addr = (volatile __m128i *)(_base + (_offset)); \
		(_eop)->eo_u128[0] = _addr[0]; \
		EFSYS_PROBE5(mem_reado, unsigned int, (_offset), \
			     uint32_t, (_eop)->eo_u32[3], \
			     uint32_t, (_eop)->eo_u32[2], \
			     uint32_t, (_eop)->eo_u32[1], \
			     uint32_t, (_eop)->eo_u32[0]); \
		_NOTE(CONSTANTCONDITION); \
	} while (B_FALSE)
/* Write a naturally-aligned dword (32 bits) to DMA memory */
#define EFSYS_MEM_WRITED(_esmp, _offset, _edp) \
	do { \
		volatile uint8_t *_base = (_esmp)->esm_base; \
		volatile uint32_t *_addr; \
		_NOTE(CONSTANTCONDITION); \
		SFC_ASSERT(IS_P2ALIGNED(_offset, sizeof(efx_dword_t))); \
		EFSYS_PROBE2(mem_writed, unsigned int, (_offset), \
			     uint32_t, (_edp)->ed_u32[0]); \
		_addr = (volatile uint32_t *)(_base + (_offset)); \
		_addr[0] = (_edp)->ed_u32[0]; \
		_NOTE(CONSTANTCONDITION); \
	} while (B_FALSE)
/* Write a naturally-aligned qword (64 bits) to DMA memory */
#define EFSYS_MEM_WRITEQ(_esmp, _offset, _eqp) \
	do { \
		volatile uint8_t *_base = (_esmp)->esm_base; \
		volatile uint64_t *_addr; \
		_NOTE(CONSTANTCONDITION); \
		SFC_ASSERT(IS_P2ALIGNED(_offset, sizeof(efx_qword_t))); \
		EFSYS_PROBE3(mem_writeq, unsigned int, (_offset), \
			     uint32_t, (_eqp)->eq_u32[1], \
			     uint32_t, (_eqp)->eq_u32[0]); \
		_addr = (volatile uint64_t *)(_base + (_offset)); \
		_addr[0] = (_eqp)->eq_u64[0]; \
		_NOTE(CONSTANTCONDITION); \
	} while (B_FALSE)
/* Write a naturally-aligned oword (128 bits, via SSE2) to DMA memory */
#define EFSYS_MEM_WRITEO(_esmp, _offset, _eop) \
	do { \
		volatile uint8_t *_base = (_esmp)->esm_base; \
		volatile __m128i *_addr; \
		_NOTE(CONSTANTCONDITION); \
		SFC_ASSERT(IS_P2ALIGNED(_offset, sizeof(efx_oword_t))); \
		EFSYS_PROBE5(mem_writeo, unsigned int, (_offset), \
			     uint32_t, (_eop)->eo_u32[3], \
			     uint32_t, (_eop)->eo_u32[2], \
			     uint32_t, (_eop)->eo_u32[1], \
			     uint32_t, (_eop)->eo_u32[0]); \
		_addr = (volatile __m128i *)(_base + (_offset)); \
		_addr[0] = (_eop)->eo_u128[0]; \
		_NOTE(CONSTANTCONDITION); \
	} while (B_FALSE)
/* DMA (IOVA) address of the region as programmed into the NIC */
#define EFSYS_MEM_ADDR(_esmp) \
	((_esmp)->esm_addr)

/* True if the DMA region has not been allocated/mapped */
#define EFSYS_MEM_IS_NULL(_esmp) \
	((_esmp)->esm_base == NULL)
/* Prefetch a DMA memory location into all cache levels */
#define EFSYS_MEM_PREFETCH(_esmp, _offset) \
	do { \
		volatile uint8_t *_base = (_esmp)->esm_base; \
		rte_prefetch0(_base + (_offset)); \
	} while (B_FALSE)
393 typedef struct efsys_bar_s {
394 rte_spinlock_t esb_lock;
396 struct rte_pci_device *esb_dev;
398 * Ideally it should have volatile qualifier to denote that
399 * the memory may be updated by someone else. However, it adds
400 * qualifier discard warnings when the pointer or its derivative
401 * is passed to memset() or rte_mov16().
402 * So, skip the qualifier here, but make sure that it is added
403 * below in access macros.
/* BAR lock management; _ifname is unused in the DPDK port */
#define SFC_BAR_LOCK_INIT(_esbp, _ifname) \
	do { \
		rte_spinlock_init(&(_esbp)->esb_lock); \
		_NOTE(CONSTANTCONDITION); \
	} while (B_FALSE)

#define SFC_BAR_LOCK_DESTROY(_esbp) ((void)0)
#define SFC_BAR_LOCK(_esbp) rte_spinlock_lock(&(_esbp)->esb_lock)
#define SFC_BAR_UNLOCK(_esbp) rte_spinlock_unlock(&(_esbp)->esb_lock)
/* Read a dword register; take the BAR lock only when _lock is true */
#define EFSYS_BAR_READD(_esbp, _offset, _edp, _lock) \
	do { \
		volatile uint8_t *_base = (_esbp)->esb_base; \
		volatile uint32_t *_addr; \
		_NOTE(CONSTANTCONDITION); \
		SFC_ASSERT(IS_P2ALIGNED(_offset, sizeof(efx_dword_t))); \
		_NOTE(CONSTANTCONDITION); \
		if (_lock) \
			SFC_BAR_LOCK(_esbp); \
		_addr = (volatile uint32_t *)(_base + (_offset)); \
		rte_rmb(); \
		(_edp)->ed_u32[0] = rte_read32_relaxed(_addr); \
		EFSYS_PROBE2(bar_readd, unsigned int, (_offset), \
			     uint32_t, (_edp)->ed_u32[0]); \
		_NOTE(CONSTANTCONDITION); \
		if (_lock) \
			SFC_BAR_UNLOCK(_esbp); \
	} while (B_FALSE)
/* Read a qword register; always serialized with the BAR lock */
#define EFSYS_BAR_READQ(_esbp, _offset, _eqp) \
	do { \
		volatile uint8_t *_base = (_esbp)->esb_base; \
		volatile uint64_t *_addr; \
		_NOTE(CONSTANTCONDITION); \
		SFC_ASSERT(IS_P2ALIGNED(_offset, sizeof(efx_qword_t))); \
		SFC_BAR_LOCK(_esbp); \
		_addr = (volatile uint64_t *)(_base + (_offset)); \
		rte_rmb(); \
		(_eqp)->eq_u64[0] = rte_read64_relaxed(_addr); \
		EFSYS_PROBE3(bar_readq, unsigned int, (_offset), \
			     uint32_t, (_eqp)->eq_u32[1], \
			     uint32_t, (_eqp)->eq_u32[0]); \
		SFC_BAR_UNLOCK(_esbp); \
		_NOTE(CONSTANTCONDITION); \
	} while (B_FALSE)
/* Read an oword (128-bit) register; optional BAR locking */
#define EFSYS_BAR_READO(_esbp, _offset, _eop, _lock) \
	do { \
		volatile uint8_t *_base = (_esbp)->esb_base; \
		volatile __m128i *_addr; \
		_NOTE(CONSTANTCONDITION); \
		SFC_ASSERT(IS_P2ALIGNED(_offset, sizeof(efx_oword_t))); \
		_NOTE(CONSTANTCONDITION); \
		if (_lock) \
			SFC_BAR_LOCK(_esbp); \
		_addr = (volatile __m128i *)(_base + (_offset)); \
		rte_rmb(); \
		/* There is no rte_read128_relaxed() yet */ \
		(_eop)->eo_u128[0] = _addr[0]; \
		EFSYS_PROBE5(bar_reado, unsigned int, (_offset), \
			     uint32_t, (_eop)->eo_u32[3], \
			     uint32_t, (_eop)->eo_u32[2], \
			     uint32_t, (_eop)->eo_u32[1], \
			     uint32_t, (_eop)->eo_u32[0]); \
		_NOTE(CONSTANTCONDITION); \
		if (_lock) \
			SFC_BAR_UNLOCK(_esbp); \
	} while (B_FALSE)
/* Write a dword register; wmb() makes the store visible to the NIC */
#define EFSYS_BAR_WRITED(_esbp, _offset, _edp, _lock) \
	do { \
		volatile uint8_t *_base = (_esbp)->esb_base; \
		volatile uint32_t *_addr; \
		_NOTE(CONSTANTCONDITION); \
		SFC_ASSERT(IS_P2ALIGNED(_offset, sizeof(efx_dword_t))); \
		_NOTE(CONSTANTCONDITION); \
		if (_lock) \
			SFC_BAR_LOCK(_esbp); \
		EFSYS_PROBE2(bar_writed, unsigned int, (_offset), \
			     uint32_t, (_edp)->ed_u32[0]); \
		_addr = (volatile uint32_t *)(_base + (_offset)); \
		rte_write32_relaxed((_edp)->ed_u32[0], _addr); \
		rte_wmb(); \
		_NOTE(CONSTANTCONDITION); \
		if (_lock) \
			SFC_BAR_UNLOCK(_esbp); \
	} while (B_FALSE)
/* Write a qword register; always serialized with the BAR lock */
#define EFSYS_BAR_WRITEQ(_esbp, _offset, _eqp) \
	do { \
		volatile uint8_t *_base = (_esbp)->esb_base; \
		volatile uint64_t *_addr; \
		_NOTE(CONSTANTCONDITION); \
		SFC_ASSERT(IS_P2ALIGNED(_offset, sizeof(efx_qword_t))); \
		SFC_BAR_LOCK(_esbp); \
		EFSYS_PROBE3(bar_writeq, unsigned int, (_offset), \
			     uint32_t, (_eqp)->eq_u32[1], \
			     uint32_t, (_eqp)->eq_u32[0]); \
		_addr = (volatile uint64_t *)(_base + (_offset)); \
		rte_write64_relaxed((_eqp)->eq_u64[0], _addr); \
		rte_wmb(); \
		SFC_BAR_UNLOCK(_esbp); \
		_NOTE(CONSTANTCONDITION); \
	} while (B_FALSE)
/*
 * Guarantees 64bit aligned 64bit writes to write combined BAR mapping
 * (required by PIO hardware).
 *
 * Neither VFIO, nor UIO, nor NIC UIO (on FreeBSD) support
 * write-combined memory mapped to user-land, so just abort if used.
 */
#define EFSYS_BAR_WC_WRITEQ(_esbp, _offset, _eqp) \
	do { \
		rte_panic("Write-combined BAR access not supported"); \
	} while (B_FALSE)
/* Write an oword (128-bit) register; optional BAR locking */
#define EFSYS_BAR_WRITEO(_esbp, _offset, _eop, _lock) \
	do { \
		volatile uint8_t *_base = (_esbp)->esb_base; \
		volatile __m128i *_addr; \
		_NOTE(CONSTANTCONDITION); \
		SFC_ASSERT(IS_P2ALIGNED(_offset, sizeof(efx_oword_t))); \
		_NOTE(CONSTANTCONDITION); \
		if (_lock) \
			SFC_BAR_LOCK(_esbp); \
		EFSYS_PROBE5(bar_writeo, unsigned int, (_offset), \
			     uint32_t, (_eop)->eo_u32[3], \
			     uint32_t, (_eop)->eo_u32[2], \
			     uint32_t, (_eop)->eo_u32[1], \
			     uint32_t, (_eop)->eo_u32[0]); \
		_addr = (volatile __m128i *)(_base + (_offset)); \
		/* There is no rte_write128_relaxed() yet */ \
		_addr[0] = (_eop)->eo_u128[0]; \
		rte_wmb(); \
		_NOTE(CONSTANTCONDITION); \
		if (_lock) \
			SFC_BAR_UNLOCK(_esbp); \
	} while (B_FALSE)
/* Use the standard octo-word write for doorbell writes */
#define EFSYS_BAR_DOORBELL_WRITEO(_esbp, _offset, _eop) \
	do { \
		EFSYS_BAR_WRITEO((_esbp), (_offset), (_eop), B_FALSE); \
		_NOTE(CONSTANTCONDITION); \
	} while (B_FALSE)
/* Busy-wait for _us microseconds */
#define EFSYS_SPIN(_us) \
	do { \
		rte_delay_us(_us); \
		_NOTE(CONSTANTCONDITION); \
	} while (B_FALSE)

/* PMD context cannot sleep; sleeping is implemented as spinning */
#define EFSYS_SLEEP EFSYS_SPIN
/* Memory and PIO barriers mapped onto DPDK primitives */
#define EFSYS_MEM_READ_BARRIER() rte_rmb()
#define EFSYS_PIO_WRITE_BARRIER() rte_io_wmb()
/*
 * DPDK does not provide any DMA syncing API, and no PMD drivers
 * have any traces of explicit DMA syncing.
 * DMA mapping is assumed to be coherent.
 */
#define EFSYS_DMA_SYNC_FOR_KERNEL(_esmp, _offset, _size) ((void)0)

/* Just avoid store and compiler (implicitly) reordering */
#define EFSYS_DMA_SYNC_FOR_DEVICE(_esmp, _offset, _size) rte_wmb()
618 typedef uint64_t efsys_timestamp_t;
620 #define EFSYS_TIMESTAMP(_usp) \
622 *(_usp) = rte_get_timer_cycles() * 1000000 / \
623 rte_get_timer_hz(); \
624 _NOTE(CONSTANTCONDITION); \
/* Zeroed allocation for libefx; the identifier argument is unused */
#define EFSYS_KMEM_ALLOC(_esip, _size, _p) \
	do { \
		(void)(_esip); \
		(_p) = rte_zmalloc("sfc", (_size), 0); \
		_NOTE(CONSTANTCONDITION); \
	} while (B_FALSE)

#define EFSYS_KMEM_FREE(_esip, _size, _p) \
	do { \
		(void)(_esip); \
		(void)(_size); \
		rte_free((void *)(_p)); \
		_NOTE(CONSTANTCONDITION); \
	} while (B_FALSE)
/* Locks are plain DPDK spinlocks; the ifname/label hints are unused */
typedef rte_spinlock_t efsys_lock_t;

#define SFC_EFSYS_LOCK_INIT(_eslp, _ifname, _label) \
	rte_spinlock_init((_eslp))
#define SFC_EFSYS_LOCK_DESTROY(_eslp) ((void)0)
#define SFC_EFSYS_LOCK(_eslp) \
	rte_spinlock_lock((_eslp))
#define SFC_EFSYS_UNLOCK(_eslp) \
	rte_spinlock_unlock((_eslp))
/* Effective only in builds where SFC_ASSERT is enabled */
#define SFC_EFSYS_LOCK_ASSERT_OWNED(_eslp) \
	SFC_ASSERT(rte_spinlock_is_locked((_eslp)))
/* Lock "state" carries a magic value to catch unbalanced unlock calls */
typedef int efsys_lock_state_t;

#define EFSYS_LOCK_MAGIC 0x000010c4

#define EFSYS_LOCK(_lockp, _state) \
	do { \
		SFC_EFSYS_LOCK(_lockp); \
		(_state) = EFSYS_LOCK_MAGIC; \
		_NOTE(CONSTANTCONDITION); \
	} while (B_FALSE)

#define EFSYS_UNLOCK(_lockp, _state) \
	do { \
		SFC_ASSERT((_state) == EFSYS_LOCK_MAGIC); \
		SFC_EFSYS_UNLOCK(_lockp); \
		_NOTE(CONSTANTCONDITION); \
	} while (B_FALSE)
678 typedef uint64_t efsys_stat_t;
680 #define EFSYS_STAT_INCR(_knp, _delta) \
682 *(_knp) += (_delta); \
683 _NOTE(CONSTANTCONDITION); \
686 #define EFSYS_STAT_DECR(_knp, _delta) \
688 *(_knp) -= (_delta); \
689 _NOTE(CONSTANTCONDITION); \
692 #define EFSYS_STAT_SET(_knp, _val) \
695 _NOTE(CONSTANTCONDITION); \
698 #define EFSYS_STAT_SET_QWORD(_knp, _valp) \
700 *(_knp) = rte_le_to_cpu_64((_valp)->eq_u64[0]); \
701 _NOTE(CONSTANTCONDITION); \
704 #define EFSYS_STAT_SET_DWORD(_knp, _valp) \
706 *(_knp) = rte_le_to_cpu_32((_valp)->ed_u32[0]); \
707 _NOTE(CONSTANTCONDITION); \
710 #define EFSYS_STAT_INCR_QWORD(_knp, _valp) \
712 *(_knp) += rte_le_to_cpu_64((_valp)->eq_u64[0]); \
713 _NOTE(CONSTANTCONDITION); \
716 #define EFSYS_STAT_SUBR_QWORD(_knp, _valp) \
718 *(_knp) -= rte_le_to_cpu_64((_valp)->eq_u64[0]); \
719 _NOTE(CONSTANTCONDITION); \
/* Fatal-interrupt reporting; compiled only when decoding is enabled */
#if EFSYS_OPT_DECODE_INTR_FATAL
#define EFSYS_ERR(_esip, _code, _dword0, _dword1) \
	do { \
		(void)(_esip); \
		RTE_LOG(ERR, PMD, "FATAL ERROR #%u (0x%08x%08x)\n", \
			(_code), (_dword0), (_dword1)); \
		_NOTE(CONSTANTCONDITION); \
	} while (B_FALSE)
#endif
/* RTE_VERIFY from DPDK treats expressions with % operator incorrectly,
 * so we re-implement it here
 */
#ifdef RTE_LIBRTE_SFC_EFX_DEBUG
#define EFSYS_ASSERT(_exp) \
	do { \
		if (unlikely(!(_exp))) \
			rte_panic("line %d\tassert \"%s\" failed\n", \
				  __LINE__, (#_exp)); \
	} while (B_FALSE)
#else
/* Evaluate the expression to keep side effects, but do not check it */
#define EFSYS_ASSERT(_exp) (void)(_exp)
#endif
/* Typed three-operand asserts, e.g. EFSYS_ASSERT3U(a, <=, b) */
#define EFSYS_ASSERT3(_x, _op, _y, _t) EFSYS_ASSERT((_t)(_x) _op (_t)(_y))

#define EFSYS_ASSERT3U(_x, _op, _y) EFSYS_ASSERT3(_x, _op, _y, uint64_t)
#define EFSYS_ASSERT3S(_x, _op, _y) EFSYS_ASSERT3(_x, _op, _y, int64_t)
#define EFSYS_ASSERT3P(_x, _op, _y) EFSYS_ASSERT3(_x, _op, _y, uintptr_t)
/* No platform rotate-left primitive is provided */
#define EFSYS_HAS_ROTL_DWORD 0
764 #endif /* _SFC_COMMON_EFSYS_H */