1 /* SPDX-License-Identifier: BSD-3-Clause
3 * Copyright (c) 2016-2018 Solarflare Communications Inc.
6 * This software was jointly developed between OKTET Labs (under contract
7 * for Solarflare) and Solarflare Communications, Inc.
10 #ifndef _SFC_COMMON_EFSYS_H
11 #define _SFC_COMMON_EFSYS_H
15 #include <rte_spinlock.h>
16 #include <rte_byteorder.h>
17 #include <rte_debug.h>
18 #include <rte_memzone.h>
19 #include <rte_memory.h>
20 #include <rte_memcpy.h>
21 #include <rte_cycles.h>
22 #include <rte_prefetch.h>
23 #include <rte_common.h>
24 #include <rte_malloc.h>
28 #include "sfc_debug.h"
35 #define EFSYS_HAS_UINT64 1
36 #define EFSYS_USE_UINT64 1
37 #define EFSYS_HAS_SSE2_M128 1
39 #if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
40 #define EFSYS_IS_BIG_ENDIAN 1
41 #define EFSYS_IS_LITTLE_ENDIAN 0
42 #elif RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
43 #define EFSYS_IS_BIG_ENDIAN 0
44 #define EFSYS_IS_LITTLE_ENDIAN 1
46 #error "Cannot determine system endianness"
48 #include "efx_types.h"
51 typedef bool boolean_t;
61 * RTE_MAX() and RTE_MIN() cannot be used since braced-group within
62 * an expression is allowed only inside a function, but MAX() is used as
63 * a number of elements in array.
66 #define MAX(v1, v2) ((v1) > (v2) ? (v1) : (v2))
69 #define MIN(v1, v2) ((v1) < (v2) ? (v1) : (v2))
72 /* There are macros for alignment in DPDK, but we need to make a proper
73 * correspondence here, if we want to re-use them at all
76 #define IS_P2ALIGNED(v, a) ((((uintptr_t)(v)) & ((uintptr_t)(a) - 1)) == 0)
80 #define P2ROUNDUP(x, align) (-(-(x) & -(align)))
84 #define P2ALIGN(_x, _a) ((_x) & -(_a))
88 #define ISP2(x) rte_is_power_of_2(x)
91 #define ENOTACTIVE ENOTCONN
94 prefetch_read_many(const volatile void *addr)
100 prefetch_read_once(const volatile void *addr)
102 rte_prefetch_non_temporal(addr);
105 /* Code inclusion options */
108 #define EFSYS_OPT_NAMES 1
110 /* Disable SFN5xxx/SFN6xxx since it requires specific support in the PMD */
111 #define EFSYS_OPT_SIENA 0
112 /* Enable SFN7xxx support */
113 #define EFSYS_OPT_HUNTINGTON 1
114 /* Enable SFN8xxx support */
115 #define EFSYS_OPT_MEDFORD 1
116 /* Enable SFN2xxx support */
117 #define EFSYS_OPT_MEDFORD2 1
118 #ifdef RTE_LIBRTE_SFC_EFX_DEBUG
119 #define EFSYS_OPT_CHECK_REG 1
121 #define EFSYS_OPT_CHECK_REG 0
124 /* MCDI is required for SFN7xxx and SFN8xx */
125 #define EFSYS_OPT_MCDI 1
126 #define EFSYS_OPT_MCDI_LOGGING 1
127 #define EFSYS_OPT_MCDI_PROXY_AUTH 1
129 #define EFSYS_OPT_MAC_STATS 1
131 #define EFSYS_OPT_LOOPBACK 1
133 #define EFSYS_OPT_MON_MCDI 0
134 #define EFSYS_OPT_MON_STATS 0
136 #define EFSYS_OPT_PHY_STATS 0
137 #define EFSYS_OPT_BIST 0
138 #define EFSYS_OPT_PHY_LED_CONTROL 0
139 #define EFSYS_OPT_PHY_FLAGS 0
141 #define EFSYS_OPT_VPD 0
142 #define EFSYS_OPT_NVRAM 0
143 #define EFSYS_OPT_BOOTCFG 0
144 #define EFSYS_OPT_IMAGE_LAYOUT 0
146 #define EFSYS_OPT_DIAG 0
147 #define EFSYS_OPT_RX_SCALE 1
148 #define EFSYS_OPT_QSTATS 0
149 /* Filters support is required for SFN7xxx and SFN8xx */
150 #define EFSYS_OPT_FILTER 1
151 #define EFSYS_OPT_RX_SCATTER 0
153 #define EFSYS_OPT_EV_PREFETCH 0
155 #define EFSYS_OPT_DECODE_INTR_FATAL 0
157 #define EFSYS_OPT_LICENSING 0
159 #define EFSYS_OPT_ALLOW_UNCONFIGURED_NIC 0
161 #define EFSYS_OPT_RX_PACKED_STREAM 0
163 #define EFSYS_OPT_RX_ES_SUPER_BUFFER 1
165 #define EFSYS_OPT_TUNNEL 1
167 #define EFSYS_OPT_FW_SUBVARIANT_AWARE 1
169 #define EFSYS_OPT_EVB 0
171 #define EFSYS_OPT_MCDI_PROXY_AUTH_SERVER 0
175 typedef struct __efsys_identifier_s efsys_identifier_t;
178 #define EFSYS_PROBE(_name) \
181 #define EFSYS_PROBE1(_name, _type1, _arg1) \
184 #define EFSYS_PROBE2(_name, _type1, _arg1, _type2, _arg2) \
187 #define EFSYS_PROBE3(_name, _type1, _arg1, _type2, _arg2, \
191 #define EFSYS_PROBE4(_name, _type1, _arg1, _type2, _arg2, \
192 _type3, _arg3, _type4, _arg4) \
195 #define EFSYS_PROBE5(_name, _type1, _arg1, _type2, _arg2, \
196 _type3, _arg3, _type4, _arg4, _type5, _arg5) \
199 #define EFSYS_PROBE6(_name, _type1, _arg1, _type2, _arg2, \
200 _type3, _arg3, _type4, _arg4, _type5, _arg5, \
204 #define EFSYS_PROBE7(_name, _type1, _arg1, _type2, _arg2, \
205 _type3, _arg3, _type4, _arg4, _type5, _arg5, \
206 _type6, _arg6, _type7, _arg7) \
212 typedef rte_iova_t efsys_dma_addr_t;
214 typedef struct efsys_mem_s {
215 const struct rte_memzone *esm_mz;
217 * Ideally it should have volatile qualifier to denote that
218 * the memory may be updated by someone else. However, it adds
219 * qualifier discard warnings when the pointer or its derivative
220 * is passed to memset() or rte_mov16().
221 * So, skip the qualifier here, but make sure that it is added
222 * below in access macros.
225 efsys_dma_addr_t esm_addr;
229 #define EFSYS_MEM_ZERO(_esmp, _size) \
231 (void)memset((void *)(_esmp)->esm_base, 0, (_size)); \
233 _NOTE(CONSTANTCONDITION); \
236 #define EFSYS_MEM_READD(_esmp, _offset, _edp) \
238 volatile uint8_t *_base = (_esmp)->esm_base; \
239 volatile uint32_t *_addr; \
241 _NOTE(CONSTANTCONDITION); \
242 SFC_ASSERT(IS_P2ALIGNED(_offset, sizeof(efx_dword_t))); \
244 _addr = (volatile uint32_t *)(_base + (_offset)); \
245 (_edp)->ed_u32[0] = _addr[0]; \
247 EFSYS_PROBE2(mem_readl, unsigned int, (_offset), \
248 uint32_t, (_edp)->ed_u32[0]); \
250 _NOTE(CONSTANTCONDITION); \
253 #define EFSYS_MEM_READQ(_esmp, _offset, _eqp) \
255 volatile uint8_t *_base = (_esmp)->esm_base; \
256 volatile uint64_t *_addr; \
258 _NOTE(CONSTANTCONDITION); \
259 SFC_ASSERT(IS_P2ALIGNED(_offset, sizeof(efx_qword_t))); \
261 _addr = (volatile uint64_t *)(_base + (_offset)); \
262 (_eqp)->eq_u64[0] = _addr[0]; \
264 EFSYS_PROBE3(mem_readq, unsigned int, (_offset), \
265 uint32_t, (_eqp)->eq_u32[1], \
266 uint32_t, (_eqp)->eq_u32[0]); \
268 _NOTE(CONSTANTCONDITION); \
271 #define EFSYS_MEM_READO(_esmp, _offset, _eop) \
273 volatile uint8_t *_base = (_esmp)->esm_base; \
274 volatile __m128i *_addr; \
276 _NOTE(CONSTANTCONDITION); \
277 SFC_ASSERT(IS_P2ALIGNED(_offset, sizeof(efx_oword_t))); \
279 _addr = (volatile __m128i *)(_base + (_offset)); \
280 (_eop)->eo_u128[0] = _addr[0]; \
282 EFSYS_PROBE5(mem_reado, unsigned int, (_offset), \
283 uint32_t, (_eop)->eo_u32[3], \
284 uint32_t, (_eop)->eo_u32[2], \
285 uint32_t, (_eop)->eo_u32[1], \
286 uint32_t, (_eop)->eo_u32[0]); \
288 _NOTE(CONSTANTCONDITION); \
292 #define EFSYS_MEM_WRITED(_esmp, _offset, _edp) \
294 volatile uint8_t *_base = (_esmp)->esm_base; \
295 volatile uint32_t *_addr; \
297 _NOTE(CONSTANTCONDITION); \
298 SFC_ASSERT(IS_P2ALIGNED(_offset, sizeof(efx_dword_t))); \
300 EFSYS_PROBE2(mem_writed, unsigned int, (_offset), \
301 uint32_t, (_edp)->ed_u32[0]); \
303 _addr = (volatile uint32_t *)(_base + (_offset)); \
304 _addr[0] = (_edp)->ed_u32[0]; \
306 _NOTE(CONSTANTCONDITION); \
309 #define EFSYS_MEM_WRITEQ(_esmp, _offset, _eqp) \
311 volatile uint8_t *_base = (_esmp)->esm_base; \
312 volatile uint64_t *_addr; \
314 _NOTE(CONSTANTCONDITION); \
315 SFC_ASSERT(IS_P2ALIGNED(_offset, sizeof(efx_qword_t))); \
317 EFSYS_PROBE3(mem_writeq, unsigned int, (_offset), \
318 uint32_t, (_eqp)->eq_u32[1], \
319 uint32_t, (_eqp)->eq_u32[0]); \
321 _addr = (volatile uint64_t *)(_base + (_offset)); \
322 _addr[0] = (_eqp)->eq_u64[0]; \
324 _NOTE(CONSTANTCONDITION); \
327 #define EFSYS_MEM_WRITEO(_esmp, _offset, _eop) \
329 volatile uint8_t *_base = (_esmp)->esm_base; \
330 volatile __m128i *_addr; \
332 _NOTE(CONSTANTCONDITION); \
333 SFC_ASSERT(IS_P2ALIGNED(_offset, sizeof(efx_oword_t))); \
336 EFSYS_PROBE5(mem_writeo, unsigned int, (_offset), \
337 uint32_t, (_eop)->eo_u32[3], \
338 uint32_t, (_eop)->eo_u32[2], \
339 uint32_t, (_eop)->eo_u32[1], \
340 uint32_t, (_eop)->eo_u32[0]); \
342 _addr = (volatile __m128i *)(_base + (_offset)); \
343 _addr[0] = (_eop)->eo_u128[0]; \
345 _NOTE(CONSTANTCONDITION); \
349 #define EFSYS_MEM_SIZE(_esmp) \
350 ((_esmp)->esm_mz->len)
352 #define EFSYS_MEM_ADDR(_esmp) \
355 #define EFSYS_MEM_IS_NULL(_esmp) \
356 ((_esmp)->esm_base == NULL)
358 #define EFSYS_MEM_PREFETCH(_esmp, _offset) \
360 volatile uint8_t *_base = (_esmp)->esm_base; \
362 rte_prefetch0(_base + (_offset)); \
368 typedef struct efsys_bar_s {
369 rte_spinlock_t esb_lock;
371 struct rte_pci_device *esb_dev;
373 * Ideally it should have volatile qualifier to denote that
374 * the memory may be updated by someone else. However, it adds
375 * qualifier discard warnings when the pointer or its derivative
376 * is passed to memset() or rte_mov16().
377 * So, skip the qualifier here, but make sure that it is added
378 * below in access macros.
383 #define SFC_BAR_LOCK_INIT(_esbp, _ifname) \
385 rte_spinlock_init(&(_esbp)->esb_lock); \
386 _NOTE(CONSTANTCONDITION); \
388 #define SFC_BAR_LOCK_DESTROY(_esbp) ((void)0)
389 #define SFC_BAR_LOCK(_esbp) rte_spinlock_lock(&(_esbp)->esb_lock)
390 #define SFC_BAR_UNLOCK(_esbp) rte_spinlock_unlock(&(_esbp)->esb_lock)
392 #define EFSYS_BAR_READD(_esbp, _offset, _edp, _lock) \
394 volatile uint8_t *_base = (_esbp)->esb_base; \
395 volatile uint32_t *_addr; \
397 _NOTE(CONSTANTCONDITION); \
398 SFC_ASSERT(IS_P2ALIGNED(_offset, sizeof(efx_dword_t))); \
399 _NOTE(CONSTANTCONDITION); \
401 SFC_BAR_LOCK(_esbp); \
403 _addr = (volatile uint32_t *)(_base + (_offset)); \
405 (_edp)->ed_u32[0] = rte_read32_relaxed(_addr); \
407 EFSYS_PROBE2(bar_readd, unsigned int, (_offset), \
408 uint32_t, (_edp)->ed_u32[0]); \
410 _NOTE(CONSTANTCONDITION); \
412 SFC_BAR_UNLOCK(_esbp); \
413 _NOTE(CONSTANTCONDITION); \
416 #define EFSYS_BAR_READQ(_esbp, _offset, _eqp) \
418 volatile uint8_t *_base = (_esbp)->esb_base; \
419 volatile uint64_t *_addr; \
421 _NOTE(CONSTANTCONDITION); \
422 SFC_ASSERT(IS_P2ALIGNED(_offset, sizeof(efx_qword_t))); \
424 SFC_BAR_LOCK(_esbp); \
426 _addr = (volatile uint64_t *)(_base + (_offset)); \
428 (_eqp)->eq_u64[0] = rte_read64_relaxed(_addr); \
430 EFSYS_PROBE3(bar_readq, unsigned int, (_offset), \
431 uint32_t, (_eqp)->eq_u32[1], \
432 uint32_t, (_eqp)->eq_u32[0]); \
434 SFC_BAR_UNLOCK(_esbp); \
435 _NOTE(CONSTANTCONDITION); \
438 #define EFSYS_BAR_READO(_esbp, _offset, _eop, _lock) \
440 volatile uint8_t *_base = (_esbp)->esb_base; \
441 volatile __m128i *_addr; \
443 _NOTE(CONSTANTCONDITION); \
444 SFC_ASSERT(IS_P2ALIGNED(_offset, sizeof(efx_oword_t))); \
446 _NOTE(CONSTANTCONDITION); \
448 SFC_BAR_LOCK(_esbp); \
450 _addr = (volatile __m128i *)(_base + (_offset)); \
452 /* There is no rte_read128_relaxed() yet */ \
453 (_eop)->eo_u128[0] = _addr[0]; \
455 EFSYS_PROBE5(bar_reado, unsigned int, (_offset), \
456 uint32_t, (_eop)->eo_u32[3], \
457 uint32_t, (_eop)->eo_u32[2], \
458 uint32_t, (_eop)->eo_u32[1], \
459 uint32_t, (_eop)->eo_u32[0]); \
461 _NOTE(CONSTANTCONDITION); \
463 SFC_BAR_UNLOCK(_esbp); \
464 _NOTE(CONSTANTCONDITION); \
468 #define EFSYS_BAR_WRITED(_esbp, _offset, _edp, _lock) \
470 volatile uint8_t *_base = (_esbp)->esb_base; \
471 volatile uint32_t *_addr; \
473 _NOTE(CONSTANTCONDITION); \
474 SFC_ASSERT(IS_P2ALIGNED(_offset, sizeof(efx_dword_t))); \
476 _NOTE(CONSTANTCONDITION); \
478 SFC_BAR_LOCK(_esbp); \
480 EFSYS_PROBE2(bar_writed, unsigned int, (_offset), \
481 uint32_t, (_edp)->ed_u32[0]); \
483 _addr = (volatile uint32_t *)(_base + (_offset)); \
484 rte_write32_relaxed((_edp)->ed_u32[0], _addr); \
487 _NOTE(CONSTANTCONDITION); \
489 SFC_BAR_UNLOCK(_esbp); \
490 _NOTE(CONSTANTCONDITION); \
493 #define EFSYS_BAR_WRITEQ(_esbp, _offset, _eqp) \
495 volatile uint8_t *_base = (_esbp)->esb_base; \
496 volatile uint64_t *_addr; \
498 _NOTE(CONSTANTCONDITION); \
499 SFC_ASSERT(IS_P2ALIGNED(_offset, sizeof(efx_qword_t))); \
501 SFC_BAR_LOCK(_esbp); \
503 EFSYS_PROBE3(bar_writeq, unsigned int, (_offset), \
504 uint32_t, (_eqp)->eq_u32[1], \
505 uint32_t, (_eqp)->eq_u32[0]); \
507 _addr = (volatile uint64_t *)(_base + (_offset)); \
508 rte_write64_relaxed((_eqp)->eq_u64[0], _addr); \
511 SFC_BAR_UNLOCK(_esbp); \
512 _NOTE(CONSTANTCONDITION); \
516 * Guarantees 64bit aligned 64bit writes to write combined BAR mapping
517 * (required by PIO hardware).
519 * Neither VFIO, nor UIO, nor NIC UIO (on FreeBSD) support
520 * write-combined memory mapped to user-land, so just abort if used.
522 #define EFSYS_BAR_WC_WRITEQ(_esbp, _offset, _eqp) \
524 rte_panic("Write-combined BAR access not supported"); \
527 #define EFSYS_BAR_WRITEO(_esbp, _offset, _eop, _lock) \
529 volatile uint8_t *_base = (_esbp)->esb_base; \
530 volatile __m128i *_addr; \
532 _NOTE(CONSTANTCONDITION); \
533 SFC_ASSERT(IS_P2ALIGNED(_offset, sizeof(efx_oword_t))); \
535 _NOTE(CONSTANTCONDITION); \
537 SFC_BAR_LOCK(_esbp); \
539 EFSYS_PROBE5(bar_writeo, unsigned int, (_offset), \
540 uint32_t, (_eop)->eo_u32[3], \
541 uint32_t, (_eop)->eo_u32[2], \
542 uint32_t, (_eop)->eo_u32[1], \
543 uint32_t, (_eop)->eo_u32[0]); \
545 _addr = (volatile __m128i *)(_base + (_offset)); \
546 /* There is no rte_write128_relaxed() yet */ \
547 _addr[0] = (_eop)->eo_u128[0]; \
550 _NOTE(CONSTANTCONDITION); \
552 SFC_BAR_UNLOCK(_esbp); \
553 _NOTE(CONSTANTCONDITION); \
556 /* Use the standard octo-word write for doorbell writes */
557 #define EFSYS_BAR_DOORBELL_WRITEO(_esbp, _offset, _eop) \
559 EFSYS_BAR_WRITEO((_esbp), (_offset), (_eop), B_FALSE); \
560 _NOTE(CONSTANTCONDITION); \
565 #define EFSYS_SPIN(_us) \
568 _NOTE(CONSTANTCONDITION); \
571 #define EFSYS_SLEEP EFSYS_SPIN
575 #define EFSYS_MEM_READ_BARRIER() rte_rmb()
576 #define EFSYS_PIO_WRITE_BARRIER() rte_io_wmb()
581 * DPDK does not provide any DMA syncing API, and no PMD drivers
582 * have any traces of explicit DMA syncing.
583 * DMA mapping is assumed to be coherent.
586 #define EFSYS_DMA_SYNC_FOR_KERNEL(_esmp, _offset, _size) ((void)0)
588 /* Just avoid store and compiler (impliciltly) reordering */
589 #define EFSYS_DMA_SYNC_FOR_DEVICE(_esmp, _offset, _size) rte_wmb()
593 typedef uint64_t efsys_timestamp_t;
595 #define EFSYS_TIMESTAMP(_usp) \
597 *(_usp) = rte_get_timer_cycles() * 1000000 / \
598 rte_get_timer_hz(); \
599 _NOTE(CONSTANTCONDITION); \
604 #define EFSYS_KMEM_ALLOC(_esip, _size, _p) \
607 (_p) = rte_zmalloc("sfc", (_size), 0); \
608 _NOTE(CONSTANTCONDITION); \
611 #define EFSYS_KMEM_FREE(_esip, _size, _p) \
616 _NOTE(CONSTANTCONDITION); \
621 typedef rte_spinlock_t efsys_lock_t;
623 #define SFC_EFSYS_LOCK_INIT(_eslp, _ifname, _label) \
624 rte_spinlock_init((_eslp))
625 #define SFC_EFSYS_LOCK_DESTROY(_eslp) ((void)0)
626 #define SFC_EFSYS_LOCK(_eslp) \
627 rte_spinlock_lock((_eslp))
628 #define SFC_EFSYS_UNLOCK(_eslp) \
629 rte_spinlock_unlock((_eslp))
630 #define SFC_EFSYS_LOCK_ASSERT_OWNED(_eslp) \
631 SFC_ASSERT(rte_spinlock_is_locked((_eslp)))
633 typedef int efsys_lock_state_t;
635 #define EFSYS_LOCK_MAGIC 0x000010c4
637 #define EFSYS_LOCK(_lockp, _state) \
639 SFC_EFSYS_LOCK(_lockp); \
640 (_state) = EFSYS_LOCK_MAGIC; \
641 _NOTE(CONSTANTCONDITION); \
644 #define EFSYS_UNLOCK(_lockp, _state) \
646 SFC_ASSERT((_state) == EFSYS_LOCK_MAGIC); \
647 SFC_EFSYS_UNLOCK(_lockp); \
648 _NOTE(CONSTANTCONDITION); \
653 typedef uint64_t efsys_stat_t;
655 #define EFSYS_STAT_INCR(_knp, _delta) \
657 *(_knp) += (_delta); \
658 _NOTE(CONSTANTCONDITION); \
661 #define EFSYS_STAT_DECR(_knp, _delta) \
663 *(_knp) -= (_delta); \
664 _NOTE(CONSTANTCONDITION); \
667 #define EFSYS_STAT_SET(_knp, _val) \
670 _NOTE(CONSTANTCONDITION); \
673 #define EFSYS_STAT_SET_QWORD(_knp, _valp) \
675 *(_knp) = rte_le_to_cpu_64((_valp)->eq_u64[0]); \
676 _NOTE(CONSTANTCONDITION); \
679 #define EFSYS_STAT_SET_DWORD(_knp, _valp) \
681 *(_knp) = rte_le_to_cpu_32((_valp)->ed_u32[0]); \
682 _NOTE(CONSTANTCONDITION); \
685 #define EFSYS_STAT_INCR_QWORD(_knp, _valp) \
687 *(_knp) += rte_le_to_cpu_64((_valp)->eq_u64[0]); \
688 _NOTE(CONSTANTCONDITION); \
691 #define EFSYS_STAT_SUBR_QWORD(_knp, _valp) \
693 *(_knp) -= rte_le_to_cpu_64((_valp)->eq_u64[0]); \
694 _NOTE(CONSTANTCONDITION); \
699 #if EFSYS_OPT_DECODE_INTR_FATAL
700 #define EFSYS_ERR(_esip, _code, _dword0, _dword1) \
703 SFC_GENERIC_LOG(ERR, "FATAL ERROR #%u (0x%08x%08x)", \
704 (_code), (_dword0), (_dword1)); \
705 _NOTE(CONSTANTCONDITION); \
711 /* RTE_VERIFY from DPDK treats expressions with % operator incorrectly,
712 * so we re-implement it here
714 #ifdef RTE_LIBRTE_SFC_EFX_DEBUG
715 #define EFSYS_ASSERT(_exp) \
717 if (unlikely(!(_exp))) \
718 rte_panic("line %d\tassert \"%s\" failed\n", \
719 __LINE__, (#_exp)); \
722 #define EFSYS_ASSERT(_exp) (void)(_exp)
725 #define EFSYS_ASSERT3(_x, _op, _y, _t) EFSYS_ASSERT((_t)(_x) _op (_t)(_y))
727 #define EFSYS_ASSERT3U(_x, _op, _y) EFSYS_ASSERT3(_x, _op, _y, uint64_t)
728 #define EFSYS_ASSERT3S(_x, _op, _y) EFSYS_ASSERT3(_x, _op, _y, int64_t)
729 #define EFSYS_ASSERT3P(_x, _op, _y) EFSYS_ASSERT3(_x, _op, _y, uintptr_t)
733 #define EFSYS_HAS_ROTL_DWORD 0
739 #endif /* _SFC_COMMON_EFSYS_H */