1 /* SPDX-License-Identifier: BSD-3-Clause
3 * Copyright(c) 2019-2021 Xilinx, Inc.
4 * Copyright(c) 2016-2019 Solarflare Communications Inc.
6 * This software was jointly developed between OKTET Labs (under contract
7 * for Solarflare) and Solarflare Communications, Inc.
10 #ifndef _SFC_COMMON_EFSYS_H
11 #define _SFC_COMMON_EFSYS_H
15 #include <rte_spinlock.h>
16 #include <rte_byteorder.h>
17 #include <rte_debug.h>
18 #include <rte_memzone.h>
19 #include <rte_memory.h>
20 #include <rte_memcpy.h>
21 #include <rte_cycles.h>
22 #include <rte_prefetch.h>
23 #include <rte_common.h>
24 #include <rte_malloc.h>
28 #include "sfc_efx_debug.h"
29 #include "sfc_efx_log.h"
35 #define LIBEFX_API __rte_internal
37 /* No specific decorations required since functions are local by default */
38 #define LIBEFX_INTERNAL
40 #define EFSYS_HAS_UINT64 1
41 #define EFSYS_USE_UINT64 1
43 * __SSE2__ is defined by a compiler if target architecture supports
44 * Streaming SIMD Extensions 2 (SSE2). __m128i is a data type used
45 * by the extension instructions.
48 #define EFSYS_HAS_UINT128 1
49 typedef __m128i efsys_uint128_t;
51 * __int128 and unsigned __int128 are compiler extensions (built-in types).
52 * __SIZEOF_INT128__ is defined by the compiler if these data types are
55 #elif defined(__SIZEOF_INT128__)
56 #define EFSYS_HAS_UINT128 1
57 typedef unsigned __int128 efsys_uint128_t;
59 #error Unsigned 128-bit width integers support is required
62 #if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
63 #define EFSYS_IS_BIG_ENDIAN 1
64 #define EFSYS_IS_LITTLE_ENDIAN 0
65 #elif RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
66 #define EFSYS_IS_BIG_ENDIAN 0
67 #define EFSYS_IS_LITTLE_ENDIAN 1
69 #error "Cannot determine system endianness"
/* libefx expects a boolean_t; map it onto the standard C bool */
73 typedef bool boolean_t;
83 * RTE_MAX() and RTE_MIN() cannot be used since braced-group within
84 * expression allowed only inside a function, but MAX() is used as
85 * a number of elements in array.
/* NOTE: both arguments are evaluated twice — pass side-effect-free expressions */
88 #define MAX(v1, v2) ((v1) > (v2) ? (v1) : (v2))
91 #define MIN(v1, v2) ((v1) < (v2) ? (v1) : (v2))
/* Power-of-2 predicate; thin wrapper over the DPDK helper */
95 #define ISP2(x) rte_is_power_of_2(x)
/* libefx uses ENOTACTIVE, which <errno.h> does not provide; alias it */
98 #define ENOTACTIVE ENOTCONN
101 prefetch_read_many(const volatile void *addr)
107 prefetch_read_once(const volatile void *addr)
109 rte_prefetch_non_temporal(addr);
112 /* Code inclusion options */
115 #define EFSYS_OPT_NAMES 1
117 /* Disable SFN5xxx/SFN6xxx since they require specific support in the PMD */
118 #define EFSYS_OPT_SIENA 0
119 /* Enable SFN7xxx support */
120 #define EFSYS_OPT_HUNTINGTON 1
121 /* Enable SFN8xxx support */
122 #define EFSYS_OPT_MEDFORD 1
124 /* Enable SFN X2xxx (Medford2) support */
124 #define EFSYS_OPT_MEDFORD2 1
125 /* Enable Riverhead support */
126 #define EFSYS_OPT_RIVERHEAD 1
128 #ifdef RTE_DEBUG_COMMON_SFC_EFX
129 #define EFSYS_OPT_CHECK_REG 1
131 #define EFSYS_OPT_CHECK_REG 0
134 /* MCDI is required for SFN7xxx and SFN8xxx */
135 #define EFSYS_OPT_MCDI 1
136 #define EFSYS_OPT_MCDI_LOGGING 1
137 #define EFSYS_OPT_MCDI_PROXY_AUTH 1
139 #define EFSYS_OPT_MAC_STATS 1
141 #define EFSYS_OPT_LOOPBACK 1
143 #define EFSYS_OPT_MON_MCDI 0
144 #define EFSYS_OPT_MON_STATS 0
146 #define EFSYS_OPT_PHY_STATS 0
147 #define EFSYS_OPT_BIST 0
148 #define EFSYS_OPT_PHY_LED_CONTROL 0
149 #define EFSYS_OPT_PHY_FLAGS 0
151 #define EFSYS_OPT_VPD 0
152 #define EFSYS_OPT_NVRAM 0
153 #define EFSYS_OPT_BOOTCFG 0
154 #define EFSYS_OPT_IMAGE_LAYOUT 0
156 #define EFSYS_OPT_DIAG 0
157 #define EFSYS_OPT_RX_SCALE 1
158 #define EFSYS_OPT_QSTATS 0
159 /* Filter support is required for SFN7xxx and SFN8xxx */
160 #define EFSYS_OPT_FILTER 1
161 #define EFSYS_OPT_RX_SCATTER 0
163 #define EFSYS_OPT_EV_EXTENDED_WIDTH 0
164 #define EFSYS_OPT_EV_PREFETCH 0
166 #define EFSYS_OPT_DECODE_INTR_FATAL 0
168 #define EFSYS_OPT_LICENSING 0
170 #define EFSYS_OPT_ALLOW_UNCONFIGURED_NIC 0
172 #define EFSYS_OPT_RX_PACKED_STREAM 0
174 #define EFSYS_OPT_RX_ES_SUPER_BUFFER 1
176 #define EFSYS_OPT_TUNNEL 1
178 #define EFSYS_OPT_FW_SUBVARIANT_AWARE 1
180 #define EFSYS_OPT_EVB 1
182 #define EFSYS_OPT_MCDI_PROXY_AUTH_SERVER 0
184 #define EFSYS_OPT_PCI 1
186 #define EFSYS_OPT_DESC_PROXY 0
188 #define EFSYS_OPT_MAE 1
190 #define EFSYS_OPT_VIRTIO 1
/* Opaque per-NIC handle passed back to the OS layer by libefx */
194 typedef struct __efsys_identifier_s efsys_identifier_t;
197 #define EFSYS_PROBE(_name) \
200 #define EFSYS_PROBE1(_name, _type1, _arg1) \
203 #define EFSYS_PROBE2(_name, _type1, _arg1, _type2, _arg2) \
206 #define EFSYS_PROBE3(_name, _type1, _arg1, _type2, _arg2, \
210 #define EFSYS_PROBE4(_name, _type1, _arg1, _type2, _arg2, \
211 _type3, _arg3, _type4, _arg4) \
214 #define EFSYS_PROBE5(_name, _type1, _arg1, _type2, _arg2, \
215 _type3, _arg3, _type4, _arg4, _type5, _arg5) \
218 #define EFSYS_PROBE6(_name, _type1, _arg1, _type2, _arg2, \
219 _type3, _arg3, _type4, _arg4, _type5, _arg5, \
223 #define EFSYS_PROBE7(_name, _type1, _arg1, _type2, _arg2, \
224 _type3, _arg3, _type4, _arg4, _type5, _arg5, \
225 _type6, _arg6, _type7, _arg7) \
231 typedef rte_iova_t efsys_dma_addr_t; /* DMA addresses are DPDK IOVAs */
233 typedef struct efsys_mem_s {
234 const struct rte_memzone *esm_mz;
236 * Ideally it should have volatile qualifier to denote that
237 * the memory may be updated by someone else. However, it adds
238 * qualifier discard warnings when the pointer or its derivative
239 * is passed to memset() or rte_mov16().
240 * So, skip the qualifier here, but make sure that it is added
241 * below in access macros.
244 efsys_dma_addr_t esm_addr;
248 #define EFSYS_MEM_ZERO(_esmp, _size) \
250 (void)memset((void *)(_esmp)->esm_base, 0, (_size)); \
252 _NOTE(CONSTANTCONDITION); \
255 #define EFSYS_MEM_READD(_esmp, _offset, _edp) \
257 volatile uint8_t *_base = (_esmp)->esm_base; \
258 volatile uint32_t *_addr; \
260 _NOTE(CONSTANTCONDITION); \
261 SFC_EFX_ASSERT(EFX_IS_P2ALIGNED(size_t, _offset, \
262 sizeof(efx_dword_t))); \
264 _addr = (volatile uint32_t *)(_base + (_offset)); \
265 (_edp)->ed_u32[0] = _addr[0]; \
267 EFSYS_PROBE2(mem_readl, unsigned int, (_offset), \
268 uint32_t, (_edp)->ed_u32[0]); \
270 _NOTE(CONSTANTCONDITION); \
273 #define EFSYS_MEM_READQ(_esmp, _offset, _eqp) \
275 volatile uint8_t *_base = (_esmp)->esm_base; \
276 volatile uint64_t *_addr; \
278 _NOTE(CONSTANTCONDITION); \
279 SFC_EFX_ASSERT(EFX_IS_P2ALIGNED(size_t, _offset, \
280 sizeof(efx_qword_t))); \
282 _addr = (volatile uint64_t *)(_base + (_offset)); \
283 (_eqp)->eq_u64[0] = _addr[0]; \
285 EFSYS_PROBE3(mem_readq, unsigned int, (_offset), \
286 uint32_t, (_eqp)->eq_u32[1], \
287 uint32_t, (_eqp)->eq_u32[0]); \
289 _NOTE(CONSTANTCONDITION); \
292 #define EFSYS_MEM_READO(_esmp, _offset, _eop) \
294 volatile uint8_t *_base = (_esmp)->esm_base; \
295 volatile efsys_uint128_t *_addr; \
297 _NOTE(CONSTANTCONDITION); \
298 SFC_EFX_ASSERT(EFX_IS_P2ALIGNED(size_t, _offset, \
299 sizeof(efx_oword_t))); \
301 _addr = (volatile efsys_uint128_t *)(_base + (_offset));\
302 (_eop)->eo_u128[0] = _addr[0]; \
304 EFSYS_PROBE5(mem_reado, unsigned int, (_offset), \
305 uint32_t, (_eop)->eo_u32[3], \
306 uint32_t, (_eop)->eo_u32[2], \
307 uint32_t, (_eop)->eo_u32[1], \
308 uint32_t, (_eop)->eo_u32[0]); \
310 _NOTE(CONSTANTCONDITION); \
314 #define EFSYS_MEM_WRITED(_esmp, _offset, _edp) \
316 volatile uint8_t *_base = (_esmp)->esm_base; \
317 volatile uint32_t *_addr; \
319 _NOTE(CONSTANTCONDITION); \
320 SFC_EFX_ASSERT(EFX_IS_P2ALIGNED(size_t, _offset, \
321 sizeof(efx_dword_t))); \
323 EFSYS_PROBE2(mem_writed, unsigned int, (_offset), \
324 uint32_t, (_edp)->ed_u32[0]); \
326 _addr = (volatile uint32_t *)(_base + (_offset)); \
327 _addr[0] = (_edp)->ed_u32[0]; \
329 _NOTE(CONSTANTCONDITION); \
332 #define EFSYS_MEM_WRITEQ(_esmp, _offset, _eqp) \
334 volatile uint8_t *_base = (_esmp)->esm_base; \
335 volatile uint64_t *_addr; \
337 _NOTE(CONSTANTCONDITION); \
338 SFC_EFX_ASSERT(EFX_IS_P2ALIGNED(size_t, _offset, \
339 sizeof(efx_qword_t))); \
341 EFSYS_PROBE3(mem_writeq, unsigned int, (_offset), \
342 uint32_t, (_eqp)->eq_u32[1], \
343 uint32_t, (_eqp)->eq_u32[0]); \
345 _addr = (volatile uint64_t *)(_base + (_offset)); \
346 _addr[0] = (_eqp)->eq_u64[0]; \
348 _NOTE(CONSTANTCONDITION); \
351 #define EFSYS_MEM_WRITEO(_esmp, _offset, _eop) \
353 volatile uint8_t *_base = (_esmp)->esm_base; \
354 volatile efsys_uint128_t *_addr; \
356 _NOTE(CONSTANTCONDITION); \
357 SFC_EFX_ASSERT(EFX_IS_P2ALIGNED(size_t, _offset, \
358 sizeof(efx_oword_t))); \
361 EFSYS_PROBE5(mem_writeo, unsigned int, (_offset), \
362 uint32_t, (_eop)->eo_u32[3], \
363 uint32_t, (_eop)->eo_u32[2], \
364 uint32_t, (_eop)->eo_u32[1], \
365 uint32_t, (_eop)->eo_u32[0]); \
367 _addr = (volatile efsys_uint128_t *)(_base + (_offset));\
368 _addr[0] = (_eop)->eo_u128[0]; \
370 _NOTE(CONSTANTCONDITION); \
374 #define EFSYS_MEM_SIZE(_esmp) \
375 ((_esmp)->esm_mz->len)
377 #define EFSYS_MEM_ADDR(_esmp) \
380 #define EFSYS_MEM_IS_NULL(_esmp) \
381 ((_esmp)->esm_base == NULL)
383 #define EFSYS_MEM_PREFETCH(_esmp, _offset) \
385 volatile uint8_t *_base = (_esmp)->esm_base; \
387 rte_prefetch0(_base + (_offset)); \
393 typedef struct efsys_bar_s {
394 rte_spinlock_t esb_lock;
396 struct rte_pci_device *esb_dev;
398 * Ideally it should have volatile qualifier to denote that
399 * the memory may be updated by someone else. However, it adds
400 * qualifier discard warnings when the pointer or its derivative
401 * is passed to memset() or rte_mov16().
402 * So, skip the qualifier here, but make sure that it is added
403 * below in access macros.
408 #define SFC_BAR_LOCK_INIT(_esbp, _ifname) \
410 rte_spinlock_init(&(_esbp)->esb_lock); \
411 _NOTE(CONSTANTCONDITION); \
413 #define SFC_BAR_LOCK_DESTROY(_esbp) ((void)0) /* spinlocks need no teardown */
414 #define SFC_BAR_LOCK(_esbp) rte_spinlock_lock(&(_esbp)->esb_lock) /* serialize BAR access */
415 #define SFC_BAR_UNLOCK(_esbp) rte_spinlock_unlock(&(_esbp)->esb_lock)
417 #define EFSYS_BAR_READD(_esbp, _offset, _edp, _lock) \
419 volatile uint8_t *_base = (_esbp)->esb_base; \
420 volatile uint32_t *_addr; \
422 _NOTE(CONSTANTCONDITION); \
423 SFC_EFX_ASSERT(EFX_IS_P2ALIGNED(size_t, _offset, \
424 sizeof(efx_dword_t))); \
425 _NOTE(CONSTANTCONDITION); \
427 SFC_BAR_LOCK(_esbp); \
429 _addr = (volatile uint32_t *)(_base + (_offset)); \
431 (_edp)->ed_u32[0] = rte_read32_relaxed(_addr); \
433 EFSYS_PROBE2(bar_readd, unsigned int, (_offset), \
434 uint32_t, (_edp)->ed_u32[0]); \
436 _NOTE(CONSTANTCONDITION); \
438 SFC_BAR_UNLOCK(_esbp); \
439 _NOTE(CONSTANTCONDITION); \
442 #define EFSYS_BAR_READQ(_esbp, _offset, _eqp) \
444 volatile uint8_t *_base = (_esbp)->esb_base; \
445 volatile uint64_t *_addr; \
447 _NOTE(CONSTANTCONDITION); \
448 SFC_EFX_ASSERT(EFX_IS_P2ALIGNED(size_t, _offset, \
449 sizeof(efx_qword_t))); \
451 SFC_BAR_LOCK(_esbp); \
453 _addr = (volatile uint64_t *)(_base + (_offset)); \
455 (_eqp)->eq_u64[0] = rte_read64_relaxed(_addr); \
457 EFSYS_PROBE3(bar_readq, unsigned int, (_offset), \
458 uint32_t, (_eqp)->eq_u32[1], \
459 uint32_t, (_eqp)->eq_u32[0]); \
461 SFC_BAR_UNLOCK(_esbp); \
462 _NOTE(CONSTANTCONDITION); \
465 #define EFSYS_BAR_READO(_esbp, _offset, _eop, _lock) \
467 volatile uint8_t *_base = (_esbp)->esb_base; \
468 volatile efsys_uint128_t *_addr; \
470 _NOTE(CONSTANTCONDITION); \
471 SFC_EFX_ASSERT(EFX_IS_P2ALIGNED(size_t, _offset, \
472 sizeof(efx_oword_t))); \
474 _NOTE(CONSTANTCONDITION); \
476 SFC_BAR_LOCK(_esbp); \
478 _addr = (volatile efsys_uint128_t *)(_base + (_offset));\
480 /* There is no rte_read128_relaxed() yet */ \
481 (_eop)->eo_u128[0] = _addr[0]; \
483 EFSYS_PROBE5(bar_reado, unsigned int, (_offset), \
484 uint32_t, (_eop)->eo_u32[3], \
485 uint32_t, (_eop)->eo_u32[2], \
486 uint32_t, (_eop)->eo_u32[1], \
487 uint32_t, (_eop)->eo_u32[0]); \
489 _NOTE(CONSTANTCONDITION); \
491 SFC_BAR_UNLOCK(_esbp); \
492 _NOTE(CONSTANTCONDITION); \
496 #define EFSYS_BAR_WRITED(_esbp, _offset, _edp, _lock) \
498 volatile uint8_t *_base = (_esbp)->esb_base; \
499 volatile uint32_t *_addr; \
501 _NOTE(CONSTANTCONDITION); \
502 SFC_EFX_ASSERT(EFX_IS_P2ALIGNED(size_t, _offset, \
503 sizeof(efx_dword_t))); \
505 _NOTE(CONSTANTCONDITION); \
507 SFC_BAR_LOCK(_esbp); \
509 EFSYS_PROBE2(bar_writed, unsigned int, (_offset), \
510 uint32_t, (_edp)->ed_u32[0]); \
512 _addr = (volatile uint32_t *)(_base + (_offset)); \
513 rte_write32_relaxed((_edp)->ed_u32[0], _addr); \
516 _NOTE(CONSTANTCONDITION); \
518 SFC_BAR_UNLOCK(_esbp); \
519 _NOTE(CONSTANTCONDITION); \
522 #define EFSYS_BAR_WRITEQ(_esbp, _offset, _eqp) \
524 volatile uint8_t *_base = (_esbp)->esb_base; \
525 volatile uint64_t *_addr; \
527 _NOTE(CONSTANTCONDITION); \
528 SFC_EFX_ASSERT(EFX_IS_P2ALIGNED(size_t, _offset, \
529 sizeof(efx_qword_t))); \
531 SFC_BAR_LOCK(_esbp); \
533 EFSYS_PROBE3(bar_writeq, unsigned int, (_offset), \
534 uint32_t, (_eqp)->eq_u32[1], \
535 uint32_t, (_eqp)->eq_u32[0]); \
537 _addr = (volatile uint64_t *)(_base + (_offset)); \
538 rte_write64_relaxed((_eqp)->eq_u64[0], _addr); \
541 SFC_BAR_UNLOCK(_esbp); \
542 _NOTE(CONSTANTCONDITION); \
546 * Guarantees 64bit aligned 64bit writes to write combined BAR mapping
547 * (required by PIO hardware).
549 * Neither VFIO, nor UIO, nor NIC UIO (on FreeBSD) support
550 * write-combined memory mapped to user-land, so just abort if used.
552 #define EFSYS_BAR_WC_WRITEQ(_esbp, _offset, _eqp) \
554 rte_panic("Write-combined BAR access not supported"); \
557 #define EFSYS_BAR_WRITEO(_esbp, _offset, _eop, _lock) \
559 volatile uint8_t *_base = (_esbp)->esb_base; \
560 volatile efsys_uint128_t *_addr; \
562 _NOTE(CONSTANTCONDITION); \
563 SFC_EFX_ASSERT(EFX_IS_P2ALIGNED(size_t, _offset, \
564 sizeof(efx_oword_t))); \
566 _NOTE(CONSTANTCONDITION); \
568 SFC_BAR_LOCK(_esbp); \
570 EFSYS_PROBE5(bar_writeo, unsigned int, (_offset), \
571 uint32_t, (_eop)->eo_u32[3], \
572 uint32_t, (_eop)->eo_u32[2], \
573 uint32_t, (_eop)->eo_u32[1], \
574 uint32_t, (_eop)->eo_u32[0]); \
576 _addr = (volatile efsys_uint128_t *)(_base + (_offset));\
577 /* There is no rte_write128_relaxed() yet */ \
578 _addr[0] = (_eop)->eo_u128[0]; \
581 _NOTE(CONSTANTCONDITION); \
583 SFC_BAR_UNLOCK(_esbp); \
584 _NOTE(CONSTANTCONDITION); \
587 /* Use the standard octo-word write for doorbell writes */
588 #define EFSYS_BAR_DOORBELL_WRITEO(_esbp, _offset, _eop) \
590 EFSYS_BAR_WRITEO((_esbp), (_offset), (_eop), B_FALSE); \
591 _NOTE(CONSTANTCONDITION); \
596 #define EFSYS_SPIN(_us) \
599 _NOTE(CONSTANTCONDITION); \
602 #define EFSYS_SLEEP EFSYS_SPIN /* no blocking sleep in PMD context; busy-wait instead */
606 #define EFSYS_MEM_READ_BARRIER() rte_rmb() /* order reads of DMA-coherent memory */
607 #define EFSYS_PIO_WRITE_BARRIER() rte_io_wmb() /* order PIO writes to the device */
612 * DPDK does not provide any DMA syncing API, and no PMDs
613 * have any traces of explicit DMA syncing.
614 * DMA mapping is assumed to be coherent.
617 #define EFSYS_DMA_SYNC_FOR_KERNEL(_esmp, _offset, _size) ((void)0)
619 /* Just avoid store and compiler (implicitly) reordering */
620 #define EFSYS_DMA_SYNC_FOR_DEVICE(_esmp, _offset, _size) rte_wmb()
624 typedef uint64_t efsys_timestamp_t; /* microseconds (see EFSYS_TIMESTAMP) */
626 #define EFSYS_TIMESTAMP(_usp) \
628 *(_usp) = rte_get_timer_cycles() * 1000000 / \
629 rte_get_timer_hz(); \
630 _NOTE(CONSTANTCONDITION); \
635 #define EFSYS_KMEM_ALLOC(_esip, _size, _p) \
638 (_p) = rte_zmalloc("sfc", (_size), 0); \
639 _NOTE(CONSTANTCONDITION); \
642 #define EFSYS_KMEM_FREE(_esip, _size, _p) \
647 _NOTE(CONSTANTCONDITION); \
652 typedef rte_spinlock_t efsys_lock_t; /* libefx locks are DPDK spinlocks */
/* _ifname and _label are unused on DPDK; kept for interface compatibility */
654 #define SFC_EFSYS_LOCK_INIT(_eslp, _ifname, _label) \
655 rte_spinlock_init((_eslp))
656 #define SFC_EFSYS_LOCK_DESTROY(_eslp) ((void)0) /* nothing to free */
657 #define SFC_EFSYS_LOCK(_eslp) \
658 rte_spinlock_lock((_eslp))
659 #define SFC_EFSYS_UNLOCK(_eslp) \
660 rte_spinlock_unlock((_eslp))
661 #define SFC_EFSYS_LOCK_ASSERT_OWNED(_eslp) \
662 SFC_EFX_ASSERT(rte_spinlock_is_locked((_eslp)))
/* Holds EFSYS_LOCK_MAGIC while the lock is taken (debug aid, no IRQ state) */
664 typedef int efsys_lock_state_t;
666 #define EFSYS_LOCK_MAGIC 0x000010c4
668 #define EFSYS_LOCK(_lockp, _state) \
670 SFC_EFSYS_LOCK(_lockp); \
671 (_state) = EFSYS_LOCK_MAGIC; \
672 _NOTE(CONSTANTCONDITION); \
675 #define EFSYS_UNLOCK(_lockp, _state) \
677 SFC_EFX_ASSERT((_state) == EFSYS_LOCK_MAGIC); \
678 SFC_EFSYS_UNLOCK(_lockp); \
679 _NOTE(CONSTANTCONDITION); \
684 typedef uint64_t efsys_stat_t; /* 64-bit statistic counter, host byte order */
686 #define EFSYS_STAT_INCR(_knp, _delta) \
688 *(_knp) += (_delta); \
689 _NOTE(CONSTANTCONDITION); \
692 #define EFSYS_STAT_DECR(_knp, _delta) \
694 *(_knp) -= (_delta); \
695 _NOTE(CONSTANTCONDITION); \
698 #define EFSYS_STAT_SET(_knp, _val) \
701 _NOTE(CONSTANTCONDITION); \
704 #define EFSYS_STAT_SET_QWORD(_knp, _valp) \
706 *(_knp) = rte_le_to_cpu_64((_valp)->eq_u64[0]); \
707 _NOTE(CONSTANTCONDITION); \
710 #define EFSYS_STAT_SET_DWORD(_knp, _valp) \
712 *(_knp) = rte_le_to_cpu_32((_valp)->ed_u32[0]); \
713 _NOTE(CONSTANTCONDITION); \
716 #define EFSYS_STAT_INCR_QWORD(_knp, _valp) \
718 *(_knp) += rte_le_to_cpu_64((_valp)->eq_u64[0]); \
719 _NOTE(CONSTANTCONDITION); \
722 #define EFSYS_STAT_SUBR_QWORD(_knp, _valp) \
724 *(_knp) -= rte_le_to_cpu_64((_valp)->eq_u64[0]); \
725 _NOTE(CONSTANTCONDITION); \
730 #if EFSYS_OPT_DECODE_INTR_FATAL
731 #define EFSYS_ERR(_esip, _code, _dword0, _dword1) \
734 SFC_EFX_LOG(ERR, "FATAL ERROR #%u (0x%08x%08x)", \
735 (_code), (_dword0), (_dword1)); \
736 _NOTE(CONSTANTCONDITION); \
742 /* RTE_VERIFY from DPDK treats expressions with % operator incorrectly,
743 * so we re-implement it here
745 #ifdef RTE_DEBUG_COMMON_SFC_EFX
746 #define EFSYS_ASSERT(_exp) \
748 if (unlikely(!(_exp))) \
749 rte_panic("line %d\tassert \"%s\" failed\n", \
750 __LINE__, (#_exp)); \
753 #define EFSYS_ASSERT(_exp) (void)(_exp)
/*
 * Three-argument asserts: compare _x and _y with operator _op after
 * casting both operands to a common type _t.
 */
756 #define EFSYS_ASSERT3(_x, _op, _y, _t) EFSYS_ASSERT((_t)(_x) _op (_t)(_y))
/* Unsigned, signed and pointer comparison variants */
758 #define EFSYS_ASSERT3U(_x, _op, _y) EFSYS_ASSERT3(_x, _op, _y, uint64_t)
759 #define EFSYS_ASSERT3S(_x, _op, _y) EFSYS_ASSERT3(_x, _op, _y, int64_t)
760 #define EFSYS_ASSERT3P(_x, _op, _y) EFSYS_ASSERT3(_x, _op, _y, uintptr_t)
/* No rotate-left helper is provided; libefx falls back to its own */
764 #define EFSYS_HAS_ROTL_DWORD 0
/* Handle for PCI config space access: wraps the DPDK PCI device */
768 typedef struct efsys_pci_config_s {
769 struct rte_pci_device *espc_dev;
770 } efsys_pci_config_t;
776 #endif /* _SFC_COMMON_EFSYS_H */