/* SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright(c) 2019-2020 Xilinx, Inc.
 * Copyright(c) 2016-2019 Solarflare Communications Inc.
 *
 * This software was jointly developed between OKTET Labs (under contract
 * for Solarflare) and Solarflare Communications, Inc.
 */
10 #ifndef _SFC_COMMON_EFSYS_H
11 #define _SFC_COMMON_EFSYS_H
15 #include <rte_spinlock.h>
16 #include <rte_byteorder.h>
17 #include <rte_debug.h>
18 #include <rte_memzone.h>
19 #include <rte_memory.h>
20 #include <rte_memcpy.h>
21 #include <rte_cycles.h>
22 #include <rte_prefetch.h>
23 #include <rte_common.h>
24 #include <rte_malloc.h>
28 #include "sfc_efx_debug.h"
29 #include "sfc_efx_log.h"
/* libefx entry points are DPDK-internal: not part of the stable ABI */
#define LIBEFX_API __rte_internal

/* No specific decorations required since functions are local by default */
#define LIBEFX_INTERNAL

/* The platform has native 64-bit integers; let libefx use them directly */
#define EFSYS_HAS_UINT64 1
#define EFSYS_USE_UINT64 1
/*
 * __SSE2__ is defined by a compiler if target architecture supports
 * Streaming SIMD Extensions 2 (SSE2). __m128i is a data type used
 * by the extension instructions.
 */
#if defined(__SSE2__)
#define EFSYS_HAS_UINT128 1
typedef __m128i efsys_uint128_t;
/*
 * __int128 and unsigned __int128 are compiler extensions (built-in types).
 * __SIZEOF_INT128__ is defined by the compiler if these data types are
 * available.
 */
#elif defined(__SIZEOF_INT128__)
#define EFSYS_HAS_UINT128 1
typedef unsigned __int128 efsys_uint128_t;
#else
#error Unsigned 128-bit width integers support is required
#endif
/* Mirror DPDK's byte-order detection into the flags libefx expects */
#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
#define EFSYS_IS_BIG_ENDIAN 1
#define EFSYS_IS_LITTLE_ENDIAN 0
#elif RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
#define EFSYS_IS_BIG_ENDIAN 0
#define EFSYS_IS_LITTLE_ENDIAN 1
#else
#error "Cannot determine system endianness"
#endif

/* Map libefx's boolean_t onto C99 bool */
typedef bool boolean_t;
/*
 * RTE_MAX() and RTE_MIN() cannot be used since braced-group within
 * expression allowed only inside a function, but MAX() is used as
 * a number of elements in array.
 *
 * Caution: unlike RTE_MAX()/RTE_MIN() these evaluate each argument
 * twice, so arguments must not have side effects.
 */
#define MAX(v1, v2) ((v1) > (v2) ? (v1) : (v2))
#define MIN(v1, v2) ((v1) < (v2) ? (v1) : (v2))

/* True if x is a power of two (delegates to DPDK helper) */
#define ISP2(x) rte_is_power_of_2(x)

/* libefx expects ENOTACTIVE; map it to the closest errno DPDK has */
#define ENOTACTIVE ENOTCONN
/*
 * Prefetch data that will be read many times: pull it into the caches.
 */
static inline void
prefetch_read_many(const volatile void *addr)
{
	rte_prefetch0(addr);
}

/*
 * Prefetch data that will be read once: use the non-temporal hint to
 * avoid displacing useful cache contents.
 */
static inline void
prefetch_read_once(const volatile void *addr)
{
	rte_prefetch_non_temporal(addr);
}
/* Code inclusion options */

#define EFSYS_OPT_NAMES 1

/* Disable SFN5xxx/SFN6xxx since it requires specific support in the PMD */
#define EFSYS_OPT_SIENA 0
/* Enable SFN7xxx support */
#define EFSYS_OPT_HUNTINGTON 1
/* Enable SFN8xxx support */
#define EFSYS_OPT_MEDFORD 1
/* Enable SFN2xxx support */
#define EFSYS_OPT_MEDFORD2 1
/* Enable Riverhead support */
#define EFSYS_OPT_RIVERHEAD 1

/* Read back and verify register writes in debug builds only */
#ifdef RTE_LIBRTE_SFC_EFX_DEBUG
#define EFSYS_OPT_CHECK_REG 1
#else
#define EFSYS_OPT_CHECK_REG 0
#endif

/* MCDI is required for SFN7xxx and SFN8xxx */
#define EFSYS_OPT_MCDI 1
#define EFSYS_OPT_MCDI_LOGGING 1
#define EFSYS_OPT_MCDI_PROXY_AUTH 1

#define EFSYS_OPT_MAC_STATS 1

#define EFSYS_OPT_LOOPBACK 1

#define EFSYS_OPT_MON_MCDI 0
#define EFSYS_OPT_MON_STATS 0

#define EFSYS_OPT_PHY_STATS 0
#define EFSYS_OPT_BIST 0
#define EFSYS_OPT_PHY_LED_CONTROL 0
#define EFSYS_OPT_PHY_FLAGS 0

#define EFSYS_OPT_VPD 0
#define EFSYS_OPT_NVRAM 0
#define EFSYS_OPT_BOOTCFG 0
#define EFSYS_OPT_IMAGE_LAYOUT 0

#define EFSYS_OPT_DIAG 0
#define EFSYS_OPT_RX_SCALE 1
#define EFSYS_OPT_QSTATS 0
/* Filters support is required for SFN7xxx and SFN8xxx */
#define EFSYS_OPT_FILTER 1
#define EFSYS_OPT_RX_SCATTER 0

#define EFSYS_OPT_EV_EXTENDED_WIDTH 0
#define EFSYS_OPT_EV_PREFETCH 0

#define EFSYS_OPT_DECODE_INTR_FATAL 0

#define EFSYS_OPT_LICENSING 0

#define EFSYS_OPT_ALLOW_UNCONFIGURED_NIC 0

#define EFSYS_OPT_RX_PACKED_STREAM 0

#define EFSYS_OPT_RX_ES_SUPER_BUFFER 1

#define EFSYS_OPT_TUNNEL 1

#define EFSYS_OPT_FW_SUBVARIANT_AWARE 1

#define EFSYS_OPT_EVB 1

#define EFSYS_OPT_MCDI_PROXY_AUTH_SERVER 0

#define EFSYS_OPT_PCI 1

#define EFSYS_OPT_DESC_PROXY 0

#define EFSYS_OPT_MAE 1
/* Opaque handle identifying the device instance for libefx */
typedef struct __efsys_identifier_s efsys_identifier_t;

/*
 * Probe points: no tracing infrastructure is wired up, so every
 * EFSYS_PROBEn() expands to an empty statement.
 */
#define EFSYS_PROBE(_name) \
	do { } while (0)

#define EFSYS_PROBE1(_name, _type1, _arg1) \
	do { } while (0)

#define EFSYS_PROBE2(_name, _type1, _arg1, _type2, _arg2) \
	do { } while (0)

#define EFSYS_PROBE3(_name, _type1, _arg1, _type2, _arg2, \
		     _type3, _arg3) \
	do { } while (0)

#define EFSYS_PROBE4(_name, _type1, _arg1, _type2, _arg2, \
		     _type3, _arg3, _type4, _arg4) \
	do { } while (0)

#define EFSYS_PROBE5(_name, _type1, _arg1, _type2, _arg2, \
		     _type3, _arg3, _type4, _arg4, _type5, _arg5) \
	do { } while (0)

#define EFSYS_PROBE6(_name, _type1, _arg1, _type2, _arg2, \
		     _type3, _arg3, _type4, _arg4, _type5, _arg5, \
		     _type6, _arg6) \
	do { } while (0)

#define EFSYS_PROBE7(_name, _type1, _arg1, _type2, _arg2, \
		     _type3, _arg3, _type4, _arg4, _type5, _arg5, \
		     _type6, _arg6, _type7, _arg7) \
	do { } while (0)
229 typedef rte_iova_t efsys_dma_addr_t;
231 typedef struct efsys_mem_s {
232 const struct rte_memzone *esm_mz;
234 * Ideally it should have volatile qualifier to denote that
235 * the memory may be updated by someone else. However, it adds
236 * qualifier discard warnings when the pointer or its derivative
237 * is passed to memset() or rte_mov16().
238 * So, skip the qualifier here, but make sure that it is added
239 * below in access macros.
242 efsys_dma_addr_t esm_addr;
246 #define EFSYS_MEM_ZERO(_esmp, _size) \
248 (void)memset((void *)(_esmp)->esm_base, 0, (_size)); \
250 _NOTE(CONSTANTCONDITION); \
253 #define EFSYS_MEM_READD(_esmp, _offset, _edp) \
255 volatile uint8_t *_base = (_esmp)->esm_base; \
256 volatile uint32_t *_addr; \
258 _NOTE(CONSTANTCONDITION); \
259 SFC_EFX_ASSERT(EFX_IS_P2ALIGNED(size_t, _offset, \
260 sizeof(efx_dword_t))); \
262 _addr = (volatile uint32_t *)(_base + (_offset)); \
263 (_edp)->ed_u32[0] = _addr[0]; \
265 EFSYS_PROBE2(mem_readl, unsigned int, (_offset), \
266 uint32_t, (_edp)->ed_u32[0]); \
268 _NOTE(CONSTANTCONDITION); \
271 #define EFSYS_MEM_READQ(_esmp, _offset, _eqp) \
273 volatile uint8_t *_base = (_esmp)->esm_base; \
274 volatile uint64_t *_addr; \
276 _NOTE(CONSTANTCONDITION); \
277 SFC_EFX_ASSERT(EFX_IS_P2ALIGNED(size_t, _offset, \
278 sizeof(efx_qword_t))); \
280 _addr = (volatile uint64_t *)(_base + (_offset)); \
281 (_eqp)->eq_u64[0] = _addr[0]; \
283 EFSYS_PROBE3(mem_readq, unsigned int, (_offset), \
284 uint32_t, (_eqp)->eq_u32[1], \
285 uint32_t, (_eqp)->eq_u32[0]); \
287 _NOTE(CONSTANTCONDITION); \
290 #define EFSYS_MEM_READO(_esmp, _offset, _eop) \
292 volatile uint8_t *_base = (_esmp)->esm_base; \
293 volatile efsys_uint128_t *_addr; \
295 _NOTE(CONSTANTCONDITION); \
296 SFC_EFX_ASSERT(EFX_IS_P2ALIGNED(size_t, _offset, \
297 sizeof(efx_oword_t))); \
299 _addr = (volatile efsys_uint128_t *)(_base + (_offset));\
300 (_eop)->eo_u128[0] = _addr[0]; \
302 EFSYS_PROBE5(mem_reado, unsigned int, (_offset), \
303 uint32_t, (_eop)->eo_u32[3], \
304 uint32_t, (_eop)->eo_u32[2], \
305 uint32_t, (_eop)->eo_u32[1], \
306 uint32_t, (_eop)->eo_u32[0]); \
308 _NOTE(CONSTANTCONDITION); \
312 #define EFSYS_MEM_WRITED(_esmp, _offset, _edp) \
314 volatile uint8_t *_base = (_esmp)->esm_base; \
315 volatile uint32_t *_addr; \
317 _NOTE(CONSTANTCONDITION); \
318 SFC_EFX_ASSERT(EFX_IS_P2ALIGNED(size_t, _offset, \
319 sizeof(efx_dword_t))); \
321 EFSYS_PROBE2(mem_writed, unsigned int, (_offset), \
322 uint32_t, (_edp)->ed_u32[0]); \
324 _addr = (volatile uint32_t *)(_base + (_offset)); \
325 _addr[0] = (_edp)->ed_u32[0]; \
327 _NOTE(CONSTANTCONDITION); \
330 #define EFSYS_MEM_WRITEQ(_esmp, _offset, _eqp) \
332 volatile uint8_t *_base = (_esmp)->esm_base; \
333 volatile uint64_t *_addr; \
335 _NOTE(CONSTANTCONDITION); \
336 SFC_EFX_ASSERT(EFX_IS_P2ALIGNED(size_t, _offset, \
337 sizeof(efx_qword_t))); \
339 EFSYS_PROBE3(mem_writeq, unsigned int, (_offset), \
340 uint32_t, (_eqp)->eq_u32[1], \
341 uint32_t, (_eqp)->eq_u32[0]); \
343 _addr = (volatile uint64_t *)(_base + (_offset)); \
344 _addr[0] = (_eqp)->eq_u64[0]; \
346 _NOTE(CONSTANTCONDITION); \
349 #define EFSYS_MEM_WRITEO(_esmp, _offset, _eop) \
351 volatile uint8_t *_base = (_esmp)->esm_base; \
352 volatile efsys_uint128_t *_addr; \
354 _NOTE(CONSTANTCONDITION); \
355 SFC_EFX_ASSERT(EFX_IS_P2ALIGNED(size_t, _offset, \
356 sizeof(efx_oword_t))); \
359 EFSYS_PROBE5(mem_writeo, unsigned int, (_offset), \
360 uint32_t, (_eop)->eo_u32[3], \
361 uint32_t, (_eop)->eo_u32[2], \
362 uint32_t, (_eop)->eo_u32[1], \
363 uint32_t, (_eop)->eo_u32[0]); \
365 _addr = (volatile efsys_uint128_t *)(_base + (_offset));\
366 _addr[0] = (_eop)->eo_u128[0]; \
368 _NOTE(CONSTANTCONDITION); \
/* Size of the DMA region (length of the backing memzone) */
#define EFSYS_MEM_SIZE(_esmp) \
	((_esmp)->esm_mz->len)

/* DMA (IOVA) address of the start of the region */
#define EFSYS_MEM_ADDR(_esmp) \
	((_esmp)->esm_addr)

/* True if the region has no backing memory attached */
#define EFSYS_MEM_IS_NULL(_esmp) \
	((_esmp)->esm_base == NULL)

/* Prefetch the cache line holding the given offset of the region */
#define EFSYS_MEM_PREFETCH(_esmp, _offset) \
	do { \
		volatile uint8_t *_base = (_esmp)->esm_base; \
		\
		rte_prefetch0(_base + (_offset)); \
	} while (0)
391 typedef struct efsys_bar_s {
392 rte_spinlock_t esb_lock;
394 struct rte_pci_device *esb_dev;
396 * Ideally it should have volatile qualifier to denote that
397 * the memory may be updated by someone else. However, it adds
398 * qualifier discard warnings when the pointer or its derivative
399 * is passed to memset() or rte_mov16().
400 * So, skip the qualifier here, but make sure that it is added
401 * below in access macros.
406 #define SFC_BAR_LOCK_INIT(_esbp, _ifname) \
408 rte_spinlock_init(&(_esbp)->esb_lock); \
409 _NOTE(CONSTANTCONDITION); \
/* rte_spinlock_t requires no teardown */
#define SFC_BAR_LOCK_DESTROY(_esbp) ((void)0)
/* Acquire/release the per-BAR spinlock */
#define SFC_BAR_LOCK(_esbp) rte_spinlock_lock(&(_esbp)->esb_lock)
#define SFC_BAR_UNLOCK(_esbp) rte_spinlock_unlock(&(_esbp)->esb_lock)
415 #define EFSYS_BAR_READD(_esbp, _offset, _edp, _lock) \
417 volatile uint8_t *_base = (_esbp)->esb_base; \
418 volatile uint32_t *_addr; \
420 _NOTE(CONSTANTCONDITION); \
421 SFC_EFX_ASSERT(EFX_IS_P2ALIGNED(size_t, _offset, \
422 sizeof(efx_dword_t))); \
423 _NOTE(CONSTANTCONDITION); \
425 SFC_BAR_LOCK(_esbp); \
427 _addr = (volatile uint32_t *)(_base + (_offset)); \
429 (_edp)->ed_u32[0] = rte_read32_relaxed(_addr); \
431 EFSYS_PROBE2(bar_readd, unsigned int, (_offset), \
432 uint32_t, (_edp)->ed_u32[0]); \
434 _NOTE(CONSTANTCONDITION); \
436 SFC_BAR_UNLOCK(_esbp); \
437 _NOTE(CONSTANTCONDITION); \
440 #define EFSYS_BAR_READQ(_esbp, _offset, _eqp) \
442 volatile uint8_t *_base = (_esbp)->esb_base; \
443 volatile uint64_t *_addr; \
445 _NOTE(CONSTANTCONDITION); \
446 SFC_EFX_ASSERT(EFX_IS_P2ALIGNED(size_t, _offset, \
447 sizeof(efx_qword_t))); \
449 SFC_BAR_LOCK(_esbp); \
451 _addr = (volatile uint64_t *)(_base + (_offset)); \
453 (_eqp)->eq_u64[0] = rte_read64_relaxed(_addr); \
455 EFSYS_PROBE3(bar_readq, unsigned int, (_offset), \
456 uint32_t, (_eqp)->eq_u32[1], \
457 uint32_t, (_eqp)->eq_u32[0]); \
459 SFC_BAR_UNLOCK(_esbp); \
460 _NOTE(CONSTANTCONDITION); \
463 #define EFSYS_BAR_READO(_esbp, _offset, _eop, _lock) \
465 volatile uint8_t *_base = (_esbp)->esb_base; \
466 volatile efsys_uint128_t *_addr; \
468 _NOTE(CONSTANTCONDITION); \
469 SFC_EFX_ASSERT(EFX_IS_P2ALIGNED(size_t, _offset, \
470 sizeof(efx_oword_t))); \
472 _NOTE(CONSTANTCONDITION); \
474 SFC_BAR_LOCK(_esbp); \
476 _addr = (volatile efsys_uint128_t *)(_base + (_offset));\
478 /* There is no rte_read128_relaxed() yet */ \
479 (_eop)->eo_u128[0] = _addr[0]; \
481 EFSYS_PROBE5(bar_reado, unsigned int, (_offset), \
482 uint32_t, (_eop)->eo_u32[3], \
483 uint32_t, (_eop)->eo_u32[2], \
484 uint32_t, (_eop)->eo_u32[1], \
485 uint32_t, (_eop)->eo_u32[0]); \
487 _NOTE(CONSTANTCONDITION); \
489 SFC_BAR_UNLOCK(_esbp); \
490 _NOTE(CONSTANTCONDITION); \
494 #define EFSYS_BAR_WRITED(_esbp, _offset, _edp, _lock) \
496 volatile uint8_t *_base = (_esbp)->esb_base; \
497 volatile uint32_t *_addr; \
499 _NOTE(CONSTANTCONDITION); \
500 SFC_EFX_ASSERT(EFX_IS_P2ALIGNED(size_t, _offset, \
501 sizeof(efx_dword_t))); \
503 _NOTE(CONSTANTCONDITION); \
505 SFC_BAR_LOCK(_esbp); \
507 EFSYS_PROBE2(bar_writed, unsigned int, (_offset), \
508 uint32_t, (_edp)->ed_u32[0]); \
510 _addr = (volatile uint32_t *)(_base + (_offset)); \
511 rte_write32_relaxed((_edp)->ed_u32[0], _addr); \
514 _NOTE(CONSTANTCONDITION); \
516 SFC_BAR_UNLOCK(_esbp); \
517 _NOTE(CONSTANTCONDITION); \
520 #define EFSYS_BAR_WRITEQ(_esbp, _offset, _eqp) \
522 volatile uint8_t *_base = (_esbp)->esb_base; \
523 volatile uint64_t *_addr; \
525 _NOTE(CONSTANTCONDITION); \
526 SFC_EFX_ASSERT(EFX_IS_P2ALIGNED(size_t, _offset, \
527 sizeof(efx_qword_t))); \
529 SFC_BAR_LOCK(_esbp); \
531 EFSYS_PROBE3(bar_writeq, unsigned int, (_offset), \
532 uint32_t, (_eqp)->eq_u32[1], \
533 uint32_t, (_eqp)->eq_u32[0]); \
535 _addr = (volatile uint64_t *)(_base + (_offset)); \
536 rte_write64_relaxed((_eqp)->eq_u64[0], _addr); \
539 SFC_BAR_UNLOCK(_esbp); \
540 _NOTE(CONSTANTCONDITION); \
544 * Guarantees 64bit aligned 64bit writes to write combined BAR mapping
545 * (required by PIO hardware).
547 * Neither VFIO, nor UIO, nor NIC UIO (on FreeBSD) support
548 * write-combined memory mapped to user-land, so just abort if used.
550 #define EFSYS_BAR_WC_WRITEQ(_esbp, _offset, _eqp) \
552 rte_panic("Write-combined BAR access not supported"); \
555 #define EFSYS_BAR_WRITEO(_esbp, _offset, _eop, _lock) \
557 volatile uint8_t *_base = (_esbp)->esb_base; \
558 volatile efsys_uint128_t *_addr; \
560 _NOTE(CONSTANTCONDITION); \
561 SFC_EFX_ASSERT(EFX_IS_P2ALIGNED(size_t, _offset, \
562 sizeof(efx_oword_t))); \
564 _NOTE(CONSTANTCONDITION); \
566 SFC_BAR_LOCK(_esbp); \
568 EFSYS_PROBE5(bar_writeo, unsigned int, (_offset), \
569 uint32_t, (_eop)->eo_u32[3], \
570 uint32_t, (_eop)->eo_u32[2], \
571 uint32_t, (_eop)->eo_u32[1], \
572 uint32_t, (_eop)->eo_u32[0]); \
574 _addr = (volatile efsys_uint128_t *)(_base + (_offset));\
575 /* There is no rte_write128_relaxed() yet */ \
576 _addr[0] = (_eop)->eo_u128[0]; \
579 _NOTE(CONSTANTCONDITION); \
581 SFC_BAR_UNLOCK(_esbp); \
582 _NOTE(CONSTANTCONDITION); \
585 /* Use the standard octo-word write for doorbell writes */
586 #define EFSYS_BAR_DOORBELL_WRITEO(_esbp, _offset, _eop) \
588 EFSYS_BAR_WRITEO((_esbp), (_offset), (_eop), B_FALSE); \
589 _NOTE(CONSTANTCONDITION); \
594 #define EFSYS_SPIN(_us) \
597 _NOTE(CONSTANTCONDITION); \
/* Sleeping is implemented as a busy-wait, same as EFSYS_SPIN */
#define EFSYS_SLEEP EFSYS_SPIN

/* Barriers */

#define EFSYS_MEM_READ_BARRIER() rte_rmb()
#define EFSYS_PIO_WRITE_BARRIER() rte_io_wmb()

/* DMA sync */

/*
 * DPDK does not provide any DMA syncing API, and no PMD drivers
 * have any traces of explicit DMA syncing.
 * DMA mapping is assumed to be coherent.
 */
#define EFSYS_DMA_SYNC_FOR_KERNEL(_esmp, _offset, _size) ((void)0)

/* Just avoid store and compiler (implicitly) reordering */
#define EFSYS_DMA_SYNC_FOR_DEVICE(_esmp, _offset, _size) rte_wmb()
/* Timestamps are in microseconds (see EFSYS_TIMESTAMP below) */
typedef uint64_t efsys_timestamp_t;
624 #define EFSYS_TIMESTAMP(_usp) \
626 *(_usp) = rte_get_timer_cycles() * 1000000 / \
627 rte_get_timer_hz(); \
628 _NOTE(CONSTANTCONDITION); \
633 #define EFSYS_KMEM_ALLOC(_esip, _size, _p) \
636 (_p) = rte_zmalloc("sfc", (_size), 0); \
637 _NOTE(CONSTANTCONDITION); \
640 #define EFSYS_KMEM_FREE(_esip, _size, _p) \
645 _NOTE(CONSTANTCONDITION); \
/* Locks are plain DPDK spinlocks */
typedef rte_spinlock_t efsys_lock_t;

/* _ifname and _label are unused: no lock naming/tracking is done */
#define SFC_EFSYS_LOCK_INIT(_eslp, _ifname, _label) \
	rte_spinlock_init((_eslp))
#define SFC_EFSYS_LOCK_DESTROY(_eslp) ((void)0)
#define SFC_EFSYS_LOCK(_eslp) \
	rte_spinlock_lock((_eslp))
#define SFC_EFSYS_UNLOCK(_eslp) \
	rte_spinlock_unlock((_eslp))
#define SFC_EFSYS_LOCK_ASSERT_OWNED(_eslp) \
	SFC_EFX_ASSERT(rte_spinlock_is_locked((_eslp)))

/*
 * Spinlocks carry no real state, but the libefx API requires one;
 * a magic value lets EFSYS_UNLOCK() assert it was preceded by
 * EFSYS_LOCK().
 */
typedef int efsys_lock_state_t;

#define EFSYS_LOCK_MAGIC 0x000010c4
/* Take the lock and record the magic in _state */
#define EFSYS_LOCK(_lockp, _state) \
	do { \
		SFC_EFSYS_LOCK(_lockp); \
		(_state) = EFSYS_LOCK_MAGIC; \
		_NOTE(CONSTANTCONDITION); \
	} while (B_FALSE)

/* Release the lock; assert that EFSYS_LOCK() was called before */
#define EFSYS_UNLOCK(_lockp, _state) \
	do { \
		SFC_EFX_ASSERT((_state) == EFSYS_LOCK_MAGIC); \
		SFC_EFSYS_UNLOCK(_lockp); \
		_NOTE(CONSTANTCONDITION); \
	} while (B_FALSE)
/* Statistics: each counter is a plain 64-bit value */
typedef uint64_t efsys_stat_t;

#define EFSYS_STAT_INCR(_knp, _delta) \
	do { \
		*(_knp) += (_delta); \
		_NOTE(CONSTANTCONDITION); \
	} while (B_FALSE)

#define EFSYS_STAT_DECR(_knp, _delta) \
	do { \
		*(_knp) -= (_delta); \
		_NOTE(CONSTANTCONDITION); \
	} while (B_FALSE)

#define EFSYS_STAT_SET(_knp, _val) \
	do { \
		*(_knp) = (_val); \
		_NOTE(CONSTANTCONDITION); \
	} while (B_FALSE)

/* DMA'd statistics are little-endian; convert to host byte order */
#define EFSYS_STAT_SET_QWORD(_knp, _valp) \
	do { \
		*(_knp) = rte_le_to_cpu_64((_valp)->eq_u64[0]); \
		_NOTE(CONSTANTCONDITION); \
	} while (B_FALSE)

#define EFSYS_STAT_SET_DWORD(_knp, _valp) \
	do { \
		*(_knp) = rte_le_to_cpu_32((_valp)->ed_u32[0]); \
		_NOTE(CONSTANTCONDITION); \
	} while (B_FALSE)

#define EFSYS_STAT_INCR_QWORD(_knp, _valp) \
	do { \
		*(_knp) += rte_le_to_cpu_64((_valp)->eq_u64[0]); \
		_NOTE(CONSTANTCONDITION); \
	} while (B_FALSE)

#define EFSYS_STAT_SUBR_QWORD(_knp, _valp) \
	do { \
		*(_knp) -= rte_le_to_cpu_64((_valp)->eq_u64[0]); \
		_NOTE(CONSTANTCONDITION); \
	} while (B_FALSE)
728 #if EFSYS_OPT_DECODE_INTR_FATAL
729 #define EFSYS_ERR(_esip, _code, _dword0, _dword1) \
732 SFC_EFX_LOG(ERR, "FATAL ERROR #%u (0x%08x%08x)", \
733 (_code), (_dword0), (_dword1)); \
734 _NOTE(CONSTANTCONDITION); \
/* RTE_VERIFY from DPDK treats expressions with % operator incorrectly,
 * so we re-implement it here
 */
#ifdef RTE_LIBRTE_SFC_EFX_DEBUG
#define EFSYS_ASSERT(_exp) \
	do { \
		if (unlikely(!(_exp))) \
			rte_panic("line %d\tassert \"%s\" failed\n", \
				  __LINE__, (#_exp)); \
	} while (0)
#else
/* Non-debug: evaluate the expression (it may have side effects) only */
#define EFSYS_ASSERT(_exp) (void)(_exp)
#endif

/* Typed three-way assertion: cast both operands before comparing */
#define EFSYS_ASSERT3(_x, _op, _y, _t) EFSYS_ASSERT((_t)(_x) _op (_t)(_y))

#define EFSYS_ASSERT3U(_x, _op, _y) EFSYS_ASSERT3(_x, _op, _y, uint64_t)
#define EFSYS_ASSERT3S(_x, _op, _y) EFSYS_ASSERT3(_x, _op, _y, int64_t)
#define EFSYS_ASSERT3P(_x, _op, _y) EFSYS_ASSERT3(_x, _op, _y, uintptr_t)
/* NOTE(review): 0 presumably selects libefx's generic rotate-left
 * implementation instead of a platform intrinsic — confirm against libefx.
 */
#define EFSYS_HAS_ROTL_DWORD 0

/* PCI config-space accessor handle: wraps the DPDK PCI device */
typedef struct efsys_pci_config_s {
	struct rte_pci_device *espc_dev;
} efsys_pci_config_t;
774 #endif /* _SFC_COMMON_EFSYS_H */