/*
 * Copyright (c) 2016-2017 Solarflare Communications Inc.
 *
 * This software was jointly developed between OKTET Labs (under contract
 * for Solarflare) and Solarflare Communications, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice,
 *    this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright notice,
 *    this list of conditions and the following disclaimer in the documentation
 *    and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
 * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#ifndef _SFC_COMMON_EFSYS_H
#define _SFC_COMMON_EFSYS_H
#include <stdbool.h>

#include <rte_spinlock.h>
#include <rte_byteorder.h>
#include <rte_debug.h>
#include <rte_memzone.h>
#include <rte_memory.h>
#include <rte_memcpy.h>
#include <rte_cycles.h>
#include <rte_prefetch.h>
#include <rte_common.h>
#include <rte_malloc.h>
#include <rte_io.h>

#include "sfc_debug.h"
#define EFSYS_HAS_UINT64 1
#define EFSYS_USE_UINT64 1
#define EFSYS_HAS_SSE2_M128 1

#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
#define EFSYS_IS_BIG_ENDIAN 1
#define EFSYS_IS_LITTLE_ENDIAN 0
#elif RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
#define EFSYS_IS_BIG_ENDIAN 0
#define EFSYS_IS_LITTLE_ENDIAN 1
#else
#error "Cannot determine system endianness"
#endif

#include "efx_types.h"
typedef bool boolean_t;

#ifndef B_FALSE
#define B_FALSE false
#endif
#ifndef B_TRUE
#define B_TRUE true
#endif
/*
 * RTE_MAX() and RTE_MIN() cannot be used, since a braced-group within an
 * expression is allowed only inside a function, whereas MAX() is used as
 * the number of elements in an array.
 */
#define MAX(v1, v2) ((v1) > (v2) ? (v1) : (v2))
#define MIN(v1, v2) ((v1) < (v2) ? (v1) : (v2))
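
/*
 * Illustrative example of the constraint described above ("example_counters"
 * is a hypothetical array, not part of this header): MAX() must be usable in
 * a constant expression at file scope, where the ({ ... }) statement
 * expression behind RTE_MAX() would not compile:
 *
 *   static uint64_t example_counters[MAX(4, 8)];
 */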
/*
 * There are macros for alignment in DPDK, but a proper correspondence
 * would have to be established to re-use them here.
 */
#define IS_P2ALIGNED(v, a) ((((uintptr_t)(v)) & ((uintptr_t)(a) - 1)) == 0)

#define P2ROUNDUP(x, align) (-(-(x) & -(align)))

#define P2ALIGN(_x, _a) ((_x) & -(_a))

#define ISP2(x) rte_is_power_of_2(x)
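
/*
 * Illustrative results of the power-of-two helpers above (the alignment
 * argument must be a power of two):
 *
 *   IS_P2ALIGNED(24, 8) -> 1     24 is a multiple of 8
 *   P2ROUNDUP(13, 8)    -> 16    13 rounded up to the next multiple of 8
 *   P2ALIGN(13, 8)      -> 8     13 rounded down to a multiple of 8
 *   ISP2(64)            -> true  64 is a power of two
 */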
#define ENOTACTIVE ENOTCONN
static inline void
prefetch_read_many(const volatile void *addr)
{
	rte_prefetch0(addr);
}

static inline void
prefetch_read_once(const volatile void *addr)
{
	rte_prefetch_non_temporal(addr);
}
/* Modifiers used for Windows builds */
#define __in_ecount(_n)
#define __in_ecount_opt(_n)
#define __in_bcount(_n)
#define __in_bcount_opt(_n)

#define __out_ecount(_n)
#define __out_ecount_opt(_n)
#define __out_bcount(_n)
#define __out_bcount_opt(_n)

#define __inout_ecount(_n)
#define __inout_ecount_opt(_n)
#define __inout_bcount(_n)
#define __inout_bcount_opt(_n)
#define __inout_bcount_full_opt(_n)

#define __deref_out_bcount_opt(n)

#define __checkReturn
#define __success(_x)

#define __drv_when(_p, _c)
/* Code inclusion options */

#define EFSYS_OPT_NAMES 1

/* Disable SFN5xxx/SFN6xxx since they require specific support in the PMD */
#define EFSYS_OPT_SIENA 0
/* Enable SFN7xxx support */
#define EFSYS_OPT_HUNTINGTON 1
/* Enable SFN8xxx support */
#define EFSYS_OPT_MEDFORD 1
#ifdef RTE_LIBRTE_SFC_EFX_DEBUG
#define EFSYS_OPT_CHECK_REG 1
#else
#define EFSYS_OPT_CHECK_REG 0
#endif

/* MCDI is required for SFN7xxx and SFN8xxx */
#define EFSYS_OPT_MCDI 1
#define EFSYS_OPT_MCDI_LOGGING 1
#define EFSYS_OPT_MCDI_PROXY_AUTH 1

#define EFSYS_OPT_MAC_STATS 1

#define EFSYS_OPT_LOOPBACK 0

#define EFSYS_OPT_MON_MCDI 0
#define EFSYS_OPT_MON_STATS 0

#define EFSYS_OPT_PHY_STATS 0
#define EFSYS_OPT_BIST 0
#define EFSYS_OPT_PHY_LED_CONTROL 0
#define EFSYS_OPT_PHY_FLAGS 0

#define EFSYS_OPT_VPD 0
#define EFSYS_OPT_NVRAM 0
#define EFSYS_OPT_BOOTCFG 0

#define EFSYS_OPT_DIAG 0
#define EFSYS_OPT_RX_SCALE 1
#define EFSYS_OPT_QSTATS 0
/* Filters support is required for SFN7xxx and SFN8xxx */
#define EFSYS_OPT_FILTER 1
#define EFSYS_OPT_RX_SCATTER 0

#define EFSYS_OPT_EV_PREFETCH 0

#define EFSYS_OPT_DECODE_INTR_FATAL 0

#define EFSYS_OPT_LICENSING 0

#define EFSYS_OPT_ALLOW_UNCONFIGURED_NIC 0

#define EFSYS_OPT_RX_PACKED_STREAM 0
typedef struct __efsys_identifier_s efsys_identifier_t;
/* DPDK does not provide a facility for static probes, so they are no-ops */
#define EFSYS_PROBE(_name) \
	do { } while (B_FALSE)

#define EFSYS_PROBE1(_name, _type1, _arg1) \
	do { } while (B_FALSE)

#define EFSYS_PROBE2(_name, _type1, _arg1, _type2, _arg2) \
	do { } while (B_FALSE)

#define EFSYS_PROBE3(_name, _type1, _arg1, _type2, _arg2, \
		     _type3, _arg3) \
	do { } while (B_FALSE)

#define EFSYS_PROBE4(_name, _type1, _arg1, _type2, _arg2, \
		     _type3, _arg3, _type4, _arg4) \
	do { } while (B_FALSE)

#define EFSYS_PROBE5(_name, _type1, _arg1, _type2, _arg2, \
		     _type3, _arg3, _type4, _arg4, _type5, _arg5) \
	do { } while (B_FALSE)

#define EFSYS_PROBE6(_name, _type1, _arg1, _type2, _arg2, \
		     _type3, _arg3, _type4, _arg4, _type5, _arg5, \
		     _type6, _arg6) \
	do { } while (B_FALSE)

#define EFSYS_PROBE7(_name, _type1, _arg1, _type2, _arg2, \
		     _type3, _arg3, _type4, _arg4, _type5, _arg5, \
		     _type6, _arg6, _type7, _arg7) \
	do { } while (B_FALSE)
typedef phys_addr_t efsys_dma_addr_t;
typedef struct efsys_mem_s {
	const struct rte_memzone *esm_mz;
	/*
	 * Ideally, this should have the volatile qualifier to denote that
	 * the memory may be updated by someone else. However, that adds
	 * qualifier-discard warnings when the pointer or its derivative
	 * is passed to memset() or rte_mov16().
	 * So, skip the qualifier here, but make sure that it is added
	 * below in the access macros.
	 */
	void *esm_base;
	efsys_dma_addr_t esm_addr;
} efsys_mem_t;
#define EFSYS_MEM_ZERO(_esmp, _size) \
	do { \
		(void)memset((void *)(_esmp)->esm_base, 0, (_size)); \
		_NOTE(CONSTANTCONDITION); \
	} while (B_FALSE)
#define EFSYS_MEM_READD(_esmp, _offset, _edp) \
	do { \
		volatile uint8_t *_base = (_esmp)->esm_base; \
		volatile uint32_t *_addr; \
		_NOTE(CONSTANTCONDITION); \
		SFC_ASSERT(IS_P2ALIGNED(_offset, sizeof(efx_dword_t))); \
		_addr = (volatile uint32_t *)(_base + (_offset)); \
		(_edp)->ed_u32[0] = _addr[0]; \
		EFSYS_PROBE2(mem_readl, unsigned int, (_offset), \
			     uint32_t, (_edp)->ed_u32[0]); \
		_NOTE(CONSTANTCONDITION); \
	} while (B_FALSE)
#define EFSYS_MEM_READQ(_esmp, _offset, _eqp) \
	do { \
		volatile uint8_t *_base = (_esmp)->esm_base; \
		volatile uint64_t *_addr; \
		_NOTE(CONSTANTCONDITION); \
		SFC_ASSERT(IS_P2ALIGNED(_offset, sizeof(efx_qword_t))); \
		_addr = (volatile uint64_t *)(_base + (_offset)); \
		(_eqp)->eq_u64[0] = _addr[0]; \
		EFSYS_PROBE3(mem_readq, unsigned int, (_offset), \
			     uint32_t, (_eqp)->eq_u32[1], \
			     uint32_t, (_eqp)->eq_u32[0]); \
		_NOTE(CONSTANTCONDITION); \
	} while (B_FALSE)
#define EFSYS_MEM_READO(_esmp, _offset, _eop) \
	do { \
		volatile uint8_t *_base = (_esmp)->esm_base; \
		volatile __m128i *_addr; \
		_NOTE(CONSTANTCONDITION); \
		SFC_ASSERT(IS_P2ALIGNED(_offset, sizeof(efx_oword_t))); \
		_addr = (volatile __m128i *)(_base + (_offset)); \
		(_eop)->eo_u128[0] = _addr[0]; \
		EFSYS_PROBE5(mem_reado, unsigned int, (_offset), \
			     uint32_t, (_eop)->eo_u32[3], \
			     uint32_t, (_eop)->eo_u32[2], \
			     uint32_t, (_eop)->eo_u32[1], \
			     uint32_t, (_eop)->eo_u32[0]); \
		_NOTE(CONSTANTCONDITION); \
	} while (B_FALSE)
#define EFSYS_MEM_WRITED(_esmp, _offset, _edp) \
	do { \
		volatile uint8_t *_base = (_esmp)->esm_base; \
		volatile uint32_t *_addr; \
		_NOTE(CONSTANTCONDITION); \
		SFC_ASSERT(IS_P2ALIGNED(_offset, sizeof(efx_dword_t))); \
		EFSYS_PROBE2(mem_writed, unsigned int, (_offset), \
			     uint32_t, (_edp)->ed_u32[0]); \
		_addr = (volatile uint32_t *)(_base + (_offset)); \
		_addr[0] = (_edp)->ed_u32[0]; \
		_NOTE(CONSTANTCONDITION); \
	} while (B_FALSE)
#define EFSYS_MEM_WRITEQ(_esmp, _offset, _eqp) \
	do { \
		volatile uint8_t *_base = (_esmp)->esm_base; \
		volatile uint64_t *_addr; \
		_NOTE(CONSTANTCONDITION); \
		SFC_ASSERT(IS_P2ALIGNED(_offset, sizeof(efx_qword_t))); \
		EFSYS_PROBE3(mem_writeq, unsigned int, (_offset), \
			     uint32_t, (_eqp)->eq_u32[1], \
			     uint32_t, (_eqp)->eq_u32[0]); \
		_addr = (volatile uint64_t *)(_base + (_offset)); \
		_addr[0] = (_eqp)->eq_u64[0]; \
		_NOTE(CONSTANTCONDITION); \
	} while (B_FALSE)
#define EFSYS_MEM_WRITEO(_esmp, _offset, _eop) \
	do { \
		volatile uint8_t *_base = (_esmp)->esm_base; \
		volatile __m128i *_addr; \
		_NOTE(CONSTANTCONDITION); \
		SFC_ASSERT(IS_P2ALIGNED(_offset, sizeof(efx_oword_t))); \
		EFSYS_PROBE5(mem_writeo, unsigned int, (_offset), \
			     uint32_t, (_eop)->eo_u32[3], \
			     uint32_t, (_eop)->eo_u32[2], \
			     uint32_t, (_eop)->eo_u32[1], \
			     uint32_t, (_eop)->eo_u32[0]); \
		_addr = (volatile __m128i *)(_base + (_offset)); \
		_addr[0] = (_eop)->eo_u128[0]; \
		_NOTE(CONSTANTCONDITION); \
	} while (B_FALSE)
#define EFSYS_MEM_ADDR(_esmp) \
	((_esmp)->esm_addr)

#define EFSYS_MEM_IS_NULL(_esmp) \
	((_esmp)->esm_base == NULL)
#define EFSYS_MEM_PREFETCH(_esmp, _offset) \
	do { \
		volatile uint8_t *_base = (_esmp)->esm_base; \
		rte_prefetch0(_base + (_offset)); \
	} while (B_FALSE)
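
/*
 * Illustrative usage sketch for the DMA memory accessors above ("mem" and
 * "qw" are hypothetical names; the efsys_mem_t is assumed to be backed by an
 * rte_memzone set up elsewhere):
 *
 *   efsys_mem_t mem;
 *   efx_qword_t qw;
 *
 *   EFSYS_MEM_READQ(&mem, 0, &qw);    offset must be 8-byte aligned
 *   qw.eq_u64[0] |= 1;
 *   EFSYS_MEM_WRITEQ(&mem, 0, &qw);
 *   EFSYS_MEM_PREFETCH(&mem, 64);
 */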
typedef struct efsys_bar_s {
	rte_spinlock_t esb_lock;
	struct rte_pci_device *esb_dev;
	/*
	 * Ideally, this should have the volatile qualifier to denote that
	 * the memory may be updated by someone else. However, that adds
	 * qualifier-discard warnings when the pointer or its derivative
	 * is passed to memset() or rte_mov16().
	 * So, skip the qualifier here, but make sure that it is added
	 * below in the access macros.
	 */
	void *esb_base;
} efsys_bar_t;

#define SFC_BAR_LOCK_INIT(_esbp, _ifname) \
	do { \
		rte_spinlock_init(&(_esbp)->esb_lock); \
		_NOTE(CONSTANTCONDITION); \
	} while (B_FALSE)
#define SFC_BAR_LOCK_DESTROY(_esbp) ((void)0)
#define SFC_BAR_LOCK(_esbp) rte_spinlock_lock(&(_esbp)->esb_lock)
#define SFC_BAR_UNLOCK(_esbp) rte_spinlock_unlock(&(_esbp)->esb_lock)
#define EFSYS_BAR_READD(_esbp, _offset, _edp, _lock) \
	do { \
		volatile uint8_t *_base = (_esbp)->esb_base; \
		volatile uint32_t *_addr; \
		_NOTE(CONSTANTCONDITION); \
		SFC_ASSERT(IS_P2ALIGNED(_offset, sizeof(efx_dword_t))); \
		_NOTE(CONSTANTCONDITION); \
		if (_lock) \
			SFC_BAR_LOCK(_esbp); \
		_addr = (volatile uint32_t *)(_base + (_offset)); \
		(_edp)->ed_u32[0] = rte_read32_relaxed(_addr); \
		EFSYS_PROBE2(bar_readd, unsigned int, (_offset), \
			     uint32_t, (_edp)->ed_u32[0]); \
		_NOTE(CONSTANTCONDITION); \
		if (_lock) \
			SFC_BAR_UNLOCK(_esbp); \
		_NOTE(CONSTANTCONDITION); \
	} while (B_FALSE)
#define EFSYS_BAR_READQ(_esbp, _offset, _eqp) \
	do { \
		volatile uint8_t *_base = (_esbp)->esb_base; \
		volatile uint64_t *_addr; \
		_NOTE(CONSTANTCONDITION); \
		SFC_ASSERT(IS_P2ALIGNED(_offset, sizeof(efx_qword_t))); \
		SFC_BAR_LOCK(_esbp); \
		_addr = (volatile uint64_t *)(_base + (_offset)); \
		(_eqp)->eq_u64[0] = rte_read64_relaxed(_addr); \
		EFSYS_PROBE3(bar_readq, unsigned int, (_offset), \
			     uint32_t, (_eqp)->eq_u32[1], \
			     uint32_t, (_eqp)->eq_u32[0]); \
		SFC_BAR_UNLOCK(_esbp); \
		_NOTE(CONSTANTCONDITION); \
	} while (B_FALSE)
#define EFSYS_BAR_READO(_esbp, _offset, _eop, _lock) \
	do { \
		volatile uint8_t *_base = (_esbp)->esb_base; \
		volatile __m128i *_addr; \
		_NOTE(CONSTANTCONDITION); \
		SFC_ASSERT(IS_P2ALIGNED(_offset, sizeof(efx_oword_t))); \
		_NOTE(CONSTANTCONDITION); \
		if (_lock) \
			SFC_BAR_LOCK(_esbp); \
		_addr = (volatile __m128i *)(_base + (_offset)); \
		/* There is no rte_read128_relaxed() yet */ \
		(_eop)->eo_u128[0] = _addr[0]; \
		EFSYS_PROBE5(bar_reado, unsigned int, (_offset), \
			     uint32_t, (_eop)->eo_u32[3], \
			     uint32_t, (_eop)->eo_u32[2], \
			     uint32_t, (_eop)->eo_u32[1], \
			     uint32_t, (_eop)->eo_u32[0]); \
		_NOTE(CONSTANTCONDITION); \
		if (_lock) \
			SFC_BAR_UNLOCK(_esbp); \
		_NOTE(CONSTANTCONDITION); \
	} while (B_FALSE)
#define EFSYS_BAR_WRITED(_esbp, _offset, _edp, _lock) \
	do { \
		volatile uint8_t *_base = (_esbp)->esb_base; \
		volatile uint32_t *_addr; \
		_NOTE(CONSTANTCONDITION); \
		SFC_ASSERT(IS_P2ALIGNED(_offset, sizeof(efx_dword_t))); \
		_NOTE(CONSTANTCONDITION); \
		if (_lock) \
			SFC_BAR_LOCK(_esbp); \
		EFSYS_PROBE2(bar_writed, unsigned int, (_offset), \
			     uint32_t, (_edp)->ed_u32[0]); \
		_addr = (volatile uint32_t *)(_base + (_offset)); \
		rte_write32_relaxed((_edp)->ed_u32[0], _addr); \
		_NOTE(CONSTANTCONDITION); \
		if (_lock) \
			SFC_BAR_UNLOCK(_esbp); \
		_NOTE(CONSTANTCONDITION); \
	} while (B_FALSE)
#define EFSYS_BAR_WRITEQ(_esbp, _offset, _eqp) \
	do { \
		volatile uint8_t *_base = (_esbp)->esb_base; \
		volatile uint64_t *_addr; \
		_NOTE(CONSTANTCONDITION); \
		SFC_ASSERT(IS_P2ALIGNED(_offset, sizeof(efx_qword_t))); \
		SFC_BAR_LOCK(_esbp); \
		EFSYS_PROBE3(bar_writeq, unsigned int, (_offset), \
			     uint32_t, (_eqp)->eq_u32[1], \
			     uint32_t, (_eqp)->eq_u32[0]); \
		_addr = (volatile uint64_t *)(_base + (_offset)); \
		rte_write64_relaxed((_eqp)->eq_u64[0], _addr); \
		SFC_BAR_UNLOCK(_esbp); \
		_NOTE(CONSTANTCONDITION); \
	} while (B_FALSE)
/*
 * Guarantee 64-bit aligned 64-bit writes to a write-combined BAR mapping
 * (required by PIO hardware).
 *
 * Neither VFIO, nor UIO, nor NIC UIO (on FreeBSD) supports
 * write-combined memory mapped to user-land, so just abort if used.
 */
#define EFSYS_BAR_WC_WRITEQ(_esbp, _offset, _eqp) \
	do { \
		rte_panic("Write-combined BAR access not supported"); \
	} while (B_FALSE)
#define EFSYS_BAR_WRITEO(_esbp, _offset, _eop, _lock) \
	do { \
		volatile uint8_t *_base = (_esbp)->esb_base; \
		volatile __m128i *_addr; \
		_NOTE(CONSTANTCONDITION); \
		SFC_ASSERT(IS_P2ALIGNED(_offset, sizeof(efx_oword_t))); \
		_NOTE(CONSTANTCONDITION); \
		if (_lock) \
			SFC_BAR_LOCK(_esbp); \
		EFSYS_PROBE5(bar_writeo, unsigned int, (_offset), \
			     uint32_t, (_eop)->eo_u32[3], \
			     uint32_t, (_eop)->eo_u32[2], \
			     uint32_t, (_eop)->eo_u32[1], \
			     uint32_t, (_eop)->eo_u32[0]); \
		_addr = (volatile __m128i *)(_base + (_offset)); \
		/* There is no rte_write128_relaxed() yet */ \
		_addr[0] = (_eop)->eo_u128[0]; \
		_NOTE(CONSTANTCONDITION); \
		if (_lock) \
			SFC_BAR_UNLOCK(_esbp); \
		_NOTE(CONSTANTCONDITION); \
	} while (B_FALSE)
/* Use the standard octo-word write for doorbell writes */
#define EFSYS_BAR_DOORBELL_WRITEO(_esbp, _offset, _eop) \
	do { \
		EFSYS_BAR_WRITEO((_esbp), (_offset), (_eop), B_FALSE); \
		_NOTE(CONSTANTCONDITION); \
	} while (B_FALSE)
#define EFSYS_SPIN(_us) \
	do { \
		rte_delay_us(_us); \
		_NOTE(CONSTANTCONDITION); \
	} while (B_FALSE)

#define EFSYS_SLEEP EFSYS_SPIN
#define EFSYS_MEM_READ_BARRIER() rte_rmb()
#define EFSYS_PIO_WRITE_BARRIER() rte_io_wmb()
/*
 * DPDK does not provide any DMA syncing API, and no PMD drivers
 * have any traces of explicit DMA syncing.
 * DMA mappings are assumed to be coherent.
 */
#define EFSYS_DMA_SYNC_FOR_KERNEL(_esmp, _offset, _size) ((void)0)

/* Just prevent store reordering by the CPU and (implicitly) by the compiler */
#define EFSYS_DMA_SYNC_FOR_DEVICE(_esmp, _offset, _size) rte_wmb()
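
/*
 * Illustrative ordering sketch ("ring_mem", "bar", "desc" and "db" are
 * hypothetical names): descriptor writes to DMA memory should be made
 * visible before the doorbell write that tells the NIC to fetch them.
 *
 *   EFSYS_MEM_WRITEQ(&ring_mem, offset, &desc);
 *   EFSYS_DMA_SYNC_FOR_DEVICE(&ring_mem, offset, sizeof(desc));
 *   EFSYS_BAR_DOORBELL_WRITEO(&bar, db_offset, &db);
 */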
typedef uint64_t efsys_timestamp_t;

#define EFSYS_TIMESTAMP(_usp) \
	do { \
		*(_usp) = rte_get_timer_cycles() * 1000000 / \
			rte_get_timer_hz(); \
		_NOTE(CONSTANTCONDITION); \
	} while (B_FALSE)
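
/*
 * Worked example of the conversion above (numbers are illustrative): with
 * rte_get_timer_hz() == 2000000000 (a 2 GHz timer), a cycle count of
 * 5000000000 gives 5e9 * 1e6 / 2e9 = 2500000 us, i.e. 2.5 seconds.
 * The multiplication is performed first, so a very large cycle count may
 * overflow the 64-bit intermediate value.
 */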
#define EFSYS_KMEM_ALLOC(_esip, _size, _p) \
	do { \
		(void)(_esip); \
		(_p) = rte_zmalloc("sfc", (_size), 0); \
		_NOTE(CONSTANTCONDITION); \
	} while (B_FALSE)

#define EFSYS_KMEM_FREE(_esip, _size, _p) \
	do { \
		(void)(_esip); \
		(void)(_size); \
		rte_free((void *)(_p)); \
		_NOTE(CONSTANTCONDITION); \
	} while (B_FALSE)
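
/*
 * Illustrative allocation pattern ("ptr" is a hypothetical pointer): the
 * _esip and _size arguments are only there to match the common efsys API
 * and are otherwise unused here.
 *
 *   void *ptr;
 *
 *   EFSYS_KMEM_ALLOC(NULL, 128, ptr);
 *   ...
 *   EFSYS_KMEM_FREE(NULL, 128, ptr);
 */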
typedef rte_spinlock_t efsys_lock_t;

#define SFC_EFSYS_LOCK_INIT(_eslp, _ifname, _label) \
	rte_spinlock_init((_eslp))
#define SFC_EFSYS_LOCK_DESTROY(_eslp) ((void)0)
#define SFC_EFSYS_LOCK(_eslp) \
	rte_spinlock_lock((_eslp))
#define SFC_EFSYS_UNLOCK(_eslp) \
	rte_spinlock_unlock((_eslp))
#define SFC_EFSYS_LOCK_ASSERT_OWNED(_eslp) \
	SFC_ASSERT(rte_spinlock_is_locked((_eslp)))
typedef int efsys_lock_state_t;

#define EFSYS_LOCK_MAGIC 0x000010c4

#define EFSYS_LOCK(_lockp, _state) \
	do { \
		SFC_EFSYS_LOCK(_lockp); \
		(_state) = EFSYS_LOCK_MAGIC; \
		_NOTE(CONSTANTCONDITION); \
	} while (B_FALSE)

#define EFSYS_UNLOCK(_lockp, _state) \
	do { \
		SFC_ASSERT((_state) == EFSYS_LOCK_MAGIC); \
		SFC_EFSYS_UNLOCK(_lockp); \
		_NOTE(CONSTANTCONDITION); \
	} while (B_FALSE)
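
/*
 * Illustrative usage of the lock pair above ("lock" and "state" are
 * hypothetical names): the state value only carries the magic so that
 * EFSYS_UNLOCK() can assert that it is paired with a preceding EFSYS_LOCK().
 *
 *   efsys_lock_t lock;
 *   efsys_lock_state_t state;
 *
 *   SFC_EFSYS_LOCK_INIT(&lock, "sfc", 0);
 *   EFSYS_LOCK(&lock, state);
 *   ... critical section ...
 *   EFSYS_UNLOCK(&lock, state);
 */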
typedef uint64_t efsys_stat_t;

#define EFSYS_STAT_INCR(_knp, _delta) \
	do { \
		*(_knp) += (_delta); \
		_NOTE(CONSTANTCONDITION); \
	} while (B_FALSE)

#define EFSYS_STAT_DECR(_knp, _delta) \
	do { \
		*(_knp) -= (_delta); \
		_NOTE(CONSTANTCONDITION); \
	} while (B_FALSE)

#define EFSYS_STAT_SET(_knp, _val) \
	do { \
		*(_knp) = (_val); \
		_NOTE(CONSTANTCONDITION); \
	} while (B_FALSE)

#define EFSYS_STAT_SET_QWORD(_knp, _valp) \
	do { \
		*(_knp) = rte_le_to_cpu_64((_valp)->eq_u64[0]); \
		_NOTE(CONSTANTCONDITION); \
	} while (B_FALSE)

#define EFSYS_STAT_SET_DWORD(_knp, _valp) \
	do { \
		*(_knp) = rte_le_to_cpu_32((_valp)->ed_u32[0]); \
		_NOTE(CONSTANTCONDITION); \
	} while (B_FALSE)

#define EFSYS_STAT_INCR_QWORD(_knp, _valp) \
	do { \
		*(_knp) += rte_le_to_cpu_64((_valp)->eq_u64[0]); \
		_NOTE(CONSTANTCONDITION); \
	} while (B_FALSE)

#define EFSYS_STAT_SUBR_QWORD(_knp, _valp) \
	do { \
		*(_knp) -= rte_le_to_cpu_64((_valp)->eq_u64[0]); \
		_NOTE(CONSTANTCONDITION); \
	} while (B_FALSE)
#if EFSYS_OPT_DECODE_INTR_FATAL
#define EFSYS_ERR(_esip, _code, _dword0, _dword1) \
	do { \
		RTE_LOG(ERR, PMD, "FATAL ERROR #%u (0x%08x%08x)\n", \
			(_code), (_dword0), (_dword1)); \
		_NOTE(CONSTANTCONDITION); \
	} while (B_FALSE)
#endif

/*
 * RTE_VERIFY() from DPDK treats expressions with the % operator incorrectly,
 * so it is re-implemented here.
 */
#ifdef RTE_LIBRTE_SFC_EFX_DEBUG
#define EFSYS_ASSERT(_exp) \
	do { \
		if (unlikely(!(_exp))) \
			rte_panic("line %d\tassert \"%s\" failed\n", \
				  __LINE__, (#_exp)); \
	} while (B_FALSE)
#else
#define EFSYS_ASSERT(_exp) (void)(_exp)
#endif
#define EFSYS_ASSERT3(_x, _op, _y, _t) EFSYS_ASSERT((_t)(_x) _op (_t)(_y))

#define EFSYS_ASSERT3U(_x, _op, _y) EFSYS_ASSERT3(_x, _op, _y, uint64_t)
#define EFSYS_ASSERT3S(_x, _op, _y) EFSYS_ASSERT3(_x, _op, _y, int64_t)
#define EFSYS_ASSERT3P(_x, _op, _y) EFSYS_ASSERT3(_x, _op, _y, uintptr_t)
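
/*
 * Illustrative use of the typed assertion macros above ("nb_desc" and
 * "max_desc" are hypothetical variables): both operands are cast to the
 * named type before comparison, e.g.
 *
 *   EFSYS_ASSERT3U(nb_desc, <=, max_desc);
 *
 * expands to EFSYS_ASSERT((uint64_t)(nb_desc) <= (uint64_t)(max_desc)).
 */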
#define EFSYS_HAS_ROTL_DWORD 0

#endif /* _SFC_COMMON_EFSYS_H */