/*-
 * Copyright (c) 2016 Solarflare Communications Inc.
 * All rights reserved.
 *
 * This software was jointly developed between OKTET Labs (under contract
 * for Solarflare) and Solarflare Communications, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice,
 *    this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright notice,
 *    this list of conditions and the following disclaimer in the documentation
 *    and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
 * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#ifndef _SFC_COMMON_EFSYS_H
#define _SFC_COMMON_EFSYS_H

#include <stdbool.h>

#include <rte_spinlock.h>
#include <rte_byteorder.h>
#include <rte_debug.h>
#include <rte_memzone.h>
#include <rte_memory.h>
#include <rte_memcpy.h>
#include <rte_cycles.h>
#include <rte_prefetch.h>
#include <rte_common.h>
#include <rte_malloc.h>
#include <rte_io.h>

#include "sfc_debug.h"
#define EFSYS_HAS_UINT64 1
#define EFSYS_USE_UINT64 1
#define EFSYS_HAS_SSE2_M128 1

#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
#define EFSYS_IS_BIG_ENDIAN 1
#define EFSYS_IS_LITTLE_ENDIAN 0
#elif RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
#define EFSYS_IS_BIG_ENDIAN 0
#define EFSYS_IS_LITTLE_ENDIAN 1
#else
#error "Cannot determine system endianness"
#endif

#include "efx_types.h"
typedef bool boolean_t;

#ifndef B_FALSE
#define B_FALSE false
#endif
#ifndef B_TRUE
#define B_TRUE true
#endif
/*
 * RTE_MAX() and RTE_MIN() cannot be used since a braced group within
 * an expression is allowed only inside a function, but MAX() is used
 * as the number of elements in an array.
 */
#ifndef MAX
#define MAX(v1, v2) ((v1) > (v2) ? (v1) : (v2))
#endif
#ifndef MIN
#define MIN(v1, v2) ((v1) < (v2) ? (v1) : (v2))
#endif
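
/*
 * Illustration (added for clarity, not part of the original header):
 * unlike RTE_MAX(), MAX() remains a constant expression, so it can be
 * used where one is required, e.g. as an array size; the names below
 * are hypothetical:
 *
 *   uint8_t buf[MAX(SOME_LEN_A, SOME_LEN_B)];
 */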
/*
 * There are macros for alignment in DPDK, but a proper correspondence
 * would have to be established here in order to re-use them.
 */

#define IS_P2ALIGNED(v, a) ((((uintptr_t)(v)) & ((uintptr_t)(a) - 1)) == 0)

#define P2ROUNDUP(x, align) (-(-(x) & -(align)))

#define P2ALIGN(_x, _a) ((_x) & -(_a))

#define ISP2(x) rte_is_power_of_2(x)
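
/*
 * Illustration (added for clarity, not part of the original header): for
 * power-of-two alignments the two's-complement expressions above give
 *
 *   IS_P2ALIGNED(16, 8) -> true,  IS_P2ALIGNED(13, 8) -> false
 *   P2ROUNDUP(13, 8)    -> 16  (round up to the next multiple of 8)
 *   P2ALIGN(13, 8)      ->  8  (round down to the previous multiple of 8)
 */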
#define ENOTACTIVE ENOTCONN
static inline void
prefetch_read_many(const volatile void *addr)
{
        rte_prefetch0(addr);
}

static inline void
prefetch_read_once(const volatile void *addr)
{
        rte_prefetch_non_temporal(addr);
}
/* Modifiers used for Windows builds */
#define __in
#define __in_opt
#define __in_ecount(_n)
#define __in_ecount_opt(_n)
#define __in_bcount(_n)
#define __in_bcount_opt(_n)

#define __out
#define __out_opt
#define __out_ecount(_n)
#define __out_ecount_opt(_n)
#define __out_bcount(_n)
#define __out_bcount_opt(_n)

#define __deref_out

#define __inout
#define __inout_opt
#define __inout_ecount(_n)
#define __inout_ecount_opt(_n)
#define __inout_bcount(_n)
#define __inout_bcount_opt(_n)
#define __inout_bcount_full_opt(_n)

#define __deref_out_bcount_opt(n)

#define __checkReturn
#define __success(_x)

#define __drv_when(_p, _c)
/* Code inclusion options */

#define EFSYS_OPT_NAMES 1

/* Disable SFN5xxx/SFN6xxx since they require specific support in the PMD */
#define EFSYS_OPT_SIENA 0
/* Enable SFN7xxx support */
#define EFSYS_OPT_HUNTINGTON 1
/* Enable SFN8xxx support */
#define EFSYS_OPT_MEDFORD 1
#ifdef RTE_LIBRTE_SFC_EFX_DEBUG
#define EFSYS_OPT_CHECK_REG 1
#else
#define EFSYS_OPT_CHECK_REG 0
#endif

/* MCDI is required for SFN7xxx and SFN8xxx */
#define EFSYS_OPT_MCDI 1
#define EFSYS_OPT_MCDI_LOGGING 1
#define EFSYS_OPT_MCDI_PROXY_AUTH 0

#define EFSYS_OPT_MAC_STATS 1

#define EFSYS_OPT_LOOPBACK 0

#define EFSYS_OPT_MON_MCDI 0
#define EFSYS_OPT_MON_STATS 0

#define EFSYS_OPT_PHY_STATS 0
#define EFSYS_OPT_BIST 0
#define EFSYS_OPT_PHY_LED_CONTROL 0
#define EFSYS_OPT_PHY_FLAGS 0

#define EFSYS_OPT_VPD 0
#define EFSYS_OPT_NVRAM 0
#define EFSYS_OPT_BOOTCFG 0

#define EFSYS_OPT_DIAG 0
#define EFSYS_OPT_RX_SCALE 1
#define EFSYS_OPT_QSTATS 0
/* Filters support is required for SFN7xxx and SFN8xxx */
#define EFSYS_OPT_FILTER 1
#define EFSYS_OPT_RX_SCATTER 0

#define EFSYS_OPT_EV_PREFETCH 0

#define EFSYS_OPT_DECODE_INTR_FATAL 0

#define EFSYS_OPT_LICENSING 0

#define EFSYS_OPT_ALLOW_UNCONFIGURED_NIC 0

#define EFSYS_OPT_RX_PACKED_STREAM 0
typedef struct __efsys_identifier_s efsys_identifier_t;

#define EFSYS_PROBE(_name) \
        do { } while (0)

#define EFSYS_PROBE1(_name, _type1, _arg1) \
        do { } while (0)

#define EFSYS_PROBE2(_name, _type1, _arg1, _type2, _arg2) \
        do { } while (0)

#define EFSYS_PROBE3(_name, _type1, _arg1, _type2, _arg2, \
                     _type3, _arg3) \
        do { } while (0)

#define EFSYS_PROBE4(_name, _type1, _arg1, _type2, _arg2, \
                     _type3, _arg3, _type4, _arg4) \
        do { } while (0)

#define EFSYS_PROBE5(_name, _type1, _arg1, _type2, _arg2, \
                     _type3, _arg3, _type4, _arg4, _type5, _arg5) \
        do { } while (0)

#define EFSYS_PROBE6(_name, _type1, _arg1, _type2, _arg2, \
                     _type3, _arg3, _type4, _arg4, _type5, _arg5, \
                     _type6, _arg6) \
        do { } while (0)

#define EFSYS_PROBE7(_name, _type1, _arg1, _type2, _arg2, \
                     _type3, _arg3, _type4, _arg4, _type5, _arg5, \
                     _type6, _arg6, _type7, _arg7) \
        do { } while (0)
typedef phys_addr_t efsys_dma_addr_t;

typedef struct efsys_mem_s {
        const struct rte_memzone *esm_mz;
        /*
         * Ideally this should have a volatile qualifier to denote that
         * the memory may be updated by someone else. However, it would add
         * qualifier-discard warnings when the pointer or its derivative
         * is passed to memset() or rte_mov16().
         * So, skip the qualifier here, but make sure that it is added
         * in the access macros below.
         */
        void *esm_base;
        efsys_dma_addr_t esm_addr;
} efsys_mem_t;
#define EFSYS_MEM_ZERO(_esmp, _size) \
        do { \
                (void)memset((void *)(_esmp)->esm_base, 0, (_size)); \
                _NOTE(CONSTANTCONDITION); \
        } while (B_FALSE)
#define EFSYS_MEM_READD(_esmp, _offset, _edp) \
        do { \
                volatile uint8_t *_base = (_esmp)->esm_base; \
                volatile uint32_t *_addr; \
                _NOTE(CONSTANTCONDITION); \
                SFC_ASSERT(IS_P2ALIGNED(_offset, sizeof(efx_dword_t))); \
                _addr = (volatile uint32_t *)(_base + (_offset)); \
                (_edp)->ed_u32[0] = _addr[0]; \
                EFSYS_PROBE2(mem_readl, unsigned int, (_offset), \
                             uint32_t, (_edp)->ed_u32[0]); \
                _NOTE(CONSTANTCONDITION); \
        } while (B_FALSE)
#define EFSYS_MEM_READQ(_esmp, _offset, _eqp) \
        do { \
                volatile uint8_t *_base = (_esmp)->esm_base; \
                volatile uint64_t *_addr; \
                _NOTE(CONSTANTCONDITION); \
                SFC_ASSERT(IS_P2ALIGNED(_offset, sizeof(efx_qword_t))); \
                _addr = (volatile uint64_t *)(_base + (_offset)); \
                (_eqp)->eq_u64[0] = _addr[0]; \
                EFSYS_PROBE3(mem_readq, unsigned int, (_offset), \
                             uint32_t, (_eqp)->eq_u32[1], \
                             uint32_t, (_eqp)->eq_u32[0]); \
                _NOTE(CONSTANTCONDITION); \
        } while (B_FALSE)
#define EFSYS_MEM_READO(_esmp, _offset, _eop) \
        do { \
                volatile uint8_t *_base = (_esmp)->esm_base; \
                volatile __m128i *_addr; \
                _NOTE(CONSTANTCONDITION); \
                SFC_ASSERT(IS_P2ALIGNED(_offset, sizeof(efx_oword_t))); \
                _addr = (volatile __m128i *)(_base + (_offset)); \
                (_eop)->eo_u128[0] = _addr[0]; \
                EFSYS_PROBE5(mem_reado, unsigned int, (_offset), \
                             uint32_t, (_eop)->eo_u32[3], \
                             uint32_t, (_eop)->eo_u32[2], \
                             uint32_t, (_eop)->eo_u32[1], \
                             uint32_t, (_eop)->eo_u32[0]); \
                _NOTE(CONSTANTCONDITION); \
        } while (B_FALSE)
#define EFSYS_MEM_WRITED(_esmp, _offset, _edp) \
        do { \
                volatile uint8_t *_base = (_esmp)->esm_base; \
                volatile uint32_t *_addr; \
                _NOTE(CONSTANTCONDITION); \
                SFC_ASSERT(IS_P2ALIGNED(_offset, sizeof(efx_dword_t))); \
                EFSYS_PROBE2(mem_writed, unsigned int, (_offset), \
                             uint32_t, (_edp)->ed_u32[0]); \
                _addr = (volatile uint32_t *)(_base + (_offset)); \
                _addr[0] = (_edp)->ed_u32[0]; \
                _NOTE(CONSTANTCONDITION); \
        } while (B_FALSE)
#define EFSYS_MEM_WRITEQ(_esmp, _offset, _eqp) \
        do { \
                volatile uint8_t *_base = (_esmp)->esm_base; \
                volatile uint64_t *_addr; \
                _NOTE(CONSTANTCONDITION); \
                SFC_ASSERT(IS_P2ALIGNED(_offset, sizeof(efx_qword_t))); \
                EFSYS_PROBE3(mem_writeq, unsigned int, (_offset), \
                             uint32_t, (_eqp)->eq_u32[1], \
                             uint32_t, (_eqp)->eq_u32[0]); \
                _addr = (volatile uint64_t *)(_base + (_offset)); \
                _addr[0] = (_eqp)->eq_u64[0]; \
                _NOTE(CONSTANTCONDITION); \
        } while (B_FALSE)
#define EFSYS_MEM_WRITEO(_esmp, _offset, _eop) \
        do { \
                volatile uint8_t *_base = (_esmp)->esm_base; \
                volatile __m128i *_addr; \
                _NOTE(CONSTANTCONDITION); \
                SFC_ASSERT(IS_P2ALIGNED(_offset, sizeof(efx_oword_t))); \
                EFSYS_PROBE5(mem_writeo, unsigned int, (_offset), \
                             uint32_t, (_eop)->eo_u32[3], \
                             uint32_t, (_eop)->eo_u32[2], \
                             uint32_t, (_eop)->eo_u32[1], \
                             uint32_t, (_eop)->eo_u32[0]); \
                _addr = (volatile __m128i *)(_base + (_offset)); \
                _addr[0] = (_eop)->eo_u128[0]; \
                _NOTE(CONSTANTCONDITION); \
        } while (B_FALSE)
#define EFSYS_MEM_ADDR(_esmp) \
        ((_esmp)->esm_addr)

#define EFSYS_MEM_IS_NULL(_esmp) \
        ((_esmp)->esm_base == NULL)
#define EFSYS_MEM_PREFETCH(_esmp, _offset) \
        do { \
                volatile uint8_t *_base = (_esmp)->esm_base; \
                rte_prefetch0(_base + (_offset)); \
        } while (B_FALSE)
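
/*
 * Illustrative sketch (added for clarity, not part of the original header):
 * a driver might read the first qword of a DMA-mapped area as follows;
 * "rxq" and its "mem" member are hypothetical names:
 *
 *   efx_qword_t qword;
 *
 *   EFSYS_MEM_PREFETCH(&rxq->mem, 0);
 *   EFSYS_MEM_READQ(&rxq->mem, 0, &qword);
 *   if (qword.eq_u64[0] != UINT64_MAX)
 *           ...the qword has been written by the NIC...
 *
 * Offsets must be naturally aligned to the access size, as enforced by the
 * SFC_ASSERT(IS_P2ALIGNED(...)) checks above.
 */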
typedef struct efsys_bar_s {
        rte_spinlock_t esb_lock;
        int esb_rid;
        struct rte_pci_device *esb_dev;
        /*
         * Ideally this should have a volatile qualifier to denote that
         * the memory may be updated by someone else. However, it would add
         * qualifier-discard warnings when the pointer or its derivative
         * is passed to memset() or rte_mov16().
         * So, skip the qualifier here, but make sure that it is added
         * in the access macros below.
         */
        void *esb_base;
} efsys_bar_t;
#define SFC_BAR_LOCK_INIT(_esbp, _ifname) \
        do { \
                rte_spinlock_init(&(_esbp)->esb_lock); \
                _NOTE(CONSTANTCONDITION); \
        } while (B_FALSE)

#define SFC_BAR_LOCK_DESTROY(_esbp) ((void)0)
#define SFC_BAR_LOCK(_esbp) rte_spinlock_lock(&(_esbp)->esb_lock)
#define SFC_BAR_UNLOCK(_esbp) rte_spinlock_unlock(&(_esbp)->esb_lock)
#define EFSYS_BAR_READD(_esbp, _offset, _edp, _lock) \
        do { \
                volatile uint8_t *_base = (_esbp)->esb_base; \
                volatile uint32_t *_addr; \
                _NOTE(CONSTANTCONDITION); \
                SFC_ASSERT(IS_P2ALIGNED(_offset, sizeof(efx_dword_t))); \
                _NOTE(CONSTANTCONDITION); \
                if (_lock) \
                        SFC_BAR_LOCK(_esbp); \
                _addr = (volatile uint32_t *)(_base + (_offset)); \
                (_edp)->ed_u32[0] = rte_read32_relaxed(_addr); \
                EFSYS_PROBE2(bar_readd, unsigned int, (_offset), \
                             uint32_t, (_edp)->ed_u32[0]); \
                _NOTE(CONSTANTCONDITION); \
                if (_lock) \
                        SFC_BAR_UNLOCK(_esbp); \
                _NOTE(CONSTANTCONDITION); \
        } while (B_FALSE)
#define EFSYS_BAR_READQ(_esbp, _offset, _eqp) \
        do { \
                volatile uint8_t *_base = (_esbp)->esb_base; \
                volatile uint64_t *_addr; \
                _NOTE(CONSTANTCONDITION); \
                SFC_ASSERT(IS_P2ALIGNED(_offset, sizeof(efx_qword_t))); \
                SFC_BAR_LOCK(_esbp); \
                _addr = (volatile uint64_t *)(_base + (_offset)); \
                (_eqp)->eq_u64[0] = rte_read64_relaxed(_addr); \
                EFSYS_PROBE3(bar_readq, unsigned int, (_offset), \
                             uint32_t, (_eqp)->eq_u32[1], \
                             uint32_t, (_eqp)->eq_u32[0]); \
                SFC_BAR_UNLOCK(_esbp); \
                _NOTE(CONSTANTCONDITION); \
        } while (B_FALSE)
#define EFSYS_BAR_READO(_esbp, _offset, _eop, _lock) \
        do { \
                volatile uint8_t *_base = (_esbp)->esb_base; \
                volatile __m128i *_addr; \
                _NOTE(CONSTANTCONDITION); \
                SFC_ASSERT(IS_P2ALIGNED(_offset, sizeof(efx_oword_t))); \
                _NOTE(CONSTANTCONDITION); \
                if (_lock) \
                        SFC_BAR_LOCK(_esbp); \
                _addr = (volatile __m128i *)(_base + (_offset)); \
                /* There is no rte_read128_relaxed() yet */ \
                (_eop)->eo_u128[0] = _addr[0]; \
                EFSYS_PROBE5(bar_reado, unsigned int, (_offset), \
                             uint32_t, (_eop)->eo_u32[3], \
                             uint32_t, (_eop)->eo_u32[2], \
                             uint32_t, (_eop)->eo_u32[1], \
                             uint32_t, (_eop)->eo_u32[0]); \
                _NOTE(CONSTANTCONDITION); \
                if (_lock) \
                        SFC_BAR_UNLOCK(_esbp); \
                _NOTE(CONSTANTCONDITION); \
        } while (B_FALSE)
#define EFSYS_BAR_WRITED(_esbp, _offset, _edp, _lock) \
        do { \
                volatile uint8_t *_base = (_esbp)->esb_base; \
                volatile uint32_t *_addr; \
                _NOTE(CONSTANTCONDITION); \
                SFC_ASSERT(IS_P2ALIGNED(_offset, sizeof(efx_dword_t))); \
                _NOTE(CONSTANTCONDITION); \
                if (_lock) \
                        SFC_BAR_LOCK(_esbp); \
                EFSYS_PROBE2(bar_writed, unsigned int, (_offset), \
                             uint32_t, (_edp)->ed_u32[0]); \
                _addr = (volatile uint32_t *)(_base + (_offset)); \
                rte_write32_relaxed((_edp)->ed_u32[0], _addr); \
                _NOTE(CONSTANTCONDITION); \
                if (_lock) \
                        SFC_BAR_UNLOCK(_esbp); \
                _NOTE(CONSTANTCONDITION); \
        } while (B_FALSE)
#define EFSYS_BAR_WRITEQ(_esbp, _offset, _eqp) \
        do { \
                volatile uint8_t *_base = (_esbp)->esb_base; \
                volatile uint64_t *_addr; \
                _NOTE(CONSTANTCONDITION); \
                SFC_ASSERT(IS_P2ALIGNED(_offset, sizeof(efx_qword_t))); \
                SFC_BAR_LOCK(_esbp); \
                EFSYS_PROBE3(bar_writeq, unsigned int, (_offset), \
                             uint32_t, (_eqp)->eq_u32[1], \
                             uint32_t, (_eqp)->eq_u32[0]); \
                _addr = (volatile uint64_t *)(_base + (_offset)); \
                rte_write64_relaxed((_eqp)->eq_u64[0], _addr); \
                SFC_BAR_UNLOCK(_esbp); \
                _NOTE(CONSTANTCONDITION); \
        } while (B_FALSE)
/*
 * Guarantees 64-bit aligned 64-bit writes to a write-combined BAR mapping
 * (required by PIO hardware).
 *
 * Neither VFIO, nor UIO, nor NIC UIO (on FreeBSD) supports write-combined
 * memory mapped to user space, so just abort if this is ever used.
 */
#define EFSYS_BAR_WC_WRITEQ(_esbp, _offset, _eqp) \
        do { \
                rte_panic("Write-combined BAR access not supported"); \
        } while (B_FALSE)
#define EFSYS_BAR_WRITEO(_esbp, _offset, _eop, _lock) \
        do { \
                volatile uint8_t *_base = (_esbp)->esb_base; \
                volatile __m128i *_addr; \
                _NOTE(CONSTANTCONDITION); \
                SFC_ASSERT(IS_P2ALIGNED(_offset, sizeof(efx_oword_t))); \
                _NOTE(CONSTANTCONDITION); \
                if (_lock) \
                        SFC_BAR_LOCK(_esbp); \
                EFSYS_PROBE5(bar_writeo, unsigned int, (_offset), \
                             uint32_t, (_eop)->eo_u32[3], \
                             uint32_t, (_eop)->eo_u32[2], \
                             uint32_t, (_eop)->eo_u32[1], \
                             uint32_t, (_eop)->eo_u32[0]); \
                _addr = (volatile __m128i *)(_base + (_offset)); \
                /* There is no rte_write128_relaxed() yet */ \
                _addr[0] = (_eop)->eo_u128[0]; \
                _NOTE(CONSTANTCONDITION); \
                if (_lock) \
                        SFC_BAR_UNLOCK(_esbp); \
                _NOTE(CONSTANTCONDITION); \
        } while (B_FALSE)
/* Use the standard octo-word (128-bit) write for doorbell writes */
#define EFSYS_BAR_DOORBELL_WRITEO(_esbp, _offset, _eop) \
        do { \
                EFSYS_BAR_WRITEO((_esbp), (_offset), (_eop), B_FALSE); \
                _NOTE(CONSTANTCONDITION); \
        } while (B_FALSE)
#define EFSYS_SPIN(_us) \
        do { \
                rte_delay_us(_us); \
                _NOTE(CONSTANTCONDITION); \
        } while (B_FALSE)

#define EFSYS_SLEEP EFSYS_SPIN
#define EFSYS_MEM_READ_BARRIER() rte_rmb()
#define EFSYS_PIO_WRITE_BARRIER() rte_io_wmb()
/*
 * DPDK does not provide any DMA syncing API, and no PMD
 * does any explicit DMA syncing.
 * DMA mapping is assumed to be coherent.
 */

#define EFSYS_DMA_SYNC_FOR_KERNEL(_esmp, _offset, _size) ((void)0)

/* Just avoid store reordering by the CPU and (implicitly) by the compiler */
#define EFSYS_DMA_SYNC_FOR_DEVICE(_esmp, _offset, _size) rte_wmb()
typedef uint64_t efsys_timestamp_t;

#define EFSYS_TIMESTAMP(_usp) \
        do { \
                *(_usp) = rte_get_timer_cycles() * 1000000 / \
                        rte_get_timer_hz(); \
                _NOTE(CONSTANTCONDITION); \
        } while (B_FALSE)
#define EFSYS_KMEM_ALLOC(_esip, _size, _p) \
        do { \
                (void)(_esip); \
                (_p) = rte_zmalloc("sfc", (_size), 0); \
                _NOTE(CONSTANTCONDITION); \
        } while (B_FALSE)

#define EFSYS_KMEM_FREE(_esip, _size, _p) \
        do { \
                (void)(_esip); \
                (void)(_size); \
                rte_free((void *)(_p)); \
                _NOTE(CONSTANTCONDITION); \
        } while (B_FALSE)
typedef rte_spinlock_t efsys_lock_t;

#define SFC_EFSYS_LOCK_INIT(_eslp, _ifname, _label) \
        rte_spinlock_init((_eslp))
#define SFC_EFSYS_LOCK_DESTROY(_eslp) ((void)0)
#define SFC_EFSYS_LOCK(_eslp) \
        rte_spinlock_lock((_eslp))
#define SFC_EFSYS_UNLOCK(_eslp) \
        rte_spinlock_unlock((_eslp))
#define SFC_EFSYS_LOCK_ASSERT_OWNED(_eslp) \
        SFC_ASSERT(rte_spinlock_is_locked((_eslp)))
typedef int efsys_lock_state_t;

#define EFSYS_LOCK_MAGIC 0x000010c4

#define EFSYS_LOCK(_lockp, _state) \
        do { \
                SFC_EFSYS_LOCK(_lockp); \
                (_state) = EFSYS_LOCK_MAGIC; \
                _NOTE(CONSTANTCONDITION); \
        } while (B_FALSE)

#define EFSYS_UNLOCK(_lockp, _state) \
        do { \
                SFC_ASSERT((_state) == EFSYS_LOCK_MAGIC); \
                SFC_EFSYS_UNLOCK(_lockp); \
                _NOTE(CONSTANTCONDITION); \
        } while (B_FALSE)
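
/*
 * Illustrative usage sketch (added for clarity, not part of the original
 * header); "enp_lock" is a hypothetical efsys_lock_t instance:
 *
 *   efsys_lock_state_t state;
 *
 *   EFSYS_LOCK(&enp_lock, state);
 *   ...critical section...
 *   EFSYS_UNLOCK(&enp_lock, state);
 *
 * The magic value stored in the state variable lets EFSYS_UNLOCK() assert
 * that a matching EFSYS_LOCK() was executed first.
 */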
typedef uint64_t efsys_stat_t;

#define EFSYS_STAT_INCR(_knp, _delta) \
        do { \
                *(_knp) += (_delta); \
                _NOTE(CONSTANTCONDITION); \
        } while (B_FALSE)

#define EFSYS_STAT_DECR(_knp, _delta) \
        do { \
                *(_knp) -= (_delta); \
                _NOTE(CONSTANTCONDITION); \
        } while (B_FALSE)

#define EFSYS_STAT_SET(_knp, _val) \
        do { \
                *(_knp) = (_val); \
                _NOTE(CONSTANTCONDITION); \
        } while (B_FALSE)

#define EFSYS_STAT_SET_QWORD(_knp, _valp) \
        do { \
                *(_knp) = rte_le_to_cpu_64((_valp)->eq_u64[0]); \
                _NOTE(CONSTANTCONDITION); \
        } while (B_FALSE)

#define EFSYS_STAT_SET_DWORD(_knp, _valp) \
        do { \
                *(_knp) = rte_le_to_cpu_32((_valp)->ed_u32[0]); \
                _NOTE(CONSTANTCONDITION); \
        } while (B_FALSE)

#define EFSYS_STAT_INCR_QWORD(_knp, _valp) \
        do { \
                *(_knp) += rte_le_to_cpu_64((_valp)->eq_u64[0]); \
                _NOTE(CONSTANTCONDITION); \
        } while (B_FALSE)

#define EFSYS_STAT_SUBR_QWORD(_knp, _valp) \
        do { \
                *(_knp) -= rte_le_to_cpu_64((_valp)->eq_u64[0]); \
                _NOTE(CONSTANTCONDITION); \
        } while (B_FALSE)
#if EFSYS_OPT_DECODE_INTR_FATAL
#define EFSYS_ERR(_esip, _code, _dword0, _dword1) \
        do { \
                (void)(_esip); \
                RTE_LOG(ERR, PMD, "FATAL ERROR #%u (0x%08x%08x)\n", \
                        (_code), (_dword0), (_dword1)); \
                _NOTE(CONSTANTCONDITION); \
        } while (B_FALSE)
#endif
/*
 * RTE_VERIFY() from DPDK treats expressions with the % operator incorrectly,
 * so the assertion is re-implemented here.
 */
#ifdef RTE_LIBRTE_SFC_EFX_DEBUG
#define EFSYS_ASSERT(_exp) \
        do { \
                if (unlikely(!(_exp))) \
                        rte_panic("line %d\tassert \"%s\" failed\n", \
                                  __LINE__, (#_exp)); \
        } while (0)
#else
#define EFSYS_ASSERT(_exp) (void)(_exp)
#endif

#define EFSYS_ASSERT3(_x, _op, _y, _t) EFSYS_ASSERT((_t)(_x) _op (_t)(_y))

#define EFSYS_ASSERT3U(_x, _op, _y) EFSYS_ASSERT3(_x, _op, _y, uint64_t)
#define EFSYS_ASSERT3S(_x, _op, _y) EFSYS_ASSERT3(_x, _op, _y, int64_t)
#define EFSYS_ASSERT3P(_x, _op, _y) EFSYS_ASSERT3(_x, _op, _y, uintptr_t)
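
/*
 * Illustration (added for clarity, not part of the original header):
 *
 *   EFSYS_ASSERT3U(_offset, <, _size)
 *
 * expands to
 *
 *   EFSYS_ASSERT((uint64_t)(_offset) < (uint64_t)(_size))
 *
 * so both operands are compared after conversion to the stated type.
 */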
#define EFSYS_HAS_ROTL_DWORD 0

#endif /* _SFC_COMMON_EFSYS_H */