4 * Copyright (c) 2016-2017 Solarflare Communications Inc.
7 * This software was jointly developed between OKTET Labs (under contract
8 * for Solarflare) and Solarflare Communications, Inc.
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions are met:
13 * 1. Redistributions of source code must retain the above copyright notice,
14 * this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright notice,
16 * this list of conditions and the following disclaimer in the documentation
17 * and/or other materials provided with the distribution.
19 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
20 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
21 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
23 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
24 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
25 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
26 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
27 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
28 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
29 * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32 #ifndef _SFC_COMMON_EFSYS_H
33 #define _SFC_COMMON_EFSYS_H
37 #include <rte_spinlock.h>
38 #include <rte_byteorder.h>
39 #include <rte_debug.h>
40 #include <rte_memzone.h>
41 #include <rte_memory.h>
42 #include <rte_memcpy.h>
43 #include <rte_cycles.h>
44 #include <rte_prefetch.h>
45 #include <rte_common.h>
46 #include <rte_malloc.h>
50 #include "sfc_debug.h"
#define EFSYS_HAS_UINT64 1
#define EFSYS_USE_UINT64 1
#define EFSYS_HAS_SSE2_M128 1

/*
 * Map the DPDK byte order onto the efx byte-order knobs; exactly one of
 * the two macros is 1. Fail the build if the order cannot be determined.
 */
#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
#define EFSYS_IS_BIG_ENDIAN 1
#define EFSYS_IS_LITTLE_ENDIAN 0
#elif RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
#define EFSYS_IS_BIG_ENDIAN 0
#define EFSYS_IS_LITTLE_ENDIAN 1
#else
#error "Cannot determine system endianness"
#endif
69 #include "efx_types.h"
typedef bool boolean_t;

/* efx common code expects B_FALSE/B_TRUE; map them onto stdbool values */
#ifndef B_FALSE
#define B_FALSE false
#endif
#ifndef B_TRUE
#define B_TRUE true
#endif

/*
 * RTE_MAX() and RTE_MIN() cannot be used since braced-group within
 * expression allowed only inside a function, but MAX() is used as
 * a number of elements in array.
 *
 * Caution: each argument may be evaluated more than once.
 */
#define MAX(v1, v2) ((v1) > (v2) ? (v1) : (v2))

/* Caution: each argument may be evaluated more than once. */
#define MIN(v1, v2) ((v1) < (v2) ? (v1) : (v2))

/* There are macros for alignment in DPDK, but we need to make a proper
 * correspondence here, if we want to re-use them at all
 */

/* True iff (v) is aligned on the power-of-2 boundary (a) */
#define IS_P2ALIGNED(v, a) ((((uintptr_t)(v)) & ((uintptr_t)(a) - 1)) == 0)

/* Round (x) up to the nearest multiple of power-of-2 (align) */
#define P2ROUNDUP(x, align) (-(-(x) & -(align)))

/* Round (_x) down to the nearest multiple of power-of-2 (_a) */
#define P2ALIGN(_x, _a) ((_x) & -(_a))

/* True iff (x) is a power of 2 */
#define ISP2(x) rte_is_power_of_2(x)

/* ENOTACTIVE is not a standard errno value; map it onto ENOTCONN */
#define ENOTACTIVE ENOTCONN
/* Prefetch data expected to be read several times (temporal prefetch) */
static inline void
prefetch_read_many(const volatile void *addr)
{
	rte_prefetch0(addr);
}

/* Prefetch data expected to be read only once (non-temporal prefetch) */
static inline void
prefetch_read_once(const volatile void *addr)
{
	rte_prefetch_non_temporal(addr);
}
/*
 * Modifiers used for Windows builds (SAL annotations used by the efx
 * common code headers). They carry no meaning on other platforms, so
 * expand all of them to nothing.
 */
#define __in
#define __in_opt
#define __in_ecount(_n)
#define __in_ecount_opt(_n)
#define __in_bcount(_n)
#define __in_bcount_opt(_n)

#define __out
#define __out_opt
#define __out_ecount(_n)
#define __out_ecount_opt(_n)
#define __out_bcount(_n)
#define __out_bcount_opt(_n)

#define __deref_out

#define __inout
#define __inout_opt
#define __inout_ecount(_n)
#define __inout_ecount_opt(_n)
#define __inout_bcount(_n)
#define __inout_bcount_opt(_n)
#define __inout_bcount_full_opt(_n)

#define __deref_out_bcount_opt(n)

#define __checkReturn
#define __success(_x)

#define __drv_when(_p, _c)
/* Code inclusion options */

/* Enable printable names for statistics, events, etc. */
#define EFSYS_OPT_NAMES 1

/* Disable SFN5xxx/SFN6xxx since it requires specific support in the PMD */
#define EFSYS_OPT_SIENA 0
/* Enable SFN7xxx support */
#define EFSYS_OPT_HUNTINGTON 1
/* Enable SFN8xxx support */
#define EFSYS_OPT_MEDFORD 1
/* Register writes are checked in debug builds only */
#ifdef RTE_LIBRTE_SFC_EFX_DEBUG
#define EFSYS_OPT_CHECK_REG 1
#else
#define EFSYS_OPT_CHECK_REG 0
#endif

/* MCDI is required for SFN7xxx and SFN8xxx */
#define EFSYS_OPT_MCDI 1
#define EFSYS_OPT_MCDI_LOGGING 1
#define EFSYS_OPT_MCDI_PROXY_AUTH 1

#define EFSYS_OPT_MAC_STATS 1

#define EFSYS_OPT_LOOPBACK 0

#define EFSYS_OPT_MON_MCDI 0
#define EFSYS_OPT_MON_STATS 0

#define EFSYS_OPT_PHY_STATS 0
#define EFSYS_OPT_BIST 0
#define EFSYS_OPT_PHY_LED_CONTROL 0
#define EFSYS_OPT_PHY_FLAGS 0

#define EFSYS_OPT_VPD 0
#define EFSYS_OPT_NVRAM 0
#define EFSYS_OPT_BOOTCFG 0

#define EFSYS_OPT_DIAG 0
#define EFSYS_OPT_RX_SCALE 1
#define EFSYS_OPT_QSTATS 0
/* Filters support is required for SFN7xxx and SFN8xxx */
#define EFSYS_OPT_FILTER 1
#define EFSYS_OPT_RX_SCATTER 0

#define EFSYS_OPT_EV_PREFETCH 0

#define EFSYS_OPT_DECODE_INTR_FATAL 0

#define EFSYS_OPT_LICENSING 0

#define EFSYS_OPT_ALLOW_UNCONFIGURED_NIC 0

#define EFSYS_OPT_RX_PACKED_STREAM 0

#define EFSYS_OPT_TUNNEL 0
/* Opaque handle identifying the NIC to system-specific routines */
typedef struct __efsys_identifier_s efsys_identifier_t;

/*
 * DPDK does not provide dtrace-style static probes;
 * expand all probe macros to no-ops.
 */
#define EFSYS_PROBE(_name)						\
	do { } while (0)

#define EFSYS_PROBE1(_name, _type1, _arg1)				\
	do { } while (0)

#define EFSYS_PROBE2(_name, _type1, _arg1, _type2, _arg2)		\
	do { } while (0)

#define EFSYS_PROBE3(_name, _type1, _arg1, _type2, _arg2,		\
		     _type3, _arg3)					\
	do { } while (0)

#define EFSYS_PROBE4(_name, _type1, _arg1, _type2, _arg2,		\
		     _type3, _arg3, _type4, _arg4)			\
	do { } while (0)

#define EFSYS_PROBE5(_name, _type1, _arg1, _type2, _arg2,		\
		     _type3, _arg3, _type4, _arg4, _type5, _arg5)	\
	do { } while (0)

#define EFSYS_PROBE6(_name, _type1, _arg1, _type2, _arg2,		\
		     _type3, _arg3, _type4, _arg4, _type5, _arg5,	\
		     _type6, _arg6)					\
	do { } while (0)

#define EFSYS_PROBE7(_name, _type1, _arg1, _type2, _arg2,		\
		     _type3, _arg3, _type4, _arg4, _type5, _arg5,	\
		     _type6, _arg6, _type7, _arg7)			\
	do { } while (0)
258 typedef rte_iova_t efsys_dma_addr_t;
260 typedef struct efsys_mem_s {
261 const struct rte_memzone *esm_mz;
263 * Ideally it should have volatile qualifier to denote that
264 * the memory may be updated by someone else. However, it adds
265 * qualifier discard warnings when the pointer or its derivative
266 * is passed to memset() or rte_mov16().
267 * So, skip the qualifier here, but make sure that it is added
268 * below in access macros.
271 efsys_dma_addr_t esm_addr;
#define EFSYS_MEM_ZERO(_esmp, _size)					\
	do {								\
		(void)memset((void *)(_esmp)->esm_base, 0, (_size));	\
									\
		_NOTE(CONSTANTCONDITION);				\
	} while (B_FALSE)

#define EFSYS_MEM_READD(_esmp, _offset, _edp)				\
	do {								\
		volatile uint8_t *_base = (_esmp)->esm_base;		\
		volatile uint32_t *_addr;				\
									\
		_NOTE(CONSTANTCONDITION);				\
		SFC_ASSERT(IS_P2ALIGNED(_offset, sizeof(efx_dword_t))); \
									\
		_addr = (volatile uint32_t *)(_base + (_offset));	\
									\
		(_edp)->ed_u32[0] = _addr[0];				\
									\
		EFSYS_PROBE2(mem_readl, unsigned int, (_offset),	\
		    uint32_t, (_edp)->ed_u32[0]);			\
									\
		_NOTE(CONSTANTCONDITION);				\
	} while (B_FALSE)

#define EFSYS_MEM_READQ(_esmp, _offset, _eqp)				\
	do {								\
		volatile uint8_t *_base = (_esmp)->esm_base;		\
		volatile uint64_t *_addr;				\
									\
		_NOTE(CONSTANTCONDITION);				\
		SFC_ASSERT(IS_P2ALIGNED(_offset, sizeof(efx_qword_t))); \
									\
		_addr = (volatile uint64_t *)(_base + (_offset));	\
									\
		(_eqp)->eq_u64[0] = _addr[0];				\
									\
		EFSYS_PROBE3(mem_readq, unsigned int, (_offset),	\
		    uint32_t, (_eqp)->eq_u32[1],			\
		    uint32_t, (_eqp)->eq_u32[0]);			\
									\
		_NOTE(CONSTANTCONDITION);				\
	} while (B_FALSE)

#define EFSYS_MEM_READO(_esmp, _offset, _eop)				\
	do {								\
		volatile uint8_t *_base = (_esmp)->esm_base;		\
		volatile __m128i *_addr;				\
									\
		_NOTE(CONSTANTCONDITION);				\
		SFC_ASSERT(IS_P2ALIGNED(_offset, sizeof(efx_oword_t))); \
									\
		_addr = (volatile __m128i *)(_base + (_offset));	\
									\
		(_eop)->eo_u128[0] = _addr[0];				\
									\
		EFSYS_PROBE5(mem_reado, unsigned int, (_offset),	\
		    uint32_t, (_eop)->eo_u32[3],			\
		    uint32_t, (_eop)->eo_u32[2],			\
		    uint32_t, (_eop)->eo_u32[1],			\
		    uint32_t, (_eop)->eo_u32[0]);			\
									\
		_NOTE(CONSTANTCONDITION);				\
	} while (B_FALSE)

#define EFSYS_MEM_WRITED(_esmp, _offset, _edp)				\
	do {								\
		volatile uint8_t *_base = (_esmp)->esm_base;		\
		volatile uint32_t *_addr;				\
									\
		_NOTE(CONSTANTCONDITION);				\
		SFC_ASSERT(IS_P2ALIGNED(_offset, sizeof(efx_dword_t))); \
									\
		EFSYS_PROBE2(mem_writed, unsigned int, (_offset),	\
		    uint32_t, (_edp)->ed_u32[0]);			\
									\
		_addr = (volatile uint32_t *)(_base + (_offset));	\
		_addr[0] = (_edp)->ed_u32[0];				\
									\
		_NOTE(CONSTANTCONDITION);				\
	} while (B_FALSE)

#define EFSYS_MEM_WRITEQ(_esmp, _offset, _eqp)				\
	do {								\
		volatile uint8_t *_base = (_esmp)->esm_base;		\
		volatile uint64_t *_addr;				\
									\
		_NOTE(CONSTANTCONDITION);				\
		SFC_ASSERT(IS_P2ALIGNED(_offset, sizeof(efx_qword_t))); \
									\
		EFSYS_PROBE3(mem_writeq, unsigned int, (_offset),	\
		    uint32_t, (_eqp)->eq_u32[1],			\
		    uint32_t, (_eqp)->eq_u32[0]);			\
									\
		_addr = (volatile uint64_t *)(_base + (_offset));	\
		_addr[0] = (_eqp)->eq_u64[0];				\
									\
		_NOTE(CONSTANTCONDITION);				\
	} while (B_FALSE)

#define EFSYS_MEM_WRITEO(_esmp, _offset, _eop)				\
	do {								\
		volatile uint8_t *_base = (_esmp)->esm_base;		\
		volatile __m128i *_addr;				\
									\
		_NOTE(CONSTANTCONDITION);				\
		SFC_ASSERT(IS_P2ALIGNED(_offset, sizeof(efx_oword_t))); \
									\
		EFSYS_PROBE5(mem_writeo, unsigned int, (_offset),	\
		    uint32_t, (_eop)->eo_u32[3],			\
		    uint32_t, (_eop)->eo_u32[2],			\
		    uint32_t, (_eop)->eo_u32[1],			\
		    uint32_t, (_eop)->eo_u32[0]);			\
									\
		_addr = (volatile __m128i *)(_base + (_offset));	\
		_addr[0] = (_eop)->eo_u128[0];				\
									\
		_NOTE(CONSTANTCONDITION);				\
	} while (B_FALSE)

/* DMA (bus) address of the region for programming into the NIC */
#define EFSYS_MEM_ADDR(_esmp)						\
	((_esmp)->esm_addr)

#define EFSYS_MEM_IS_NULL(_esmp)					\
	((_esmp)->esm_base == NULL)

#define EFSYS_MEM_PREFETCH(_esmp, _offset)				\
	do {								\
		volatile uint8_t *_base = (_esmp)->esm_base;		\
									\
		rte_prefetch0(_base + (_offset));			\
	} while (B_FALSE)
411 typedef struct efsys_bar_s {
412 rte_spinlock_t esb_lock;
414 struct rte_pci_device *esb_dev;
416 * Ideally it should have volatile qualifier to denote that
417 * the memory may be updated by someone else. However, it adds
418 * qualifier discard warnings when the pointer or its derivative
419 * is passed to memset() or rte_mov16().
420 * So, skip the qualifier here, but make sure that it is added
421 * below in access macros.
426 #define SFC_BAR_LOCK_INIT(_esbp, _ifname) \
428 rte_spinlock_init(&(_esbp)->esb_lock); \
429 _NOTE(CONSTANTCONDITION); \
431 #define SFC_BAR_LOCK_DESTROY(_esbp) ((void)0)
432 #define SFC_BAR_LOCK(_esbp) rte_spinlock_lock(&(_esbp)->esb_lock)
433 #define SFC_BAR_UNLOCK(_esbp) rte_spinlock_unlock(&(_esbp)->esb_lock)
/*
 * BAR access macros. Where a _lock argument is present it selects whether
 * the per-BAR spinlock must be taken around the access.
 */
#define EFSYS_BAR_READD(_esbp, _offset, _edp, _lock)			\
	do {								\
		volatile uint8_t *_base = (_esbp)->esb_base;		\
		volatile uint32_t *_addr;				\
									\
		_NOTE(CONSTANTCONDITION);				\
		SFC_ASSERT(IS_P2ALIGNED(_offset, sizeof(efx_dword_t))); \
		_NOTE(CONSTANTCONDITION);				\
		if (_lock)						\
			SFC_BAR_LOCK(_esbp);				\
									\
		_addr = (volatile uint32_t *)(_base + (_offset));	\
									\
		(_edp)->ed_u32[0] = rte_read32_relaxed(_addr);		\
									\
		EFSYS_PROBE2(bar_readd, unsigned int, (_offset),	\
		    uint32_t, (_edp)->ed_u32[0]);			\
									\
		_NOTE(CONSTANTCONDITION);				\
		if (_lock)						\
			SFC_BAR_UNLOCK(_esbp);				\
		_NOTE(CONSTANTCONDITION);				\
	} while (B_FALSE)

#define EFSYS_BAR_READQ(_esbp, _offset, _eqp)				\
	do {								\
		volatile uint8_t *_base = (_esbp)->esb_base;		\
		volatile uint64_t *_addr;				\
									\
		_NOTE(CONSTANTCONDITION);				\
		SFC_ASSERT(IS_P2ALIGNED(_offset, sizeof(efx_qword_t))); \
									\
		SFC_BAR_LOCK(_esbp);					\
									\
		_addr = (volatile uint64_t *)(_base + (_offset));	\
									\
		(_eqp)->eq_u64[0] = rte_read64_relaxed(_addr);		\
									\
		EFSYS_PROBE3(bar_readq, unsigned int, (_offset),	\
		    uint32_t, (_eqp)->eq_u32[1],			\
		    uint32_t, (_eqp)->eq_u32[0]);			\
									\
		SFC_BAR_UNLOCK(_esbp);					\
		_NOTE(CONSTANTCONDITION);				\
	} while (B_FALSE)

#define EFSYS_BAR_READO(_esbp, _offset, _eop, _lock)			\
	do {								\
		volatile uint8_t *_base = (_esbp)->esb_base;		\
		volatile __m128i *_addr;				\
									\
		_NOTE(CONSTANTCONDITION);				\
		SFC_ASSERT(IS_P2ALIGNED(_offset, sizeof(efx_oword_t))); \
									\
		_NOTE(CONSTANTCONDITION);				\
		if (_lock)						\
			SFC_BAR_LOCK(_esbp);				\
									\
		_addr = (volatile __m128i *)(_base + (_offset));	\
									\
		/* There is no rte_read128_relaxed() yet */		\
		(_eop)->eo_u128[0] = _addr[0];				\
									\
		EFSYS_PROBE5(bar_reado, unsigned int, (_offset),	\
		    uint32_t, (_eop)->eo_u32[3],			\
		    uint32_t, (_eop)->eo_u32[2],			\
		    uint32_t, (_eop)->eo_u32[1],			\
		    uint32_t, (_eop)->eo_u32[0]);			\
									\
		_NOTE(CONSTANTCONDITION);				\
		if (_lock)						\
			SFC_BAR_UNLOCK(_esbp);				\
		_NOTE(CONSTANTCONDITION);				\
	} while (B_FALSE)

#define EFSYS_BAR_WRITED(_esbp, _offset, _edp, _lock)			\
	do {								\
		volatile uint8_t *_base = (_esbp)->esb_base;		\
		volatile uint32_t *_addr;				\
									\
		_NOTE(CONSTANTCONDITION);				\
		SFC_ASSERT(IS_P2ALIGNED(_offset, sizeof(efx_dword_t))); \
									\
		_NOTE(CONSTANTCONDITION);				\
		if (_lock)						\
			SFC_BAR_LOCK(_esbp);				\
									\
		EFSYS_PROBE2(bar_writed, unsigned int, (_offset),	\
		    uint32_t, (_edp)->ed_u32[0]);			\
									\
		_addr = (volatile uint32_t *)(_base + (_offset));	\
		rte_write32_relaxed((_edp)->ed_u32[0], _addr);		\
		rte_wmb();						\
									\
		_NOTE(CONSTANTCONDITION);				\
		if (_lock)						\
			SFC_BAR_UNLOCK(_esbp);				\
		_NOTE(CONSTANTCONDITION);				\
	} while (B_FALSE)

#define EFSYS_BAR_WRITEQ(_esbp, _offset, _eqp)				\
	do {								\
		volatile uint8_t *_base = (_esbp)->esb_base;		\
		volatile uint64_t *_addr;				\
									\
		_NOTE(CONSTANTCONDITION);				\
		SFC_ASSERT(IS_P2ALIGNED(_offset, sizeof(efx_qword_t))); \
									\
		SFC_BAR_LOCK(_esbp);					\
									\
		EFSYS_PROBE3(bar_writeq, unsigned int, (_offset),	\
		    uint32_t, (_eqp)->eq_u32[1],			\
		    uint32_t, (_eqp)->eq_u32[0]);			\
									\
		_addr = (volatile uint64_t *)(_base + (_offset));	\
		rte_write64_relaxed((_eqp)->eq_u64[0], _addr);		\
		rte_wmb();						\
									\
		SFC_BAR_UNLOCK(_esbp);					\
		_NOTE(CONSTANTCONDITION);				\
	} while (B_FALSE)

/*
 * Guarantees 64bit aligned 64bit writes to write combined BAR mapping
 * (required by PIO hardware).
 *
 * Neither VFIO, nor UIO, nor NIC UIO (on FreeBSD) support
 * write-combined memory mapped to user-land, so just abort if used.
 */
#define EFSYS_BAR_WC_WRITEQ(_esbp, _offset, _eqp)			\
	do {								\
		rte_panic("Write-combined BAR access not supported");	\
	} while (B_FALSE)

#define EFSYS_BAR_WRITEO(_esbp, _offset, _eop, _lock)			\
	do {								\
		volatile uint8_t *_base = (_esbp)->esb_base;		\
		volatile __m128i *_addr;				\
									\
		_NOTE(CONSTANTCONDITION);				\
		SFC_ASSERT(IS_P2ALIGNED(_offset, sizeof(efx_oword_t))); \
									\
		_NOTE(CONSTANTCONDITION);				\
		if (_lock)						\
			SFC_BAR_LOCK(_esbp);				\
									\
		EFSYS_PROBE5(bar_writeo, unsigned int, (_offset),	\
		    uint32_t, (_eop)->eo_u32[3],			\
		    uint32_t, (_eop)->eo_u32[2],			\
		    uint32_t, (_eop)->eo_u32[1],			\
		    uint32_t, (_eop)->eo_u32[0]);			\
									\
		_addr = (volatile __m128i *)(_base + (_offset));	\
		/* There is no rte_write128_relaxed() yet */		\
		_addr[0] = (_eop)->eo_u128[0];				\
		rte_wmb();						\
									\
		_NOTE(CONSTANTCONDITION);				\
		if (_lock)						\
			SFC_BAR_UNLOCK(_esbp);				\
		_NOTE(CONSTANTCONDITION);				\
	} while (B_FALSE)

/* Use the standard octo-word write for doorbell writes */
#define EFSYS_BAR_DOORBELL_WRITEO(_esbp, _offset, _eop)			\
	do {								\
		EFSYS_BAR_WRITEO((_esbp), (_offset), (_eop), B_FALSE);	\
		_NOTE(CONSTANTCONDITION);				\
	} while (B_FALSE)
608 #define EFSYS_SPIN(_us) \
611 _NOTE(CONSTANTCONDITION); \
614 #define EFSYS_SLEEP EFSYS_SPIN
618 #define EFSYS_MEM_READ_BARRIER() rte_rmb()
619 #define EFSYS_PIO_WRITE_BARRIER() rte_io_wmb()
624 * DPDK does not provide any DMA syncing API, and no PMD drivers
625 * have any traces of explicit DMA syncing.
626 * DMA mapping is assumed to be coherent.
629 #define EFSYS_DMA_SYNC_FOR_KERNEL(_esmp, _offset, _size) ((void)0)
631 /* Just avoid store and compiler (impliciltly) reordering */
632 #define EFSYS_DMA_SYNC_FOR_DEVICE(_esmp, _offset, _size) rte_wmb()
636 typedef uint64_t efsys_timestamp_t;
638 #define EFSYS_TIMESTAMP(_usp) \
640 *(_usp) = rte_get_timer_cycles() * 1000000 / \
641 rte_get_timer_hz(); \
642 _NOTE(CONSTANTCONDITION); \
647 #define EFSYS_KMEM_ALLOC(_esip, _size, _p) \
650 (_p) = rte_zmalloc("sfc", (_size), 0); \
651 _NOTE(CONSTANTCONDITION); \
654 #define EFSYS_KMEM_FREE(_esip, _size, _p) \
659 _NOTE(CONSTANTCONDITION); \
664 typedef rte_spinlock_t efsys_lock_t;
666 #define SFC_EFSYS_LOCK_INIT(_eslp, _ifname, _label) \
667 rte_spinlock_init((_eslp))
668 #define SFC_EFSYS_LOCK_DESTROY(_eslp) ((void)0)
669 #define SFC_EFSYS_LOCK(_eslp) \
670 rte_spinlock_lock((_eslp))
671 #define SFC_EFSYS_UNLOCK(_eslp) \
672 rte_spinlock_unlock((_eslp))
673 #define SFC_EFSYS_LOCK_ASSERT_OWNED(_eslp) \
674 SFC_ASSERT(rte_spinlock_is_locked((_eslp)))
676 typedef int efsys_lock_state_t;
678 #define EFSYS_LOCK_MAGIC 0x000010c4
680 #define EFSYS_LOCK(_lockp, _state) \
682 SFC_EFSYS_LOCK(_lockp); \
683 (_state) = EFSYS_LOCK_MAGIC; \
684 _NOTE(CONSTANTCONDITION); \
687 #define EFSYS_UNLOCK(_lockp, _state) \
689 SFC_ASSERT((_state) == EFSYS_LOCK_MAGIC); \
690 SFC_EFSYS_UNLOCK(_lockp); \
691 _NOTE(CONSTANTCONDITION); \
/* Statistics */

/* Accumulating 64-bit statistics counter */
typedef uint64_t efsys_stat_t;

#define EFSYS_STAT_INCR(_knp, _delta)					\
	do {								\
		*(_knp) += (_delta);					\
		_NOTE(CONSTANTCONDITION);				\
	} while (B_FALSE)

#define EFSYS_STAT_DECR(_knp, _delta)					\
	do {								\
		*(_knp) -= (_delta);					\
		_NOTE(CONSTANTCONDITION);				\
	} while (B_FALSE)

#define EFSYS_STAT_SET(_knp, _val)					\
	do {								\
		*(_knp) = (_val);					\
		_NOTE(CONSTANTCONDITION);				\
	} while (B_FALSE)

/* NIC-provided values are little-endian (see rte_le_to_cpu_* below) */
#define EFSYS_STAT_SET_QWORD(_knp, _valp)				\
	do {								\
		*(_knp) = rte_le_to_cpu_64((_valp)->eq_u64[0]);		\
		_NOTE(CONSTANTCONDITION);				\
	} while (B_FALSE)

#define EFSYS_STAT_SET_DWORD(_knp, _valp)				\
	do {								\
		*(_knp) = rte_le_to_cpu_32((_valp)->ed_u32[0]);		\
		_NOTE(CONSTANTCONDITION);				\
	} while (B_FALSE)

#define EFSYS_STAT_INCR_QWORD(_knp, _valp)				\
	do {								\
		*(_knp) += rte_le_to_cpu_64((_valp)->eq_u64[0]);	\
		_NOTE(CONSTANTCONDITION);				\
	} while (B_FALSE)

#define EFSYS_STAT_SUBR_QWORD(_knp, _valp)				\
	do {								\
		*(_knp) -= rte_le_to_cpu_64((_valp)->eq_u64[0]);	\
		_NOTE(CONSTANTCONDITION);				\
	} while (B_FALSE)

/* Error handling */

#if EFSYS_OPT_DECODE_INTR_FATAL
#define EFSYS_ERR(_esip, _code, _dword0, _dword1)			\
	do {								\
		(void)(_esip);						\
		RTE_LOG(ERR, PMD, "FATAL ERROR #%u (0x%08x%08x)\n",	\
			(_code), (_dword0), (_dword1));			\
		_NOTE(CONSTANTCONDITION);				\
	} while (B_FALSE)
#endif
/* Assert */

/* RTE_VERIFY from DPDK treats expressions with % operator incorrectly,
 * so we re-implement it here.
 */
#ifdef RTE_LIBRTE_SFC_EFX_DEBUG
#define EFSYS_ASSERT(_exp)						\
	do {								\
		if (unlikely(!(_exp)))					\
			rte_panic("line %d\tassert \"%s\" failed\n",	\
				  __LINE__, (#_exp));			\
	} while (0)
#else
/* Still evaluate the expression (and its side effects) in release builds */
#define EFSYS_ASSERT(_exp) (void)(_exp)
#endif

/* Typed three-operand assertion helpers */
#define EFSYS_ASSERT3(_x, _op, _y, _t) EFSYS_ASSERT((_t)(_x) _op (_t)(_y))

#define EFSYS_ASSERT3U(_x, _op, _y) EFSYS_ASSERT3(_x, _op, _y, uint64_t)
#define EFSYS_ASSERT3S(_x, _op, _y) EFSYS_ASSERT3(_x, _op, _y, int64_t)
#define EFSYS_ASSERT3P(_x, _op, _y) EFSYS_ASSERT3(_x, _op, _y, uintptr_t)

/* Rotate */

/* No optimized 32-bit rotate is provided by this environment */
#define EFSYS_HAS_ROTL_DWORD 0
782 #endif /* _SFC_COMMON_EFSYS_H */