/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0)
 *
 * Copyright 2011 Freescale Semiconductor, Inc.
 * Copyright 2019-2020 NXP
 */

#ifndef __COMPAT_H
#define __COMPAT_H

#include <sched.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <linux/types.h>
#include <sys/types.h>

#include <rte_byteorder.h>
#include <rte_atomic.h>
#include <rte_spinlock.h>
#include <rte_common.h>
#include <rte_debug.h>
#include <rte_cycles.h>
#include <rte_malloc.h>
/* The following definitions are primarily to allow the single-source driver
 * interfaces to be included by arbitrary program code. Ie. for interfaces that
 * are also available in kernel-space, these definitions provide compatibility
 * with certain attributes and types used in those interfaces.
 */
/* Required compiler attributes */
#ifndef __maybe_unused
#define __maybe_unused __rte_unused
#endif
#ifndef __always_unused
#define __always_unused __rte_unused
#endif
#ifndef __packed
#define __packed __rte_packed
#endif
#ifndef noinline
#define noinline __rte_noinline
#endif

#define L1_CACHE_BYTES 64
#define ____cacheline_aligned __rte_aligned(L1_CACHE_BYTES)
#define __stringify_1(x) #x
#define __stringify(x) __stringify_1(x)

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))
#define prflush(fmt, args...) \
	do { \
		printf(fmt, ##args); \
		fflush(stdout); \
	} while (0)

#define pr_crit(fmt, args...) prflush("CRIT:" fmt, ##args)
#define pr_err(fmt, args...) prflush("ERR:" fmt, ##args)
#define pr_warn(fmt, args...) prflush("WARN:" fmt, ##args)
#define pr_info(fmt, args...) prflush(fmt, ##args)

#ifdef RTE_LIBRTE_DPAA_DEBUG_BUS
#define pr_debug(fmt, args...) printf(fmt, ##args)
#else
#define pr_debug(fmt, args...) {}
#endif

#define DPAA_BUG_ON(x) RTE_ASSERT(x)
/* Required types */
typedef uint8_t  u8;
typedef uint16_t u16;
typedef uint32_t u32;
typedef uint64_t u64;
typedef uint64_t dma_addr_t;
typedef cpu_set_t cpumask_t;
typedef uint32_t phandle;
typedef uint32_t gfp_t;
typedef uint32_t irqreturn_t;

#define ETHER_ADDR_LEN 6

#define IRQ_HANDLED 0
#define request_irq qbman_request_irq
#define free_irq qbman_free_irq
#define __raw_readb(p) (*(const volatile unsigned char *)(p))
#define __raw_readl(p) (*(const volatile unsigned int *)(p))
#define __raw_writel(v, p) { *(volatile unsigned int *)(p) = (v); }
/* to be used as an upper-limit only */
#define NR_CPUS 64
/* Waitqueue stuff */
typedef struct { } wait_queue_head_t;
#define DECLARE_WAIT_QUEUE_HEAD(x) int dummy_##x __always_unused
#define wake_up(x) do { } while (0)
static inline u32 in_be32(volatile void *__p)
{
	volatile u32 *p = __p;
	return rte_be_to_cpu_32(*p);
}

static inline void out_be32(volatile void *__p, u32 val)
{
	volatile u32 *p = __p;
	*p = rte_cpu_to_be_32(val);
}
#define hwsync() rte_rmb()
#define lwsync() rte_wmb()

#define dcbt_ro(p) __builtin_prefetch(p, 0)
#define dcbt_rw(p) __builtin_prefetch(p, 1)
#if defined(RTE_ARCH_ARM64)
#define dcbz(p) { asm volatile("dc zva, %0" : : "r" (p) : "memory"); }
#define dcbz_64(p) dcbz(p)
#define dcbf(p) { asm volatile("dc cvac, %0" : : "r"(p) : "memory"); }
#define dcbf_64(p) dcbf(p)
#define dccivac(p) { asm volatile("dc civac, %0" : : "r"(p) : "memory"); }

#define dcbit_ro(p) \
	do { \
		dccivac(p); \
		asm volatile("prfm pldl1keep, [%0, #64]" : : "r" (p)); \
	} while (0)
#elif defined(RTE_ARCH_ARM)
#define dcbz(p) memset((p), 0, 32)
#define dcbz_64(p) memset((p), 0, 64)
#define dcbf(p) RTE_SET_USED(p)
#define dcbf_64(p) dcbf(p)
#define dccivac(p) RTE_SET_USED(p)
#define dcbit_ro(p) RTE_SET_USED(p)
#else
#define dcbz(p) RTE_SET_USED(p)
#define dcbz_64(p) dcbz(p)
#define dcbf(p) RTE_SET_USED(p)
#define dcbf_64(p) dcbf(p)
#define dccivac(p) RTE_SET_USED(p)
#define dcbit_ro(p) RTE_SET_USED(p)
#endif

#define barrier() { asm volatile ("" : : : "memory"); }
#define cpu_relax barrier
#if defined(RTE_ARCH_ARM64)
static inline uint64_t mfatb(void)
{
	uint64_t ret, ret_new, timeout = 200;

	asm volatile ("mrs %0, cntvct_el0" : "=r" (ret));
	asm volatile ("mrs %0, cntvct_el0" : "=r" (ret_new));
	while (ret != ret_new && timeout--) {
		ret = ret_new;
		asm volatile ("mrs %0, cntvct_el0" : "=r" (ret_new));
	}
	DPAA_BUG_ON(!timeout && (ret != ret_new));
	return ret * 64;
}
#else
#define mfatb rte_rdtsc
#endif
/* Spin for a few cycles without bothering the bus */
static inline void cpu_spin(int cycles)
{
	uint64_t now = mfatb();

	while (mfatb() < (now + cycles))
		;
}
/* Qman/Bman API inlines and macros; */
#define lower_32_bits(x) ((u32)(x))
#define upper_32_bits(x) ((u32)(((x) >> 16) >> 16))
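
/*
 * Example: for x = 0x1122334455667788ULL, lower_32_bits(x) == 0x55667788
 * and upper_32_bits(x) == 0x11223344. The upper half is taken with two
 * 16-bit shifts so the expression stays well-defined even when (x) is only
 * 32 bits wide (a single ">> 32" would be undefined behaviour there).
 */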
/*
 * Swap bytes of a 48-bit value.
 */
static inline uint64_t
__bswap_48(uint64_t x)
{
	return ((x & 0x0000000000ffULL) << 40) |
	       ((x & 0x00000000ff00ULL) << 24) |
	       ((x & 0x000000ff0000ULL) << 8) |
	       ((x & 0x0000ff000000ULL) >> 8) |
	       ((x & 0x00ff00000000ULL) >> 24) |
	       ((x & 0xff0000000000ULL) >> 40);
}
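
/* Example: __bswap_48(0x112233445566ULL) == 0x665544332211ULL. */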
/*
 * Swap bytes of a 40-bit value.
 */
static inline uint64_t
__bswap_40(uint64_t x)
{
	return ((x & 0x00000000ffULL) << 32) |
	       ((x & 0x000000ff00ULL) << 16) |
	       ((x & 0x0000ff0000ULL)) |
	       ((x & 0x00ff000000ULL) >> 16) |
	       ((x & 0xff00000000ULL) >> 32);
}
/*
 * Swap bytes of a 24-bit value.
 */
static inline uint32_t
__bswap_24(uint32_t x)
{
	return ((x & 0x0000ffULL) << 16) |
	       ((x & 0x00ff00ULL)) |
	       ((x & 0xff0000ULL) >> 16);
}
#define be64_to_cpu(x) rte_be_to_cpu_64(x)
#define be32_to_cpu(x) rte_be_to_cpu_32(x)
#define be16_to_cpu(x) rte_be_to_cpu_16(x)

#define cpu_to_be64(x) rte_cpu_to_be_64(x)
#if !defined(cpu_to_be32)
#define cpu_to_be32(x) rte_cpu_to_be_32(x)
#endif
#define cpu_to_be16(x) rte_cpu_to_be_16(x)
#if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN

#define cpu_to_be48(x) __bswap_48(x)
#define be48_to_cpu(x) __bswap_48(x)

#define cpu_to_be40(x) __bswap_40(x)
#define be40_to_cpu(x) __bswap_40(x)

#define cpu_to_be24(x) __bswap_24(x)
#define be24_to_cpu(x) __bswap_24(x)

#else /* RTE_BIG_ENDIAN */

#define cpu_to_be48(x) (x)
#define be48_to_cpu(x) (x)

#define cpu_to_be40(x) (x)
#define be40_to_cpu(x) (x)

#define cpu_to_be24(x) (x)
#define be24_to_cpu(x) (x)

#endif /* RTE_BIG_ENDIAN */
/* When copying aligned words or shorts, try to avoid memcpy() */
/* memcpy() stuff - when you know alignments in advance */
#define CONFIG_TRY_BETTER_MEMCPY

#ifdef CONFIG_TRY_BETTER_MEMCPY
static inline void copy_words(void *dest, const void *src, size_t sz)
{
	u32 *__dest = dest;
	const u32 *__src = src;
	size_t __sz = sz >> 2;

	DPAA_BUG_ON((unsigned long)dest & 0x3);
	DPAA_BUG_ON((unsigned long)src & 0x3);
	DPAA_BUG_ON(sz & 0x3);
	while (__sz--)
		*(__dest++) = *(__src++);
}
static inline void copy_shorts(void *dest, const void *src, size_t sz)
{
	u16 *__dest = dest;
	const u16 *__src = src;
	size_t __sz = sz >> 1;

	DPAA_BUG_ON((unsigned long)dest & 0x1);
	DPAA_BUG_ON((unsigned long)src & 0x1);
	DPAA_BUG_ON(sz & 0x1);
	while (__sz--)
		*(__dest++) = *(__src++);
}
static inline void copy_bytes(void *dest, const void *src, size_t sz)
{
	u8 *__dest = dest;
	const u8 *__src = src;

	while (sz--)
		*(__dest++) = *(__src++);
}
#else
#define copy_words memcpy
#define copy_shorts memcpy
#define copy_bytes memcpy
#endif
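
/*
 * Usage sketch (with hypothetical buffers): copy_words() expects 4-byte
 * aligned pointers and a size that is a multiple of 4, e.g.
 *
 *	uint32_t dst[4], src[4] = { 1, 2, 3, 4 };
 *	copy_words(dst, src, sizeof(src));
 *
 * copy_shorts() has the analogous 2-byte constraints; copy_bytes() has none.
 */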
/* Allocator stuff */
#define kmalloc(sz, t) rte_malloc(NULL, sz, 0)
#define vmalloc(sz) rte_malloc(NULL, sz, 0)
#define kfree(p) { if (p) rte_free(p); }

static inline void *kzalloc(size_t sz, gfp_t __foo __rte_unused)
{
	void *ptr = rte_malloc(NULL, sz, 0);

	if (ptr)
		memset(ptr, 0, sz);
	return ptr;
}

static inline unsigned long get_zeroed_page(gfp_t __foo __rte_unused)
{
	void *p;

	if (posix_memalign(&p, 4096, 4096))
		return 0;
	memset(p, 0, 4096);
	return (unsigned long)p;
}
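
/*
 * Note: kmalloc()/vmalloc()/kzalloc() memory comes from rte_malloc() and is
 * released with kfree(); get_zeroed_page() uses posix_memalign(), so its
 * page is released with free() rather than kfree().
 */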
#define spinlock_t rte_spinlock_t
#define __SPIN_LOCK_UNLOCKED(x) RTE_SPINLOCK_INITIALIZER
#define DEFINE_SPINLOCK(x) spinlock_t x = __SPIN_LOCK_UNLOCKED(x)
#define spin_lock_init(x) rte_spinlock_init(x)
#define spin_lock_destroy(x)
#define spin_lock(x) rte_spinlock_lock(x)
#define spin_unlock(x) rte_spinlock_unlock(x)
#define spin_lock_irq(x) spin_lock(x)
#define spin_unlock_irq(x) spin_unlock(x)
#define spin_lock_irqsave(x, f) spin_lock_irq(x)
#define spin_unlock_irqrestore(x, f) spin_unlock_irq(x)
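
/*
 * Usage sketch (hypothetical lock): in userspace there is no interrupt state
 * to save, so the _irqsave/_irqrestore variants just take the lock and the
 * flags argument is ignored, e.g.
 *
 *	static DEFINE_SPINLOCK(hypothetical_lock);
 *	unsigned long flags __maybe_unused;
 *
 *	spin_lock_irqsave(&hypothetical_lock, flags);
 *	spin_unlock_irqrestore(&hypothetical_lock, flags);
 */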
#define atomic_t rte_atomic32_t
#define atomic_read(v) rte_atomic32_read(v)
#define atomic_set(v, i) rte_atomic32_set(v, i)

#define atomic_inc(v) rte_atomic32_add(v, 1)
#define atomic_dec(v) rte_atomic32_sub(v, 1)

#define atomic_inc_and_test(v) rte_atomic32_inc_and_test(v)
#define atomic_dec_and_test(v) rte_atomic32_dec_and_test(v)

#define atomic_inc_return(v) rte_atomic32_add_return(v, 1)
#define atomic_dec_return(v) rte_atomic32_sub_return(v, 1)
#define atomic_sub_and_test(i, v) (rte_atomic32_sub_return(v, i) == 0)
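
/*
 * Note: atomic_sub_and_test(i, v) keeps the kernel argument order (value
 * first, atomic second) while the underlying rte_atomic32_sub_return()
 * takes (v, i), hence the swapped operands in the expansion above.
 */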

/* Interface name length */
#define IF_NAME_MAX_LEN 16

#endif /* __COMPAT_H */