1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2018-2020 Intel Corporation
13 #include <sys/queue.h>
16 #include <rte_common.h>
17 #include <rte_memcpy.h>
18 #include <rte_malloc.h>
19 #include <rte_memzone.h>
20 #include <rte_byteorder.h>
21 #include <rte_cycles.h>
22 #include <rte_spinlock.h>
24 #include <rte_random.h>
27 #include "ice_alloc.h"
29 #include "../ice_logs.h"
31 #ifndef __INTEL_NET_BASE_OSDEP__
32 #define __INTEL_NET_BASE_OSDEP__
/* Kernel-style endianness-tagged type aliases.  DPDK tracks byte order
 * by convention (via the conversion macros below), not in the type
 * system, so these all collapse to plain fixed-width integers. */
47 #define __le16 uint16_t
50 #define __le32 uint32_t
53 #define __le64 uint64_t
56 #define __be16 uint16_t
59 #define __be32 uint32_t
62 #define __be64 uint64_t
/* min/max and struct/array helpers mapped onto their RTE equivalents.
 * NOTE: as macros, min()/max() may evaluate their arguments twice. */
65 #define min(a, b) RTE_MIN(a, b)
66 #define max(a, b) RTE_MAX(a, b)
68 #define FIELD_SIZEOF(t, f) RTE_SIZEOF_FIELD(t, f)
69 #define ARRAY_SIZE(arr) RTE_DIM(arr)
/* Byte-order conversions: shared-code spellings over rte_byteorder.h. */
71 #define CPU_TO_LE16(o) rte_cpu_to_le_16(o)
72 #define CPU_TO_LE32(s) rte_cpu_to_le_32(s)
73 #define CPU_TO_LE64(h) rte_cpu_to_le_64(h)
74 #define LE16_TO_CPU(a) rte_le_to_cpu_16(a)
75 #define LE32_TO_CPU(c) rte_le_to_cpu_32(c)
76 #define LE64_TO_CPU(k) rte_le_to_cpu_64(k)
78 #define CPU_TO_BE16(o) rte_cpu_to_be_16(o)
79 #define CPU_TO_BE32(o) rte_cpu_to_be_32(o)
80 #define CPU_TO_BE64(o) rte_cpu_to_be_64(o)
81 #define BE16_TO_CPU(o) rte_be_to_cpu_16(o)
/* BSD-socket-style network byte order helpers (big-endian <-> host). */
83 #define NTOHS(a) rte_be_to_cpu_16(a)
84 #define NTOHL(a) rte_be_to_cpu_32(a)
85 #define HTONS(a) rte_cpu_to_be_16(a)
86 #define HTONL(a) rte_cpu_to_be_32(a)
/* Read a little-endian 32-bit device register at @addr and return it in
 * host byte order. */
88 static __rte_always_inline uint32_t
89 readl(volatile void *addr)
91 	return rte_le_to_cpu_32(rte_read32(addr));
/* Write @value to the 32-bit device register at @addr, converting from
 * host to little-endian byte order.  rte_write32 includes an I/O write
 * memory barrier. */
94 static __rte_always_inline void
95 writel(uint32_t value, volatile void *addr)
97 	rte_write32(rte_cpu_to_le_32(value), addr);
/* Same as writel() but without the ordering barrier (relaxed variant);
 * use only where register write ordering does not matter. */
100 static __rte_always_inline void
101 writel_relaxed(uint32_t value, volatile void *addr)
103 	rte_write32_relaxed(rte_cpu_to_le_32(value), addr);
/* Read a little-endian 64-bit device register at @addr and return it in
 * host byte order. */
106 static __rte_always_inline uint64_t
107 readq(volatile void *addr)
109 	return rte_le_to_cpu_64(rte_read64(addr));
/* Write @value to the 64-bit device register at @addr, converting from
 * host to little-endian byte order. */
112 static __rte_always_inline void
113 writeq(uint64_t value, volatile void *addr)
115 	rte_write64(rte_cpu_to_le_64(value), addr);
/* Register accessors used by the shared base code: @a is a hw struct
 * whose hw_addr is the BAR0 mapping, @reg a byte offset into it. */
118 #define wr32(a, reg, value) writel((value), (a)->hw_addr + (reg))
119 #define rd32(a, reg) readl((a)->hw_addr + (reg))
120 #define wr64(a, reg, value) writeq((value), (a)->hw_addr + (reg))
121 #define rd64(a, reg) readq((a)->hw_addr + (reg))
123 #endif /* __INTEL_NET_BASE_OSDEP__ */
/* Map Linux-kernel attribute spellings onto their RTE equivalents so
 * shared base code compiles unchanged. */
125 #ifndef __always_unused
126 #define __always_unused __rte_unused
128 #ifndef __maybe_unused
129 #define __maybe_unused __rte_unused
132 #define __packed __rte_packed
/* Single-bit mask in a 64-bit value; shift count must be < 64. */
136 #define BIT_ULL(a) (1ULL << (a))
/* Build a field mask by shifting mask @m to bit position @s. */
139 #define MAKEMASK(m, s) ((m) << (s))
/* Conditional debug logging: emit via the PMD logger only when message
 * class @m is enabled in hw->debug_mask.  Output is prefixed with the
 * PCI device.function of the port for multi-port disambiguation. */
141 #define ice_debug(h, m, s, ...) \
143 	if (((m) & (h)->debug_mask)) \
144 		PMD_DRV_LOG_RAW(DEBUG, "ice %02x.%x " s, \
145 			(h)->bus.device, (h)->bus.func, \
/* info/warn collapse to debug-level logging gated on ICE_DBG_ALL. */
149 #define ice_info(hw, fmt, args...) ice_debug(hw, ICE_DBG_ALL, fmt, ##args)
150 #define ice_warn(hw, fmt, args...) ice_debug(hw, ICE_DBG_ALL, fmt, ##args)
/* Hex-dump @len bytes of @buf, 8 bytes (one u64) per output line.
 * NOTE(review): rowsize/groupsize are accepted for API compatibility
 * but the visible loop always steps by 8 — confirm against base code
 * callers before relying on them. */
151 #define ice_debug_array(hw, type, rowsize, groupsize, buf, len) \
153 	struct ice_hw *hw_l = hw; \
157 	for (i = 0; i < len_l; i += 8) \
158 		ice_debug(hw_l, type, \
159 			  "0x%04X 0x%016"PRIx64"\n", \
160 			  i, *((u64 *)((buf_l) + i))); \
/* String formatting: shared code spellings map straight to snprintf. */
162 #define ice_snprintf snprintf
164 #define SNPRINTF ice_snprintf
/* Raw register write by pointer (no hw handle / offset arithmetic). */
167 #define ICE_PCI_REG_WRITE(reg, value) writel(value, reg)
169 #define ICE_READ_REG(hw, reg) rd32(hw, reg)
170 #define ICE_WRITE_REG(hw, reg, value) wr32(hw, reg, value)
/* Flush posted writes by reading a harmless status register (PF reads
 * GLGEN_STAT, VF reads VFGEN_RSTAT). */
172 #define ice_flush(a) ICE_READ_REG((a), GLGEN_STAT)
173 #define icevf_flush(a) ICE_READ_REG((a), VFGEN_RSTAT)
175 #define flush(a) ICE_READ_REG((a), GLGEN_STAT)
176 #define div64_long(n, d) ((n) / (d))
178 #define BITS_PER_BYTE 8
180 /* memory allocation tracking */
/* Wrapper for plain (non-DMA) allocations handed back to base code. */
188 struct ice_virt_mem {
/* Zeroed heap allocation; the hw handle @h is unused on DPDK. */
193 #define ice_malloc(h, s) rte_zmalloc(NULL, s, 0)
/* Zeroed array allocation of @c elements of @s bytes; @h is unused.
 * Use rte_calloc() rather than rte_zmalloc(NULL, (c) * (s), 0) so the
 * element-count multiplication is handled by the allocator instead of
 * an unchecked (c) * (s) expression that can silently wrap. */
194 #define ice_calloc(h, c, s) rte_calloc(NULL, (c), (s), 0)
/* Free a pointer obtained from ice_malloc/ice_calloc; rte_free(NULL)
 * is a no-op, matching free() semantics. */
195 #define ice_free(h, m) rte_free(m)
/* memset/memcpy shims: the 4th argument is a direction/type tag used by
 * other OS ports and is ignored here. */
197 #define ice_memset(a, b, c, d) memset((a), (b), (c))
198 #define ice_memcpy(a, b, c, d) rte_memcpy((a), (b), (c))
/* ice_lock is a thin wrapper around an rte_spinlock_t; the helpers
 * below adapt the shared-code lock API to DPDK spinlocks. */
202 	rte_spinlock_t spinlock;
/* Initialize @sp to the unlocked state. */
206 ice_init_lock(struct ice_lock *sp)
208 	rte_spinlock_init(&sp->spinlock);
/* Busy-wait until @sp is acquired. */
212 ice_acquire_lock(struct ice_lock *sp)
214 	rte_spinlock_lock(&sp->spinlock);
/* Release @sp; caller must hold it. */
218 ice_release_lock(struct ice_lock *sp)
220 	rte_spinlock_unlock(&sp->spinlock);
/* No teardown needed for a spinlock; kept for API symmetry. */
224 ice_destroy_lock(__rte_unused struct ice_lock *sp)
/* Allocate a zeroed buffer of @size bytes and copy @src into it.
 * @dir is the shared-code memcpy direction tag, unused on DPDK.
 * NOTE(review): the allocation-failure guard between the malloc and the
 * copy is on an elided line — confirm p is NULL-checked before the
 * rte_memcpy. */
230 static __rte_always_inline void *
231 ice_memdup(__rte_unused struct ice_hw *hw, const void *src, size_t size,
232 	   __rte_unused enum ice_memcpy_type dir)
236 	p = ice_malloc(hw, size);
238 	rte_memcpy(p, src, size);
/* Allocate @size bytes of DMA-capable memory for the hardware, backed
 * by an rte_memzone with a randomized unique name.  On success fills
 * @mem (physical address + owning zone recorded for later free). */
244 ice_alloc_dma_mem(__rte_unused struct ice_hw *hw,
245 		  struct ice_dma_mem *mem, u64 size)
247 	const struct rte_memzone *mz = NULL;
248 	char z_name[RTE_MEMZONE_NAMESIZE];
/* rte_rand() in the name avoids collisions between repeated allocations;
 * memzone names must be unique process-wide. */
253 	snprintf(z_name, sizeof(z_name), "ice_dma_%"PRIu64, rte_rand());
254 	mz = rte_memzone_reserve_bounded(z_name, size, SOCKET_ID_ANY, 0,
/* NOTE(review): mz->phys_addr is the legacy field name; newer DPDK
 * spells this mz->iova — confirm against the DPDK version in use. */
261 	mem->pa = mz->phys_addr;
262 	mem->zone = (const void *)mz;
263 	PMD_DRV_LOG(DEBUG, "memzone %s allocated with physical address: "
264 		    "%"PRIu64, mz->name, mem->pa);
/* Release DMA memory previously obtained via ice_alloc_dma_mem by
 * freeing the backing memzone recorded in @mem->zone. */
270 ice_free_dma_mem(__rte_unused struct ice_hw *hw,
271 		 struct ice_dma_mem *mem)
273 	PMD_DRV_LOG(DEBUG, "memzone %s to be freed with physical address: "
274 		    "%"PRIu64, ((const struct rte_memzone *)mem->zone)->name,
276 	rte_memzone_free((const struct rte_memzone *)mem->zone);
/* Population count of the low 8 bits of @num: tests bit 0 each of 8
 * iterations (the right shift of num per iteration is on an elided
 * line). */
283 ice_hweight8(u32 num)
288 	for (i = 0; i < 8; i++) {
289 		bits += (u8)(num & 0x1);
/* Population count of all 32 bits of @num; same bit-at-a-time scheme as
 * ice_hweight8 (per-iteration shift on an elided line). */
297 ice_hweight32(u32 num)
302 	for (i = 0; i < 32; i++) {
303 		bits += (u8)(num & 0x1);
/* Ceiling division; @d must be non-zero. */
310 #define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))
/* All delays are busy-wait microsecond delays via rte_delay_us; the
 * second argument of the ice_* variants (a "sleep allowed" flag on
 * other OS ports) is ignored here. */
311 #define DELAY(x) rte_delay_us(x)
312 #define ice_usec_delay(x, y) rte_delay_us(x)
313 #define ice_msec_delay(x, y) rte_delay_us(1000 * (x))
314 #define udelay(x) DELAY(x)
315 #define msleep(x) DELAY(1000 * (x))
/* usleep_range collapses to its lower bound, rounded up to whole ms. */
316 #define usleep_range(min, max) msleep(DIV_ROUND_UP(min, 1000))
/* Doubly-linked-list shim: shared base code uses Linux-style list
 * macros, implemented here on top of BSD sys/queue.h LIST_* (singly
 * linked with back-pointers). */
318 struct ice_list_entry {
319 	LIST_ENTRY(ice_list_entry) next;
322 LIST_HEAD(ice_list_head, ice_list_entry);
324 #define LIST_ENTRY_TYPE    ice_list_entry
325 #define LIST_HEAD_TYPE     ice_list_head
326 #define INIT_LIST_HEAD(list_head) LIST_INIT(list_head)
327 #define LIST_DEL(entry)           LIST_REMOVE(entry, next)
328 /* LIST_EMPTY(list_head)) the same in sys/queue.h */
330 /*Note parameters are swapped*/
/* Returns NULL on an empty list (lh_first is NULL). */
331 #define LIST_FIRST_ENTRY(head, type, field) (type *)((head)->lh_first)
332 #define LIST_NEXT_ENTRY(entry, type, field) \
333 	((type *)(entry)->field.next.le_next)
/* "ADD" means insert at head, matching Linux list_add semantics. */
334 #define LIST_ADD(entry, list_head)    LIST_INSERT_HEAD(list_head, entry, next)
335 #define LIST_ADD_AFTER(entry, list_entry) \
336 	LIST_INSERT_AFTER(list_entry, entry, next)
/* Append @entry at the tail of @head.  sys/queue.h LIST has no tail
 * pointer, so this walks the whole list: O(n) per append.  The visible
 * paths are insert-at-head (empty-list case, guard on an elided line)
 * and insert-after-last for a non-empty list. */
338 static inline void list_add_tail(struct ice_list_entry *entry,
339 				 struct ice_list_head *head)
341 	struct ice_list_entry *tail = head->lh_first;
344 		LIST_INSERT_HEAD(head, entry, next);
/* Walk to the current last element, then link after it. */
347 	while (tail->next.le_next != NULL)
348 		tail = tail->next.le_next;
349 	LIST_INSERT_AFTER(tail, entry, next);
352 #define LIST_ADD_TAIL(entry, head) list_add_tail(entry, head)
/* Iterate @pos (of struct @type, list hook at @member) over @head;
 * yields NULL-safe container pointers via container_of.  @pos must not
 * be removed inside the loop. */
353 #define LIST_FOR_EACH_ENTRY(pos, head, type, member)			       \
354 	for ((pos) = (head)->lh_first ?					       \
355 		     container_of((head)->lh_first, struct type, member) :     \
358 	     (pos) = (pos)->member.next.le_next ?			       \
359 		     container_of((pos)->member.next.le_next, struct type,     \
/* Removal-safe variant: @tmp pre-fetches the successor each iteration
 * so @pos may be unlinked/freed inside the loop body. */
363 #define LIST_FOR_EACH_ENTRY_SAFE(pos, tmp, head, type, member)	       \
364 	for ((pos) = (head)->lh_first ?					       \
365 		     container_of((head)->lh_first, struct type, member) :     \
367 	     (tmp) = (pos) == 0 ? 0 : ((pos)->member.next.le_next ?	       \
368 		     container_of((pos)->member.next.le_next, struct type,     \
373 	     (tmp) = (pos) == 0 ? 0 : ((tmp)->member.next.le_next ?	       \
374 		     container_of((pos)->member.next.le_next, struct type,     \
/* Move the whole chain from @list_head onto @head, leaving @list_head
 * empty.  Safe only because LIST heads are a single lh_first pointer. */
378 #define LIST_REPLACE_INIT(list_head, head) do {				\
379 	(head)->lh_first = (list_head)->lh_first;			\
380 	INIT_LIST_HEAD(list_head);					\
/* hlist_* operations alias the regular list shim one-for-one. */
383 #define HLIST_NODE_TYPE         LIST_ENTRY_TYPE
384 #define HLIST_HEAD_TYPE         LIST_HEAD_TYPE
385 #define INIT_HLIST_HEAD(list_head)             INIT_LIST_HEAD(list_head)
386 #define HLIST_ADD_HEAD(entry, list_head)       LIST_ADD(entry, list_head)
387 #define HLIST_EMPTY(list_head)                 LIST_EMPTY(list_head)
388 #define HLIST_DEL(entry)                       LIST_DEL(entry)
389 #define HLIST_FOR_EACH_ENTRY(pos, head, type, member) \
390 	LIST_FOR_EACH_ENTRY(pos, head, type, member)
392 #ifndef ICE_DBG_TRACE
393 #define ICE_DBG_TRACE		BIT_ULL(0)
396 #ifndef DIVIDE_AND_ROUND_UP
/* Ceiling division; duplicate of DIV_ROUND_UP kept for shared code. */
397 #define DIVIDE_AND_ROUND_UP(a, b) (((a) + (b) - 1) / (b))
400 #ifndef ICE_INTEL_VENDOR_ID
401 #define ICE_INTEL_VENDOR_ID		0x8086
/* Unicast: the I/G bit (bit 0 of the first octet) is clear. */
404 #ifndef IS_UNICAST_ETHER_ADDR
405 #define IS_UNICAST_ETHER_ADDR(addr) \
406 	((bool)((((u8 *)(addr))[0] % ((u8)0x2)) == 0))
/* Multicast: the I/G bit is set (also true for broadcast). */
409 #ifndef IS_MULTICAST_ETHER_ADDR
410 #define IS_MULTICAST_ETHER_ADDR(addr) \
411 	((bool)((((u8 *)(addr))[0] % ((u8)0x2)) == 1))
414 #ifndef IS_BROADCAST_ETHER_ADDR
415 /* Check whether an address is broadcast. */
/* Compares the MAC as 16-bit words; the check on the first word is
 * visible here, remaining-word checks continue on elided lines.
 * NOTE(review): requires 2-byte alignment of @addr for the u16 access. */
416 #define IS_BROADCAST_ETHER_ADDR(addr) \
417 	((bool)((((u16 *)(addr))[0] == ((u16)0xffff))))
420 #ifndef IS_ZERO_ETHER_ADDR
/* All-zero MAC: all three 16-bit words are zero (same alignment caveat
 * as above). */
421 #define IS_ZERO_ETHER_ADDR(addr) \
422 	(((bool)((((u16 *)(addr))[0] == ((u16)0x0)))) && \
423 	((bool)((((u16 *)(addr))[1] == ((u16)0x0)))) && \
424 	((bool)((((u16 *)(addr))[2] == ((u16)0x0)))))
427 #endif /* _ICE_OSDEP_H_ */