/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2018 Intel Corporation
 */

#ifndef _ICE_OSDEP_H_
#define _ICE_OSDEP_H_

#include <string.h>
#include <stdint.h>
#include <stdio.h>
#include <stdarg.h>
#include <inttypes.h>
#include <stdbool.h>
#include <sys/queue.h>

#include <rte_common.h>
#include <rte_memcpy.h>
#include <rte_malloc.h>
#include <rte_memzone.h>
#include <rte_byteorder.h>
#include <rte_cycles.h>
#include <rte_spinlock.h>
#include <rte_log.h>
#include <rte_random.h>
#include <rte_io.h>

#include "../ice_logs.h"

typedef uint8_t  u8;
typedef int8_t   s8;
typedef uint16_t u16;
typedef int16_t  s16;
typedef uint32_t u32;
typedef int32_t  s32;
typedef uint64_t u64;
typedef int64_t  s64;

#define hw_dbg(hw, S, A...) do {} while (0)
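
/* upper_32_bits() shifts in two 16-bit steps so the expression stays well
 * defined even when 'n' is only 32 bits wide (a single '>> 32' on a 32-bit
 * value is undefined behavior in C).
 */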
#define upper_32_bits(n) ((u32)(((n) >> 16) >> 16))
#define lower_32_bits(n) ((u32)(n))
#define low_16_bits(x)   ((x) & 0xFFFF)
#define high_16_bits(x)  (((x) & 0xFFFF0000) >> 16)

#ifndef ETH_ADDR_LEN
#define ETH_ADDR_LEN 6
#endif

#ifndef __le16
#define __le16 uint16_t
#endif
#ifndef __le32
#define __le32 uint32_t
#endif
#ifndef __le64
#define __le64 uint64_t
#endif
#ifndef __be16
#define __be16 uint16_t
#endif
#ifndef __be32
#define __be32 uint32_t
#endif
#ifndef __be64
#define __be64 uint64_t
#endif

#ifndef __always_unused
#define __always_unused __attribute__((unused))
#endif
#ifndef __maybe_unused
#define __maybe_unused __attribute__((unused))
#endif
#ifndef __packed
#define __packed __attribute__((packed))
#endif

#define BIT(a) (1UL << (a))
#define BIT_ULL(a) (1ULL << (a))

#define min(a, b) RTE_MIN(a, b)
#define max(a, b) RTE_MAX(a, b)

#define ARRAY_SIZE(arr) (sizeof(arr) / sizeof(arr[0]))
#define FIELD_SIZEOF(t, f) (sizeof(((t *)0)->f))
#define MAKEMASK(m, s) ((m) << (s))
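
/* MAKEMASK() builds a register-field mask from a value mask 'm' and a bit
 * shift 's', e.g. MAKEMASK(0x3, 4) == 0x30.
 */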

#define DEBUGOUT(S, A...) PMD_DRV_LOG_RAW(DEBUG, S, ##A)
#define DEBUGFUNC(F) PMD_DRV_LOG_RAW(DEBUG, F)

#define ice_debug(h, m, s, ...) \
do { \
	if (((m) & (h)->debug_mask)) \
		PMD_DRV_LOG_RAW(DEBUG, "ice %02x.%x " s, \
				(h)->bus.device, (h)->bus.func, \
				##__VA_ARGS__); \
} while (0)
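
/* Example (a sketch): assuming ICE_DBG_INIT is one of the ICE_DBG_* mask
 * bits from ice_type.h, a caller would log as:
 *
 *	ice_debug(hw, ICE_DBG_INIT, "FW API version %u.%u", maj, min);
 *
 * The message is emitted only when the bit is set in hw->debug_mask.
 */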

#define ice_info(hw, fmt, args...) ice_debug(hw, ICE_DBG_ALL, fmt, ##args)
#define ice_warn(hw, fmt, args...) ice_debug(hw, ICE_DBG_ALL, fmt, ##args)

#define ice_debug_array(hw, type, rowsize, groupsize, buf, len) \
do { \
	struct ice_hw *hw_l = hw; \
	u16 len_l = len; \
	u8 *buf_l = buf; \
	int i; \
	for (i = 0; i < len_l; i += 8) \
		ice_debug(hw_l, type, \
			  "0x%04X 0x%016"PRIx64"\n", \
			  i, *((u64 *)((buf_l) + i))); \
} while (0)

#define ice_snprintf snprintf
#ifndef SNPRINTF
#define SNPRINTF ice_snprintf
#endif

#define ICE_PCI_REG(reg) rte_read32(reg)
#define ICE_PCI_REG_ADDR(a, reg) \
	((volatile uint32_t *)((char *)(a)->hw_addr + (reg)))

static inline uint32_t ice_read_addr(volatile void *addr)
{
	return rte_le_to_cpu_32(ICE_PCI_REG(addr));
}

#define ICE_PCI_REG_WRITE(reg, value) \
	rte_write32((rte_cpu_to_le_32(value)), reg)

#define ice_flush(a)   ICE_READ_REG((a), GLGEN_STAT)
#define icevf_flush(a) ICE_READ_REG((a), VFGEN_RSTAT)
#define ICE_READ_REG(hw, reg) ice_read_addr(ICE_PCI_REG_ADDR((hw), (reg)))
#define ICE_WRITE_REG(hw, reg, value) \
	ICE_PCI_REG_WRITE(ICE_PCI_REG_ADDR((hw), (reg)), (value))

#define rd32(a, reg) ice_read_addr(ICE_PCI_REG_ADDR((a), (reg)))
#define wr32(a, reg, value) \
	ICE_PCI_REG_WRITE(ICE_PCI_REG_ADDR((a), (reg)), (value))
#define flush(a) ice_read_addr(ICE_PCI_REG_ADDR((a), (GLGEN_STAT)))
#define div64_long(n, d) ((n) / (d))
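
/* Example (a sketch): read-modify-write a device register through the
 * hw->hw_addr BAR mapping; GLGEN_STAT stands in here for any register
 * offset from the hardware register definitions.
 *
 *	u32 val = rd32(hw, GLGEN_STAT);
 *	wr32(hw, GLGEN_STAT, val);
 *	ice_flush(hw);	// read back to push posted writes to the device
 */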

#define BITS_PER_BYTE 8
typedef u32 ice_bitmap_t;
#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))
#define BITS_TO_CHUNKS(nr) \
	DIV_ROUND_UP(nr, BITS_PER_BYTE * sizeof(ice_bitmap_t))
#define ice_declare_bitmap(name, bits) \
	ice_bitmap_t name[BITS_TO_CHUNKS(bits)]
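
/* With 32-bit chunks, ice_declare_bitmap(bmp, 64) declares
 * 'ice_bitmap_t bmp[2]'; ice_declare_bitmap(bmp, 33) also needs two
 * chunks, since 33 bits do not fit in one.
 */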

#define BITS_CHUNK_MASK(nr) (((ice_bitmap_t)~0) >> \
	((BITS_PER_BYTE * sizeof(ice_bitmap_t)) - \
	 (((nr) - 1) % (BITS_PER_BYTE * sizeof(ice_bitmap_t)) + 1)))
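
/* BITS_CHUNK_MASK(nr) masks the valid bits of the last chunk of an
 * 'nr'-bit bitmap. E.g. for nr == 40, the last 32-bit chunk holds bits
 * 32-39, so the mask is ~0 >> (32 - 8) == 0xFF.
 */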

#define BITS_PER_CHUNK (BITS_PER_BYTE * sizeof(ice_bitmap_t))
#define BIT_CHUNK(nr)  ((nr) / BITS_PER_CHUNK)
#define BIT_IN_CHUNK(nr) BIT((nr) % BITS_PER_CHUNK)

static inline bool ice_is_bit_set(const ice_bitmap_t *bitmap, u16 nr)
{
	return !!(bitmap[BIT_CHUNK(nr)] & BIT_IN_CHUNK(nr));
}

#define ice_and_bitmap(d, b1, b2, sz) \
	ice_intersect_bitmaps((u8 *)d, (u8 *)b1, (const u8 *)b2, (u16)sz)
static inline int
ice_intersect_bitmaps(u8 *dst, const u8 *bmp1, const u8 *bmp2, u16 sz)
{
	u32 res = 0;
	int cnt;
	u16 i;

	/* Utilize 32-bit operations */
	cnt = (sz % BITS_PER_BYTE) ?
		(sz / BITS_PER_BYTE) + 1 : sz / BITS_PER_BYTE;
	for (i = 0; i < cnt / 4; i++) {
		((u32 *)dst)[i] = ((const u32 *)bmp1)[i] &
			((const u32 *)bmp2)[i];
		res |= ((u32 *)dst)[i];
	}

	for (i *= 4; i < cnt; i++) {
		if ((sz % 8 == 0) || (i + 1 < cnt)) {
			dst[i] = bmp1[i] & bmp2[i];
		} else {
			/* Remaining bits that do not occupy the whole byte */
			u8 mask = ~0u >> (8 - (sz % 8));

			dst[i] = bmp1[i] & bmp2[i] & mask;
		}
		res |= dst[i];
	}

	return res != 0;
}
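
/* The return value is 1 when the intersection contains at least one set
 * bit and 0 when it is empty, so callers can test for an empty result
 * without a second scan of dst.
 */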

static inline int ice_find_first_bit(ice_bitmap_t *name, u16 size)
{
	u16 i;

	for (i = 0; i < BITS_PER_BYTE * (size / BITS_PER_BYTE); i++)
		if (ice_is_bit_set(name, i))
			return i;
	return size;
}

static inline int ice_find_next_bit(ice_bitmap_t *name, u16 size, u16 bits)
{
	u16 i;

	for (i = bits; i < BITS_PER_BYTE * (size / BITS_PER_BYTE); i++)
		if (ice_is_bit_set(name, i))
			return i;
	return size;
}

#define for_each_set_bit(bit, addr, size) \
	for ((bit) = ice_find_first_bit((addr), (size)); \
	     (bit) < (size); \
	     (bit) = ice_find_next_bit((addr), (size), (bit) + 1))
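
/* Example (a sketch): visit every set bit; handle_bit() is hypothetical.
 *
 *	ice_declare_bitmap(bmp, 16);
 *	u16 bit;
 *
 *	ice_zero_bitmap(bmp, 16);
 *	ice_set_bit(3, bmp);
 *	for_each_set_bit(bit, bmp, 16)
 *		handle_bit(bit);
 */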

static inline bool ice_is_any_bit_set(ice_bitmap_t *bitmap, u32 bits)
{
	u32 max_index = BITS_TO_CHUNKS(bits);
	u32 i;

	for (i = 0; i < max_index; i++) {
		if (bitmap[i])
			return true;
	}
	return false;
}

/* memory allocation tracking */
struct ice_dma_mem {
	void *va;
	u64 pa;
	u32 size;
	const void *zone;
} __attribute__((packed));

struct ice_virt_mem {
	void *va;
	u32 size;
} __attribute__((packed));

#define ice_malloc(h, s)    rte_zmalloc(NULL, s, 0)
#define ice_calloc(h, c, s) rte_zmalloc(NULL, (c) * (s), 0)
#define ice_free(h, m)      rte_free(m)

#define ice_memset(a, b, c, d) memset((a), (b), (c))
#define ice_memcpy(a, b, c, d) rte_memcpy((a), (b), (c))
#define ice_memdup(a, b, c, d) rte_memcpy(ice_malloc(a, c), b, c)
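
/* Note: ice_memdup() passes the ice_malloc() result straight to
 * rte_memcpy() with no NULL check, so a failed allocation is not caught
 * here; callers rely on the allocation succeeding.
 */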

#define CPU_TO_BE16(o) rte_cpu_to_be_16(o)
#define CPU_TO_BE32(o) rte_cpu_to_be_32(o)
#define CPU_TO_BE64(o) rte_cpu_to_be_64(o)
#define CPU_TO_LE16(o) rte_cpu_to_le_16(o)
#define CPU_TO_LE32(s) rte_cpu_to_le_32(s)
#define CPU_TO_LE64(h) rte_cpu_to_le_64(h)
#define LE16_TO_CPU(a) rte_le_to_cpu_16(a)
#define LE32_TO_CPU(c) rte_le_to_cpu_32(c)
#define LE64_TO_CPU(k) rte_le_to_cpu_64(k)

#define NTOHS(a) rte_be_to_cpu_16(a)
#define NTOHL(a) rte_be_to_cpu_32(a)
#define HTONS(a) rte_cpu_to_be_16(a)
#define HTONL(a) rte_cpu_to_be_32(a)
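
/* These wrappers map the shared-code byte-order helpers onto DPDK's
 * rte_byteorder.h; on a host whose endianness already matches they
 * compile down to no-ops.
 */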

static inline void
ice_set_bit(unsigned int nr, volatile ice_bitmap_t *addr)
{
	__sync_fetch_and_or(addr, (1UL << nr));
}

static inline void
ice_clear_bit(unsigned int nr, volatile ice_bitmap_t *addr)
{
	/* Clear only bit 'nr': AND with the complement of its mask.
	 * (An AND with '0UL << nr' would wipe the whole chunk.)
	 */
	__sync_fetch_and_and(addr, ~(1UL << nr));
}
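
/* Both helpers use GCC __sync atomics, so concurrent updates of the same
 * chunk are safe. Note that 'nr' indexes into *addr directly: for bitmaps
 * wider than one chunk the caller must resolve the chunk (BIT_CHUNK())
 * itself.
 */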

static inline void
ice_zero_bitmap(ice_bitmap_t *bmp, u16 size)
{
	ice_bitmap_t mask;
	u16 i;

	/* Handle all but last chunk */
	for (i = 0; i < BITS_TO_CHUNKS(size) - 1; i++)
		bmp[i] = 0;

	/* For the last chunk, clear only the bits within the size boundary */
	mask = BITS_CHUNK_MASK(size);
	bmp[i] &= ~mask;
}

static inline void
ice_or_bitmap(ice_bitmap_t *dst, const ice_bitmap_t *bmp1,
	      const ice_bitmap_t *bmp2, u16 size)
{
	ice_bitmap_t mask;
	u16 i;

	/* Handle all but last chunk */
	for (i = 0; i < BITS_TO_CHUNKS(size) - 1; i++)
		dst[i] = bmp1[i] | bmp2[i];

	/* We want to only OR bits within the size. Furthermore, we also do
	 * not want to modify destination bits which are beyond the specified
	 * size. Use a bitmask to ensure that we only modify the bits that are
	 * within the specified size.
	 */
	mask = BITS_CHUNK_MASK(size);
	dst[i] &= ~mask;
	dst[i] |= (bmp1[i] | bmp2[i]) & mask;
}

static inline void ice_cp_bitmap(ice_bitmap_t *dst, ice_bitmap_t *src, u16 size)
{
	ice_bitmap_t mask;
	u16 i;

	/* Handle all but last chunk */
	for (i = 0; i < BITS_TO_CHUNKS(size) - 1; i++)
		dst[i] = src[i];

	/* We want to only copy bits within the size */
	mask = BITS_CHUNK_MASK(size);
	dst[i] &= ~mask;
	dst[i] |= src[i] & mask;
}

static inline bool
ice_cmp_bitmap(ice_bitmap_t *bmp1, ice_bitmap_t *bmp2, u16 size)
{
	ice_bitmap_t mask;
	u16 i;

	/* Handle all but last chunk */
	for (i = 0; i < BITS_TO_CHUNKS(size) - 1; i++)
		if (bmp1[i] != bmp2[i])
			return false;

	/* We want to only compare bits within the size */
	mask = BITS_CHUNK_MASK(size);
	if ((bmp1[i] & mask) != (bmp2[i] & mask))
		return false;

	return true;
}

/* SW spinlock */
struct ice_lock {
	rte_spinlock_t spinlock;
};

static inline void
ice_init_lock(struct ice_lock *sp)
{
	rte_spinlock_init(&sp->spinlock);
}

static inline void
ice_acquire_lock(struct ice_lock *sp)
{
	rte_spinlock_lock(&sp->spinlock);
}

static inline void
ice_release_lock(struct ice_lock *sp)
{
	rte_spinlock_unlock(&sp->spinlock);
}

static inline void
ice_destroy_lock(__attribute__((unused)) struct ice_lock *sp)
{
}
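
/* Example (a sketch): typical guard around a shared control-path structure.
 *
 *	struct ice_lock lock;
 *
 *	ice_init_lock(&lock);
 *	ice_acquire_lock(&lock);
 *	// ... critical section ...
 *	ice_release_lock(&lock);
 *	ice_destroy_lock(&lock);  // no-op here, kept for API symmetry
 */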

struct ice_hw;

static inline void *
ice_alloc_dma_mem(__attribute__((unused)) struct ice_hw *hw,
		  struct ice_dma_mem *mem, u64 size)
{
	const struct rte_memzone *mz = NULL;
	char z_name[RTE_MEMZONE_NAMESIZE];

	if (!mem)
		return NULL;

	snprintf(z_name, sizeof(z_name), "ice_dma_%"PRIu64, rte_rand());
	mz = rte_memzone_reserve_bounded(z_name, size, SOCKET_ID_ANY, 0,
					 0, RTE_PGSIZE_2M);
	if (!mz)
		return NULL;

	mem->size = size;
	mem->va = mz->addr;
	mem->pa = mz->phys_addr;
	mem->zone = (const void *)mz;
	PMD_DRV_LOG(DEBUG, "memzone %s allocated with physical address: "
		    "%"PRIu64, mz->name, mem->pa);

	return mem->va;
}

static inline void
ice_free_dma_mem(__attribute__((unused)) struct ice_hw *hw,
		 struct ice_dma_mem *mem)
{
	PMD_DRV_LOG(DEBUG, "memzone %s to be freed with physical address: "
		    "%"PRIu64, ((const struct rte_memzone *)mem->zone)->name,
		    mem->pa);
	rte_memzone_free((const struct rte_memzone *)mem->zone);
	mem->zone = NULL;
	mem->va = NULL;
	mem->pa = (u64)0;
}
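
/* Example (a sketch): a shared-code caller pairs the two helpers as:
 *
 *	struct ice_dma_mem mem = { 0 };
 *	void *va = ice_alloc_dma_mem(hw, &mem, 4096);
 *
 *	if (va)
 *		// use va / mem.pa, e.g. for descriptor rings
 *	ice_free_dma_mem(hw, &mem);
 */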

static inline u8
ice_hweight8(u32 num)
{
	u8 bits = 0;
	u32 i;

	for (i = 0; i < 8; i++) {
		bits += (u8)(num & 0x1);
		num >>= 1;
	}

	return bits;
}
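
/* ice_hweight8() is a byte popcount: only the low 8 bits of 'num' are
 * examined, e.g. ice_hweight8(0xB3) == 5.
 */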

#define DELAY(x) rte_delay_us(x)
#define ice_usec_delay(x) rte_delay_us(x)
#define ice_msec_delay(x, y) rte_delay_us(1000 * (x))
#define udelay(x) DELAY(x)
#define msleep(x) DELAY(1000 * (x))
#define usleep_range(min, max) msleep(DIV_ROUND_UP(min, 1000))
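
/* All of these map to rte_delay_us(), which by default busy-waits:
 * msleep() and usleep_range() block the calling lcore rather than
 * yielding it.
 */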

struct ice_list_entry {
	LIST_ENTRY(ice_list_entry) next;
};

LIST_HEAD(ice_list_head, ice_list_entry);

#define LIST_ENTRY_TYPE ice_list_entry
#define LIST_HEAD_TYPE  ice_list_head
#define INIT_LIST_HEAD(list_head) LIST_INIT(list_head)
#define LIST_DEL(entry)           LIST_REMOVE(entry, next)
/* LIST_EMPTY(list_head) is the same in sys/queue.h */

/* Note: parameter order differs from the underlying sys/queue.h macros */
#define LIST_FIRST_ENTRY(head, type, field) (type *)((head)->lh_first)
#define LIST_NEXT_ENTRY(entry, type, field) \
	((type *)(entry)->field.next.le_next)
#define LIST_ADD(entry, list_head) LIST_INSERT_HEAD(list_head, entry, next)
#define LIST_ADD_AFTER(entry, list_entry) \
	LIST_INSERT_AFTER(list_entry, entry, next)

#define LIST_FOR_EACH_ENTRY(pos, head, type, member) \
	for ((pos) = (head)->lh_first ? \
		     container_of((head)->lh_first, struct type, member) : \
		     0; \
	     (pos); \
	     (pos) = (pos)->member.next.le_next ? \
		     container_of((pos)->member.next.le_next, struct type, \
				  member) : \
		     0)
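
/* Example (a sketch): a hypothetical ice_foo entry embedded in a list.
 *
 *	struct ice_foo {
 *		struct ice_list_entry node;
 *		u32 val;
 *	};
 *
 *	struct ice_list_head head;
 *	struct ice_foo *foo;
 *
 *	INIT_LIST_HEAD(&head);
 *	LIST_ADD(&foo->node, &head);
 *	LIST_FOR_EACH_ENTRY(foo, &head, ice_foo, node)
 *		use(foo->val);	// use() is hypothetical
 */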

#define LIST_REPLACE_INIT(list_head, head) do { \
	(head)->lh_first = (list_head)->lh_first; \
	INIT_LIST_HEAD(list_head); \
} while (0)

#define HLIST_NODE_TYPE            LIST_ENTRY_TYPE
#define HLIST_HEAD_TYPE            LIST_HEAD_TYPE
#define INIT_HLIST_HEAD(list_head) INIT_LIST_HEAD(list_head)
#define HLIST_ADD_HEAD(entry, list_head) LIST_ADD(entry, list_head)
#define HLIST_EMPTY(list_head)     LIST_EMPTY(list_head)
#define HLIST_DEL(entry)           LIST_DEL(entry)
#define HLIST_FOR_EACH_ENTRY(pos, head, type, member) \
	LIST_FOR_EACH_ENTRY(pos, head, type, member)
#define LIST_FOR_EACH_ENTRY_SAFE(pos, tmp, head, type, member) \
	LIST_FOR_EACH_ENTRY(pos, head, type, member)
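
/* Note: LIST_FOR_EACH_ENTRY_SAFE() simply aliases LIST_FOR_EACH_ENTRY()
 * and ignores 'tmp', so unlike its Linux namesake it is NOT safe to
 * delete 'pos' while iterating.
 */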

#ifndef ICE_DBG_TRACE
#define ICE_DBG_TRACE BIT_ULL(0)
#endif

#ifndef DIVIDE_AND_ROUND_UP
#define DIVIDE_AND_ROUND_UP(a, b) (((a) + (b) - 1) / (b))
#endif

#ifndef ICE_INTEL_VENDOR_ID
#define ICE_INTEL_VENDOR_ID 0x8086
#endif

#ifndef IS_UNICAST_ETHER_ADDR
#define IS_UNICAST_ETHER_ADDR(addr) \
	((bool)((((u8 *)(addr))[0] % ((u8)0x2)) == 0))
#endif

#ifndef IS_MULTICAST_ETHER_ADDR
#define IS_MULTICAST_ETHER_ADDR(addr) \
	((bool)((((u8 *)(addr))[0] % ((u8)0x2)) == 1))
#endif

#ifndef IS_BROADCAST_ETHER_ADDR
/* Check whether an address is broadcast. */
#define IS_BROADCAST_ETHER_ADDR(addr) \
	((bool)((((u16 *)(addr))[0] == ((u16)0xffff))))
#endif

#ifndef IS_ZERO_ETHER_ADDR
#define IS_ZERO_ETHER_ADDR(addr) \
	(((bool)((((u16 *)(addr))[0] == ((u16)0x0)))) && \
	 ((bool)((((u16 *)(addr))[1] == ((u16)0x0)))) && \
	 ((bool)((((u16 *)(addr))[2] == ((u16)0x0)))))
#endif
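
/* The unicast/multicast checks test the I/G bit (the least significant
 * bit of the first octet): e.g. 01:00:5e:00:00:01 is multicast, while
 * 00:1b:21:aa:bb:cc is unicast. Note the broadcast check compares only
 * the first two octets, so any address beginning ff:ff passes it.
 */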

#endif /* _ICE_OSDEP_H_ */