/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2017 Cavium, Inc
 */
5 #ifndef __OCTEONTX_FPAVF_H__
6 #define __OCTEONTX_FPAVF_H__
9 #include "octeontx_pool_logs.h"
/* FPA pool PCI Vendor ID and Device ID */
#define PCI_VENDOR_ID_CAVIUM		0x177D
#define PCI_DEVICE_ID_OCTEONTX_FPA_VF	0xA053

/* Number of FPA virtual functions; restored here because FPA_GPOOL_MASK
 * references it but no definition is visible in this file.
 */
#define FPA_VF_MAX			32

/* Mask extracting the global-pool index from a pool handle. */
#define FPA_GPOOL_MASK			(FPA_VF_MAX-1)
/* Each pool owns a set of auras; gpool -> first-aura is a shift by this. */
#define FPA_GAURA_SHIFT			4
/* FPA VF register offsets; (x) selects the VF aperture (4 MB stride). */
#define FPA_VF_INT(x)		(0x200ULL | ((x) << 22))
#define FPA_VF_INT_W1S(x)	(0x210ULL | ((x) << 22))
#define FPA_VF_INT_ENA_W1S(x)	(0x220ULL | ((x) << 22))
#define FPA_VF_INT_ENA_W1C(x)	(0x230ULL | ((x) << 22))

/* VHPOOL registers; '& 0x0' keeps the vhpool argument for documentation
 * purposes only — each VF exposes exactly one pool, so the index is 0.
 */
#define FPA_VF_VHPOOL_AVAILABLE(vhpool)		(0x04150 | ((vhpool)&0x0))
#define FPA_VF_VHPOOL_THRESHOLD(vhpool)		(0x04160 | ((vhpool)&0x0))
#define FPA_VF_VHPOOL_START_ADDR(vhpool)	(0x04200 | ((vhpool)&0x0))
#define FPA_VF_VHPOOL_END_ADDR(vhpool)		(0x04210 | ((vhpool)&0x0))
/* VHAURA registers; the 4-bit aura index is placed at bit 18 of the
 * offset. Expansions are fully parenthesized for macro hygiene.
 */
#define FPA_VF_VHAURA_CNT(vaura)		(0x20120 | (((vaura) & 0xf) << 18))
#define FPA_VF_VHAURA_CNT_ADD(vaura)		(0x20128 | (((vaura) & 0xf) << 18))
#define FPA_VF_VHAURA_CNT_LIMIT(vaura)		(0x20130 | (((vaura) & 0xf) << 18))
#define FPA_VF_VHAURA_CNT_THRESHOLD(vaura)	(0x20140 | (((vaura) & 0xf) << 18))
#define FPA_VF_VHAURA_OP_ALLOC(vaura)		(0x30000 | (((vaura) & 0xf) << 18))
#define FPA_VF_VHAURA_OP_FREE(vaura)		(0x38000 | (((vaura) & 0xf) << 18))

/* Compose a free-operation address: base (x), 9-bit DWB count (y) at
 * bit 3, and the FABS flag (z) at bit 14.
 */
#define FPA_VF_FREE_ADDRS_S(x, y, z) \
	((x) | (((y) & 0x1ff) << 3) | ((((z) & 1)) << 14))
/* First global aura index owned by the given global pool. The argument
 * is parenthesized so expressions such as FPA_AURA_IDX(a | b) group
 * correctly ('<<' binds tighter than '|').
 */
#define FPA_AURA_IDX(gpool)	((gpool) << FPA_GAURA_SHIFT)
/* FPA VF register offsets from VF_BAR4, size 2 MByte */
#define FPA_VF_MSIX_VEC_ADDR		0x00000
#define FPA_VF_MSIX_VEC_CTL		0x00008
#define FPA_VF_MSIX_PBA			0xF0000

/* log2 of the per-VF register aperture (4 MB). */
#define FPA_VF0_APERTURE_SHIFT		22
/* Number of auras per pool (matches 1 << FPA_GAURA_SHIFT). */
#define FPA_AURA_SET_SIZE		16

/* Largest object size the FPA hardware pool supports. */
#define FPA_MAX_OBJ_SIZE		(128 * 1024)
/* Fixed data offset applied to every buffer handed out by the pool. */
#define OCTEONTX_FPAVF_BUF_OFFSET	128
/*
 * In Cavium OCTEON TX SoC, all accesses to the device registers are
 * implicitly strongly ordered. So, the relaxed version of IO operation is
 * safe to use without any IO memory barriers.
 */
#define fpavf_read64	rte_read64_relaxed
#define fpavf_write64	rte_write64_relaxed
/* ARM64 specific functions */
#if defined(RTE_ARCH_ARM64)
/* Read a 16-byte value from 'addr' into val0/val1 with one LDP. */
#define fpavf_load_pair(val0, val1, addr) ({		\
			asm volatile(			\
			"ldp %x[x0], %x[x1], [%x[p1]]"	\
			:[x0]"=r"(val0), [x1]"=r"(val1) \
			:[p1]"r"(addr)			\
			); })

/* Write val0/val1 as one 16-byte store to 'addr' with one STP. */
#define fpavf_store_pair(val0, val1, addr) ({		\
			asm volatile(			\
			"stp %x[x0], %x[x1], [%x[p1]]"	\
			::[x0]"r"(val0), [x1]"r"(val1), [p1]"r"(addr) \
			); })
#else /* Un optimized functions for building on non arm64 arch */

/* Two plain 64-bit reads; not atomic as a pair, unlike the LDP form. */
#define fpavf_load_pair(val0, val1, addr)		\
do {							\
	val0 = rte_read64(addr);			\
	val1 = rte_read64(((uint8_t *)addr) + 8);	\
} while (0)

/* Two plain 64-bit writes; not atomic as a pair, unlike the STP form. */
#define fpavf_store_pair(val0, val1, addr)		\
do {							\
	rte_write64(val0, addr);			\
	rte_write64(val1, (((uint8_t *)addr) + 8));	\
} while (0)
#endif
/*
 * Create an FPA hardware buffer pool of 'object_count' objects of
 * 'object_size' bytes each on NUMA node 'node'; 'buf_offset' is the
 * per-buffer data offset. Returns an opaque pool handle — NOTE(review):
 * error convention (0 vs. negative) is defined in the .c file; confirm.
 */
uintptr_t
octeontx_fpa_bufpool_create(unsigned int object_size, unsigned int object_count,
			    unsigned int buf_offset, int node);

/* Register the backing memory range [memva, memva + memsz) with 'gpool'. */
int
octeontx_fpavf_pool_set_range(uintptr_t handle, unsigned long memsz,
			      void *memva, uint16_t gpool);

/* Tear down the pool identified by 'handle' on NUMA node 'node'. */
int
octeontx_fpa_bufpool_destroy(uintptr_t handle, int node);

/* Query the block (object) size of the pool behind 'handle'. */
int
octeontx_fpa_bufpool_block_size(uintptr_t handle);

/* Query the number of buffers currently free in the pool. */
int
octeontx_fpa_bufpool_free_count(uintptr_t handle);
102 static __rte_always_inline uint8_t
103 octeontx_fpa_bufpool_gpool(uintptr_t handle)
105 return (uint8_t)handle & FPA_GPOOL_MASK;
108 static __rte_always_inline uint16_t
109 octeontx_fpa_bufpool_gaura(uintptr_t handle)
111 return octeontx_fpa_bufpool_gpool(handle) << FPA_GAURA_SHIFT;
114 #endif /* __OCTEONTX_FPAVF_H__ */