/* SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (C) 2014-2016 Freescale Semiconductor, Inc.
 */
6 /* qbman_sys_decl.h and qbman_sys.h are the two platform-specific files in the
7 * driver. They are only included via qbman_private.h, which is itself a
8 * platform-independent file and is included by all the other driver source.
10 * qbman_sys_decl.h is included prior to all other declarations and logic, and
11 * it exists to provide compatibility with any linux interfaces our
12 * single-source driver code is dependent on (eg. kmalloc). Ie. this file
13 * provides linux compatibility.
15 * This qbman_sys.h header, on the other hand, is included *after* any common
16 * and platform-neutral declarations and logic in qbman_private.h, and exists to
17 * implement any platform-specific logic of the qbman driver itself. Ie. it is
 * *not* to provide linux compatibility.
 */
24 #include "qbman_sys_decl.h"
/* Selects how portal command writes are routed (the SWP_CFG "WN" field,
 * see qbman_swp_sys_init()): 0 = through the cache-enabled (CENA) region,
 * 1 = through the cache-inhibited (CINH) region.
 */
#define CENA_WRITE_ENABLE 0
#define CINH_WRITE_ENABLE 1

/* CINH register offsets.
 * Naming: PI/CI = producer/consumer index; the _RT variants are presumably
 * the read-trigger aliases used by memory-backed portals (see the
 * read-triggered-mode configuration in qbman_swp_sys_init()) — confirm
 * against the QBMan block guide.
 */
#define QBMAN_CINH_SWP_EQCR_PI 0x800
#define QBMAN_CINH_SWP_EQCR_CI 0x840
#define QBMAN_CINH_SWP_EQAR 0x8c0
#define QBMAN_CINH_SWP_CR_RT 0x900
#define QBMAN_CINH_SWP_VDQCR_RT 0x940
#define QBMAN_CINH_SWP_EQCR_AM_RT 0x980
#define QBMAN_CINH_SWP_RCR_AM_RT 0x9c0
#define QBMAN_CINH_SWP_DQPI 0xa00
#define QBMAN_CINH_SWP_DQRR_ITR 0xa80
#define QBMAN_CINH_SWP_DCAP 0xac0
#define QBMAN_CINH_SWP_SDQCR 0xb00
#define QBMAN_CINH_SWP_EQCR_AM_RT2 0xb40
#define QBMAN_CINH_SWP_RCR_PI 0xc00
#define QBMAN_CINH_SWP_RAR 0xcc0
#define QBMAN_CINH_SWP_ISR 0xe00
#define QBMAN_CINH_SWP_IER 0xe40
#define QBMAN_CINH_SWP_ISDR 0xe80
#define QBMAN_CINH_SWP_IIR 0xec0
#define QBMAN_CINH_SWP_ITPR 0xf40

/* CENA register offsets */
/* Ring entries are 64 bytes apart (index << 6). */
#define QBMAN_CENA_SWP_EQCR(n) (0x000 + ((uint32_t)(n) << 6))
#define QBMAN_CENA_SWP_DQRR(n) (0x200 + ((uint32_t)(n) << 6))
#define QBMAN_CENA_SWP_RCR(n) (0x400 + ((uint32_t)(n) << 6))
#define QBMAN_CENA_SWP_CR 0x600
#define QBMAN_CENA_SWP_RR(vb) (0x700 + ((uint32_t)(vb) >> 1))
#define QBMAN_CENA_SWP_VDQCR 0x780
#define QBMAN_CENA_SWP_EQCR_CI 0x840
#define QBMAN_CENA_SWP_EQCR_CI_MEMBACK 0x1840

/* CENA register offsets in memory-backed mode */
#define QBMAN_CENA_SWP_DQRR_MEM(n) (0x800 + ((uint32_t)(n) << 6))
#define QBMAN_CENA_SWP_RCR_MEM(n) (0x1400 + ((uint32_t)(n) << 6))
#define QBMAN_CENA_SWP_CR_MEM 0x1600
#define QBMAN_CENA_SWP_RR_MEM 0x1680
#define QBMAN_CENA_SWP_VDQCR_MEM 0x1780
67 /* Debugging assists */
/* Emit a hex dump of the byte range [p, p + sz), widened outward to the
 * enclosing 16-byte rows [start, end); bytes inside a row but outside the
 * dumped object are printed as ".." so the columns stay aligned.
 *
 * NOTE(review): most of this function (buffer declarations, the row loop,
 * and the per-line flush) is elided in this listing — verify against the
 * full source before modifying.
 */
static inline void __hexdump(unsigned long start, unsigned long end,
			     unsigned long p, size_t sz, const unsigned char *c)
	/* Row prefix: address of the first byte in this 16-byte row. */
	pos += sprintf(buf + pos, "%08lx: ", start);
	/* Padding position outside [p, p+sz): print "..", else the byte. */
	if ((start < p) || (start >= (p + sz)))
		pos += sprintf(buf + pos, "..");
	pos += sprintf(buf + pos, "%02x", *(c++));
	if (!(++start & 15)) {
/* Hex-dump the @sz bytes at @ptr.  The dump window is aligned outward to
 * 16-byte boundaries so every row is complete; __hexdump() renders the
 * alignment padding outside [@ptr, @ptr + @sz) as "..".
 *
 * (Fix: restored the function braces lost to truncation in the reviewed
 * listing; the statements themselves are unchanged.)
 */
static inline void hexdump(const void *ptr, size_t sz)
{
	unsigned long p = (unsigned long)ptr;
	unsigned long start = p & ~15;
	unsigned long end = (p + sz + 15) & ~15;
	const unsigned char *c = ptr;

	__hexdump(start, end, p, sz, c);
}
110 /* Currently, the CENA support code expects each 32-bit word to be written in
111 * host order, and these are converted to hardware (little-endian) order on
 * command submission. However, 64-bit quantities must be written (and read)
 * as two 32-bit words with the least-significant word first, irrespective of
 * host endianness.
 */
/* Copy @cnt 64-bit quantities from @s to @d, emitting each as two 32-bit
 * words with the least-significant word first (see the block comment
 * above this function).
 *
 * NOTE(review): the destination alias, the count parameter and the copy
 * loop are elided in this listing — verify against the full source.
 */
static inline void u64_to_le32_copy(void *d, const uint64_t *s,
	/* View the 64-bit source as a stream of 32-bit words. */
	const uint32_t *ss = (const uint32_t *)s;

	/* TBD: the toolchain was choking on the use of 64-bit types up
	 * until recently so this works entirely with 32-bit variables.
	 * When 64-bit types become usable again, investigate better
	 * ways of doing this.
	 */
	/* Big-endian hosts must swap the two 32-bit halves of each word. */
#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
/* Inverse of u64_to_le32_copy(): rebuild @cnt 64-bit quantities in @d from
 * pairs of 32-bit words stored least-significant word first in @s.
 *
 * NOTE(review): the count parameter and the copy loop are elided in this
 * listing — verify against the full source.
 */
static inline void u64_from_le32_copy(uint64_t *d, const void *s,
	const uint32_t *ss = s;
	/* View the 64-bit destination as a stream of 32-bit words. */
	uint32_t *dd = (uint32_t *)d;

	/* Big-endian hosts must swap the two 32-bit halves of each word. */
#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
160 struct qbman_swp_sys {
161 /* On GPP, the sys support for qbman_swp is here. The CENA region isi
162 * not an mmap() of the real portal registers, but an allocated
163 * place-holder, because the actual writes/reads to/from the portal are
164 * marshalled from these allocated areas using QBMan's "MC access
165 * registers". CINH accesses are atomic so there's no need for a
172 enum qbman_eqcr_mode eqcr_mode;
175 /* P_OFFSET is (ACCESS_CMD,0,12) - offset within the portal
176 * C is (ACCESS_CMD,12,1) - is inhibited? (0==CENA, 1==CINH)
177 * SWP_IDX is (ACCESS_CMD,16,10) - Software portal index
178 * P is (ACCESS_CMD,28,1) - (0==special portal, 1==any portal)
179 * T is (ACCESS_CMD,29,1) - Command type (0==READ, 1==WRITE)
 * E is (ACCESS_CMD,31,1) - Command execute (1 to issue, poll for 0==complete)
 */
183 static inline void qbman_cinh_write(struct qbman_swp_sys *s, uint32_t offset,
186 __raw_writel(val, s->addr_cinh + offset);
187 #ifdef QBMAN_CINH_TRACE
188 pr_info("qbman_cinh_write(%p:%d:0x%03x) 0x%08x\n",
189 s->addr_cinh, s->idx, offset, val);
193 static inline uint32_t qbman_cinh_read(struct qbman_swp_sys *s, uint32_t offset)
195 uint32_t reg = __raw_readl(s->addr_cinh + offset);
196 #ifdef QBMAN_CINH_TRACE
197 pr_info("qbman_cinh_read(%p:%d:0x%03x) 0x%08x\n",
198 s->addr_cinh, s->idx, offset, reg);
/* Begin a CENA command write: return the 64-byte shadow slot at @offset in
 * s->cena for the caller to fill in before qbman_cena_write_complete().
 *
 * NOTE(review): the second parameter line, the trace #endif and the
 * function tail (presumably clearing/returning the shadow) are elided in
 * this listing — verify against the full source.
 */
static inline void *qbman_cena_write_start(struct qbman_swp_sys *s,
	void *shadow = s->cena + offset;

#ifdef QBMAN_CENA_TRACE
	pr_info("qbman_cena_write_start(%p:%d:0x%03x) %p\n",
		s->addr_cena, s->idx, offset, shadow);
	/* Commands must be 64-byte (cacheline) aligned. */
	QBMAN_BUG_ON(offset & 63);
/* Begin a command write directly in the portal region, bypassing the
 * s->cena shadow copy.
 *
 * NOTE(review): an elided #if/#else selects between the two return
 * statements below (cache-enabled vs cache-inhibited region); the
 * condition is not visible in this listing — verify against the full
 * source.
 */
static inline void *qbman_cena_write_start_wo_shadow(struct qbman_swp_sys *s,
#ifdef QBMAN_CENA_TRACE
	pr_info("qbman_cena_write_start(%p:%d:0x%03x)\n",
		s->addr_cena, s->idx, offset);
	/* Commands must be 64-byte (cacheline) aligned. */
	QBMAN_BUG_ON(offset & 63);
	return (s->addr_cena + offset);
	return (s->addr_cinh + offset);
/* Commit a previously-filled shadow command to the hardware portal: copy
 * the sixteen 32-bit words of @cmd out, writing words 15..1 before word 0
 * (presumably word 0 carries the command verb/valid marker, so it is made
 * visible last — confirm against the QBMan block guide), then flush the
 * cacheline with dcbf().
 *
 * NOTE(review): an elided #if/#else selects the cache-enabled vs
 * cache-inhibited destination, and the __raw_writel() continuation lines
 * (the per-word offset) are truncated in this listing — verify against the
 * full source.
 */
static inline void qbman_cena_write_complete(struct qbman_swp_sys *s,
					     uint32_t offset, void *cmd)
	const uint32_t *shadow = cmd;

#ifdef QBMAN_CENA_TRACE
	pr_info("qbman_cena_write_complete(%p:%d:0x%03x) %p\n",
		s->addr_cena, s->idx, offset, shadow);
	/* Cache-enabled path: words 15..1 first, word 0 last. */
	for (loop = 15; loop >= 1; loop--)
		__raw_writel(shadow[loop], s->addr_cena +
	__raw_writel(shadow[0], s->addr_cena + offset);
	/* Cache-inhibited path: same ordering through the CINH region. */
	for (loop = 15; loop >= 1; loop--)
		__raw_writel(shadow[loop], s->addr_cinh +
	__raw_writel(shadow[0], s->addr_cinh + offset);
	dcbf(s->addr_cena + offset);
258 static inline void qbman_cena_write_complete_wo_shadow(struct qbman_swp_sys *s,
261 #ifdef QBMAN_CENA_TRACE
262 pr_info("qbman_cena_write_complete(%p:%d:0x%03x)\n",
263 s->addr_cena, s->idx, offset);
265 dcbf(s->addr_cena + offset);
268 static inline uint32_t qbman_cena_read_reg(struct qbman_swp_sys *s,
271 return __raw_readl(s->addr_cena + offset);
/* Read the 64-byte entry at @offset into the s->cena shadow and return the
 * shadow copy (sixteen 32-bit word reads).
 *
 * NOTE(review): an elided #if/#else selects the cache-enabled vs
 * cache-inhibited source, the __raw_readl() continuation lines are
 * truncated, and the trailing trace/return is missing from this listing —
 * verify against the full source.
 */
static inline void *qbman_cena_read(struct qbman_swp_sys *s, uint32_t offset)
	uint32_t *shadow = (uint32_t *)(s->cena + offset);

#ifdef QBMAN_CENA_TRACE
	pr_info("qbman_cena_read(%p:%d:0x%03x) %p\n",
		s->addr_cena, s->idx, offset, shadow);
	/* Cache-enabled path. */
	for (loop = 0; loop < 16; loop++)
		shadow[loop] = __raw_readl(s->addr_cena + offset
	/* Cache-inhibited path. */
	for (loop = 0; loop < 16; loop++)
		shadow[loop] = __raw_readl(s->addr_cinh + offset
#ifdef QBMAN_CENA_TRACE
298 static inline void *qbman_cena_read_wo_shadow(struct qbman_swp_sys *s,
301 #ifdef QBMAN_CENA_TRACE
302 pr_info("qbman_cena_read(%p:%d:0x%03x)\n",
303 s->addr_cena, s->idx, offset);
305 return s->addr_cena + offset;
308 static inline void qbman_cena_invalidate(struct qbman_swp_sys *s,
311 dccivac(s->addr_cena + offset);
314 static inline void qbman_cena_invalidate_prefetch(struct qbman_swp_sys *s,
317 dccivac(s->addr_cena + offset);
318 prefetch_for_load(s->addr_cena + offset);
321 static inline void qbman_cena_prefetch(struct qbman_swp_sys *s,
324 prefetch_for_load(s->addr_cena + offset);
/* The SWP_CFG portal register is special, in that it is used by the
 * platform-specific code rather than the platform-independent code in
 * qbman_portal.c. So use of it is declared locally here.
 */
#define QBMAN_CINH_SWP_CFG 0xd00

/* Bit positions of the SWP_CFG fields composed by qbman_set_swp_cfg(). */
#define SWP_CFG_DQRR_MF_SHIFT 20	/* DQRR max fill */
#define SWP_CFG_EST_SHIFT 16
#define SWP_CFG_CPBS_SHIFT 15	/* memory-backed mode (set in sys_init) */
#define SWP_CFG_WN_SHIFT 14	/* write routing: CENA vs CINH */
#define SWP_CFG_RPM_SHIFT 12
#define SWP_CFG_DCM_SHIFT 10
#define SWP_CFG_EPM_SHIFT 8
#define SWP_CFG_VPM_SHIFT 7	/* VDQCR read-triggered mode */
#define SWP_CFG_CPM_SHIFT 6	/* CR read-triggered mode */
#define SWP_CFG_SD_SHIFT 5
#define SWP_CFG_SP_SHIFT 4
#define SWP_CFG_SE_SHIFT 3
#define SWP_CFG_DP_SHIFT 2
#define SWP_CFG_DE_SHIFT 1
#define SWP_CFG_EP_SHIFT 0
353 static inline uint32_t qbman_set_swp_cfg(uint8_t max_fill, uint8_t wn,
354 uint8_t est, uint8_t rpm, uint8_t dcm,
355 uint8_t epm, int sd, int sp, int se,
356 int dp, int de, int ep)
360 reg = (max_fill << SWP_CFG_DQRR_MF_SHIFT |
361 est << SWP_CFG_EST_SHIFT |
362 wn << SWP_CFG_WN_SHIFT |
363 rpm << SWP_CFG_RPM_SHIFT |
364 dcm << SWP_CFG_DCM_SHIFT |
365 epm << SWP_CFG_EPM_SHIFT |
366 sd << SWP_CFG_SD_SHIFT |
367 sp << SWP_CFG_SP_SHIFT |
368 se << SWP_CFG_SE_SHIFT |
369 dp << SWP_CFG_DP_SHIFT |
370 de << SWP_CFG_DE_SHIFT |
371 ep << SWP_CFG_EP_SHIFT);
/* Written to the EQCR/RCR producer-index registers by qbman_swp_sys_init()
 * when a memory-backed portal is switched to read-trigger mode.
 */
#define QMAN_RT_MODE 0x00000100

/* QMan hardware revisions as encoded in the descriptor's qman_version;
 * compare under QMAN_REV_MASK, which keeps only the major/minor digits.
 */
#define QMAN_REV_4000 0x04000000
#define QMAN_REV_4100 0x04010000
#define QMAN_REV_4101 0x04010001
#define QMAN_REV_5000 0x05000000
#define QMAN_REV_MASK 0xffff0000
384 static inline int qbman_swp_sys_init(struct qbman_swp_sys *s,
385 const struct qbman_swp_desc *d,
390 int cena_region_size = 4*1024;
392 if ((d->qman_version & QMAN_REV_MASK) >= QMAN_REV_5000
393 && (d->cena_access_mode == qman_cena_fastest_access))
394 cena_region_size = 64*1024;
396 uint8_t wn = CENA_WRITE_ENABLE;
398 uint8_t wn = CINH_WRITE_ENABLE;
401 s->addr_cena = d->cena_bar;
402 s->addr_cinh = d->cinh_bar;
403 s->idx = (uint32_t)d->idx;
404 s->cena = malloc(cena_region_size);
407 pr_err("Could not allocate page for cena shadow\n");
410 s->eqcr_mode = d->eqcr_mode;
411 QBMAN_BUG_ON(d->idx < 0);
412 #ifdef QBMAN_CHECKING
413 /* We should never be asked to initialise for a portal that isn't in
414 * the power-on state. (Ie. don't forget to reset portals when they are
417 reg = qbman_cinh_read(s, QBMAN_CINH_SWP_CFG);
420 if ((d->qman_version & QMAN_REV_MASK) >= QMAN_REV_5000
421 && (d->cena_access_mode == qman_cena_fastest_access))
422 memset(s->addr_cena, 0, cena_region_size);
424 /* Invalidate the portal memory.
425 * This ensures no stale cache lines
427 for (i = 0; i < cena_region_size; i += 64)
428 dccivac(s->addr_cena + i);
431 if (s->eqcr_mode == qman_eqcr_vb_array) {
432 reg = qbman_set_swp_cfg(dqrr_size, wn,
433 0, 3, 2, 3, 1, 1, 1, 1, 1, 1);
435 if ((d->qman_version & QMAN_REV_MASK) >= QMAN_REV_5000 &&
436 (d->cena_access_mode == qman_cena_fastest_access))
437 reg = qbman_set_swp_cfg(dqrr_size, wn,
438 1, 3, 2, 0, 1, 1, 1, 1, 1, 1);
440 reg = qbman_set_swp_cfg(dqrr_size, wn,
441 1, 3, 2, 2, 1, 1, 1, 1, 1, 1);
444 if ((d->qman_version & QMAN_REV_MASK) >= QMAN_REV_5000
445 && (d->cena_access_mode == qman_cena_fastest_access))
446 reg |= 1 << SWP_CFG_CPBS_SHIFT | /* memory-backed mode */
447 1 << SWP_CFG_VPM_SHIFT | /* VDQCR read triggered mode */
448 1 << SWP_CFG_CPM_SHIFT; /* CR read triggered mode */
450 qbman_cinh_write(s, QBMAN_CINH_SWP_CFG, reg);
451 reg = qbman_cinh_read(s, QBMAN_CINH_SWP_CFG);
453 pr_err("The portal %d is not enabled!\n", s->idx);
458 if ((d->qman_version & QMAN_REV_MASK) >= QMAN_REV_5000
459 && (d->cena_access_mode == qman_cena_fastest_access)) {
460 qbman_cinh_write(s, QBMAN_CINH_SWP_EQCR_PI, QMAN_RT_MODE);
461 qbman_cinh_write(s, QBMAN_CINH_SWP_RCR_PI, QMAN_RT_MODE);
467 static inline void qbman_swp_sys_finish(struct qbman_swp_sys *s)
472 #endif /* _QBMAN_SYS_H_ */