1 /* SPDX-License-Identifier: BSD-3-Clause
3 * Copyright (C) 2014-2016 Freescale Semiconductor, Inc.
4 * Copyright 2019-2020 NXP
6 /* qbman_sys_decl.h and qbman_sys.h are the two platform-specific files in the
7 * driver. They are only included via qbman_private.h, which is itself a
8 * platform-independent file and is included by all the other driver source.
10 * qbman_sys_decl.h is included prior to all other declarations and logic, and
11 * it exists to provide compatibility with any linux interfaces our
12 * single-source driver code is dependent on (eg. kmalloc). Ie. this file
13 * provides linux compatibility.
15 * This qbman_sys.h header, on the other hand, is included *after* any common
16 * and platform-neutral declarations and logic in qbman_private.h, and exists to
17 * implement any platform-specific logic of the qbman driver itself. Ie. it is
18 * *not* to provide linux compatibility.
24 #include "qbman_sys_decl.h"
/* Values for the portal's 'wn' (write mode) configuration field:
 * 0 selects CENA (cache-enabled) writes, 1 selects CINH (cache-inhibited)
 * writes. See qbman_swp_sys_init()/qbman_swp_sys_update() below.
 */
26 #define CENA_WRITE_ENABLE 0
27 #define CINH_WRITE_ENABLE 1
29 /* CINH register offsets */
/* Byte offsets of the cache-inhibited portal registers, relative to the
 * portal's CINH base address (s->addr_cinh).
 */
30 #define QBMAN_CINH_SWP_EQCR_PI 0x800
31 #define QBMAN_CINH_SWP_EQCR_CI 0x840
32 #define QBMAN_CINH_SWP_EQAR 0x8c0
33 #define QBMAN_CINH_SWP_CR_RT 0x900
34 #define QBMAN_CINH_SWP_VDQCR_RT 0x940
35 #define QBMAN_CINH_SWP_EQCR_AM_RT 0x980
36 #define QBMAN_CINH_SWP_RCR_AM_RT 0x9c0
37 #define QBMAN_CINH_SWP_DQPI 0xa00
38 #define QBMAN_CINH_SWP_DQRR_ITR 0xa80
39 #define QBMAN_CINH_SWP_DCAP 0xac0
40 #define QBMAN_CINH_SWP_SDQCR 0xb00
41 #define QBMAN_CINH_SWP_EQCR_AM_RT2 0xb40
42 #define QBMAN_CINH_SWP_RCR_PI 0xc00
43 #define QBMAN_CINH_SWP_RAR 0xcc0
44 #define QBMAN_CINH_SWP_ISR 0xe00
45 #define QBMAN_CINH_SWP_IER 0xe40
46 #define QBMAN_CINH_SWP_ISDR 0xe80
47 #define QBMAN_CINH_SWP_IIR 0xec0
48 #define QBMAN_CINH_SWP_ITPR 0xf40
50 /* CENA register offsets */
/* Byte offsets into the cache-enabled portal region (s->addr_cena).
 * The ring-entry macros step in 64-byte (one cache line) increments per
 * ring index 'n'; QBMAN_CENA_SWP_RR() selects one of two result slots
 * from the valid-bit 'vb'.
 */
51 #define QBMAN_CENA_SWP_EQCR(n) (0x000 + ((uint32_t)(n) << 6))
52 #define QBMAN_CENA_SWP_DQRR(n) (0x200 + ((uint32_t)(n) << 6))
53 #define QBMAN_CENA_SWP_RCR(n) (0x400 + ((uint32_t)(n) << 6))
54 #define QBMAN_CENA_SWP_CR 0x600
55 #define QBMAN_CENA_SWP_RR(vb) (0x700 + ((uint32_t)(vb) >> 1))
56 #define QBMAN_CENA_SWP_VDQCR 0x780
57 #define QBMAN_CENA_SWP_EQCR_CI 0x840
58 #define QBMAN_CENA_SWP_EQCR_CI_MEMBACK 0x1840
60 /* CENA register offsets in memory-backed mode */
61 #define QBMAN_CENA_SWP_DQRR_MEM(n) (0x800 + ((uint32_t)(n) << 6))
62 #define QBMAN_CENA_SWP_RCR_MEM(n) (0x1400 + ((uint32_t)(n) << 6))
63 #define QBMAN_CENA_SWP_CR_MEM 0x1600
64 #define QBMAN_CENA_SWP_RR_MEM 0x1680
65 #define QBMAN_CENA_SWP_VDQCR_MEM 0x1780
67 /* Debugging assists */
/* Render a hex dump of the data window [p, p + sz), padded out to the
 * 16-byte-aligned row boundaries 'start' and 'end'. Byte positions that
 * lie outside the real data window are printed as ".."; positions inside
 * it print the byte value from 'c'. NOTE(review): only a fragment of
 * this function's body is visible in this listing.
 */
68 static inline void __hexdump(unsigned long start, unsigned long end,
69 unsigned long p, size_t sz, const unsigned char *c)
76 pos += sprintf(buf + pos, "%08lx: ", start);
78 if ((start < p) || (start >= (p + sz)))
79 pos += sprintf(buf + pos, "..");
81 pos += sprintf(buf + pos, "%02x", *(c++));
82 if (!(++start & 15)) {
/* Hex-dump 'sz' bytes starting at 'ptr', with output rows widened to
 * whole 16-byte-aligned rows (the padding is handled by __hexdump()).
 */
100 static inline void hexdump(const void *ptr, size_t sz)
102 unsigned long p = (unsigned long)ptr;
/* Round the dump window outwards to 16-byte row boundaries. */
103 unsigned long start = p & ~15;
104 unsigned long end = (p + sz + 15) & ~15;
105 const unsigned char *c = ptr;
107 __hexdump(start, end, p, sz, c);
110 /* Currently, the CENA support code expects each 32-bit word to be written in
111 * host order, and these are converted to hardware (little-endian) order on
112 * command submission. However, 64-bit quantities must be written (and read)
113 * as two 32-bit words with the least-significant word first, irrespective of
/* Split 64-bit words from 's' into pairs of 32-bit words at 'd',
 * least-significant word first (swapping word order on big-endian hosts).
 * NOTE(review): the count parameter and loop body are not visible in this
 * listing.
 */
116 static inline void u64_to_le32_copy(void *d, const uint64_t *s,
120 const uint32_t *ss = (const uint32_t *)s;
123 /* TBD: the toolchain was choking on the use of 64-bit types up
124 * until recently so this works entirely with 32-bit variables.
125 * When 64-bit types become usable again, investigate better
126 * ways of doing this.
128 #if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
/* Inverse of u64_to_le32_copy(): reassemble 64-bit words at 'd' from
 * pairs of 32-bit words at 's' stored least-significant word first.
 * NOTE(review): the count parameter and loop body are not visible in this
 * listing.
 */
139 static inline void u64_from_le32_copy(uint64_t *d, const void *s,
142 const uint32_t *ss = s;
143 uint32_t *dd = (uint32_t *)d;
146 #if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
/* Per-portal system-layer state used by the CENA/CINH accessors below. */
160 struct qbman_swp_sys {
161 /* On GPP, the sys support for qbman_swp is here. The CENA region is
162 * not an mmap() of the real portal registers, but an allocated
163 * place-holder, because the actual writes/reads to/from the portal are
164 * marshalled from these allocated areas using QBMan's "MC access
165 * registers". CINH accesses are atomic so there's no need for a
/* EQCR update mode; selects which SWP_CFG value qbman_swp_sys_init()
 * programs for this portal.
 */
172 enum qbman_eqcr_mode eqcr_mode;
175 /* P_OFFSET is (ACCESS_CMD,0,12) - offset within the portal
176 * C is (ACCESS_CMD,12,1) - is inhibited? (0==CENA, 1==CINH)
177 * SWP_IDX is (ACCESS_CMD,16,10) - Software portal index
178 * P is (ACCESS_CMD,28,1) - (0==special portal, 1==any portal)
179 * T is (ACCESS_CMD,29,1) - Command type (0==READ, 1==WRITE)
180 * E is (ACCESS_CMD,31,1) - Command execute (1 to issue, poll for 0==complete)
/* Write the 32-bit value 'val' to the cache-inhibited portal register at
 * byte offset 'offset', optionally tracing the access.
 */
183 static inline void qbman_cinh_write(struct qbman_swp_sys *s, uint32_t offset,
186 __raw_writel(val, s->addr_cinh + offset);
187 #ifdef QBMAN_CINH_TRACE
188 pr_info("qbman_cinh_write(%p:%d:0x%03x) 0x%08x\n",
189 s->addr_cinh, s->idx, offset, val);
/* Start composing a command directly in CINH space (no shadow buffer):
 * returns a pointer into the cache-inhibited region at 'offset', which
 * must be 64-byte (cache line) aligned.
 */
193 static inline void *qbman_cinh_write_start_wo_shadow(struct qbman_swp_sys *s,
196 #ifdef QBMAN_CINH_TRACE
197 pr_info("qbman_cinh_write_start(%p:%d:0x%03x)\n",
198 s->addr_cinh, s->idx, offset);
200 QBMAN_BUG_ON(offset & 63);
201 return (s->addr_cinh + offset);
/* Flush a composed 64-byte command from shadow buffer 'cmd' into CINH
 * space at 'offset'. Words 15..1 are written first and word 0 last —
 * presumably word 0 carries the valid/verb bit, so the hardware only
 * ever consumes a fully-written command (TODO confirm against QBMan
 * portal spec).
 */
204 static inline void qbman_cinh_write_complete(struct qbman_swp_sys *s,
205 uint32_t offset, void *cmd)
207 const uint32_t *shadow = cmd;
209 #ifdef QBMAN_CINH_TRACE
210 pr_info("qbman_cinh_write_complete(%p:%d:0x%03x) %p\n",
211 s->addr_cinh, s->idx, offset, shadow);
214 for (loop = 15; loop >= 1; loop--)
215 __raw_writel(shadow[loop], s->addr_cinh +
218 __raw_writel(shadow[0], s->addr_cinh + offset);
/* Read and return the 32-bit cache-inhibited portal register at 'offset'. */
221 static inline uint32_t qbman_cinh_read(struct qbman_swp_sys *s, uint32_t offset)
223 uint32_t reg = __raw_readl(s->addr_cinh + offset);
224 #ifdef QBMAN_CINH_TRACE
225 pr_info("qbman_cinh_read(%p:%d:0x%03x) 0x%08x\n",
226 s->addr_cinh, s->idx, offset, reg);
/* Copy a 64-byte (16-word) entry from CINH space at 'offset' into this
 * portal's shadow area and return a pointer to the shadow copy, so the
 * caller can parse it with normal cacheable accesses.
 */
231 static inline void *qbman_cinh_read_shadow(struct qbman_swp_sys *s,
234 uint32_t *shadow = (uint32_t *)(s->cena + offset);
236 #ifdef QBMAN_CINH_TRACE
237 pr_info(" %s (%p:%d:0x%03x) %p\n", __func__,
238 s->addr_cinh, s->idx, offset, shadow);
241 for (loop = 0; loop < 16; loop++)
242 shadow[loop] = __raw_readl(s->addr_cinh + offset
244 #ifdef QBMAN_CINH_TRACE
/* Return a pointer directly into CINH space at 'offset' (no shadow copy);
 * the caller reads the entry in place.
 */
250 static inline void *qbman_cinh_read_wo_shadow(struct qbman_swp_sys *s,
253 #ifdef QBMAN_CINH_TRACE
254 pr_info("qbman_cinh_read(%p:%d:0x%03x)\n",
255 s->addr_cinh, s->idx, offset);
257 return s->addr_cinh + offset;
/* Start composing a CENA command: return the 64-byte shadow slot for
 * 'offset', where the caller builds the command before committing it
 * with qbman_cena_write_complete(). 'offset' must be 64-byte aligned.
 */
260 static inline void *qbman_cena_write_start(struct qbman_swp_sys *s,
263 void *shadow = s->cena + offset;
265 #ifdef QBMAN_CENA_TRACE
266 pr_info("qbman_cena_write_start(%p:%d:0x%03x) %p\n",
267 s->addr_cena, s->idx, offset, shadow);
269 QBMAN_BUG_ON(offset & 63);
/* Start composing a command in place (no shadow buffer): returns a
 * pointer into the portal itself. Both a CENA and a CINH return are
 * present below — presumably selected by a preprocessor/mode branch not
 * visible in this listing. 'offset' must be 64-byte aligned.
 */
274 static inline void *qbman_cena_write_start_wo_shadow(struct qbman_swp_sys *s,
277 #ifdef QBMAN_CENA_TRACE
278 pr_info("qbman_cena_write_start(%p:%d:0x%03x)\n",
279 s->addr_cena, s->idx, offset);
281 QBMAN_BUG_ON(offset & 63);
283 return (s->addr_cena + offset);
285 return (s->addr_cinh + offset);
/* Commit a composed 64-byte command 'cmd' to the portal: the 16 words
 * are copied with word 0 written last, then the cache line is flushed
 * (dcbf) so the hardware observes the completed command. Two copy paths
 * (CENA and CINH destinations) are present; the selecting branch lines
 * are not visible in this listing.
 */
289 static inline void qbman_cena_write_complete(struct qbman_swp_sys *s,
290 uint32_t offset, void *cmd)
292 const uint32_t *shadow = cmd;
294 #ifdef QBMAN_CENA_TRACE
295 pr_info("qbman_cena_write_complete(%p:%d:0x%03x) %p\n",
296 s->addr_cena, s->idx, offset, shadow);
300 for (loop = 15; loop >= 1; loop--)
301 __raw_writel(shadow[loop], s->addr_cena +
304 __raw_writel(shadow[0], s->addr_cena + offset);
306 for (loop = 15; loop >= 1; loop--)
307 __raw_writel(shadow[loop], s->addr_cinh +
310 __raw_writel(shadow[0], s->addr_cinh + offset);
312 dcbf(s->addr_cena + offset);
/* Commit a command that was composed in place (no shadow) by flushing
 * its cache line so the hardware sees the writes.
 */
315 static inline void qbman_cena_write_complete_wo_shadow(struct qbman_swp_sys *s,
318 #ifdef QBMAN_CENA_TRACE
319 pr_info("qbman_cena_write_complete(%p:%d:0x%03x)\n",
320 s->addr_cena, s->idx, offset);
322 dcbf(s->addr_cena + offset);
/* Read and return a 32-bit value through the cache-enabled region at
 * 'offset'.
 */
325 static inline uint32_t qbman_cena_read_reg(struct qbman_swp_sys *s,
328 return __raw_readl(s->addr_cena + offset);
/* Copy a 64-byte (16-word) entry from the portal at 'offset' into the
 * shadow area and return a pointer to the shadow copy. Both a CENA and
 * a CINH source loop are present; the selecting branch lines are not
 * visible in this listing.
 */
331 static inline void *qbman_cena_read(struct qbman_swp_sys *s, uint32_t offset)
333 uint32_t *shadow = (uint32_t *)(s->cena + offset);
335 #ifdef QBMAN_CENA_TRACE
336 pr_info("qbman_cena_read(%p:%d:0x%03x) %p\n",
337 s->addr_cena, s->idx, offset, shadow);
341 for (loop = 0; loop < 16; loop++)
342 shadow[loop] = __raw_readl(s->addr_cena + offset
345 for (loop = 0; loop < 16; loop++)
346 shadow[loop] = __raw_readl(s->addr_cinh + offset
349 #ifdef QBMAN_CENA_TRACE
/* Return a pointer directly into the CENA region at 'offset' (no shadow
 * copy); the caller reads the entry in place.
 */
355 static inline void *qbman_cena_read_wo_shadow(struct qbman_swp_sys *s,
358 #ifdef QBMAN_CENA_TRACE
359 pr_info("qbman_cena_read(%p:%d:0x%03x)\n",
360 s->addr_cena, s->idx, offset);
362 return s->addr_cena + offset;
/* Invalidate the cache line covering 'offset' so the next read fetches
 * fresh data from the portal rather than a stale cached copy.
 */
365 static inline void qbman_cena_invalidate(struct qbman_swp_sys *s,
368 dccivac(s->addr_cena + offset);
/* Invalidate the cache line at 'offset', then immediately prefetch it,
 * priming the cache with up-to-date portal data for an upcoming read.
 */
371 static inline void qbman_cena_invalidate_prefetch(struct qbman_swp_sys *s,
374 dccivac(s->addr_cena + offset);
375 prefetch_for_load(s->addr_cena + offset);
/* Prefetch the cache line at 'offset' ahead of an upcoming read. */
378 static inline void qbman_cena_prefetch(struct qbman_swp_sys *s,
381 prefetch_for_load(s->addr_cena + offset);
/* The SWP_CFG portal register is special, in that it is used by the
 * platform-specific code rather than the platform-independent code in
 * qbman_portal.c. So use of it is declared locally here.
 */
#define QBMAN_CINH_SWP_CFG 0xd00

/* Bit positions of the individual fields within the SWP_CFG register.
 * CPBS selects memory-backed mode and VPM/CPM select the read-triggered
 * VDQCR/CR modes (see their use in qbman_swp_sys_init()); WN selects
 * CENA vs CINH writes (CENA_WRITE_ENABLE/CINH_WRITE_ENABLE above);
 * DQRR_MF is the DQRR maximum fill. The remaining fields are programmed
 * with fixed values by qbman_swp_sys_init()/qbman_swp_sys_update().
 */
#define SWP_CFG_DQRR_MF_SHIFT 20
#define SWP_CFG_EST_SHIFT 16
#define SWP_CFG_CPBS_SHIFT 15
#define SWP_CFG_WN_SHIFT 14
#define SWP_CFG_RPM_SHIFT 12
#define SWP_CFG_DCM_SHIFT 10
#define SWP_CFG_EPM_SHIFT 8
#define SWP_CFG_VPM_SHIFT 7
#define SWP_CFG_CPM_SHIFT 6
#define SWP_CFG_SD_SHIFT 5
#define SWP_CFG_SP_SHIFT 4
#define SWP_CFG_SE_SHIFT 3
#define SWP_CFG_DP_SHIFT 2
#define SWP_CFG_DE_SHIFT 1
#define SWP_CFG_EP_SHIFT 0

/* Compose the SWP_CFG register value from its individual fields.
 * Each argument is placed, unmodified, at its field's bit position;
 * callers are expected to pass values already within each field's width.
 * Every term is cast to uint32_t before shifting so the whole value is
 * assembled with unsigned arithmetic (no implicit signed-int promotion).
 * Returns the 32-bit value to write to QBMAN_CINH_SWP_CFG.
 */
static inline uint32_t qbman_set_swp_cfg(uint8_t max_fill, uint8_t wn,
					 uint8_t est, uint8_t rpm, uint8_t dcm,
					 uint8_t epm, int sd, int sp, int se,
					 int dp, int de, int ep)
{
	return ((uint32_t)max_fill << SWP_CFG_DQRR_MF_SHIFT |
		(uint32_t)est << SWP_CFG_EST_SHIFT |
		(uint32_t)wn << SWP_CFG_WN_SHIFT |
		(uint32_t)rpm << SWP_CFG_RPM_SHIFT |
		(uint32_t)dcm << SWP_CFG_DCM_SHIFT |
		(uint32_t)epm << SWP_CFG_EPM_SHIFT |
		(uint32_t)sd << SWP_CFG_SD_SHIFT |
		(uint32_t)sp << SWP_CFG_SP_SHIFT |
		(uint32_t)se << SWP_CFG_SE_SHIFT |
		(uint32_t)dp << SWP_CFG_DP_SHIFT |
		(uint32_t)de << SWP_CFG_DE_SHIFT |
		(uint32_t)ep << SWP_CFG_EP_SHIFT);
}
/* Value written to the EQCR/RCR producer-index registers to switch a
 * memory-backed (rev >= 5000) portal into run-time (RT) mode — see
 * qbman_swp_sys_init() below.
 */
433 #define QMAN_RT_MODE 0x00000100
/* QMan block revision codes, compared against d->qman_version after
 * masking with QMAN_REV_MASK (major/minor revision in the top 16 bits).
 */
435 #define QMAN_REV_4000 0x04000000
436 #define QMAN_REV_4100 0x04010000
437 #define QMAN_REV_4101 0x04010001
438 #define QMAN_REV_5000 0x05000000
439 #define QMAN_REV_MASK 0xffff0000
/* SoC silicon-version-register (SVR) values identifying the DPAA2
 * platform, matched against dpaa2_svr_family below.
 */
441 #define SVR_LS1080A 0x87030000
442 #define SVR_LS2080A 0x87010000
443 #define SVR_LS2088A 0x87090000
444 #define SVR_LX2160A 0x87360000
446 /* Variable to store DPAA2 platform type */
447 extern uint32_t dpaa2_svr_family;
/* One-time initialisation of a software portal's system-layer state:
 * - allocates the CENA shadow region (4KB, or 64KB for rev >= 5000
 *   portals in fastest-access/memory-backed mode);
 * - records the portal base addresses and index from the descriptor 'd';
 * - invalidates the portal cache lines, then programs SWP_CFG according
 *   to the portal's EQCR mode and QMan revision;
 * - for memory-backed portals, kicks the EQCR/RCR producer indices into
 *   run-time (RT) mode.
 * Fails if the shadow allocation fails or the portal does not report
 * itself enabled after SWP_CFG is written (error paths print via pr_err;
 * the return statements are not visible in this listing).
 */
449 static inline int qbman_swp_sys_init(struct qbman_swp_sys *s,
450 const struct qbman_swp_desc *d,
455 int cena_region_size = 4*1024;
458 uint8_t wn = CENA_WRITE_ENABLE;
460 uint8_t wn = CINH_WRITE_ENABLE;
/* Memory-backed portals expose a larger 64KB CENA region. */
464 if ((d->qman_version & QMAN_REV_MASK) >= QMAN_REV_5000
465 && (d->cena_access_mode == qman_cena_fastest_access))
466 cena_region_size = 64*1024;
467 s->addr_cena = d->cena_bar;
468 s->addr_cinh = d->cinh_bar;
469 s->idx = (uint32_t)d->idx;
470 s->cena = malloc(cena_region_size);
473 pr_err("Could not allocate page for cena shadow\n");
476 s->eqcr_mode = d->eqcr_mode;
477 QBMAN_BUG_ON(d->idx < 0);
478 #ifdef QBMAN_CHECKING
479 /* We should never be asked to initialise for a portal that isn't in
480 * the power-on state. (Ie. don't forget to reset portals when they are
483 reg = qbman_cinh_read(s, QBMAN_CINH_SWP_CFG);
/* Memory-backed portals start from a zeroed CENA region. */
486 if ((d->qman_version & QMAN_REV_MASK) >= QMAN_REV_5000
487 && (d->cena_access_mode == qman_cena_fastest_access))
488 memset(s->addr_cena, 0, cena_region_size);
490 /* Invalidate the portal memory.
491 * This ensures no stale cache lines
493 for (i = 0; i < cena_region_size; i += 64)
494 dccivac(s->addr_cena + i);
/* LS1080A takes a different setting here; the branch body is not
 * visible in this listing.
 */
497 if (dpaa2_svr_family == SVR_LS1080A)
/* Choose the SWP_CFG value for this portal's EQCR mode / revision. */
500 if (s->eqcr_mode == qman_eqcr_vb_array) {
501 reg = qbman_set_swp_cfg(dqrr_size, wn,
502 0, 3, 2, 3, 1, 1, 1, 1, 1, 1);
504 if ((d->qman_version & QMAN_REV_MASK) >= QMAN_REV_5000 &&
505 (d->cena_access_mode == qman_cena_fastest_access))
506 reg = qbman_set_swp_cfg(dqrr_size, wn,
507 1, 3, 2, 0, 1, 1, 1, 1, 1, 1);
509 reg = qbman_set_swp_cfg(dqrr_size, wn,
510 est, 3, 2, 2, 1, 1, 1, 1, 1, 1);
513 if ((d->qman_version & QMAN_REV_MASK) >= QMAN_REV_5000
514 && (d->cena_access_mode == qman_cena_fastest_access))
515 reg |= 1 << SWP_CFG_CPBS_SHIFT | /* memory-backed mode */
516 1 << SWP_CFG_VPM_SHIFT | /* VDQCR read triggered mode */
517 1 << SWP_CFG_CPM_SHIFT; /* CR read triggered mode */
/* Read SWP_CFG back to confirm the portal accepted the configuration. */
519 qbman_cinh_write(s, QBMAN_CINH_SWP_CFG, reg);
520 reg = qbman_cinh_read(s, QBMAN_CINH_SWP_CFG);
522 pr_err("The portal %d is not enabled!\n", s->idx);
/* Memory-backed portals: put the producer rings into RT mode. */
527 if ((d->qman_version & QMAN_REV_MASK) >= QMAN_REV_5000
528 && (d->cena_access_mode == qman_cena_fastest_access)) {
529 qbman_cinh_write(s, QBMAN_CINH_SWP_EQCR_PI, QMAN_RT_MODE);
530 qbman_cinh_write(s, QBMAN_CINH_SWP_RCR_PI, QMAN_RT_MODE);
/* Reconfigure an already-initialised portal: repeats the SWP_CFG
 * programming performed by qbman_swp_sys_init() (same EQCR-mode /
 * revision selection, same memory-backed RT-mode kick) but without
 * allocating the shadow region. A condition not visible in this listing
 * can force 'wn' to CINH writes regardless of the compile-time default.
 * Fails if the portal does not report itself enabled afterwards.
 */
536 static inline int qbman_swp_sys_update(struct qbman_swp_sys *s,
537 const struct qbman_swp_desc *d,
543 int cena_region_size = 4*1024;
546 uint8_t wn = CENA_WRITE_ENABLE;
548 uint8_t wn = CINH_WRITE_ENABLE;
552 wn = CINH_WRITE_ENABLE;
554 QBMAN_BUG_ON(d->idx < 0);
555 #ifdef QBMAN_CHECKING
556 /* We should never be asked to initialise for a portal that isn't in
557 * the power-on state. (Ie. don't forget to reset portals when they are
560 reg = qbman_cinh_read(s, QBMAN_CINH_SWP_CFG);
/* Memory-backed portals start from a zeroed CENA region. */
563 if ((d->qman_version & QMAN_REV_MASK) >= QMAN_REV_5000
564 && (d->cena_access_mode == qman_cena_fastest_access))
565 memset(s->addr_cena, 0, cena_region_size);
567 /* Invalidate the portal memory.
568 * This ensures no stale cache lines
570 for (i = 0; i < cena_region_size; i += 64)
571 dccivac(s->addr_cena + i);
/* LS1080A takes a different setting here; the branch body is not
 * visible in this listing.
 */
574 if (dpaa2_svr_family == SVR_LS1080A)
/* Choose the SWP_CFG value for this portal's EQCR mode / revision. */
577 if (s->eqcr_mode == qman_eqcr_vb_array) {
578 reg = qbman_set_swp_cfg(dqrr_size, wn,
579 0, 3, 2, 3, 1, 1, 1, 1, 1, 1);
581 if ((d->qman_version & QMAN_REV_MASK) >= QMAN_REV_5000 &&
582 (d->cena_access_mode == qman_cena_fastest_access))
583 reg = qbman_set_swp_cfg(dqrr_size, wn,
584 1, 3, 2, 0, 1, 1, 1, 1, 1, 1);
586 reg = qbman_set_swp_cfg(dqrr_size, wn,
587 est, 3, 2, 2, 1, 1, 1, 1, 1, 1);
590 if ((d->qman_version & QMAN_REV_MASK) >= QMAN_REV_5000
591 && (d->cena_access_mode == qman_cena_fastest_access))
592 reg |= 1 << SWP_CFG_CPBS_SHIFT | /* memory-backed mode */
593 1 << SWP_CFG_VPM_SHIFT | /* VDQCR read triggered mode */
594 1 << SWP_CFG_CPM_SHIFT; /* CR read triggered mode */
/* Read SWP_CFG back to confirm the portal accepted the configuration. */
596 qbman_cinh_write(s, QBMAN_CINH_SWP_CFG, reg);
597 reg = qbman_cinh_read(s, QBMAN_CINH_SWP_CFG);
599 pr_err("The portal %d is not enabled!\n", s->idx);
/* Memory-backed portals: put the producer rings into RT mode. */
603 if ((d->qman_version & QMAN_REV_MASK) >= QMAN_REV_5000
604 && (d->cena_access_mode == qman_cena_fastest_access)) {
605 qbman_cinh_write(s, QBMAN_CINH_SWP_EQCR_PI, QMAN_RT_MODE);
606 qbman_cinh_write(s, QBMAN_CINH_SWP_RCR_PI, QMAN_RT_MODE);
/* Tear down the system-layer portal state set up by qbman_swp_sys_init().
 * NOTE(review): the body is not visible in this listing — presumably it
 * frees the s->cena shadow region; confirm against the full source.
 */
612 static inline void qbman_swp_sys_finish(struct qbman_swp_sys *s)
617 #endif /* _QBMAN_SYS_H_ */