/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0)
 *
 * Copyright 2010-2016 Freescale Semiconductor Inc.
 *
 */

#ifndef __BMAN_H
#define __BMAN_H

#include "bman_priv.h"
/* Cache-inhibited register offsets */
#define BM_REG_RCR_PI_CINH	0x3000
#define BM_REG_RCR_CI_CINH	0x3100
#define BM_REG_RCR_ITR		0x3200
#define BM_REG_CFG		0x3300
/* One 32-bit SCN mask word per group of 32 bpids (n = bpid / 32) */
#define BM_REG_SCN(n)		(0x3400 + ((n) << 6))
#define BM_REG_ISR		0x3e00
#define BM_REG_IIR		0x3ec0
/* Cache-enabled register offsets */
#define BM_CL_CR		0x0000
#define BM_CL_RR0		0x0100
#define BM_CL_RR1		0x0140
#define BM_CL_RCR		0x1000
#define BM_CL_RCR_PI_CENA	0x3000
#define BM_CL_RCR_CI_CENA	0x3100
/* BTW, the drivers (and h/w programming model) already obtain the required
 * synchronisation for portal accesses via lwsync(), hwsync(), and
 * data-dependencies. Use of barrier()s or other order-preserving primitives
 * simply degrade performance. Hence the use of the __raw_*() interfaces, which
 * simply ensure that the compiler treats the portal registers as volatile (ie.
 * non-coherent).
 */
/* Cache-inhibited register access: 32-bit, big-endian on the bus. */
#define __bm_in(bm, o)		be32_to_cpu(__raw_readl((bm)->ci + (o)))
#define __bm_out(bm, o, val)	__raw_writel(cpu_to_be32(val), \
					     (bm)->ci + (o))
/* Shorthands; require a 'portal' pointer in the calling scope. */
#define bm_in(reg)		__bm_in(&portal->addr, BM_REG_##reg)
#define bm_out(reg, val)	__bm_out(&portal->addr, BM_REG_##reg, val)
/* Cache-enabled (index) register access */
#define __bm_cl_touch_ro(bm, o) dcbt_ro((bm)->ce + (o))
#define __bm_cl_touch_rw(bm, o) dcbt_rw((bm)->ce + (o))
#define __bm_cl_in(bm, o)	be32_to_cpu(__raw_readl((bm)->ce + (o)))
/* Write then flush the cacheline so h/w observes the new index. */
#define __bm_cl_out(bm, o, val) \
	do { \
		u32 *__tmpclout = (bm)->ce + (o); \
		__raw_writel(cpu_to_be32(val), __tmpclout); \
		dcbf(__tmpclout); \
	} while (0)
#define __bm_cl_invalidate(bm, o) dccivac((bm)->ce + (o))
/* Shorthands; require a 'portal' pointer in the calling scope. */
#define bm_cl_touch_ro(reg) __bm_cl_touch_ro(&portal->addr, BM_CL_##reg##_CENA)
#define bm_cl_touch_rw(reg) __bm_cl_touch_rw(&portal->addr, BM_CL_##reg##_CENA)
#define bm_cl_in(reg)	    __bm_cl_in(&portal->addr, BM_CL_##reg##_CENA)
#define bm_cl_out(reg, val) __bm_cl_out(&portal->addr, BM_CL_##reg##_CENA, val)
#define bm_cl_invalidate(reg)\
	__bm_cl_invalidate(&portal->addr, BM_CL_##reg##_CENA)
/* Cyclic helper for rings. FIXME: once we are able to do fine-grain perf
 * analysis, look at using the "extra" bit in the ring index registers to avoid
 * cyclic issues.
 */
static inline u8 bm_cyc_diff(u8 ringsize, u8 first, u8 last)
{
	/* 'first' is included, 'last' is excluded */
	if (first <= last)
		return last - first;
	/* 'last' has wrapped around the ring relative to 'first' */
	return ringsize + last - first;
}
/* Portal modes.
 *   Enum types;
 *     pmode == production mode
 *     cmode == consumption mode,
 *   Enum values use 3 letter codes. First letter matches the portal mode,
 *   remaining two letters indicate;
 *     ci == cache-inhibited portal register
 *     ce == cache-enabled portal register
 *     vb == in-band valid-bit (cache-enabled)
 */
enum bm_rcr_pmode {		/* matches BCSP_CFG::RPM */
	bm_rcr_pci = 0,		/* PI index, cache-inhibited */
	bm_rcr_pce = 1,		/* PI index, cache-enabled */
	bm_rcr_pvb = 2		/* valid-bit */
};
enum bm_rcr_cmode {		/* s/w-only */
	bm_rcr_cci,		/* CI index, cache-inhibited */
	bm_rcr_cce		/* CI index, cache-enabled */
};
96 /* --- Portal structures --- */
101 struct bm_rcr_entry *ring, *cursor;
102 u8 ci, available, ithresh, vbit;
103 #ifdef RTE_LIBRTE_DPAA_HWDEBUG
105 enum bm_rcr_pmode pmode;
106 enum bm_rcr_cmode cmode;
111 struct bm_mc_command *cr;
112 struct bm_mc_result *rr;
114 #ifdef RTE_LIBRTE_DPAA_HWDEBUG
116 /* Can only be _mc_start()ed */
118 /* Can only be _mc_commit()ed or _mc_abort()ed */
120 /* Can only be _mc_retry()ed */
127 void __iomem *ce; /* cache-enabled */
128 void __iomem *ci; /* cache-inhibited */
135 struct bm_portal_config config;
136 } ____cacheline_aligned;
/* Bit-wise logic to wrap a ring pointer by clearing the "carry bit" */
#define RCR_CARRYCLEAR(p) \
	(void *)((unsigned long)(p) & (~(unsigned long)(BM_RCR_SIZE << 6)))
142 /* Bit-wise logic to convert a ring pointer to a ring index */
143 static inline u8 RCR_PTR2IDX(struct bm_rcr_entry *e)
145 return ((uintptr_t)e >> 6) & (BM_RCR_SIZE - 1);
148 /* Increment the 'cursor' ring pointer, taking 'vbit' into account */
149 static inline void RCR_INC(struct bm_rcr *rcr)
151 /* NB: this is odd-looking, but experiments show that it generates
152 * fast code with essentially no branching overheads. We increment to
153 * the next RCR pointer and handle overflow and 'vbit'.
155 struct bm_rcr_entry *partial = rcr->cursor + 1;
157 rcr->cursor = RCR_CARRYCLEAR(partial);
158 if (partial != rcr->cursor)
159 rcr->vbit ^= BM_RCR_VERB_VBIT;
162 static inline int bm_rcr_init(struct bm_portal *portal, enum bm_rcr_pmode pmode,
163 __maybe_unused enum bm_rcr_cmode cmode)
165 /* This use of 'register', as well as all other occurrences, is because
166 * it has been observed to generate much faster code with gcc than is
167 * otherwise the case.
169 register struct bm_rcr *rcr = &portal->rcr;
173 rcr->ring = portal->addr.ce + BM_CL_RCR;
174 rcr->ci = bm_in(RCR_CI_CINH) & (BM_RCR_SIZE - 1);
176 pi = bm_in(RCR_PI_CINH) & (BM_RCR_SIZE - 1);
177 rcr->cursor = rcr->ring + pi;
178 rcr->vbit = (bm_in(RCR_PI_CINH) & BM_RCR_SIZE) ? BM_RCR_VERB_VBIT : 0;
179 rcr->available = BM_RCR_SIZE - 1
180 - bm_cyc_diff(BM_RCR_SIZE, rcr->ci, pi);
181 rcr->ithresh = bm_in(RCR_ITR);
182 #ifdef RTE_LIBRTE_DPAA_HWDEBUG
187 cfg = (bm_in(CFG) & 0xffffffe0) | (pmode & 0x3); /* BCSP_CFG::RPM */
192 static inline void bm_rcr_finish(struct bm_portal *portal)
194 register struct bm_rcr *rcr = &portal->rcr;
195 u8 pi = bm_in(RCR_PI_CINH) & (BM_RCR_SIZE - 1);
196 u8 ci = bm_in(RCR_CI_CINH) & (BM_RCR_SIZE - 1);
198 #ifdef RTE_LIBRTE_DPAA_HWDEBUG
199 DPAA_ASSERT(!rcr->busy);
201 if (pi != RCR_PTR2IDX(rcr->cursor))
202 pr_crit("losing uncommitted RCR entries\n");
204 pr_crit("missing existing RCR completions\n");
205 if (rcr->ci != RCR_PTR2IDX(rcr->cursor))
206 pr_crit("RCR destroyed unquiesced\n");
209 static inline struct bm_rcr_entry *bm_rcr_start(struct bm_portal *portal)
211 register struct bm_rcr *rcr = &portal->rcr;
213 #ifdef RTE_LIBRTE_DPAA_HWDEBUG
214 DPAA_ASSERT(!rcr->busy);
218 #ifdef RTE_LIBRTE_DPAA_HWDEBUG
221 dcbz_64(rcr->cursor);
225 static inline void bm_rcr_abort(struct bm_portal *portal)
227 __maybe_unused register struct bm_rcr *rcr = &portal->rcr;
229 #ifdef RTE_LIBRTE_DPAA_HWDEBUG
230 DPAA_ASSERT(rcr->busy);
235 static inline struct bm_rcr_entry *bm_rcr_pend_and_next(
236 struct bm_portal *portal, u8 myverb)
238 register struct bm_rcr *rcr = &portal->rcr;
240 #ifdef RTE_LIBRTE_DPAA_HWDEBUG
241 DPAA_ASSERT(rcr->busy);
242 DPAA_ASSERT(rcr->pmode != bm_rcr_pvb);
244 if (rcr->available == 1)
246 rcr->cursor->__dont_write_directly__verb = myverb | rcr->vbit;
247 dcbf_64(rcr->cursor);
250 dcbz_64(rcr->cursor);
254 static inline void bm_rcr_pci_commit(struct bm_portal *portal, u8 myverb)
256 register struct bm_rcr *rcr = &portal->rcr;
258 #ifdef RTE_LIBRTE_DPAA_HWDEBUG
259 DPAA_ASSERT(rcr->busy);
260 DPAA_ASSERT(rcr->pmode == bm_rcr_pci);
262 rcr->cursor->__dont_write_directly__verb = myverb | rcr->vbit;
266 bm_out(RCR_PI_CINH, RCR_PTR2IDX(rcr->cursor));
267 #ifdef RTE_LIBRTE_DPAA_HWDEBUG
272 static inline void bm_rcr_pce_prefetch(struct bm_portal *portal)
274 __maybe_unused register struct bm_rcr *rcr = &portal->rcr;
276 #ifdef RTE_LIBRTE_DPAA_HWDEBUG
277 DPAA_ASSERT(rcr->pmode == bm_rcr_pce);
279 bm_cl_invalidate(RCR_PI);
280 bm_cl_touch_rw(RCR_PI);
283 static inline void bm_rcr_pce_commit(struct bm_portal *portal, u8 myverb)
285 register struct bm_rcr *rcr = &portal->rcr;
287 #ifdef RTE_LIBRTE_DPAA_HWDEBUG
288 DPAA_ASSERT(rcr->busy);
289 DPAA_ASSERT(rcr->pmode == bm_rcr_pce);
291 rcr->cursor->__dont_write_directly__verb = myverb | rcr->vbit;
295 bm_cl_out(RCR_PI, RCR_PTR2IDX(rcr->cursor));
296 #ifdef RTE_LIBRTE_DPAA_HWDEBUG
301 static inline void bm_rcr_pvb_commit(struct bm_portal *portal, u8 myverb)
303 register struct bm_rcr *rcr = &portal->rcr;
304 struct bm_rcr_entry *rcursor;
306 #ifdef RTE_LIBRTE_DPAA_HWDEBUG
307 DPAA_ASSERT(rcr->busy);
308 DPAA_ASSERT(rcr->pmode == bm_rcr_pvb);
311 rcursor = rcr->cursor;
312 rcursor->__dont_write_directly__verb = myverb | rcr->vbit;
316 #ifdef RTE_LIBRTE_DPAA_HWDEBUG
321 static inline u8 bm_rcr_cci_update(struct bm_portal *portal)
323 register struct bm_rcr *rcr = &portal->rcr;
324 u8 diff, old_ci = rcr->ci;
326 #ifdef RTE_LIBRTE_DPAA_HWDEBUG
327 DPAA_ASSERT(rcr->cmode == bm_rcr_cci);
329 rcr->ci = bm_in(RCR_CI_CINH) & (BM_RCR_SIZE - 1);
330 diff = bm_cyc_diff(BM_RCR_SIZE, old_ci, rcr->ci);
331 rcr->available += diff;
335 static inline void bm_rcr_cce_prefetch(struct bm_portal *portal)
337 __maybe_unused register struct bm_rcr *rcr = &portal->rcr;
339 #ifdef RTE_LIBRTE_DPAA_HWDEBUG
340 DPAA_ASSERT(rcr->cmode == bm_rcr_cce);
342 bm_cl_touch_ro(RCR_CI);
345 static inline u8 bm_rcr_cce_update(struct bm_portal *portal)
347 register struct bm_rcr *rcr = &portal->rcr;
348 u8 diff, old_ci = rcr->ci;
350 #ifdef RTE_LIBRTE_DPAA_HWDEBUG
351 DPAA_ASSERT(rcr->cmode == bm_rcr_cce);
353 rcr->ci = bm_cl_in(RCR_CI) & (BM_RCR_SIZE - 1);
354 bm_cl_invalidate(RCR_CI);
355 diff = bm_cyc_diff(BM_RCR_SIZE, old_ci, rcr->ci);
356 rcr->available += diff;
360 static inline u8 bm_rcr_get_ithresh(struct bm_portal *portal)
362 register struct bm_rcr *rcr = &portal->rcr;
367 static inline void bm_rcr_set_ithresh(struct bm_portal *portal, u8 ithresh)
369 register struct bm_rcr *rcr = &portal->rcr;
371 rcr->ithresh = ithresh;
372 bm_out(RCR_ITR, ithresh);
375 static inline u8 bm_rcr_get_avail(struct bm_portal *portal)
377 register struct bm_rcr *rcr = &portal->rcr;
379 return rcr->available;
382 static inline u8 bm_rcr_get_fill(struct bm_portal *portal)
384 register struct bm_rcr *rcr = &portal->rcr;
386 return BM_RCR_SIZE - 1 - rcr->available;
/* --- Management command API --- */
391 static inline int bm_mc_init(struct bm_portal *portal)
393 register struct bm_mc *mc = &portal->mc;
395 mc->cr = portal->addr.ce + BM_CL_CR;
396 mc->rr = portal->addr.ce + BM_CL_RR0;
397 mc->rridx = (__raw_readb(&mc->cr->__dont_write_directly__verb) &
398 BM_MCC_VERB_VBIT) ? 0 : 1;
399 mc->vbit = mc->rridx ? BM_MCC_VERB_VBIT : 0;
400 #ifdef RTE_LIBRTE_DPAA_HWDEBUG
406 static inline void bm_mc_finish(struct bm_portal *portal)
408 __maybe_unused register struct bm_mc *mc = &portal->mc;
410 #ifdef RTE_LIBRTE_DPAA_HWDEBUG
411 DPAA_ASSERT(mc->state == mc_idle);
412 if (mc->state != mc_idle)
413 pr_crit("Losing incomplete MC command\n");
417 static inline struct bm_mc_command *bm_mc_start(struct bm_portal *portal)
419 register struct bm_mc *mc = &portal->mc;
421 #ifdef RTE_LIBRTE_DPAA_HWDEBUG
422 DPAA_ASSERT(mc->state == mc_idle);
429 static inline void bm_mc_abort(struct bm_portal *portal)
431 __maybe_unused register struct bm_mc *mc = &portal->mc;
433 #ifdef RTE_LIBRTE_DPAA_HWDEBUG
434 DPAA_ASSERT(mc->state == mc_user);
439 static inline void bm_mc_commit(struct bm_portal *portal, u8 myverb)
441 register struct bm_mc *mc = &portal->mc;
442 struct bm_mc_result *rr = mc->rr + mc->rridx;
444 #ifdef RTE_LIBRTE_DPAA_HWDEBUG
445 DPAA_ASSERT(mc->state == mc_user);
448 mc->cr->__dont_write_directly__verb = myverb | mc->vbit;
451 #ifdef RTE_LIBRTE_DPAA_HWDEBUG
456 static inline struct bm_mc_result *bm_mc_result(struct bm_portal *portal)
458 register struct bm_mc *mc = &portal->mc;
459 struct bm_mc_result *rr = mc->rr + mc->rridx;
461 #ifdef RTE_LIBRTE_DPAA_HWDEBUG
462 DPAA_ASSERT(mc->state == mc_hw);
464 /* The inactive response register's verb byte always returns zero until
465 * its command is submitted and completed. This includes the valid-bit,
466 * in case you were wondering.
468 if (!__raw_readb(&rr->verb)) {
473 mc->vbit ^= BM_MCC_VERB_VBIT;
474 #ifdef RTE_LIBRTE_DPAA_HWDEBUG
480 #define SCN_REG(bpid) BM_REG_SCN((bpid) / 32)
481 #define SCN_BIT(bpid) (0x80000000 >> (bpid & 31))
482 static inline void bm_isr_bscn_mask(struct bm_portal *portal, u8 bpid,
487 DPAA_ASSERT(bpid < bman_pool_max);
488 /* REG_SCN for bpid=0..31, REG_SCN+4 for bpid=32..63 */
489 val = __bm_in(&portal->addr, SCN_REG(bpid));
491 val |= SCN_BIT(bpid);
493 val &= ~SCN_BIT(bpid);
494 __bm_out(&portal->addr, SCN_REG(bpid), val);
497 static inline u32 __bm_isr_read(struct bm_portal *portal, enum bm_isr_reg n)
499 #if defined(RTE_ARCH_ARM64)
500 return __bm_in(&portal->addr, BM_REG_ISR + (n << 6));
502 return __bm_in(&portal->addr, BM_REG_ISR + (n << 2));
506 static inline void __bm_isr_write(struct bm_portal *portal, enum bm_isr_reg n,
509 #if defined(RTE_ARCH_ARM64)
510 __bm_out(&portal->addr, BM_REG_ISR + (n << 6), val);
512 __bm_out(&portal->addr, BM_REG_ISR + (n << 2), val);
516 /* Buffer Pool Cleanup */
517 static inline int bm_shutdown_pool(struct bm_portal *p, u32 bpid)
519 struct bm_mc_command *bm_cmd;
520 struct bm_mc_result *bm_res;
526 /* Acquire buffers until empty */
527 bm_cmd = bm_mc_start(p);
528 bm_cmd->acquire.bpid = bpid;
529 bm_mc_commit(p, BM_MCC_VERB_CMD_ACQUIRE | 1);
530 while (!(bm_res = bm_mc_result(p)))
532 if (!(bm_res->verb & BM_MCR_VERB_ACQUIRE_BUFCOUNT)) {
#endif /* __BMAN_H */