1 /* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0)
3 * Copyright 2008-2016 Freescale Semiconductor Inc.
9 #include <rte_branch_prediction.h>
11 /* Compilation constants */
/* NOTE(review): RCR is the portal's release command ring; RCR_THRESH is
 * presumably the free-space level at which the cached h/w consumer index
 * is re-read (see update_rcr_ci below) — confirm against the bm_rcr_*
 * implementation, which is not visible in this excerpt.
 */
12 #define RCR_THRESH 2 /* reread h/w CI when running out of space */
13 #define IRQNAME "BMan portal %d"
14 #define MAX_IRQNAME 16 /* big enough for "BMan portal %d" */
/* NOTE(review): the lines below are members of struct bman_portal — the
 * opening declaration is elided from this excerpt (the fields are
 * accessed later as portal->pools, portal->irq_sources, etc.).
 */
18 /* 2-element array. pools[0] is mask, pools[1] is snapshot. */
19 struct bman_depletion *pools;
/* Enabled interrupt sources; mirrored to h/w via bm_isr_enable_write() */
21 unsigned long irq_sources;
22 u32 slowpoll; /* only used when interrupts are off */
23 /* When the cpu-affine portal is activated, this is non-NULL */
24 const struct bm_portal_config *config;
/* IRQ name passed to request_irq(); formatted from IRQNAME and the cpu */
25 char irqname[MAX_IRQNAME];
/* Set of CPUs that currently own an active affine portal; updated in
 * bman_create_affine_portal()/bman_destroy_affine_portal() under
 * affine_mask_lock.
 */
28 static cpumask_t affine_mask;
29 static DEFINE_SPINLOCK(affine_mask_lock);
/* One private portal instance per lcore, fetched via get_affine_portal() */
30 static RTE_DEFINE_PER_LCORE(struct bman_portal, bman_affine_portal);
/* Return the calling lcore's private BMan portal instance. */
32 static inline struct bman_portal *get_affine_portal(void)
34 return &RTE_PER_LCORE(bman_affine_portal);
38 * This object type refers to a pool, it isn't *the* pool. There may be
39 * more than one such object per BMan buffer pool, eg. if different users of
40 * the pool are operating via different portals.
/* Private copy of the creation parameters (bman_new_pool() does
 * pool->params = *params and then patches a dynamically allocated BPID
 * into params.bpid when DYNAMIC_BPID was requested).
 */
43 struct bman_pool_params params;
44 /* Used for hash-table admin when using depletion notifications. */
45 struct bman_portal *portal;
46 struct bman_pool *next;
/* Debug-only members follow (e.g. the in_use flag set in bman_new_pool) */
47 #ifdef RTE_LIBRTE_DPAA_HWDEBUG
/*
 * Initialise a s/w portal over the memory-mapped CE/CI regions described
 * by @c and bind it to @portal.  On success the portal is returned with
 * all interrupt sources disabled and stale status cleared; the unwind
 * code at the bottom releases whatever was acquired on failure.
 * NOTE(review): several lines of this function (declarations, error
 * gotos, closing braces) are elided from this excerpt.
 */
53 struct bman_portal *bman_create_portal(struct bman_portal *portal,
54 const struct bm_portal_config *c)
/* Depletion mask from the config — copied into portal->pools[0] below */
57 const struct bman_depletion *pools = &c->mask;
63 * prep the low-level portal struct with the mapped addresses from the
64 * config, everything that follows depends on it and "config" is more
65 * for (de)reference...
67 p->addr.ce = c->addr_virt[DPAA_PORTAL_CE];
68 p->addr.ci = c->addr_virt[DPAA_PORTAL_CI];
/* Bring up the release command ring (PVB mode, cache-enabled CI) */
69 if (bm_rcr_init(p, bm_rcr_pvb, bm_rcr_cce)) {
70 pr_err("Bman RCR initialisation failed\n");
/* Management-command interface init; the init call itself is elided */
74 pr_err("Bman MC initialisation failed\n");
/* Two bman_depletion entries: [0] = mask (from config), [1] = snapshot
 * (zero-initialised) — matches the struct comment above.
 */
77 portal->pools = kmalloc(2 * sizeof(*pools), GFP_KERNEL);
80 portal->pools[0] = *pools;
81 bman_depletion_init(portal->pools + 1);
/* Mask depletion-state-change notifications for every BPID by default */
82 while (bpid < bman_pool_max) {
84 * Default to all BPIDs disabled, we enable as required at
87 bm_isr_bscn_mask(p, bpid, 0);
91 /* Write-to-clear any stale interrupt status bits */
92 bm_isr_disable_write(p, 0xffffffff);
/* Start with no interrupt sources enabled */
93 portal->irq_sources = 0;
94 bm_isr_enable_write(p, portal->irq_sources);
95 bm_isr_status_clear(p, 0xffffffff);
96 snprintf(portal->irqname, MAX_IRQNAME, IRQNAME, c->cpu);
/* NOTE(review): handler argument is NULL — presumably the IRQ is
 * consumed elsewhere in this userspace driver; confirm against the
 * request_irq() wrapper used by this project.
 */
97 if (request_irq(c->irq, NULL, 0, portal->irqname,
99 pr_err("request_irq() failed\n");
103 /* Need RCR to be empty before continuing */
104 ret = bm_rcr_get_fill(p);
106 pr_err("Bman RCR unclean\n");
/* Success path: un-disable the ISR (0 = nothing disabled) */
112 bm_isr_disable_write(p, 0);
/* ---- error unwind: release in reverse order of acquisition ---- */
116 free_irq(c->irq, portal);
118 kfree(portal->pools);
/*
 * Create the calling lcore's affine portal from config @c and record the
 * cpu in the global affine_mask under affine_mask_lock.
 */
127 bman_create_affine_portal(const struct bm_portal_config *c)
129 struct bman_portal *portal = get_affine_portal();
131 /*This function is called from the context which is already affine to
132 *CPU or in other words this in non-migratable to other CPUs.
134 portal = bman_create_portal(portal, c);
/* Mark this cpu as owning an active affine portal */
136 spin_lock(&affine_mask_lock);
137 CPU_SET(c->cpu, &affine_mask);
138 spin_unlock(&affine_mask_lock);
/*
 * Tear down a portal created by bman_create_portal(): sync the RCR,
 * release the IRQ and finish the MC/RCR sub-interfaces.
 * NOTE(review): the assignment of @pcfg (presumably bm->config) and the
 * kfree of bm->pools are among the lines elided from this excerpt.
 */
144 void bman_destroy_portal(struct bman_portal *bm)
146 const struct bm_portal_config *pcfg;
/* NOTE(review): called twice — presumably to flush the cache-enabled
 * consumer index until the ring drains; confirm in bm_rcr_cce_update().
 */
149 bm_rcr_cce_update(&bm->p);
150 bm_rcr_cce_update(&bm->p);
152 free_irq(pcfg->irq, bm);
/* Finish sub-APIs in reverse order of their init */
155 bm_mc_finish(&bm->p);
156 bm_rcr_finish(&bm->p);
/*
 * Destroy the calling lcore's affine portal and clear its cpu from
 * affine_mask.  Presumably returns the portal config for reuse —
 * NOTE(review): the @pcfg assignment and the return statement are
 * elided from this excerpt.
 */
161 bm_portal_config *bman_destroy_affine_portal(void)
163 struct bman_portal *bm = get_affine_portal();
164 const struct bm_portal_config *pcfg;
167 bman_destroy_portal(bm);
/* This cpu no longer owns an active affine portal */
168 spin_lock(&affine_mask_lock);
169 CPU_CLR(pcfg->cpu, &affine_mask);
170 spin_unlock(&affine_mask_lock);
/* Return the index of the portal config bound to the calling lcore. */
175 bman_get_portal_index(void)
177 struct bman_portal *p = get_affine_portal();
178 return p->config->index;
/* All-zero threshold set, used to clear a pool's depletion thresholds
 * (see bman_new_pool's unwind path and bman_free_pool).
 */
181 static const u32 zero_thresholds[4] = {0, 0, 0, 0};
/*
 * Create a pool object per @params:
 *  - BMAN_POOL_FLAG_DYNAMIC_BPID: allocate a free BPID rather than using
 *    params->bpid (the allocated id is patched into the private copy);
 *  - BMAN_POOL_FLAG_THRESH: program the pool's depletion thresholds.
 * Returns the new object, or NULL on failure (the unwind code at the
 * bottom clears thresholds and returns a dynamic BPID).
 * NOTE(review): error gotos, NULL checks and the return statements are
 * elided from this excerpt.
 */
183 struct bman_pool *bman_new_pool(const struct bman_pool_params *params)
185 struct bman_pool *pool = NULL;
188 if (params->flags & BMAN_POOL_FLAG_DYNAMIC_BPID) {
189 int ret = bman_alloc_bpid(&bpid);
/* A statically supplied BPID must be within the valid range */
194 if (params->bpid >= bman_pool_max)
198 if (params->flags & BMAN_POOL_FLAG_THRESH) {
199 int ret = bm_pool_set(bpid, params->thresholds);
205 pool = kmalloc(sizeof(*pool), GFP_KERNEL);
/* Keep a private copy of the caller's parameters... */
208 pool->params = *params;
209 #ifdef RTE_LIBRTE_DPAA_HWDEBUG
210 atomic_set(&pool->in_use, 1);
/* ...and patch in the dynamically allocated BPID where one was used */
212 if (params->flags & BMAN_POOL_FLAG_DYNAMIC_BPID)
213 pool->params.bpid = bpid;
/* ---- error unwind: clear thresholds, give the BPID back ---- */
217 if (params->flags & BMAN_POOL_FLAG_THRESH)
218 bm_pool_set(bpid, zero_thresholds);
220 if (params->flags & BMAN_POOL_FLAG_DYNAMIC_BPID)
221 bman_release_bpid(bpid);
/*
 * Release a pool object: clear any depletion thresholds it programmed
 * and return a dynamically allocated BPID to the allocator.
 * NOTE(review): the kfree of @pool itself is elided from this excerpt.
 */
227 void bman_free_pool(struct bman_pool *pool)
229 if (pool->params.flags & BMAN_POOL_FLAG_THRESH)
230 bm_pool_set(pool->params.bpid, zero_thresholds);
231 if (pool->params.flags & BMAN_POOL_FLAG_DYNAMIC_BPID)
232 bman_release_bpid(pool->params.bpid);
/* Read-only accessor for the parameters the pool was created with. */
236 const struct bman_pool_params *bman_get_params(const struct bman_pool *pool)
238 return &pool->params;
/*
 * Refresh the cached RCR consumer index.  The choice between a prefetch
 * and a full update is made on @avail — NOTE(review): the branch itself
 * is elided from this excerpt; presumably it compares against
 * RCR_THRESH (see the macro at the top of the file) — confirm.
 */
241 static void update_rcr_ci(struct bman_portal *p, int avail)
244 bm_rcr_cce_prefetch(&p->p);
246 bm_rcr_cce_update(&p->p);
/* Only the low 48 bits of bm_buffer.opaque are significant to h/w */
249 #define BMAN_BUF_MASK 0x0000fffffffffffful
/*
 * Release up to 8 buffers to @pool through the affine portal's release
 * command ring.  @flags is currently unused.  Debug builds validate
 * @num (1..8) and reject pools created with BMAN_POOL_FLAG_NO_RELEASE.
 * NOTE(review): declarations, the error returns and the loop body
 * assignments are partially elided from this excerpt.
 */
250 int bman_release(struct bman_pool *pool, const struct bm_buffer *bufs, u8 num,
251 u32 flags __maybe_unused)
253 struct bman_portal *p;
254 struct bm_rcr_entry *r;
258 #ifdef RTE_LIBRTE_DPAA_HWDEBUG
259 if (!num || (num > 8))
261 if (pool->params.flags & BMAN_POOL_FLAG_NO_RELEASE)
265 p = get_affine_portal();
/* Refresh the cached h/w CI if the ring is short on space (RCR_THRESH) */
266 avail = bm_rcr_get_avail(&p->p);
268 update_rcr_ci(p, avail);
269 r = bm_rcr_start(&p->p);
274 * we can copy all but the first entry, as this can trigger badness
/* First entry carries the BPID in the top 16 bits of the BE64 word */
278 cpu_to_be64(((u64)pool->params.bpid << 48) |
279 (bufs[0].opaque & BMAN_BUF_MASK));
281 for (i = 1; i < num; i++)
283 cpu_to_be64(bufs[i].opaque & BMAN_BUF_MASK);
/* Commit as a single-BPID release of @num buffers */
286 bm_rcr_pvb_commit(&p->p, BM_RCR_VERB_CMD_BPID_SINGLE |
287 (num & BM_RCR_VERB_BUFCOUNT_MASK));
/*
 * Acquire up to 8 buffers from @pool using an ACQUIRE management
 * command on the affine portal, busy-polling for the result.  The count
 * actually acquired is read from the result verb and may be fewer than
 * @num.  @flags is currently unused.  Debug builds validate @num (1..8)
 * and reject pools created with BMAN_POOL_FLAG_ONLY_RELEASE.
 * NOTE(review): the cpu_relax in the poll loop, the copy of the bufs
 * into @bufs and the return statement are elided from this excerpt.
 */
292 int bman_acquire(struct bman_pool *pool, struct bm_buffer *bufs, u8 num,
293 u32 flags __maybe_unused)
295 struct bman_portal *p = get_affine_portal();
296 struct bm_mc_command *mcc;
297 struct bm_mc_result *mcr;
300 #ifdef RTE_LIBRTE_DPAA_HWDEBUG
301 if (!num || (num > 8))
303 if (pool->params.flags & BMAN_POOL_FLAG_ONLY_RELEASE)
/* Build and commit the ACQUIRE command for this pool's BPID */
307 mcc = bm_mc_start(&p->p);
308 mcc->acquire.bpid = pool->params.bpid;
309 bm_mc_commit(&p->p, BM_MCC_VERB_CMD_ACQUIRE |
310 (num & BM_MCC_VERB_ACQUIRE_BUFCOUNT));
/* Busy-poll until the hardware posts a result */
311 while (!(mcr = bm_mc_result(&p->p)))
/* Actual buffer count returned by the hardware */
313 ret = mcr->verb & BM_MCR_VERB_ACQUIRE_BUFCOUNT;
/* Each token comes back big-endian; convert for the caller */
315 for (i = 0; i < num; i++)
317 be64_to_cpu(mcr->acquire.bufs[i].opaque);
/*
 * Query the global availability/depletion state of all pools into
 * @state via a QUERY management command on the affine portal
 * (busy-polled; a verb check guards against a mismatched result).
 */
324 int bman_query_pools(struct bm_pool_state *state)
326 struct bman_portal *p = get_affine_portal();
327 struct bm_mc_result *mcr;
330 bm_mc_commit(&p->p, BM_MCC_VERB_CMD_QUERY);
331 while (!(mcr = bm_mc_result(&p->p)))
333 DPAA_ASSERT((mcr->verb & BM_MCR_VERB_CMD_MASK) ==
334 BM_MCR_VERB_CMD_QUERY);
/* In-place byteswap of the availability (as) and depletion (ds) words.
 * NOTE(review): @state is presumably populated from mcr->query on a
 * line elided from this excerpt — confirm it is copied before the swap.
 */
336 state->as.state.state[0] = be32_to_cpu(state->as.state.state[0]);
337 state->as.state.state[1] = be32_to_cpu(state->as.state.state[1]);
338 state->ds.state.state[0] = be32_to_cpu(state->ds.state.state[0]);
339 state->ds.state.state[1] = be32_to_cpu(state->ds.state.state[1]);
/* Return the current free-buffer count for this pool's BPID. */
343 u32 bman_query_free_buffers(struct bman_pool *pool)
345 return bm_pool_free_buffers(pool->params.bpid);
/*
 * Reprogram the pool's depletion thresholds.  @thresholds is presumably
 * an array of four u32s, matching zero_thresholds above — confirm
 * against the bm_pool_set() prototype (not visible in this excerpt).
 */
348 int bman_update_pool_thresholds(struct bman_pool *pool, const u32 *thresholds)
352 bpid = bman_get_params(pool)->bpid;
354 return bm_pool_set(bpid, thresholds);
357 int bman_shutdown_pool(u32 bpid)
359 struct bman_portal *p = get_affine_portal();
360 return bm_shutdown_pool(&p->p, bpid);