bus/fslmc: add dynamic config for memback portal mode
drivers/bus/fslmc/qbman/qbman_portal.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  *
3  * Copyright (C) 2014-2016 Freescale Semiconductor, Inc.
4  * Copyright 2018 NXP
5  *
6  */
7
8 #include "qbman_sys.h"
9 #include "qbman_portal.h"
10
11 /* QBMan portal management command codes */
12 #define QBMAN_MC_ACQUIRE       0x30
13 #define QBMAN_WQCHAN_CONFIGURE 0x46
14
15 /* Reverse mapping of QBMAN_CENA_SWP_DQRR() */
16 #define QBMAN_IDX_FROM_DQRR(p) (((unsigned long)(p) & 0x1ff) >> 6)
17
18 /* QBMan FQ management command codes */
19 #define QBMAN_FQ_SCHEDULE       0x48
20 #define QBMAN_FQ_FORCE          0x49
21 #define QBMAN_FQ_XON            0x4d
22 #define QBMAN_FQ_XOFF           0x4e
23
24 /*******************************/
25 /* Pre-defined attribute codes */
26 /*******************************/
27
28 #define QBMAN_RESPONSE_VERB_MASK   0x7f
29
30 /*************************/
31 /* SDQCR attribute codes */
32 /*************************/
33 #define QB_SDQCR_FC_SHIFT   29
34 #define QB_SDQCR_FC_MASK    0x1
35 #define QB_SDQCR_DCT_SHIFT  24
36 #define QB_SDQCR_DCT_MASK   0x3
37 #define QB_SDQCR_TOK_SHIFT  16
38 #define QB_SDQCR_TOK_MASK   0xff
39 #define QB_SDQCR_SRC_SHIFT  0
40 #define QB_SDQCR_SRC_MASK   0xffff
41
42 /* opaque token for static dequeues */
43 #define QMAN_SDQCR_TOKEN    0xbb
44
45 enum qbman_sdqcr_dct {
46         qbman_sdqcr_dct_null = 0,
47         qbman_sdqcr_dct_prio_ics,
48         qbman_sdqcr_dct_active_ics,
49         qbman_sdqcr_dct_active
50 };
51
52 enum qbman_sdqcr_fc {
53         qbman_sdqcr_fc_one = 0,
54         qbman_sdqcr_fc_up_to_3 = 1
55 };
56
57 /* We need to keep track of which SWP triggered a pull command,
58  * so keep an array of portal pointers indexed by portal ID and
59  * use the token field to find the proper portal.
60  */
61 #define MAX_QBMAN_PORTALS  64
62 static struct qbman_swp *portal_idx_map[MAX_QBMAN_PORTALS];
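/*
 * Illustrative note (not part of the driver): qbman_swp_pull_*() below stamps
 * each pull command with tok = portal index + 1, so a token of 0 in a dequeue
 * response means "not yet written by hardware", and a non-zero token can be
 * mapped back to the issuing portal via portal_idx_map[tok - 1], as
 * qbman_check_command_complete() does later in this file.
 */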
63
64 /* Internal Function declaration */
65 static int
66 qbman_swp_enqueue_array_mode_direct(struct qbman_swp *s,
67                 const struct qbman_eq_desc *d,
68                 const struct qbman_fd *fd);
69 static int
70 qbman_swp_enqueue_array_mode_mem_back(struct qbman_swp *s,
71                 const struct qbman_eq_desc *d,
72                 const struct qbman_fd *fd);
73
74 static int
75 qbman_swp_enqueue_ring_mode_direct(struct qbman_swp *s,
76                 const struct qbman_eq_desc *d,
77                 const struct qbman_fd *fd);
78 static int
79 qbman_swp_enqueue_ring_mode_mem_back(struct qbman_swp *s,
80                 const struct qbman_eq_desc *d,
81                 const struct qbman_fd *fd);
82
83 static int
84 qbman_swp_enqueue_multiple_direct(struct qbman_swp *s,
85                 const struct qbman_eq_desc *d,
86                 const struct qbman_fd *fd,
87                 uint32_t *flags,
88                 int num_frames);
89 static int
90 qbman_swp_enqueue_multiple_mem_back(struct qbman_swp *s,
91                 const struct qbman_eq_desc *d,
92                 const struct qbman_fd *fd,
93                 uint32_t *flags,
94                 int num_frames);
95
96 static int
97 qbman_swp_enqueue_multiple_desc_direct(struct qbman_swp *s,
98                 const struct qbman_eq_desc *d,
99                 const struct qbman_fd *fd,
100                 int num_frames);
101 static int
102 qbman_swp_enqueue_multiple_desc_mem_back(struct qbman_swp *s,
103                 const struct qbman_eq_desc *d,
104                 const struct qbman_fd *fd,
105                 int num_frames);
106
107 static int
108 qbman_swp_pull_direct(struct qbman_swp *s, struct qbman_pull_desc *d);
109 static int
110 qbman_swp_pull_mem_back(struct qbman_swp *s, struct qbman_pull_desc *d);
111
112 const struct qbman_result *qbman_swp_dqrr_next_direct(struct qbman_swp *s);
113 const struct qbman_result *qbman_swp_dqrr_next_mem_back(struct qbman_swp *s);
114
115 static int
116 qbman_swp_release_direct(struct qbman_swp *s,
117                 const struct qbman_release_desc *d,
118                 const uint64_t *buffers, unsigned int num_buffers);
119 static int
120 qbman_swp_release_mem_back(struct qbman_swp *s,
121                 const struct qbman_release_desc *d,
122                 const uint64_t *buffers, unsigned int num_buffers);
123
124 /* Function pointers */
125 static int (*qbman_swp_enqueue_array_mode_ptr)(struct qbman_swp *s,
126                 const struct qbman_eq_desc *d,
127                 const struct qbman_fd *fd)
128         = qbman_swp_enqueue_array_mode_direct;
129
130 static int (*qbman_swp_enqueue_ring_mode_ptr)(struct qbman_swp *s,
131                 const struct qbman_eq_desc *d,
132                 const struct qbman_fd *fd)
133         = qbman_swp_enqueue_ring_mode_direct;
134
135 static int (*qbman_swp_enqueue_multiple_ptr)(struct qbman_swp *s,
136                 const struct qbman_eq_desc *d,
137                 const struct qbman_fd *fd,
138                 uint32_t *flags,
139                 int num_frames)
140         = qbman_swp_enqueue_multiple_direct;
141
142 static int (*qbman_swp_enqueue_multiple_desc_ptr)(struct qbman_swp *s,
143                 const struct qbman_eq_desc *d,
144                 const struct qbman_fd *fd,
145                 int num_frames)
146         = qbman_swp_enqueue_multiple_desc_direct;
147
148 static int (*qbman_swp_pull_ptr)(struct qbman_swp *s,
149                 struct qbman_pull_desc *d)
150         = qbman_swp_pull_direct;
151
152 const struct qbman_result *(*qbman_swp_dqrr_next_ptr)(struct qbman_swp *s)
153                 = qbman_swp_dqrr_next_direct;
154
155 static int (*qbman_swp_release_ptr)(struct qbman_swp *s,
156                         const struct qbman_release_desc *d,
157                         const uint64_t *buffers, unsigned int num_buffers)
158                         = qbman_swp_release_direct;
159
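/*
 * Illustrative note (not part of the driver): these pointers default to the
 * "direct" variants; qbman_swp_init() below repoints all of them at the
 * "*_mem_back" variants when the portal is QMan 5.0 or later and was mapped
 * with qman_cena_fastest_access. This is how the memback portal mode is
 * configured dynamically at runtime.
 */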
160 /*********************************/
161 /* Portal constructor/destructor */
162 /*********************************/
163
164 /* Software portals should always be in the power-on state when we initialise,
165  * due to the CCSR-based portal reset functionality that MC has.
166  *
167  * Erk! Turns out that QMan versions prior to 4.1 do not correctly reset DQRR
168  * valid-bits, so we need to support a workaround where we don't trust
169  * valid-bits when detecting new entries until any stale ring entries have been
170  * overwritten at least once. The idea is that we read PI for the first few
171  * entries, then switch to valid-bit after that. The trick is to clear the
172  * bug-work-around boolean once the PI wraps around the ring for the first time.
173  *
174  * Note: this still carries a slight additional cost once the decrementer hits
175  * zero.
176  */
177 struct qbman_swp *qbman_swp_init(const struct qbman_swp_desc *d)
178 {
179         int ret;
180         uint32_t eqcr_pi;
181         uint32_t mask_size;
182         struct qbman_swp *p = malloc(sizeof(*p));
183
184         if (!p)
185                 return NULL;
186
187         memset(p, 0, sizeof(struct qbman_swp));
188
189         p->desc = *d;
190 #ifdef QBMAN_CHECKING
191         p->mc.check = swp_mc_can_start;
192 #endif
193         p->mc.valid_bit = QB_VALID_BIT;
194         p->sdq |= qbman_sdqcr_dct_prio_ics << QB_SDQCR_DCT_SHIFT;
195         p->sdq |= qbman_sdqcr_fc_up_to_3 << QB_SDQCR_FC_SHIFT;
196         p->sdq |= QMAN_SDQCR_TOKEN << QB_SDQCR_TOK_SHIFT;
197         if ((d->qman_version & QMAN_REV_MASK) >= QMAN_REV_5000
198                         && (d->cena_access_mode == qman_cena_fastest_access))
199                 p->mr.valid_bit = QB_VALID_BIT;
200
201         atomic_set(&p->vdq.busy, 1);
202         p->vdq.valid_bit = QB_VALID_BIT;
203         p->dqrr.valid_bit = QB_VALID_BIT;
204         qman_version = p->desc.qman_version;
205         if ((qman_version & QMAN_REV_MASK) < QMAN_REV_4100) {
206                 p->dqrr.dqrr_size = 4;
207                 p->dqrr.reset_bug = 1;
208         } else {
209                 p->dqrr.dqrr_size = 8;
210                 p->dqrr.reset_bug = 0;
211         }
212
213         ret = qbman_swp_sys_init(&p->sys, d, p->dqrr.dqrr_size);
214         if (ret) {
215                 free(p);
216                 pr_err("qbman_swp_sys_init() failed %d\n", ret);
217                 return NULL;
218         }
219
220         /* Verify that the DQRR PI is 0 - if it is not, the portal isn't
221          * in its default state, which is an error
222          */
223         if (qbman_cinh_read(&p->sys, QBMAN_CINH_SWP_DQPI) & 0xF) {
224                 pr_err("qbman DQRR PI is not zero, portal is not clean\n");
225                 free(p);
226                 return NULL;
227         }
228
229         /* SDQCR needs to be initialized to 0 when no channels are
230          * being dequeued from, or else the QMan HW will indicate an
231          * error.  The values that were calculated above will be
232          * applied when dequeues from a specific channel are enabled.
233          */
234         qbman_cinh_write(&p->sys, QBMAN_CINH_SWP_SDQCR, 0);
235
236         p->eqcr.pi_ring_size = 8;
237         if ((qman_version & QMAN_REV_MASK) >= QMAN_REV_5000
238                         && (d->cena_access_mode == qman_cena_fastest_access)) {
239                 p->eqcr.pi_ring_size = 32;
240                 qbman_swp_enqueue_array_mode_ptr =
241                                 qbman_swp_enqueue_array_mode_mem_back;
242                 qbman_swp_enqueue_ring_mode_ptr =
243                                 qbman_swp_enqueue_ring_mode_mem_back;
244                 qbman_swp_enqueue_multiple_ptr =
245                                 qbman_swp_enqueue_multiple_mem_back;
246                 qbman_swp_enqueue_multiple_desc_ptr =
247                                 qbman_swp_enqueue_multiple_desc_mem_back;
248                 qbman_swp_pull_ptr = qbman_swp_pull_mem_back;
249                 qbman_swp_dqrr_next_ptr = qbman_swp_dqrr_next_mem_back;
250                 qbman_swp_release_ptr = qbman_swp_release_mem_back;
251         }
252
253         for (mask_size = p->eqcr.pi_ring_size; mask_size > 0; mask_size >>= 1)
254                 p->eqcr.pi_mask = (p->eqcr.pi_mask<<1) + 1;
255         eqcr_pi = qbman_cinh_read(&p->sys, QBMAN_CINH_SWP_EQCR_PI);
256         p->eqcr.pi = eqcr_pi & p->eqcr.pi_mask;
257         p->eqcr.pi_vb = eqcr_pi & QB_VALID_BIT;
258         if ((p->desc.qman_version & QMAN_REV_MASK) >= QMAN_REV_5000
259                         && (d->cena_access_mode == qman_cena_fastest_access))
260                 p->eqcr.ci = qbman_cinh_read(&p->sys,
261                                 QBMAN_CINH_SWP_EQCR_CI) & p->eqcr.pi_mask;
262         else
263                 p->eqcr.ci = qbman_cinh_read(&p->sys,
264                                 QBMAN_CINH_SWP_EQCR_PI) & p->eqcr.pi_mask;
265         p->eqcr.available = p->eqcr.pi_ring_size -
266                                 qm_cyc_diff(p->eqcr.pi_ring_size,
267                                 p->eqcr.ci & (p->eqcr.pi_mask<<1),
268                                 p->eqcr.pi & (p->eqcr.pi_mask<<1));
269
270         portal_idx_map[p->desc.idx] = p;
271         return p;
272 }
273
274 void qbman_swp_finish(struct qbman_swp *p)
275 {
276 #ifdef QBMAN_CHECKING
277         QBMAN_BUG_ON(p->mc.check != swp_mc_can_start);
278 #endif
279         qbman_swp_sys_finish(&p->sys);
280         portal_idx_map[p->desc.idx] = NULL;
281         free(p);
282 }
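/*
 * Illustrative sketch (not part of the driver): the portal lifecycle as seen
 * by a caller. 'desc' is assumed to have been filled in by the DPIO layer.
 *
 *   struct qbman_swp *swp = qbman_swp_init(desc);
 *
 *   if (!swp)
 *           return -EINVAL;
 *   ...enqueue/dequeue via the APIs below...
 *   qbman_swp_finish(swp);
 */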
283
284 const struct qbman_swp_desc *qbman_swp_get_desc(struct qbman_swp *p)
285 {
286         return &p->desc;
287 }
288
289 /**************/
290 /* Interrupts */
291 /**************/
292
293 uint32_t qbman_swp_interrupt_get_vanish(struct qbman_swp *p)
294 {
295         return qbman_cinh_read(&p->sys, QBMAN_CINH_SWP_ISDR);
296 }
297
298 void qbman_swp_interrupt_set_vanish(struct qbman_swp *p, uint32_t mask)
299 {
300         qbman_cinh_write(&p->sys, QBMAN_CINH_SWP_ISDR, mask);
301 }
302
303 uint32_t qbman_swp_interrupt_read_status(struct qbman_swp *p)
304 {
305         return qbman_cinh_read(&p->sys, QBMAN_CINH_SWP_ISR);
306 }
307
308 void qbman_swp_interrupt_clear_status(struct qbman_swp *p, uint32_t mask)
309 {
310         qbman_cinh_write(&p->sys, QBMAN_CINH_SWP_ISR, mask);
311 }
312
313 uint32_t qbman_swp_dqrr_thrshld_read_status(struct qbman_swp *p)
314 {
315         return qbman_cinh_read(&p->sys, QBMAN_CINH_SWP_DQRR_ITR);
316 }
317
318 void qbman_swp_dqrr_thrshld_write(struct qbman_swp *p, uint32_t mask)
319 {
320         qbman_cinh_write(&p->sys, QBMAN_CINH_SWP_DQRR_ITR, mask);
321 }
322
323 uint32_t qbman_swp_intr_timeout_read_status(struct qbman_swp *p)
324 {
325         return qbman_cinh_read(&p->sys, QBMAN_CINH_SWP_ITPR);
326 }
327
328 void qbman_swp_intr_timeout_write(struct qbman_swp *p, uint32_t mask)
329 {
330         qbman_cinh_write(&p->sys, QBMAN_CINH_SWP_ITPR, mask);
331 }
332
333 uint32_t qbman_swp_interrupt_get_trigger(struct qbman_swp *p)
334 {
335         return qbman_cinh_read(&p->sys, QBMAN_CINH_SWP_IER);
336 }
337
338 void qbman_swp_interrupt_set_trigger(struct qbman_swp *p, uint32_t mask)
339 {
340         qbman_cinh_write(&p->sys, QBMAN_CINH_SWP_IER, mask);
341 }
342
343 int qbman_swp_interrupt_get_inhibit(struct qbman_swp *p)
344 {
345         return qbman_cinh_read(&p->sys, QBMAN_CINH_SWP_IIR);
346 }
347
348 void qbman_swp_interrupt_set_inhibit(struct qbman_swp *p, int inhibit)
349 {
350         qbman_cinh_write(&p->sys, QBMAN_CINH_SWP_IIR,
351                          inhibit ? 0xffffffff : 0);
352 }
353
354 /***********************/
355 /* Management commands */
356 /***********************/
357
358 /*
359  * Internal code common to all types of management commands.
360  */
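/*
 * Illustrative sketch (an assumption-labelled example, not part of the
 * driver): the management command protocol is start -> fill -> submit ->
 * poll. 'swp' and the command payload layout are placeholders here.
 *
 *   uint32_t *cmd, *rsp;
 *
 *   cmd = qbman_swp_mc_start(swp);
 *   if (!cmd)
 *           return -EBUSY;
 *   ...fill in the command payload after the verb byte at cmd[0]...
 *   qbman_swp_mc_submit(swp, cmd, QBMAN_WQCHAN_CONFIGURE);
 *   do {
 *           rsp = qbman_swp_mc_result(swp);
 *   } while (!rsp);
 */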
361
362 void *qbman_swp_mc_start(struct qbman_swp *p)
363 {
364         void *ret;
365 #ifdef QBMAN_CHECKING
366         QBMAN_BUG_ON(p->mc.check != swp_mc_can_start);
367 #endif
368         if ((p->desc.qman_version & QMAN_REV_MASK) >= QMAN_REV_5000
369                     && (p->desc.cena_access_mode == qman_cena_fastest_access))
370                 ret = qbman_cena_write_start(&p->sys, QBMAN_CENA_SWP_CR_MEM);
371         else
372                 ret = qbman_cena_write_start(&p->sys, QBMAN_CENA_SWP_CR);
373 #ifdef QBMAN_CHECKING
374         if (!ret)
375                 p->mc.check = swp_mc_can_submit;
376 #endif
377         return ret;
378 }
379
380 void qbman_swp_mc_submit(struct qbman_swp *p, void *cmd, uint8_t cmd_verb)
381 {
382         uint8_t *v = cmd;
383 #ifdef QBMAN_CHECKING
384         QBMAN_BUG_ON(p->mc.check != swp_mc_can_submit);
385 #endif
386         /* TBD: "|=" is going to hurt performance. Need to move as many fields
387          * out of word zero, and for those that remain, the "OR" needs to occur
388          * at the caller side. This debug check helps to catch cases where the
389          * caller wants to OR but has forgotten to do so.
390          */
391         QBMAN_BUG_ON((*v & cmd_verb) != *v);
392         if ((p->desc.qman_version & QMAN_REV_MASK) >= QMAN_REV_5000
393                     && (p->desc.cena_access_mode == qman_cena_fastest_access)) {
394                 *v = cmd_verb | p->mr.valid_bit;
395                 qbman_cena_write_complete(&p->sys, QBMAN_CENA_SWP_CR_MEM, cmd);
396                 dma_wmb();
397                 qbman_cinh_write(&p->sys, QBMAN_CINH_SWP_CR_RT, QMAN_RT_MODE);
398         } else {
399                 dma_wmb();
400                 *v = cmd_verb | p->mc.valid_bit;
401                 qbman_cena_write_complete(&p->sys, QBMAN_CENA_SWP_CR, cmd);
402                 clean(cmd);
403         }
404 #ifdef QBMAN_CHECKING
405         p->mc.check = swp_mc_can_poll;
406 #endif
407 }
408
409 void *qbman_swp_mc_result(struct qbman_swp *p)
410 {
411         uint32_t *ret, verb;
412 #ifdef QBMAN_CHECKING
413         QBMAN_BUG_ON(p->mc.check != swp_mc_can_poll);
414 #endif
415         if ((p->desc.qman_version & QMAN_REV_MASK) >= QMAN_REV_5000
416                 && (p->desc.cena_access_mode == qman_cena_fastest_access)) {
417                 ret = qbman_cena_read(&p->sys, QBMAN_CENA_SWP_RR_MEM);
418                 /* Command completed if the valid bit is toggled */
419                 if (p->mr.valid_bit != (ret[0] & QB_VALID_BIT))
420                         return NULL;
421                 /* Remove the valid-bit -
422                  * command completed iff the rest is non-zero
423                  */
424                 verb = ret[0] & ~QB_VALID_BIT;
425                 if (!verb)
426                         return NULL;
427                 p->mr.valid_bit ^= QB_VALID_BIT;
428         } else {
429                 qbman_cena_invalidate_prefetch(&p->sys,
430                         QBMAN_CENA_SWP_RR(p->mc.valid_bit));
431                 ret = qbman_cena_read(&p->sys,
432                                       QBMAN_CENA_SWP_RR(p->mc.valid_bit));
433                 /* Remove the valid-bit -
434                  * command completed iff the rest is non-zero
435                  */
436                 verb = ret[0] & ~QB_VALID_BIT;
437                 if (!verb)
438                         return NULL;
439                 p->mc.valid_bit ^= QB_VALID_BIT;
440         }
441 #ifdef QBMAN_CHECKING
442         p->mc.check = swp_mc_can_start;
443 #endif
444         return ret;
445 }
446
447 /***********/
448 /* Enqueue */
449 /***********/
450
451 #define QB_ENQUEUE_CMD_OPTIONS_SHIFT    0
452 enum qb_enqueue_commands {
453         enqueue_empty = 0,
454         enqueue_response_always = 1,
455         enqueue_rejects_to_fq = 2
456 };
457
458 #define QB_ENQUEUE_CMD_EC_OPTION_MASK        0x3
459 #define QB_ENQUEUE_CMD_ORP_ENABLE_SHIFT      2
460 #define QB_ENQUEUE_CMD_IRQ_ON_DISPATCH_SHIFT 3
461 #define QB_ENQUEUE_CMD_TARGET_TYPE_SHIFT     4
462 #define QB_ENQUEUE_CMD_DCA_PK_SHIFT          6
463 #define QB_ENQUEUE_CMD_DCA_EN_SHIFT          7
464 #define QB_ENQUEUE_CMD_NLIS_SHIFT            14
465 #define QB_ENQUEUE_CMD_IS_NESN_SHIFT         15
466
467 void qbman_eq_desc_clear(struct qbman_eq_desc *d)
468 {
469         memset(d, 0, sizeof(*d));
470 }
471
472 void qbman_eq_desc_set_no_orp(struct qbman_eq_desc *d, int respond_success)
473 {
474         d->eq.verb &= ~(1 << QB_ENQUEUE_CMD_ORP_ENABLE_SHIFT);
475         if (respond_success)
476                 d->eq.verb |= enqueue_response_always;
477         else
478                 d->eq.verb |= enqueue_rejects_to_fq;
479 }
480
481 void qbman_eq_desc_set_orp(struct qbman_eq_desc *d, int respond_success,
482                            uint16_t opr_id, uint16_t seqnum, int incomplete)
483 {
484         d->eq.verb |= 1 << QB_ENQUEUE_CMD_ORP_ENABLE_SHIFT;
485         if (respond_success)
486                 d->eq.verb |= enqueue_response_always;
487         else
488                 d->eq.verb |= enqueue_rejects_to_fq;
489
490         d->eq.orpid = opr_id;
491         d->eq.seqnum = seqnum;
492         if (incomplete)
493                 d->eq.seqnum |= 1 << QB_ENQUEUE_CMD_NLIS_SHIFT;
494         else
495                 d->eq.seqnum &= ~(1 << QB_ENQUEUE_CMD_NLIS_SHIFT);
496 }
497
498 void qbman_eq_desc_set_orp_hole(struct qbman_eq_desc *d, uint16_t opr_id,
499                                 uint16_t seqnum)
500 {
501         d->eq.verb |= 1 << QB_ENQUEUE_CMD_ORP_ENABLE_SHIFT;
502         d->eq.verb &= ~QB_ENQUEUE_CMD_EC_OPTION_MASK;
503         d->eq.orpid = opr_id;
504         d->eq.seqnum = seqnum;
505         d->eq.seqnum &= ~(1 << QB_ENQUEUE_CMD_NLIS_SHIFT);
506         d->eq.seqnum &= ~(1 << QB_ENQUEUE_CMD_IS_NESN_SHIFT);
507 }
508
509 void qbman_eq_desc_set_orp_nesn(struct qbman_eq_desc *d, uint16_t opr_id,
510                                 uint16_t seqnum)
511 {
512         d->eq.verb |= 1 << QB_ENQUEUE_CMD_ORP_ENABLE_SHIFT;
513         d->eq.verb &= ~QB_ENQUEUE_CMD_EC_OPTION_MASK;
514         d->eq.orpid = opr_id;
515         d->eq.seqnum = seqnum;
516         d->eq.seqnum &= ~(1 << QB_ENQUEUE_CMD_NLIS_SHIFT);
517         d->eq.seqnum |= 1 << QB_ENQUEUE_CMD_IS_NESN_SHIFT;
518 }
519
520 void qbman_eq_desc_set_response(struct qbman_eq_desc *d,
521                                 dma_addr_t storage_phys,
522                                 int stash)
523 {
524         d->eq.rsp_addr = storage_phys;
525         d->eq.wae = stash;
526 }
527
528 void qbman_eq_desc_set_token(struct qbman_eq_desc *d, uint8_t token)
529 {
530         d->eq.rspid = token;
531 }
532
533 void qbman_eq_desc_set_fq(struct qbman_eq_desc *d, uint32_t fqid)
534 {
535         d->eq.verb &= ~(1 << QB_ENQUEUE_CMD_TARGET_TYPE_SHIFT);
536         d->eq.tgtid = fqid;
537 }
538
539 void qbman_eq_desc_set_qd(struct qbman_eq_desc *d, uint32_t qdid,
540                           uint16_t qd_bin, uint8_t qd_prio)
541 {
542         d->eq.verb |= 1 << QB_ENQUEUE_CMD_TARGET_TYPE_SHIFT;
543         d->eq.tgtid = qdid;
544         d->eq.qdbin = qd_bin;
545         d->eq.qpri = qd_prio;
546 }
547
548 void qbman_eq_desc_set_eqdi(struct qbman_eq_desc *d, int enable)
549 {
550         if (enable)
551                 d->eq.verb |= 1 << QB_ENQUEUE_CMD_IRQ_ON_DISPATCH_SHIFT;
552         else
553                 d->eq.verb &= ~(1 << QB_ENQUEUE_CMD_IRQ_ON_DISPATCH_SHIFT);
554 }
555
556 void qbman_eq_desc_set_dca(struct qbman_eq_desc *d, int enable,
557                            uint8_t dqrr_idx, int park)
558 {
559         if (enable) {
560                 d->eq.dca = dqrr_idx;
561                 if (park)
562                         d->eq.dca |= 1 << QB_ENQUEUE_CMD_DCA_PK_SHIFT;
563                 else
564                         d->eq.dca &= ~(1 << QB_ENQUEUE_CMD_DCA_PK_SHIFT);
565                 d->eq.dca |= 1 << QB_ENQUEUE_CMD_DCA_EN_SHIFT;
566         } else {
567                 d->eq.dca &= ~(1 << QB_ENQUEUE_CMD_DCA_EN_SHIFT);
568         }
569 }
570
571 #define EQAR_IDX(eqar)     ((eqar) & 0x1f)
572 #define EQAR_VB(eqar)      ((eqar) & 0x80)
573 #define EQAR_SUCCESS(eqar) ((eqar) & 0x100)
574
575 static inline void qbman_write_eqcr_am_rt_register(struct qbman_swp *p,
576                                                    uint8_t idx)
577 {
578         if (idx < 16)
579                 qbman_cinh_write(&p->sys, QBMAN_CINH_SWP_EQCR_AM_RT + idx * 4,
580                                      QMAN_RT_MODE);
581         else
582                 qbman_cinh_write(&p->sys, QBMAN_CINH_SWP_EQCR_AM_RT2 +
583                                      (idx - 16) * 4,
584                                      QMAN_RT_MODE);
585 }
586
587
588 static int qbman_swp_enqueue_array_mode_direct(struct qbman_swp *s,
589                                                const struct qbman_eq_desc *d,
590                                                const struct qbman_fd *fd)
591 {
592         uint32_t *p;
593         const uint32_t *cl = qb_cl(d);
594         uint32_t eqar = qbman_cinh_read(&s->sys, QBMAN_CINH_SWP_EQAR);
595
596         pr_debug("EQAR=%08x\n", eqar);
597         if (!EQAR_SUCCESS(eqar))
598                 return -EBUSY;
599         p = qbman_cena_write_start_wo_shadow(&s->sys,
600                         QBMAN_CENA_SWP_EQCR(EQAR_IDX(eqar)));
601         memcpy(&p[1], &cl[1], 28);
602         memcpy(&p[8], fd, sizeof(*fd));
603
604         /* Set the verb byte, have to substitute in the valid-bit */
605         dma_wmb();
606         p[0] = cl[0] | EQAR_VB(eqar);
607         qbman_cena_write_complete_wo_shadow(&s->sys,
608                                 QBMAN_CENA_SWP_EQCR(EQAR_IDX(eqar)));
609         return 0;
610 }
611 static int qbman_swp_enqueue_array_mode_mem_back(struct qbman_swp *s,
612                                                  const struct qbman_eq_desc *d,
613                                                  const struct qbman_fd *fd)
614 {
615         uint32_t *p;
616         const uint32_t *cl = qb_cl(d);
617         uint32_t eqar = qbman_cinh_read(&s->sys, QBMAN_CINH_SWP_EQAR);
618
619         pr_debug("EQAR=%08x\n", eqar);
620         if (!EQAR_SUCCESS(eqar))
621                 return -EBUSY;
622         p = qbman_cena_write_start_wo_shadow(&s->sys,
623                         QBMAN_CENA_SWP_EQCR(EQAR_IDX(eqar)));
624         memcpy(&p[1], &cl[1], 28);
625         memcpy(&p[8], fd, sizeof(*fd));
626
627         /* Set the verb byte, have to substitute in the valid-bit */
628         p[0] = cl[0] | EQAR_VB(eqar);
629         dma_wmb();
630         qbman_write_eqcr_am_rt_register(s, EQAR_IDX(eqar));
631         return 0;
632 }
633
634 static inline int qbman_swp_enqueue_array_mode(struct qbman_swp *s,
635                                                const struct qbman_eq_desc *d,
636                                                const struct qbman_fd *fd)
637 {
638         return qbman_swp_enqueue_array_mode_ptr(s, d, fd);
639 }
640
641 static int qbman_swp_enqueue_ring_mode_direct(struct qbman_swp *s,
642                                               const struct qbman_eq_desc *d,
643                                               const struct qbman_fd *fd)
644 {
645         uint32_t *p;
646         const uint32_t *cl = qb_cl(d);
647         uint32_t eqcr_ci, full_mask, half_mask;
648
649         half_mask = (s->eqcr.pi_mask>>1);
650         full_mask = s->eqcr.pi_mask;
651         if (!s->eqcr.available) {
652                 eqcr_ci = s->eqcr.ci;
653                 s->eqcr.ci = qbman_cena_read_reg(&s->sys,
654                                 QBMAN_CENA_SWP_EQCR_CI) & full_mask;
655                 s->eqcr.available = qm_cyc_diff(s->eqcr.pi_ring_size,
656                                 eqcr_ci, s->eqcr.ci);
657                 if (!s->eqcr.available)
658                         return -EBUSY;
659         }
660
661         p = qbman_cena_write_start_wo_shadow(&s->sys,
662                         QBMAN_CENA_SWP_EQCR(s->eqcr.pi & half_mask));
663         memcpy(&p[1], &cl[1], 28);
664         memcpy(&p[8], fd, sizeof(*fd));
665         lwsync();
666
667         /* Set the verb byte, have to substitute in the valid-bit */
668         p[0] = cl[0] | s->eqcr.pi_vb;
669         qbman_cena_write_complete_wo_shadow(&s->sys,
670                         QBMAN_CENA_SWP_EQCR(s->eqcr.pi & half_mask));
671         s->eqcr.pi++;
672         s->eqcr.pi &= full_mask;
673         s->eqcr.available--;
674         if (!(s->eqcr.pi & half_mask))
675                 s->eqcr.pi_vb ^= QB_VALID_BIT;
676
677         return 0;
678 }
679
680 static int qbman_swp_enqueue_ring_mode_mem_back(struct qbman_swp *s,
681                                                 const struct qbman_eq_desc *d,
682                                                 const struct qbman_fd *fd)
683 {
684         uint32_t *p;
685         const uint32_t *cl = qb_cl(d);
686         uint32_t eqcr_ci, full_mask, half_mask;
687
688         half_mask = (s->eqcr.pi_mask>>1);
689         full_mask = s->eqcr.pi_mask;
690         if (!s->eqcr.available) {
691                 eqcr_ci = s->eqcr.ci;
692                 s->eqcr.ci = qbman_cena_read_reg(&s->sys,
693                                 QBMAN_CENA_SWP_EQCR_CI_MEMBACK) & full_mask;
694                 s->eqcr.available = qm_cyc_diff(s->eqcr.pi_ring_size,
695                                 eqcr_ci, s->eqcr.ci);
696                 if (!s->eqcr.available)
697                         return -EBUSY;
698         }
699
700         p = qbman_cena_write_start_wo_shadow(&s->sys,
701                         QBMAN_CENA_SWP_EQCR(s->eqcr.pi & half_mask));
702         memcpy(&p[1], &cl[1], 28);
703         memcpy(&p[8], fd, sizeof(*fd));
704
705         /* Set the verb byte, have to substitute in the valid-bit */
706         p[0] = cl[0] | s->eqcr.pi_vb;
707         s->eqcr.pi++;
708         s->eqcr.pi &= full_mask;
709         s->eqcr.available--;
710         if (!(s->eqcr.pi & half_mask))
711                 s->eqcr.pi_vb ^= QB_VALID_BIT;
712         dma_wmb();
713         qbman_cinh_write(&s->sys, QBMAN_CINH_SWP_EQCR_PI,
714                                 (QB_RT_BIT)|(s->eqcr.pi)|s->eqcr.pi_vb);
715         return 0;
716 }
717
718 static int qbman_swp_enqueue_ring_mode(struct qbman_swp *s,
719                                        const struct qbman_eq_desc *d,
720                                        const struct qbman_fd *fd)
721 {
722         return qbman_swp_enqueue_ring_mode_ptr(s, d, fd);
723 }
724
725 int qbman_swp_enqueue(struct qbman_swp *s, const struct qbman_eq_desc *d,
726                       const struct qbman_fd *fd)
727 {
728         if (s->sys.eqcr_mode == qman_eqcr_vb_array)
729                 return qbman_swp_enqueue_array_mode(s, d, fd);
730         else    /* Use ring mode by default */
731                 return qbman_swp_enqueue_ring_mode(s, d, fd);
732 }
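/*
 * Illustrative sketch (not part of the driver): a minimal single-frame
 * enqueue to a frame queue. 'swp', 'fqid' and 'fd' are assumed to come from
 * the caller's portal and queue setup.
 *
 *   struct qbman_eq_desc d;
 *
 *   qbman_eq_desc_clear(&d);
 *   qbman_eq_desc_set_no_orp(&d, 0);
 *   qbman_eq_desc_set_fq(&d, fqid);
 *   while (qbman_swp_enqueue(swp, &d, fd) == -EBUSY)
 *           ; (busy-poll until EQCR space is available)
 */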
733
734 static int qbman_swp_enqueue_multiple_direct(struct qbman_swp *s,
735                                              const struct qbman_eq_desc *d,
736                                              const struct qbman_fd *fd,
737                                              uint32_t *flags,
738                                              int num_frames)
739 {
740         uint32_t *p = NULL;
741         const uint32_t *cl = qb_cl(d);
742         uint32_t eqcr_ci, eqcr_pi, half_mask, full_mask;
743         int i, num_enqueued = 0;
744         uint64_t addr_cena;
745
746         half_mask = (s->eqcr.pi_mask>>1);
747         full_mask = s->eqcr.pi_mask;
748         if (!s->eqcr.available) {
749                 eqcr_ci = s->eqcr.ci;
750                 s->eqcr.ci = qbman_cena_read_reg(&s->sys,
751                                 QBMAN_CENA_SWP_EQCR_CI) & full_mask;
752                 s->eqcr.available = qm_cyc_diff(s->eqcr.pi_ring_size,
753                                 eqcr_ci, s->eqcr.ci);
754                 if (!s->eqcr.available)
755                         return 0;
756         }
757
758         eqcr_pi = s->eqcr.pi;
759         num_enqueued = (s->eqcr.available < num_frames) ?
760                         s->eqcr.available : num_frames;
761         s->eqcr.available -= num_enqueued;
762         /* Fill in the EQCR ring */
763         for (i = 0; i < num_enqueued; i++) {
764                 p = qbman_cena_write_start_wo_shadow(&s->sys,
765                                 QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
766                 memcpy(&p[1], &cl[1], 28);
767                 memcpy(&p[8], &fd[i], sizeof(*fd));
768                 eqcr_pi++;
769         }
770
771         lwsync();
772
773         /* Set the verb byte, have to substitute in the valid-bit */
774         eqcr_pi = s->eqcr.pi;
775         for (i = 0; i < num_enqueued; i++) {
776                 p = qbman_cena_write_start_wo_shadow(&s->sys,
777                                 QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
778                 p[0] = cl[0] | s->eqcr.pi_vb;
779                 if (flags && (flags[i] & QBMAN_ENQUEUE_FLAG_DCA)) {
780                         struct qbman_eq_desc *d = (struct qbman_eq_desc *)p;
781
782                         d->eq.dca = (1 << QB_ENQUEUE_CMD_DCA_EN_SHIFT) |
783                                 ((flags[i]) & QBMAN_EQCR_DCA_IDXMASK);
784                 }
785                 eqcr_pi++;
786                 if (!(eqcr_pi & half_mask))
787                         s->eqcr.pi_vb ^= QB_VALID_BIT;
788         }
789
790         /* Flush all the cachelines without any load/store in between */
791         eqcr_pi = s->eqcr.pi;
792         addr_cena = (size_t)s->sys.addr_cena;
793         for (i = 0; i < num_enqueued; i++) {
794                 dcbf((uintptr_t)(addr_cena +
795                         QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask)));
796                 eqcr_pi++;
797         }
798         s->eqcr.pi = eqcr_pi & full_mask;
799
800         return num_enqueued;
801 }
802
803 static int qbman_swp_enqueue_multiple_mem_back(struct qbman_swp *s,
804                                                const struct qbman_eq_desc *d,
805                                                const struct qbman_fd *fd,
806                                                uint32_t *flags,
807                                                int num_frames)
808 {
809         uint32_t *p = NULL;
810         const uint32_t *cl = qb_cl(d);
811         uint32_t eqcr_ci, eqcr_pi, half_mask, full_mask;
812         int i, num_enqueued = 0;
813
814         half_mask = (s->eqcr.pi_mask>>1);
815         full_mask = s->eqcr.pi_mask;
816         if (!s->eqcr.available) {
817                 eqcr_ci = s->eqcr.ci;
818                 s->eqcr.ci = qbman_cena_read_reg(&s->sys,
819                                 QBMAN_CENA_SWP_EQCR_CI_MEMBACK) & full_mask;
820                 s->eqcr.available = qm_cyc_diff(s->eqcr.pi_ring_size,
821                                         eqcr_ci, s->eqcr.ci);
822                 if (!s->eqcr.available)
823                         return 0;
824         }
825
826         eqcr_pi = s->eqcr.pi;
827         num_enqueued = (s->eqcr.available < num_frames) ?
828                         s->eqcr.available : num_frames;
829         s->eqcr.available -= num_enqueued;
830         /* Fill in the EQCR ring */
831         for (i = 0; i < num_enqueued; i++) {
832                 p = qbman_cena_write_start_wo_shadow(&s->sys,
833                                 QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
834                 memcpy(&p[1], &cl[1], 28);
835                 memcpy(&p[8], &fd[i], sizeof(*fd));
836                 eqcr_pi++;
837         }
838
839         /* Set the verb byte, have to substitute in the valid-bit */
840         eqcr_pi = s->eqcr.pi;
841         for (i = 0; i < num_enqueued; i++) {
842                 p = qbman_cena_write_start_wo_shadow(&s->sys,
843                                 QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
844                 p[0] = cl[0] | s->eqcr.pi_vb;
845                 if (flags && (flags[i] & QBMAN_ENQUEUE_FLAG_DCA)) {
846                         struct qbman_eq_desc *d = (struct qbman_eq_desc *)p;
847
848                         d->eq.dca = (1 << QB_ENQUEUE_CMD_DCA_EN_SHIFT) |
849                                 ((flags[i]) & QBMAN_EQCR_DCA_IDXMASK);
850                 }
851                 eqcr_pi++;
852                 if (!(eqcr_pi & half_mask))
853                         s->eqcr.pi_vb ^= QB_VALID_BIT;
854         }
855         s->eqcr.pi = eqcr_pi & full_mask;
856
857         dma_wmb();
858         qbman_cinh_write(&s->sys, QBMAN_CINH_SWP_EQCR_PI,
859                                 (QB_RT_BIT)|(s->eqcr.pi)|s->eqcr.pi_vb);
860         return num_enqueued;
861 }
862
863 inline int qbman_swp_enqueue_multiple(struct qbman_swp *s,
864                                       const struct qbman_eq_desc *d,
865                                       const struct qbman_fd *fd,
866                                       uint32_t *flags,
867                                       int num_frames)
868 {
869         return qbman_swp_enqueue_multiple_ptr(s, d, fd, flags, num_frames);
870 }
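/*
 * Note on usage (illustrative, not part of the driver):
 * qbman_swp_enqueue_multiple() may enqueue fewer frames than requested when
 * EQCR space runs out; it returns the count actually enqueued (0 is not an
 * error). A caller typically loops over the remainder, e.g.:
 *
 *   int sent = 0;
 *
 *   while (sent < n)
 *           sent += qbman_swp_enqueue_multiple(swp, &d, &fd[sent],
 *                                              NULL, n - sent);
 */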
871
872 static int qbman_swp_enqueue_multiple_desc_direct(struct qbman_swp *s,
873                                         const struct qbman_eq_desc *d,
874                                         const struct qbman_fd *fd,
875                                         int num_frames)
876 {
877         uint32_t *p;
878         const uint32_t *cl;
879         uint32_t eqcr_ci, eqcr_pi, half_mask, full_mask;
880         int i, num_enqueued = 0;
881         uint64_t addr_cena;
882
883         half_mask = (s->eqcr.pi_mask>>1);
884         full_mask = s->eqcr.pi_mask;
885         if (!s->eqcr.available) {
886                 eqcr_ci = s->eqcr.ci;
887                 s->eqcr.ci = qbman_cena_read_reg(&s->sys,
888                                 QBMAN_CENA_SWP_EQCR_CI) & full_mask;
889                 s->eqcr.available = qm_cyc_diff(s->eqcr.pi_ring_size,
890                                         eqcr_ci, s->eqcr.ci);
891                 if (!s->eqcr.available)
892                         return 0;
893         }
894
895         eqcr_pi = s->eqcr.pi;
896         num_enqueued = (s->eqcr.available < num_frames) ?
897                         s->eqcr.available : num_frames;
898         s->eqcr.available -= num_enqueued;
899         /* Fill in the EQCR ring */
900         for (i = 0; i < num_enqueued; i++) {
901                 p = qbman_cena_write_start_wo_shadow(&s->sys,
902                                 QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
903                 cl = qb_cl(&d[i]);
904                 memcpy(&p[1], &cl[1], 28);
905                 memcpy(&p[8], &fd[i], sizeof(*fd));
906                 eqcr_pi++;
907         }
908
909         lwsync();
910
911         /* Set the verb byte, have to substitute in the valid-bit */
912         eqcr_pi = s->eqcr.pi;
913         for (i = 0; i < num_enqueued; i++) {
914                 p = qbman_cena_write_start_wo_shadow(&s->sys,
915                                 QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
916                 cl = qb_cl(&d[i]);
917                 p[0] = cl[0] | s->eqcr.pi_vb;
918                 eqcr_pi++;
919                 if (!(eqcr_pi & half_mask))
920                         s->eqcr.pi_vb ^= QB_VALID_BIT;
921         }
922
923         /* Flush all the cachelines without any load/store in between */
924         eqcr_pi = s->eqcr.pi;
925         addr_cena = (size_t)s->sys.addr_cena;
926         for (i = 0; i < num_enqueued; i++) {
927                 dcbf((uintptr_t)(addr_cena +
928                         QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask)));
929                 eqcr_pi++;
930         }
931         s->eqcr.pi = eqcr_pi & full_mask;
932
933         return num_enqueued;
934 }
935
936 static int qbman_swp_enqueue_multiple_desc_mem_back(struct qbman_swp *s,
937                                         const struct qbman_eq_desc *d,
938                                         const struct qbman_fd *fd,
939                                         int num_frames)
940 {
941         uint32_t *p;
942         const uint32_t *cl;
943         uint32_t eqcr_ci, eqcr_pi, half_mask, full_mask;
944         int i, num_enqueued = 0;
945
946         half_mask = (s->eqcr.pi_mask>>1);
947         full_mask = s->eqcr.pi_mask;
948         if (!s->eqcr.available) {
949                 eqcr_ci = s->eqcr.ci;
950                 s->eqcr.ci = qbman_cena_read_reg(&s->sys,
951                                 QBMAN_CENA_SWP_EQCR_CI_MEMBACK) & full_mask;
952                 s->eqcr.available = qm_cyc_diff(s->eqcr.pi_ring_size,
953                                         eqcr_ci, s->eqcr.ci);
954                 if (!s->eqcr.available)
955                         return 0;
956         }
957
958         eqcr_pi = s->eqcr.pi;
959         num_enqueued = (s->eqcr.available < num_frames) ?
960                         s->eqcr.available : num_frames;
961         s->eqcr.available -= num_enqueued;
962         /* Fill in the EQCR ring */
963         for (i = 0; i < num_enqueued; i++) {
964                 p = qbman_cena_write_start_wo_shadow(&s->sys,
965                                 QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
966                 cl = qb_cl(&d[i]);
967                 memcpy(&p[1], &cl[1], 28);
968                 memcpy(&p[8], &fd[i], sizeof(*fd));
969                 eqcr_pi++;
970         }
971
972         /* Set the verb byte, have to substitute in the valid-bit */
973         eqcr_pi = s->eqcr.pi;
974         for (i = 0; i < num_enqueued; i++) {
975                 p = qbman_cena_write_start_wo_shadow(&s->sys,
976                                 QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
977                 cl = qb_cl(&d[i]);
978                 p[0] = cl[0] | s->eqcr.pi_vb;
979                 eqcr_pi++;
980                 if (!(eqcr_pi & half_mask))
981                         s->eqcr.pi_vb ^= QB_VALID_BIT;
982         }
983
984         s->eqcr.pi = eqcr_pi & full_mask;
985
986         dma_wmb();
987         qbman_cinh_write(&s->sys, QBMAN_CINH_SWP_EQCR_PI,
988                                 (QB_RT_BIT)|(s->eqcr.pi)|s->eqcr.pi_vb);
989
990         return num_enqueued;
991 }
992 inline int qbman_swp_enqueue_multiple_desc(struct qbman_swp *s,
993                                            const struct qbman_eq_desc *d,
994                                            const struct qbman_fd *fd,
995                                            int num_frames)
996 {
997         return qbman_swp_enqueue_multiple_desc_ptr(s, d, fd, num_frames);
998 }
999
1000 /*************************/
1001 /* Static (push) dequeue */
1002 /*************************/
1003
1004 void qbman_swp_push_get(struct qbman_swp *s, uint8_t channel_idx, int *enabled)
1005 {
1006         uint16_t src = (s->sdq >> QB_SDQCR_SRC_SHIFT) & QB_SDQCR_SRC_MASK;
1007
1008         QBMAN_BUG_ON(channel_idx > 15);
1009         *enabled = src & (1 << channel_idx);
1010 }
1011
1012 void qbman_swp_push_set(struct qbman_swp *s, uint8_t channel_idx, int enable)
1013 {
1014         uint16_t dqsrc;
1015
1016         QBMAN_BUG_ON(channel_idx > 15);
1017         if (enable)
1018                 s->sdq |= 1 << channel_idx;
1019         else
1020                 s->sdq &= ~(1 << channel_idx);
1021
1022         /* Remake the complete src map.  If no channels are enabled,
1023          * the SDQCR must be 0 or else QMan will assert errors
1024          */
1025         dqsrc = (s->sdq >> QB_SDQCR_SRC_SHIFT) & QB_SDQCR_SRC_MASK;
1026         if (dqsrc != 0)
1027                 qbman_cinh_write(&s->sys, QBMAN_CINH_SWP_SDQCR, s->sdq);
1028         else
1029                 qbman_cinh_write(&s->sys, QBMAN_CINH_SWP_SDQCR, 0);
1030 }
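/*
 * Illustrative sketch (not part of the driver): enabling static dequeue on
 * channel 0 of a portal, then disabling it again. 'swp' is assumed.
 *
 *   qbman_swp_push_set(swp, 0, 1);
 *   ...dequeue via the DQRR poll loop below...
 *   qbman_swp_push_set(swp, 0, 0);
 */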
1031
1032 /***************************/
1033 /* Volatile (pull) dequeue */
1034 /***************************/
1035
1036 /* These should be const, eventually */
1037 #define QB_VDQCR_VERB_DCT_SHIFT    0
1038 #define QB_VDQCR_VERB_DT_SHIFT     2
1039 #define QB_VDQCR_VERB_RLS_SHIFT    4
1040 #define QB_VDQCR_VERB_WAE_SHIFT    5
1041 #define QB_VDQCR_VERB_RAD_SHIFT    6
1042
1043 enum qb_pull_dt_e {
1044         qb_pull_dt_channel,
1045         qb_pull_dt_workqueue,
1046         qb_pull_dt_framequeue
1047 };
1048
1049 void qbman_pull_desc_clear(struct qbman_pull_desc *d)
1050 {
1051         memset(d, 0, sizeof(*d));
1052 }
1053
1054 void qbman_pull_desc_set_storage(struct qbman_pull_desc *d,
1055                                  struct qbman_result *storage,
1056                                  dma_addr_t storage_phys,
1057                                  int stash)
1058 {
1059         d->pull.rsp_addr_virt = (size_t)storage;
1060
1061         if (!storage) {
1062                 d->pull.verb &= ~(1 << QB_VDQCR_VERB_RLS_SHIFT);
1063                 return;
1064         }
1065         d->pull.verb |= 1 << QB_VDQCR_VERB_RLS_SHIFT;
1066         if (stash)
1067                 d->pull.verb |= 1 << QB_VDQCR_VERB_WAE_SHIFT;
1068         else
1069                 d->pull.verb &= ~(1 << QB_VDQCR_VERB_WAE_SHIFT);
1070
1071         d->pull.rsp_addr = storage_phys;
1072 }
1073
1074 void qbman_pull_desc_set_numframes(struct qbman_pull_desc *d,
1075                                    uint8_t numframes)
1076 {
1077         d->pull.numf = numframes - 1;
1078 }
1079
1080 void qbman_pull_desc_set_token(struct qbman_pull_desc *d, uint8_t token)
1081 {
1082         d->pull.tok = token;
1083 }
1084
1085 void qbman_pull_desc_set_fq(struct qbman_pull_desc *d, uint32_t fqid)
1086 {
1087         d->pull.verb |= 1 << QB_VDQCR_VERB_DCT_SHIFT;
1088         d->pull.verb |= qb_pull_dt_framequeue << QB_VDQCR_VERB_DT_SHIFT;
1089         d->pull.dq_src = fqid;
1090 }
1091
1092 void qbman_pull_desc_set_wq(struct qbman_pull_desc *d, uint32_t wqid,
1093                             enum qbman_pull_type_e dct)
1094 {
1095         d->pull.verb |= dct << QB_VDQCR_VERB_DCT_SHIFT;
1096         d->pull.verb |= qb_pull_dt_workqueue << QB_VDQCR_VERB_DT_SHIFT;
1097         d->pull.dq_src = wqid;
1098 }
1099
1100 void qbman_pull_desc_set_channel(struct qbman_pull_desc *d, uint32_t chid,
1101                                  enum qbman_pull_type_e dct)
1102 {
1103         d->pull.verb |= dct << QB_VDQCR_VERB_DCT_SHIFT;
1104         d->pull.verb |= qb_pull_dt_channel << QB_VDQCR_VERB_DT_SHIFT;
1105         d->pull.dq_src = chid;
1106 }
1107
1108 void qbman_pull_desc_set_rad(struct qbman_pull_desc *d, int rad)
1109 {
1110         if (d->pull.verb & (1 << QB_VDQCR_VERB_RLS_SHIFT)) {
1111                 if (rad)
1112                         d->pull.verb |= 1 << QB_VDQCR_VERB_RAD_SHIFT;
1113                 else
1114                         d->pull.verb &= ~(1 << QB_VDQCR_VERB_RAD_SHIFT);
1115         } else {
1116                 pr_err("The RAD feature is not valid when RLS = 0\n");
1117         }
1118 }
1119
1120 static int qbman_swp_pull_direct(struct qbman_swp *s,
1121                                  struct qbman_pull_desc *d)
1122 {
1123         uint32_t *p;
1124         uint32_t *cl = qb_cl(d);
1125
1126         if (!atomic_dec_and_test(&s->vdq.busy)) {
1127                 atomic_inc(&s->vdq.busy);
1128                 return -EBUSY;
1129         }
1130
1131         d->pull.tok = s->sys.idx + 1;
1132         s->vdq.storage = (void *)(size_t)d->pull.rsp_addr_virt;
1133         p = qbman_cena_write_start_wo_shadow(&s->sys, QBMAN_CENA_SWP_VDQCR);
1134         memcpy(&p[1], &cl[1], 12);
1135
1136         /* Set the verb byte, have to substitute in the valid-bit */
1137         lwsync();
1138         p[0] = cl[0] | s->vdq.valid_bit;
1139         s->vdq.valid_bit ^= QB_VALID_BIT;
1140         qbman_cena_write_complete_wo_shadow(&s->sys, QBMAN_CENA_SWP_VDQCR);
1141
1142         return 0;
1143 }
1144
1145 static int qbman_swp_pull_mem_back(struct qbman_swp *s,
1146                                    struct qbman_pull_desc *d)
1147 {
1148         uint32_t *p;
1149         uint32_t *cl = qb_cl(d);
1150
1151         if (!atomic_dec_and_test(&s->vdq.busy)) {
1152                 atomic_inc(&s->vdq.busy);
1153                 return -EBUSY;
1154         }
1155
1156         d->pull.tok = s->sys.idx + 1;
1157         s->vdq.storage = (void *)(size_t)d->pull.rsp_addr_virt;
1158         p = qbman_cena_write_start_wo_shadow(&s->sys, QBMAN_CENA_SWP_VDQCR_MEM);
1159         memcpy(&p[1], &cl[1], 12);
1160
1161         /* Set the verb byte, have to substitute in the valid-bit */
1162         p[0] = cl[0] | s->vdq.valid_bit;
1163         s->vdq.valid_bit ^= QB_VALID_BIT;
1164         dma_wmb();
1165         qbman_cinh_write(&s->sys, QBMAN_CINH_SWP_VDQCR_RT, QMAN_RT_MODE);
1166
1167         return 0;
1168 }
1169
1170 inline int qbman_swp_pull(struct qbman_swp *s, struct qbman_pull_desc *d)
1171 {
1172         return qbman_swp_pull_ptr(s, d);
1173 }
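/*
 * Illustrative sketch (not part of the driver): a volatile dequeue of up to
 * 16 frames from one FQ into caller-provided storage. 'swp', 'fqid',
 * 'storage' and 'storage_phys' are assumptions for the example.
 *
 *   struct qbman_pull_desc pd;
 *
 *   qbman_pull_desc_clear(&pd);
 *   qbman_pull_desc_set_storage(&pd, storage, storage_phys, 1);
 *   qbman_pull_desc_set_numframes(&pd, 16);
 *   qbman_pull_desc_set_fq(&pd, fqid);
 *   while (qbman_swp_pull(swp, &pd) == -EBUSY)
 *           ; (a previous volatile dequeue is still in flight)
 *   while (!qbman_check_command_complete(storage))
 *           ; (wait for the first response to land in storage)
 */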
1174
1175 /****************/
1176 /* Polling DQRR */
1177 /****************/
1178
1179 #define QMAN_DQRR_PI_MASK              0xf
1180
1181 #define QBMAN_RESULT_DQ        0x60
1182 #define QBMAN_RESULT_FQRN      0x21
1183 #define QBMAN_RESULT_FQRNI     0x22
1184 #define QBMAN_RESULT_FQPN      0x24
1185 #define QBMAN_RESULT_FQDAN     0x25
1186 #define QBMAN_RESULT_CDAN      0x26
1187 #define QBMAN_RESULT_CSCN_MEM  0x27
1188 #define QBMAN_RESULT_CGCU      0x28
1189 #define QBMAN_RESULT_BPSCN     0x29
1190 #define QBMAN_RESULT_CSCN_WQ   0x2a
1191
1192 #include <rte_prefetch.h>
1193
1194 void qbman_swp_prefetch_dqrr_next(struct qbman_swp *s)
1195 {
1196         const struct qbman_result *p;
1197
1198         p = qbman_cena_read_wo_shadow(&s->sys,
1199                 QBMAN_CENA_SWP_DQRR(s->dqrr.next_idx));
1200         rte_prefetch0(p);
1201 }
1202
1203 /* Returns NULL if there are no unconsumed DQRR entries. Returns a DQRR entry
1204  * only once, so repeated calls can return a sequence of DQRR entries, without
1205  * requiring that they be consumed immediately or in any particular order.
1206  */
1207 inline const struct qbman_result *qbman_swp_dqrr_next(struct qbman_swp *s)
1208 {
1209         return qbman_swp_dqrr_next_ptr(s);
1210 }
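/*
 * Illustrative sketch (not part of the driver): a typical DQRR poll loop.
 * Each entry is returned exactly once and must eventually be consumed.
 * 'swp' and process_fd() are assumptions for the example.
 *
 *   const struct qbman_result *dq;
 *
 *   while ((dq = qbman_swp_dqrr_next(swp)) != NULL) {
 *           if (qbman_result_is_DQ(dq))
 *                   process_fd(qbman_result_DQ_fd(dq));
 *           qbman_swp_dqrr_consume(swp, dq);
 *   }
 */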
1211
1212 const struct qbman_result *qbman_swp_dqrr_next_direct(struct qbman_swp *s)
1213 {
1214         uint32_t verb;
1215         uint32_t response_verb;
1216         uint32_t flags;
1217         const struct qbman_result *p;
1218
1219         /* Before using valid-bit to detect if something is there, we have to
1220          * handle the case of the DQRR reset bug...
1221          */
1222         if (s->dqrr.reset_bug) {
1223                 /* We pick up new entries by cache-inhibited producer index,
1224                  * which means that a non-coherent mapping would require us to
1225                  * invalidate and read *only* once that PI has indicated that
1226                  * there's an entry here. The first trip around the DQRR ring
1227                  * will be much less efficient than all subsequent trips around
1228                  * it...
1229                  */
1230                 uint8_t pi = qbman_cinh_read(&s->sys, QBMAN_CINH_SWP_DQPI) &
1231                              QMAN_DQRR_PI_MASK;
1232
1233                 /* there are new entries if pi != next_idx */
1234                 if (pi == s->dqrr.next_idx)
1235                         return NULL;
1236
1237                 /* if next_idx is/was the last ring index, and 'pi' is
1238                  * different, we can disable the workaround as all the ring
1239                  * entries have now been DMA'd to so valid-bit checking is
1240                  * repaired. Note: this logic needs to be based on next_idx
1241                  * (which increments one at a time), rather than on pi (which
1242                  * can burst and wrap-around between our snapshots of it).
1243                  */
1244                 QBMAN_BUG_ON((s->dqrr.dqrr_size - 1) < 0);
1245                 if (s->dqrr.next_idx == (s->dqrr.dqrr_size - 1u)) {
1246                         pr_debug("DEBUG: next_idx=%d, pi=%d, clear reset bug\n",
1247                                  s->dqrr.next_idx, pi);
1248                         s->dqrr.reset_bug = 0;
1249                 }
1250                 qbman_cena_invalidate_prefetch(&s->sys,
1251                                         QBMAN_CENA_SWP_DQRR(s->dqrr.next_idx));
1252         }
1253         p = qbman_cena_read_wo_shadow(&s->sys,
1254                         QBMAN_CENA_SWP_DQRR(s->dqrr.next_idx));
1255
1256         verb = p->dq.verb;
1257
1258         /* If the valid-bit isn't of the expected polarity, nothing is there.
1259          * Note, in the DQRR reset bug workaround, we shouldn't need to skip
1260          * this check, because we've already determined that a new entry is
1261          * available and we've invalidated the cacheline before reading it, so
1262          * the valid-bit behaviour is repaired and should tell us what we
1263          * already knew from reading PI.
1264          */
1265         if ((verb & QB_VALID_BIT) != s->dqrr.valid_bit)
1266                 return NULL;
1267
1268         /* There's something there. Advance "next_idx" to the next ring
1269          * entry before returning what we found.
1270          */
1271         s->dqrr.next_idx++;
1272         if (s->dqrr.next_idx == s->dqrr.dqrr_size) {
1273                 s->dqrr.next_idx = 0;
1274                 s->dqrr.valid_bit ^= QB_VALID_BIT;
1275         }
1276         /* If this is the final response to a volatile dequeue command,
1277          * indicate that the vdq is no longer busy
1278          */
1279         flags = p->dq.stat;
1280         response_verb = verb & QBMAN_RESPONSE_VERB_MASK;
1281         if ((response_verb == QBMAN_RESULT_DQ) &&
1282             (flags & QBMAN_DQ_STAT_VOLATILE) &&
1283             (flags & QBMAN_DQ_STAT_EXPIRED))
1284                 atomic_inc(&s->vdq.busy);
1285
1286         return p;
1287 }
1288
1289 const struct qbman_result *qbman_swp_dqrr_next_mem_back(struct qbman_swp *s)
1290 {
1291         uint32_t verb;
1292         uint32_t response_verb;
1293         uint32_t flags;
1294         const struct qbman_result *p;
1295
1296         p = qbman_cena_read_wo_shadow(&s->sys,
1297                         QBMAN_CENA_SWP_DQRR_MEM(s->dqrr.next_idx));
1298
1299         verb = p->dq.verb;
1300
1301         /* If the valid-bit isn't of the expected polarity, nothing is there.
1302          * Note, the DQRR reset bug workaround is not needed here: mem-back
1303          * portals only exist on QMan 5.0 and later, where DQRR valid-bits
1304          * are reset correctly, so the valid-bit alone reliably indicates a
1305          * new entry and no PI read or cacheline invalidation is required
1306          * before this check.
1307          */
1308         if ((verb & QB_VALID_BIT) != s->dqrr.valid_bit)
1309                 return NULL;
1310
1311         /* There's something there. Advance "next_idx" to the next ring
1312          * entry before returning what we found.
1313          */
1314         s->dqrr.next_idx++;
1315         if (s->dqrr.next_idx == s->dqrr.dqrr_size) {
1316                 s->dqrr.next_idx = 0;
1317                 s->dqrr.valid_bit ^= QB_VALID_BIT;
1318         }
1319         /* If this is the final response to a volatile dequeue command,
1320          * indicate that the vdq is no longer busy
1321          */
1322         flags = p->dq.stat;
1323         response_verb = verb & QBMAN_RESPONSE_VERB_MASK;
1324         if ((response_verb == QBMAN_RESULT_DQ)
1325                         && (flags & QBMAN_DQ_STAT_VOLATILE)
1326                         && (flags & QBMAN_DQ_STAT_EXPIRED))
1327                 atomic_inc(&s->vdq.busy);
1328         return p;
1329 }
1330
1331 /* Consume DQRR entries previously returned from qbman_swp_dqrr_next(). */
1332 void qbman_swp_dqrr_consume(struct qbman_swp *s,
1333                             const struct qbman_result *dq)
1334 {
1335         qbman_cinh_write(&s->sys,
1336                         QBMAN_CINH_SWP_DCAP, QBMAN_IDX_FROM_DQRR(dq));
1337 }
1338
1339 /* Consume DQRR entries previously returned from qbman_swp_dqrr_next(). */
1340 void qbman_swp_dqrr_idx_consume(struct qbman_swp *s,
1341                             uint8_t dqrr_index)
1342 {
1343         qbman_cinh_write(&s->sys, QBMAN_CINH_SWP_DCAP, dqrr_index);
1344 }
1345
1346 /*********************************/
1347 /* Polling user-provided storage */
1348 /*********************************/
1349
1350 int qbman_result_has_new_result(struct qbman_swp *s,
1351                                 struct qbman_result *dq)
1352 {
1353         if (dq->dq.tok == 0)
1354                 return 0;
1355
1356         /*
1357          * Set token to be 0 so we will detect change back to 1
1358          * next time the looping is traversed. Const is cast away here
1359          * as we want users to treat the dequeue responses as read only.
1360          */
1361         ((struct qbman_result *)dq)->dq.tok = 0;
1362
1363         /*
1364          * VDQCR "no longer busy" hook - not quite the same as DQRR, because
1365          * the fact "VDQCR" shows busy doesn't mean that we hold the result
1366          * that makes it available. Eg. we may be looking at our 10th dequeue
1367          * result, having released VDQCR after the 1st result and it is now
1368          * busy due to some other command!
1369          */
1370         if (s->vdq.storage == dq) {
1371                 s->vdq.storage = NULL;
1372                 atomic_inc(&s->vdq.busy);
1373         }
1374
1375         return 1;
1376 }
1377
1378 int qbman_check_new_result(struct qbman_result *dq)
1379 {
1380         if (dq->dq.tok == 0)
1381                 return 0;
1382
1383         /*
1384          * Set token to be 0 so we will detect change back to 1
1385          * next time the looping is traversed. Const is cast away here
1386          * as we want users to treat the dequeue responses as read only.
1387          */
1388         ((struct qbman_result *)dq)->dq.tok = 0;
1389
1390         return 1;
1391 }
1392
1393 int qbman_check_command_complete(struct qbman_result *dq)
1394 {
1395         struct qbman_swp *s;
1396
1397         if (dq->dq.tok == 0)
1398                 return 0;
1399
1400         s = portal_idx_map[dq->dq.tok - 1];
1401         /*
1402          * VDQCR "no longer busy" hook - not quite the same as DQRR, because
1403          * the fact "VDQCR" shows busy doesn't mean that we hold the result
1404          * that makes it available. Eg. we may be looking at our 10th dequeue
1405          * result, having released VDQCR after the 1st result and it is now
1406          * busy due to some other command!
1407          */
1408         if (s->vdq.storage == dq) {
1409                 s->vdq.storage = NULL;
1410                 atomic_inc(&s->vdq.busy);
1411         }
1412
1413         return 1;
1414 }
1415
1416 /********************************/
1417 /* Categorising qbman results   */
1418 /********************************/
1419
1420 static inline int __qbman_result_is_x(const struct qbman_result *dq,
1421                                       uint8_t x)
1422 {
1423         uint8_t response_verb = dq->dq.verb & QBMAN_RESPONSE_VERB_MASK;
1424
1425         return (response_verb == x);
1426 }
1427
1428 int qbman_result_is_DQ(const struct qbman_result *dq)
1429 {
1430         return __qbman_result_is_x(dq, QBMAN_RESULT_DQ);
1431 }
1432
1433 int qbman_result_is_FQDAN(const struct qbman_result *dq)
1434 {
1435         return __qbman_result_is_x(dq, QBMAN_RESULT_FQDAN);
1436 }
1437
1438 int qbman_result_is_CDAN(const struct qbman_result *dq)
1439 {
1440         return __qbman_result_is_x(dq, QBMAN_RESULT_CDAN);
1441 }
1442
1443 int qbman_result_is_CSCN(const struct qbman_result *dq)
1444 {
1445         return __qbman_result_is_x(dq, QBMAN_RESULT_CSCN_MEM) ||
1446                 __qbman_result_is_x(dq, QBMAN_RESULT_CSCN_WQ);
1447 }
1448
1449 int qbman_result_is_BPSCN(const struct qbman_result *dq)
1450 {
1451         return __qbman_result_is_x(dq, QBMAN_RESULT_BPSCN);
1452 }
1453
1454 int qbman_result_is_CGCU(const struct qbman_result *dq)
1455 {
1456         return __qbman_result_is_x(dq, QBMAN_RESULT_CGCU);
1457 }
1458
1459 int qbman_result_is_FQRN(const struct qbman_result *dq)
1460 {
1461         return __qbman_result_is_x(dq, QBMAN_RESULT_FQRN);
1462 }
1463
1464 int qbman_result_is_FQRNI(const struct qbman_result *dq)
1465 {
1466         return __qbman_result_is_x(dq, QBMAN_RESULT_FQRNI);
1467 }
1468
1469 int qbman_result_is_FQPN(const struct qbman_result *dq)
1470 {
1471         return __qbman_result_is_x(dq, QBMAN_RESULT_FQPN);
1472 }
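/*
 * Illustrative dispatch on the result type, not part of the driver
 * (the handler names are hypothetical):
 *
 *	if (qbman_result_is_DQ(dq))
 *		handle_frame_dequeue(dq);
 *	else if (qbman_result_is_FQDAN(dq) || qbman_result_is_CDAN(dq))
 *		handle_availability_notification(dq);
 *	else if (qbman_result_is_BPSCN(dq))
 *		handle_pool_state_change(dq);
 */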
1473
1474 /*********************************/
1475 /* Parsing frame dequeue results */
1476 /*********************************/
1477
1478 /* These APIs assume qbman_result_is_DQ() is TRUE */
1479
1480 uint8_t qbman_result_DQ_flags(const struct qbman_result *dq)
1481 {
1482         return dq->dq.stat;
1483 }
1484
1485 uint16_t qbman_result_DQ_seqnum(const struct qbman_result *dq)
1486 {
1487         return dq->dq.seqnum;
1488 }
1489
1490 uint16_t qbman_result_DQ_odpid(const struct qbman_result *dq)
1491 {
1492         return dq->dq.oprid;
1493 }
1494
1495 uint32_t qbman_result_DQ_fqid(const struct qbman_result *dq)
1496 {
1497         return dq->dq.fqid;
1498 }
1499
1500 uint32_t qbman_result_DQ_byte_count(const struct qbman_result *dq)
1501 {
1502         return dq->dq.fq_byte_cnt;
1503 }
1504
1505 uint32_t qbman_result_DQ_frame_count(const struct qbman_result *dq)
1506 {
1507         return dq->dq.fq_frm_cnt;
1508 }
1509
1510 uint64_t qbman_result_DQ_fqd_ctx(const struct qbman_result *dq)
1511 {
1512         return dq->dq.fqd_ctx;
1513 }
1514
1515 const struct qbman_fd *qbman_result_DQ_fd(const struct qbman_result *dq)
1516 {
1517         return (const struct qbman_fd *)&dq->dq.fd[0];
1518 }
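/*
 * Typical use of the accessors above (sketch only, assuming the
 * QBMAN_DQ_STAT_VALIDFRAME flag from the public header): check that the
 * entry carries a valid frame before touching the frame descriptor.
 *
 *	if (qbman_result_is_DQ(dq) &&
 *	    (qbman_result_DQ_flags(dq) & QBMAN_DQ_STAT_VALIDFRAME)) {
 *		const struct qbman_fd *fd = qbman_result_DQ_fd(dq);
 *		uint32_t fqid = qbman_result_DQ_fqid(dq);
 *		... hand "fd" to the frame-processing path ...
 *	}
 */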
1519
1520 /**************************************/
1521 /* Parsing state-change notifications */
1522 /**************************************/
1523 uint8_t qbman_result_SCN_state(const struct qbman_result *scn)
1524 {
1525         return scn->scn.state;
1526 }
1527
1528 uint32_t qbman_result_SCN_rid(const struct qbman_result *scn)
1529 {
1530         return scn->scn.rid_tok;
1531 }
1532
1533 uint64_t qbman_result_SCN_ctx(const struct qbman_result *scn)
1534 {
1535         return scn->scn.ctx;
1536 }
1537
1538 /*****************/
1539 /* Parsing BPSCN */
1540 /*****************/
1541 uint16_t qbman_result_bpscn_bpid(const struct qbman_result *scn)
1542 {
1543         return (uint16_t)qbman_result_SCN_rid(scn) & 0x3FFF;
1544 }
1545
1546 int qbman_result_bpscn_has_free_bufs(const struct qbman_result *scn)
1547 {
1548         return !(int)(qbman_result_SCN_state(scn) & 0x1);
1549 }
1550
1551 int qbman_result_bpscn_is_depleted(const struct qbman_result *scn)
1552 {
1553         return (int)(qbman_result_SCN_state(scn) & 0x2);
1554 }
1555
1556 int qbman_result_bpscn_is_surplus(const struct qbman_result *scn)
1557 {
1558         return (int)(qbman_result_SCN_state(scn) & 0x4);
1559 }
1560
1561 uint64_t qbman_result_bpscn_ctx(const struct qbman_result *scn)
1562 {
1563         return qbman_result_SCN_ctx(scn);
1564 }
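/*
 * Decoding a buffer pool state-change notification (sketch only; "scn"
 * is assumed to have already passed qbman_result_is_BPSCN()):
 *
 *	uint16_t bpid = qbman_result_bpscn_bpid(scn);
 *
 *	if (qbman_result_bpscn_is_depleted(scn))
 *		... pool "bpid" fell below its depletion threshold ...
 *	else if (qbman_result_bpscn_has_free_bufs(scn))
 *		... pool "bpid" has free buffers again ...
 */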
1565
1566 /*****************/
1567 /* Parsing CGCU  */
1568 /*****************/
1569 uint16_t qbman_result_cgcu_cgid(const struct qbman_result *scn)
1570 {
1571         return (uint16_t)qbman_result_SCN_rid(scn) & 0xFFFF;
1572 }
1573
1574 uint64_t qbman_result_cgcu_icnt(const struct qbman_result *scn)
1575 {
1576         return qbman_result_SCN_ctx(scn);
1577 }
1578
1579 /******************/
1580 /* Buffer release */
1581 /******************/
1582 #define QB_BR_RC_VALID_SHIFT  5
1583 #define QB_BR_RCDI_SHIFT      6
1584
1585 void qbman_release_desc_clear(struct qbman_release_desc *d)
1586 {
1587         memset(d, 0, sizeof(*d));
1588         d->br.verb = 1 << QB_BR_RC_VALID_SHIFT;
1589 }
1590
1591 void qbman_release_desc_set_bpid(struct qbman_release_desc *d, uint16_t bpid)
1592 {
1593         d->br.bpid = bpid;
1594 }
1595
1596 void qbman_release_desc_set_rcdi(struct qbman_release_desc *d, int enable)
1597 {
1598         if (enable)
1599                 d->br.verb |= 1 << QB_BR_RCDI_SHIFT;
1600         else
1601                 d->br.verb &= ~(1 << QB_BR_RCDI_SHIFT);
1602 }
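/*
 * Building a release descriptor (sketch only; "bpid" is assumed to be a
 * buffer pool ID owned by the caller):
 *
 *	struct qbman_release_desc d;
 *
 *	qbman_release_desc_clear(&d);		... mandatory first step ...
 *	qbman_release_desc_set_bpid(&d, bpid);
 *	... RCDI is left disabled by default ...
 */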
1603
1604 #define RAR_IDX(rar)     ((rar) & 0x7)
1605 #define RAR_VB(rar)      ((rar) & 0x80)
1606 #define RAR_SUCCESS(rar) ((rar) & 0x100)
1607
1608 static int qbman_swp_release_direct(struct qbman_swp *s,
1609                                     const struct qbman_release_desc *d,
1610                                     const uint64_t *buffers,
1611                                     unsigned int num_buffers)
1612 {
1613         uint32_t *p;
1614         const uint32_t *cl = qb_cl(d);
1615         uint32_t rar = qbman_cinh_read(&s->sys, QBMAN_CINH_SWP_RAR);
1616
1617         pr_debug("RAR=%08x\n", rar);
1618         if (!RAR_SUCCESS(rar))
1619                 return -EBUSY;
1620
1621         QBMAN_BUG_ON(!num_buffers || (num_buffers > 7));
1622
1623         /* Start the release command */
1624         p = qbman_cena_write_start_wo_shadow(&s->sys,
1625                                      QBMAN_CENA_SWP_RCR(RAR_IDX(rar)));
1626
1627         /* Copy the caller's buffer pointers to the command */
1628         u64_to_le32_copy(&p[2], buffers, num_buffers);
1629
1630         /* Set the verb byte, substituting in the valid-bit and the
1631          * number of buffers.
1632          */
1633         lwsync();
1634         p[0] = cl[0] | RAR_VB(rar) | num_buffers;
1635         qbman_cena_write_complete_wo_shadow(&s->sys,
1636                                     QBMAN_CENA_SWP_RCR(RAR_IDX(rar)));
1637
1638         return 0;
1639 }
1640
1641 static int qbman_swp_release_mem_back(struct qbman_swp *s,
1642                                       const struct qbman_release_desc *d,
1643                                       const uint64_t *buffers,
1644                                       unsigned int num_buffers)
1645 {
1646         uint32_t *p;
1647         const uint32_t *cl = qb_cl(d);
1648         uint32_t rar = qbman_cinh_read(&s->sys, QBMAN_CINH_SWP_RAR);
1649
1650         pr_debug("RAR=%08x\n", rar);
1651         if (!RAR_SUCCESS(rar))
1652                 return -EBUSY;
1653
1654         QBMAN_BUG_ON(!num_buffers || (num_buffers > 7));
1655
1656         /* Start the release command */
1657         p = qbman_cena_write_start_wo_shadow(&s->sys,
1658                 QBMAN_CENA_SWP_RCR_MEM(RAR_IDX(rar)));
1659
1660         /* Copy the caller's buffer pointers to the command */
1661         u64_to_le32_copy(&p[2], buffers, num_buffers);
1662
1663         /* Set the verb byte, substituting in the valid-bit and the
1664          * number of buffers.
1665          */
1666         p[0] = cl[0] | RAR_VB(rar) | num_buffers;
1667         lwsync();
1668         qbman_cinh_write(&s->sys, QBMAN_CINH_SWP_RCR_AM_RT +
1669                 RAR_IDX(rar) * 4, QMAN_RT_MODE);
1670
1671         return 0;
1672 }
1673
1674 inline int qbman_swp_release(struct qbman_swp *s,
1675                              const struct qbman_release_desc *d,
1676                              const uint64_t *buffers,
1677                              unsigned int num_buffers)
1678 {
1679         return qbman_swp_release_ptr(s, d, buffers, num_buffers);
1680 }
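/*
 * Both backends return -EBUSY when no release-command slot is free, so
 * callers typically retry (sketch only; at most 7 buffers per call):
 *
 *	while (qbman_swp_release(swp, &d, bufs, num) == -EBUSY)
 *		;	... spin until an RCR slot frees up ...
 */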
1681
1682 /*******************/
1683 /* Buffer acquires */
1684 /*******************/
1685 struct qbman_acquire_desc {
1686         uint8_t verb;
1687         uint8_t reserved;
1688         uint16_t bpid;
1689         uint8_t num;
1690         uint8_t reserved2[59];
1691 };
1692
1693 struct qbman_acquire_rslt {
1694         uint8_t verb;
1695         uint8_t rslt;
1696         uint16_t reserved;
1697         uint8_t num;
1698         uint8_t reserved2[3];
1699         uint64_t buf[7];
1700 };
1701
1702 int qbman_swp_acquire(struct qbman_swp *s, uint16_t bpid, uint64_t *buffers,
1703                       unsigned int num_buffers)
1704 {
1705         struct qbman_acquire_desc *p;
1706         struct qbman_acquire_rslt *r;
1707
1708         if (!num_buffers || (num_buffers > 7))
1709                 return -EINVAL;
1710
1711         /* Start the management command */
1712         p = qbman_swp_mc_start(s);
1713
1714         if (!p)
1715                 return -EBUSY;
1716
1717         /* Encode the caller-provided attributes */
1718         p->bpid = bpid;
1719         p->num = num_buffers;
1720
1721         /* Complete the management command */
1722         r = qbman_swp_mc_complete(s, p, QBMAN_MC_ACQUIRE);
1723         if (!r) {
1724                 pr_err("qbman: acquire from BPID %d failed, no response\n",
1725                        bpid);
1726                 return -EIO;
1727         }
1728
1729         /* Decode the outcome */
1730         QBMAN_BUG_ON((r->verb & QBMAN_RESPONSE_VERB_MASK) != QBMAN_MC_ACQUIRE);
1731
1732         /* Determine success or failure */
1733         if (r->rslt != QBMAN_MC_RSLT_OK) {
1734                 pr_err("Acquire buffers from BPID 0x%x failed, code=0x%02x\n",
1735                        bpid, r->rslt);
1736                 return -EIO;
1737         }
1738
1739         QBMAN_BUG_ON(r->num > num_buffers);
1740
1741         /* Copy the acquired buffers to the caller's array */
1742         u64_from_le32_copy(buffers, &r->buf[0], r->num);
1743
1744         return (int)r->num;
1745 }
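/*
 * Draining a pool via the acquire command (sketch only): the return
 * value is the number of buffers actually granted, which may be fewer
 * than requested, so loop until nothing more is returned.
 *
 *	uint64_t bufs[7];
 *	int n;
 *
 *	do {
 *		n = qbman_swp_acquire(swp, bpid, bufs, 7);
 *		... dispose of the first "n" addresses in bufs[] ...
 *	} while (n > 0);
 */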
1746
1747 /*****************/
1748 /* FQ management */
1749 /*****************/
1750 struct qbman_alt_fq_state_desc {
1751         uint8_t verb;
1752         uint8_t reserved[3];
1753         uint32_t fqid;
1754         uint8_t reserved2[56];
1755 };
1756
1757 struct qbman_alt_fq_state_rslt {
1758         uint8_t verb;
1759         uint8_t rslt;
1760         uint8_t reserved[62];
1761 };
1762
1763 #define ALT_FQ_FQID_MASK 0x00FFFFFF
1764
1765 static int qbman_swp_alt_fq_state(struct qbman_swp *s, uint32_t fqid,
1766                                   uint8_t alt_fq_verb)
1767 {
1768         struct qbman_alt_fq_state_desc *p;
1769         struct qbman_alt_fq_state_rslt *r;
1770
1771         /* Start the management command */
1772         p = qbman_swp_mc_start(s);
1773         if (!p)
1774                 return -EBUSY;
1775
1776         p->fqid = fqid & ALT_FQ_FQID_MASK;
1777
1778         /* Complete the management command */
1779         r = qbman_swp_mc_complete(s, p, alt_fq_verb);
1780         if (!r) {
1781                 pr_err("qbman: mgmt cmd failed, no response (verb=0x%x)\n",
1782                        alt_fq_verb);
1783                 return -EIO;
1784         }
1785
1786         /* Decode the outcome */
1787         QBMAN_BUG_ON((r->verb & QBMAN_RESPONSE_VERB_MASK) != alt_fq_verb);
1788
1789         /* Determine success or failure */
1790         if (r->rslt != QBMAN_MC_RSLT_OK) {
1791                 pr_err("ALT FQID %u failed: verb = 0x%02x, code = 0x%02x\n",
1792                        fqid, alt_fq_verb, r->rslt);
1793                 return -EIO;
1794         }
1795
1796         return 0;
1797 }
1798
1799 int qbman_swp_fq_schedule(struct qbman_swp *s, uint32_t fqid)
1800 {
1801         return qbman_swp_alt_fq_state(s, fqid, QBMAN_FQ_SCHEDULE);
1802 }
1803
1804 int qbman_swp_fq_force(struct qbman_swp *s, uint32_t fqid)
1805 {
1806         return qbman_swp_alt_fq_state(s, fqid, QBMAN_FQ_FORCE);
1807 }
1808
1809 int qbman_swp_fq_xon(struct qbman_swp *s, uint32_t fqid)
1810 {
1811         return qbman_swp_alt_fq_state(s, fqid, QBMAN_FQ_XON);
1812 }
1813
1814 int qbman_swp_fq_xoff(struct qbman_swp *s, uint32_t fqid)
1815 {
1816         return qbman_swp_alt_fq_state(s, fqid, QBMAN_FQ_XOFF);
1817 }
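/*
 * The four wrappers above differ only in the management verb they pass.
 * A typical flow-control sequence (sketch only):
 *
 *	qbman_swp_fq_xoff(swp, fqid);	... stop dequeues from the FQ ...
 *	... drain or reconfigure ...
 *	qbman_swp_fq_xon(swp, fqid);	... resume ...
 */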
1818
1819 /**********************/
1820 /* Channel management */
1821 /**********************/
1822
1823 struct qbman_cdan_ctrl_desc {
1824         uint8_t verb;
1825         uint8_t reserved;
1826         uint16_t ch;
1827         uint8_t we;
1828         uint8_t ctrl;
1829         uint16_t reserved2;
1830         uint64_t cdan_ctx;
1831         uint8_t reserved3[48];
1833 };
1834
1835 struct qbman_cdan_ctrl_rslt {
1836         uint8_t verb;
1837         uint8_t rslt;
1838         uint16_t ch;
1839         uint8_t reserved[60];
1840 };
1841
1842 /* Hide "ICD" for now as we don't use it, don't set it, and don't test it, so it
1843  * would be irresponsible to expose it.
1844  */
1845 #define CODE_CDAN_WE_EN    0x1
1846 #define CODE_CDAN_WE_CTX   0x4
1847
1848 static int qbman_swp_CDAN_set(struct qbman_swp *s, uint16_t channelid,
1849                               uint8_t we_mask, uint8_t cdan_en,
1850                               uint64_t ctx)
1851 {
1852         struct qbman_cdan_ctrl_desc *p;
1853         struct qbman_cdan_ctrl_rslt *r;
1854
1855         /* Start the management command */
1856         p = qbman_swp_mc_start(s);
1857         if (!p)
1858                 return -EBUSY;
1859
1860         /* Encode the caller-provided attributes */
1861         p->ch = channelid;
1862         p->we = we_mask;
1863         if (cdan_en)
1864                 p->ctrl = 1;
1865         else
1866                 p->ctrl = 0;
1867         p->cdan_ctx = ctx;
1868
1869         /* Complete the management command */
1870         r = qbman_swp_mc_complete(s, p, QBMAN_WQCHAN_CONFIGURE);
1871         if (!r) {
1872                 pr_err("qbman: wqchan config failed, no response\n");
1873                 return -EIO;
1874         }
1875
1876         /* Decode the outcome */
1877         QBMAN_BUG_ON((r->verb & QBMAN_RESPONSE_VERB_MASK)
1878                      != QBMAN_WQCHAN_CONFIGURE);
1879
1880         /* Determine success or failure */
1881         if (r->rslt != QBMAN_MC_RSLT_OK) {
1882                 pr_err("CDAN channel %u failed: code = 0x%02x\n",
1883                        channelid, r->rslt);
1884                 return -EIO;
1885         }
1886
1887         return 0;
1888 }
1889
1890 int qbman_swp_CDAN_set_context(struct qbman_swp *s, uint16_t channelid,
1891                                uint64_t ctx)
1892 {
1893         return qbman_swp_CDAN_set(s, channelid,
1894                                   CODE_CDAN_WE_CTX,
1895                                   0, ctx);
1896 }
1897
1898 int qbman_swp_CDAN_enable(struct qbman_swp *s, uint16_t channelid)
1899 {
1900         return qbman_swp_CDAN_set(s, channelid,
1901                                   CODE_CDAN_WE_EN,
1902                                   1, 0);
1903 }
1904
1905 int qbman_swp_CDAN_disable(struct qbman_swp *s, uint16_t channelid)
1906 {
1907         return qbman_swp_CDAN_set(s, channelid,
1908                                   CODE_CDAN_WE_EN,
1909                                   0, 0);
1910 }
1911
1912 int qbman_swp_CDAN_set_context_enable(struct qbman_swp *s, uint16_t channelid,
1913                                       uint64_t ctx)
1914 {
1915         return qbman_swp_CDAN_set(s, channelid,
1916                                   CODE_CDAN_WE_EN | CODE_CDAN_WE_CTX,
1917                                   1, ctx);
1918 }
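/*
 * Typical CDAN setup (sketch only; "my_ctx" is a hypothetical opaque
 * cookie, expected back via qbman_result_SCN_ctx() when the
 * notification fires):
 *
 *	qbman_swp_CDAN_set_context_enable(swp, channelid, (uint64_t)my_ctx);
 *	...
 *	... after handling a CDAN, re-arm it ...
 *	qbman_swp_CDAN_enable(swp, channelid);
 */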
1919
1920 uint8_t qbman_get_dqrr_idx(const struct qbman_result *dqrr)
1921 {
1922         return QBMAN_IDX_FROM_DQRR(dqrr);
1923 }
1924
1925 struct qbman_result *qbman_get_dqrr_from_idx(struct qbman_swp *s, uint8_t idx)
1926 {
1927         struct qbman_result *dq;
1928
1929         dq = qbman_cena_read(&s->sys, QBMAN_CENA_SWP_DQRR(idx));
1930         return dq;
1931 }
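/*
 * These two helpers let a DQRR entry be referenced by its ring index
 * across function boundaries (sketch only):
 *
 *	uint8_t idx = qbman_get_dqrr_idx(dq);		... save the slot ...
 *	...
 *	dq = qbman_get_dqrr_from_idx(swp, idx);		... re-read it later ...
 *	qbman_swp_dqrr_idx_consume(swp, idx);		... then release it ...
 */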