bus/fslmc: rename portal pi index to consumer index
drivers/bus/fslmc/qbman/qbman_portal.c
/* SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (C) 2014-2016 Freescale Semiconductor, Inc.
 * Copyright 2018 NXP
 *
 */

#include "qbman_sys.h"
#include "qbman_portal.h"

/* QBMan portal management command codes */
#define QBMAN_MC_ACQUIRE       0x30
#define QBMAN_WQCHAN_CONFIGURE 0x46

/* Reverse mapping of QBMAN_CENA_SWP_DQRR() */
#define QBMAN_IDX_FROM_DQRR(p) (((unsigned long)p & 0x1ff) >> 6)

/* QBMan FQ management command codes */
#define QBMAN_FQ_SCHEDULE       0x48
#define QBMAN_FQ_FORCE          0x49
#define QBMAN_FQ_XON            0x4d
#define QBMAN_FQ_XOFF           0x4e

/*******************************/
/* Pre-defined attribute codes */
/*******************************/

#define QBMAN_RESPONSE_VERB_MASK   0x7f

/*************************/
/* SDQCR attribute codes */
/*************************/
#define QB_SDQCR_FC_SHIFT   29
#define QB_SDQCR_FC_MASK    0x1
#define QB_SDQCR_DCT_SHIFT  24
#define QB_SDQCR_DCT_MASK   0x3
#define QB_SDQCR_TOK_SHIFT  16
#define QB_SDQCR_TOK_MASK   0xff
#define QB_SDQCR_SRC_SHIFT  0
#define QB_SDQCR_SRC_MASK   0xffff

/* opaque token for static dequeues */
#define QMAN_SDQCR_TOKEN    0xbb

enum qbman_sdqcr_dct {
        qbman_sdqcr_dct_null = 0,
        qbman_sdqcr_dct_prio_ics,
        qbman_sdqcr_dct_active_ics,
        qbman_sdqcr_dct_active
};

enum qbman_sdqcr_fc {
        qbman_sdqcr_fc_one = 0,
        qbman_sdqcr_fc_up_to_3 = 1
};

/* We need to keep track of which SWP triggered a pull command
 * so keep an array of portal IDs and use the token field to
 * be able to find the proper portal
 */
#define MAX_QBMAN_PORTALS  64
static struct qbman_swp *portal_idx_map[MAX_QBMAN_PORTALS];

/* Internal Function declaration */
static int
qbman_swp_enqueue_array_mode_direct(struct qbman_swp *s,
                const struct qbman_eq_desc *d,
                const struct qbman_fd *fd);
static int
qbman_swp_enqueue_array_mode_mem_back(struct qbman_swp *s,
                const struct qbman_eq_desc *d,
                const struct qbman_fd *fd);

static int
qbman_swp_enqueue_ring_mode_direct(struct qbman_swp *s,
                const struct qbman_eq_desc *d,
                const struct qbman_fd *fd);
static int
qbman_swp_enqueue_ring_mode_mem_back(struct qbman_swp *s,
                const struct qbman_eq_desc *d,
                const struct qbman_fd *fd);

static int
qbman_swp_enqueue_multiple_direct(struct qbman_swp *s,
                const struct qbman_eq_desc *d,
                const struct qbman_fd *fd,
                uint32_t *flags,
                int num_frames);
static int
qbman_swp_enqueue_multiple_mem_back(struct qbman_swp *s,
                const struct qbman_eq_desc *d,
                const struct qbman_fd *fd,
                uint32_t *flags,
                int num_frames);

static int
qbman_swp_enqueue_multiple_desc_direct(struct qbman_swp *s,
                const struct qbman_eq_desc *d,
                const struct qbman_fd *fd,
                int num_frames);
static int
qbman_swp_enqueue_multiple_desc_mem_back(struct qbman_swp *s,
                const struct qbman_eq_desc *d,
                const struct qbman_fd *fd,
                int num_frames);

static int
qbman_swp_pull_direct(struct qbman_swp *s, struct qbman_pull_desc *d);
static int
qbman_swp_pull_mem_back(struct qbman_swp *s, struct qbman_pull_desc *d);

const struct qbman_result *qbman_swp_dqrr_next_direct(struct qbman_swp *s);
const struct qbman_result *qbman_swp_dqrr_next_mem_back(struct qbman_swp *s);

static int
qbman_swp_release_direct(struct qbman_swp *s,
                const struct qbman_release_desc *d,
                const uint64_t *buffers, unsigned int num_buffers);
static int
qbman_swp_release_mem_back(struct qbman_swp *s,
                const struct qbman_release_desc *d,
                const uint64_t *buffers, unsigned int num_buffers);

/* Function pointers */
static int (*qbman_swp_enqueue_array_mode_ptr)(struct qbman_swp *s,
                const struct qbman_eq_desc *d,
                const struct qbman_fd *fd)
        = qbman_swp_enqueue_array_mode_direct;

static int (*qbman_swp_enqueue_ring_mode_ptr)(struct qbman_swp *s,
                const struct qbman_eq_desc *d,
                const struct qbman_fd *fd)
        = qbman_swp_enqueue_ring_mode_direct;

static int (*qbman_swp_enqueue_multiple_ptr)(struct qbman_swp *s,
                const struct qbman_eq_desc *d,
                const struct qbman_fd *fd,
                uint32_t *flags,
                int num_frames)
        = qbman_swp_enqueue_multiple_direct;

static int (*qbman_swp_enqueue_multiple_desc_ptr)(struct qbman_swp *s,
                const struct qbman_eq_desc *d,
                const struct qbman_fd *fd,
                int num_frames)
        = qbman_swp_enqueue_multiple_desc_direct;

static int (*qbman_swp_pull_ptr)(struct qbman_swp *s,
                struct qbman_pull_desc *d)
        = qbman_swp_pull_direct;

const struct qbman_result *(*qbman_swp_dqrr_next_ptr)(struct qbman_swp *s)
                = qbman_swp_dqrr_next_direct;

static int (*qbman_swp_release_ptr)(struct qbman_swp *s,
                        const struct qbman_release_desc *d,
                        const uint64_t *buffers, unsigned int num_buffers)
                        = qbman_swp_release_direct;

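/* The entry points above default to the "direct" (cache-enabled CENA)
 * implementations; qbman_swp_init() below rewires them to the *_mem_back
 * variants when the portal is QMan rev >= 5000 and is configured for
 * memory-backed access (qman_cena_fastest_access).
 */
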
/*********************************/
/* Portal constructor/destructor */
/*********************************/

/* Software portals should always be in the power-on state when we initialise,
 * due to the CCSR-based portal reset functionality that MC has.
 *
 * Erk! Turns out that QMan versions prior to 4.1 do not correctly reset DQRR
 * valid-bits, so we need to support a workaround where we don't trust
 * valid-bits when detecting new entries until any stale ring entries have been
 * overwritten at least once. The idea is that we read PI for the first few
 * entries, then switch to valid-bit after that. The trick is to clear the
 * bug-work-around boolean once the PI wraps around the ring for the first time.
 *
 * Note: this still carries a slight additional cost once the decrementer hits
 * zero.
 */
struct qbman_swp *qbman_swp_init(const struct qbman_swp_desc *d)
{
        int ret;
        uint32_t eqcr_pi;
        uint32_t mask_size;
        struct qbman_swp *p = malloc(sizeof(*p));

        if (!p)
                return NULL;

        memset(p, 0, sizeof(struct qbman_swp));

        p->desc = *d;
#ifdef QBMAN_CHECKING
        p->mc.check = swp_mc_can_start;
#endif
        p->mc.valid_bit = QB_VALID_BIT;
        p->sdq |= qbman_sdqcr_dct_prio_ics << QB_SDQCR_DCT_SHIFT;
        p->sdq |= qbman_sdqcr_fc_up_to_3 << QB_SDQCR_FC_SHIFT;
        p->sdq |= QMAN_SDQCR_TOKEN << QB_SDQCR_TOK_SHIFT;
        if ((d->qman_version & QMAN_REV_MASK) >= QMAN_REV_5000
                        && (d->cena_access_mode == qman_cena_fastest_access))
                p->mr.valid_bit = QB_VALID_BIT;

        atomic_set(&p->vdq.busy, 1);
        p->vdq.valid_bit = QB_VALID_BIT;
        p->dqrr.valid_bit = QB_VALID_BIT;
        qman_version = p->desc.qman_version;
        if ((qman_version & QMAN_REV_MASK) < QMAN_REV_4100) {
                p->dqrr.dqrr_size = 4;
                p->dqrr.reset_bug = 1;
        } else {
                p->dqrr.dqrr_size = 8;
                p->dqrr.reset_bug = 0;
        }

        ret = qbman_swp_sys_init(&p->sys, d, p->dqrr.dqrr_size);
        if (ret) {
                free(p);
                pr_err("qbman_swp_sys_init() failed %d\n", ret);
                return NULL;
        }

        /* Verify that the DQRR PI is 0 - if it is not, the portal isn't
         * in its default state, which is an error
         */
        if (qbman_cinh_read(&p->sys, QBMAN_CINH_SWP_DQPI) & 0xF) {
                pr_err("qbman DQRR PI is not zero, portal is not clean\n");
                free(p);
                return NULL;
        }

        /* SDQCR needs to be initialized to 0 when no channels are
         * being dequeued from or else the QMan HW will indicate an
         * error.  The values that were calculated above will be
         * applied when dequeues from a specific channel are enabled.
         */
        qbman_cinh_write(&p->sys, QBMAN_CINH_SWP_SDQCR, 0);

        p->eqcr.pi_ring_size = 8;
        if ((qman_version & QMAN_REV_MASK) >= QMAN_REV_5000
                        && (d->cena_access_mode == qman_cena_fastest_access)) {
                p->eqcr.pi_ring_size = 32;
                qbman_swp_enqueue_array_mode_ptr =
                                qbman_swp_enqueue_array_mode_mem_back;
                qbman_swp_enqueue_ring_mode_ptr =
                                qbman_swp_enqueue_ring_mode_mem_back;
                qbman_swp_enqueue_multiple_ptr =
                                qbman_swp_enqueue_multiple_mem_back;
                qbman_swp_enqueue_multiple_desc_ptr =
                                qbman_swp_enqueue_multiple_desc_mem_back;
                qbman_swp_pull_ptr = qbman_swp_pull_mem_back;
                qbman_swp_dqrr_next_ptr = qbman_swp_dqrr_next_mem_back;
                qbman_swp_release_ptr = qbman_swp_release_mem_back;
        }

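        /* pi_ring_size is a power of two, so the loop below sets one mask bit
         * per size bit, i.e. pi_ci_mask = 2 * pi_ring_size - 1: the ring index
         * bits plus one extra bit tracking the ring's wrap (valid-bit) phase.
         */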
        for (mask_size = p->eqcr.pi_ring_size; mask_size > 0; mask_size >>= 1)
                p->eqcr.pi_ci_mask = (p->eqcr.pi_ci_mask<<1) + 1;
        eqcr_pi = qbman_cinh_read(&p->sys, QBMAN_CINH_SWP_EQCR_PI);
        p->eqcr.pi = eqcr_pi & p->eqcr.pi_ci_mask;
        p->eqcr.pi_vb = eqcr_pi & QB_VALID_BIT;
        if ((p->desc.qman_version & QMAN_REV_MASK) >= QMAN_REV_5000
                        && (d->cena_access_mode == qman_cena_fastest_access))
                p->eqcr.ci = qbman_cinh_read(&p->sys, QBMAN_CINH_SWP_EQCR_PI)
                                             & p->eqcr.pi_ci_mask;
        else
                p->eqcr.ci = qbman_cinh_read(&p->sys, QBMAN_CINH_SWP_EQCR_CI)
                                             & p->eqcr.pi_ci_mask;
        p->eqcr.available = p->eqcr.pi_ring_size -
                                qm_cyc_diff(p->eqcr.pi_ring_size,
                                p->eqcr.ci & (p->eqcr.pi_ci_mask<<1),
                                p->eqcr.pi & (p->eqcr.pi_ci_mask<<1));

        portal_idx_map[p->desc.idx] = p;
        return p;
}

void qbman_swp_finish(struct qbman_swp *p)
{
#ifdef QBMAN_CHECKING
        QBMAN_BUG_ON(p->mc.check != swp_mc_can_start);
#endif
        qbman_swp_sys_finish(&p->sys);
        portal_idx_map[p->desc.idx] = NULL;
        free(p);
}

const struct qbman_swp_desc *qbman_swp_get_desc(struct qbman_swp *p)
{
        return &p->desc;
}

/**************/
/* Interrupts */
/**************/

uint32_t qbman_swp_interrupt_get_vanish(struct qbman_swp *p)
{
        return qbman_cinh_read(&p->sys, QBMAN_CINH_SWP_ISDR);
}

void qbman_swp_interrupt_set_vanish(struct qbman_swp *p, uint32_t mask)
{
        qbman_cinh_write(&p->sys, QBMAN_CINH_SWP_ISDR, mask);
}

uint32_t qbman_swp_interrupt_read_status(struct qbman_swp *p)
{
        return qbman_cinh_read(&p->sys, QBMAN_CINH_SWP_ISR);
}

void qbman_swp_interrupt_clear_status(struct qbman_swp *p, uint32_t mask)
{
        qbman_cinh_write(&p->sys, QBMAN_CINH_SWP_ISR, mask);
}

uint32_t qbman_swp_dqrr_thrshld_read_status(struct qbman_swp *p)
{
        return qbman_cinh_read(&p->sys, QBMAN_CINH_SWP_DQRR_ITR);
}

void qbman_swp_dqrr_thrshld_write(struct qbman_swp *p, uint32_t mask)
{
        qbman_cinh_write(&p->sys, QBMAN_CINH_SWP_DQRR_ITR, mask);
}

uint32_t qbman_swp_intr_timeout_read_status(struct qbman_swp *p)
{
        return qbman_cinh_read(&p->sys, QBMAN_CINH_SWP_ITPR);
}

void qbman_swp_intr_timeout_write(struct qbman_swp *p, uint32_t mask)
{
        qbman_cinh_write(&p->sys, QBMAN_CINH_SWP_ITPR, mask);
}

uint32_t qbman_swp_interrupt_get_trigger(struct qbman_swp *p)
{
        return qbman_cinh_read(&p->sys, QBMAN_CINH_SWP_IER);
}

void qbman_swp_interrupt_set_trigger(struct qbman_swp *p, uint32_t mask)
{
        qbman_cinh_write(&p->sys, QBMAN_CINH_SWP_IER, mask);
}

int qbman_swp_interrupt_get_inhibit(struct qbman_swp *p)
{
        return qbman_cinh_read(&p->sys, QBMAN_CINH_SWP_IIR);
}

void qbman_swp_interrupt_set_inhibit(struct qbman_swp *p, int inhibit)
{
        qbman_cinh_write(&p->sys, QBMAN_CINH_SWP_IIR,
                         inhibit ? 0xffffffff : 0);
}

/***********************/
/* Management commands */
/***********************/

/*
 * Internal code common to all types of management commands.
 */
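
/* A management command is a three-step exchange:
 *   1. qbman_swp_mc_start()  - claim the command register (CR);
 *   2. qbman_swp_mc_submit() - fill it in and hand it to the hardware by
 *      writing the verb byte with the current valid-bit;
 *   3. qbman_swp_mc_result() - poll the response register (RR) until a
 *      response with the expected valid-bit and a non-zero verb appears.
 * A minimal synchronous wrapper is sketched after qbman_swp_mc_result().
 */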

void *qbman_swp_mc_start(struct qbman_swp *p)
{
        void *ret;
#ifdef QBMAN_CHECKING
        QBMAN_BUG_ON(p->mc.check != swp_mc_can_start);
#endif
        if ((p->desc.qman_version & QMAN_REV_MASK) >= QMAN_REV_5000
                    && (p->desc.cena_access_mode == qman_cena_fastest_access))
                ret = qbman_cena_write_start(&p->sys, QBMAN_CENA_SWP_CR_MEM);
        else
                ret = qbman_cena_write_start(&p->sys, QBMAN_CENA_SWP_CR);
#ifdef QBMAN_CHECKING
        if (!ret)
                p->mc.check = swp_mc_can_submit;
#endif
        return ret;
}

void qbman_swp_mc_submit(struct qbman_swp *p, void *cmd, uint8_t cmd_verb)
{
        uint8_t *v = cmd;
#ifdef QBMAN_CHECKING
        QBMAN_BUG_ON(p->mc.check != swp_mc_can_submit);
#endif
        /* TBD: "|=" is going to hurt performance. Need to move as many fields
         * out of word zero, and for those that remain, the "OR" needs to occur
         * at the caller side. This debug check helps to catch cases where the
         * caller wants to OR but has forgotten to do so.
         */
        QBMAN_BUG_ON((*v & cmd_verb) != *v);
        if ((p->desc.qman_version & QMAN_REV_MASK) >= QMAN_REV_5000
                    && (p->desc.cena_access_mode == qman_cena_fastest_access)) {
                *v = cmd_verb | p->mr.valid_bit;
                qbman_cena_write_complete(&p->sys, QBMAN_CENA_SWP_CR_MEM, cmd);
                dma_wmb();
                qbman_cinh_write(&p->sys, QBMAN_CINH_SWP_CR_RT, QMAN_RT_MODE);
        } else {
                dma_wmb();
                *v = cmd_verb | p->mc.valid_bit;
                qbman_cena_write_complete(&p->sys, QBMAN_CENA_SWP_CR, cmd);
                clean(cmd);
        }
#ifdef QBMAN_CHECKING
        p->mc.check = swp_mc_can_poll;
#endif
}

void *qbman_swp_mc_result(struct qbman_swp *p)
{
        uint32_t *ret, verb;
#ifdef QBMAN_CHECKING
        QBMAN_BUG_ON(p->mc.check != swp_mc_can_poll);
#endif
        if ((p->desc.qman_version & QMAN_REV_MASK) >= QMAN_REV_5000
                && (p->desc.cena_access_mode == qman_cena_fastest_access)) {
                ret = qbman_cena_read(&p->sys, QBMAN_CENA_SWP_RR_MEM);
                /* Command completed if the valid bit is toggled */
                if (p->mr.valid_bit != (ret[0] & QB_VALID_BIT))
                        return NULL;
                /* Remove the valid-bit -
                 * command completed iff the rest is non-zero
                 */
                verb = ret[0] & ~QB_VALID_BIT;
                if (!verb)
                        return NULL;
                p->mr.valid_bit ^= QB_VALID_BIT;
        } else {
                qbman_cena_invalidate_prefetch(&p->sys,
                        QBMAN_CENA_SWP_RR(p->mc.valid_bit));
                ret = qbman_cena_read(&p->sys,
                                      QBMAN_CENA_SWP_RR(p->mc.valid_bit));
                /* Remove the valid-bit -
                 * command completed iff the rest is non-zero
                 */
                verb = ret[0] & ~QB_VALID_BIT;
                if (!verb)
                        return NULL;
                p->mc.valid_bit ^= QB_VALID_BIT;
        }
#ifdef QBMAN_CHECKING
        p->mc.check = swp_mc_can_start;
#endif
        return ret;
}
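
/* Illustrative only: a minimal synchronous wrapper over the three-step MC
 * protocol above, busy-polling for the response. The QBMAN_PORTAL_EXAMPLES
 * guard is hypothetical and never defined in the build; this driver's own
 * helper for the same pattern is qbman_swp_mc_complete() in qbman_portal.h.
 */
#ifdef QBMAN_PORTAL_EXAMPLES
static void *qbman_swp_mc_example(struct qbman_swp *p, const void *cmd,
                                  size_t cmd_size, uint8_t cmd_verb)
{
        void *cr = qbman_swp_mc_start(p);
        void *rr;

        if (!cr)
                return NULL;            /* command register not available */
        memcpy(cr, cmd, cmd_size);      /* copy the prebuilt command body */
        qbman_swp_mc_submit(p, cr, cmd_verb);
        do {
                rr = qbman_swp_mc_result(p);
        } while (!rr);                  /* spin until the verb shows up */
        return rr;
}
#endif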

/***********/
/* Enqueue */
/***********/

#define QB_ENQUEUE_CMD_OPTIONS_SHIFT    0
enum qb_enqueue_commands {
        enqueue_empty = 0,
        enqueue_response_always = 1,
        enqueue_rejects_to_fq = 2
};

#define QB_ENQUEUE_CMD_EC_OPTION_MASK        0x3
#define QB_ENQUEUE_CMD_ORP_ENABLE_SHIFT      2
#define QB_ENQUEUE_CMD_IRQ_ON_DISPATCH_SHIFT 3
#define QB_ENQUEUE_CMD_TARGET_TYPE_SHIFT     4
#define QB_ENQUEUE_CMD_DCA_PK_SHIFT          6
#define QB_ENQUEUE_CMD_DCA_EN_SHIFT          7
#define QB_ENQUEUE_CMD_NLIS_SHIFT            14
#define QB_ENQUEUE_CMD_IS_NESN_SHIFT         15

void qbman_eq_desc_clear(struct qbman_eq_desc *d)
{
        memset(d, 0, sizeof(*d));
}

void qbman_eq_desc_set_no_orp(struct qbman_eq_desc *d, int respond_success)
{
        d->eq.verb &= ~(1 << QB_ENQUEUE_CMD_ORP_ENABLE_SHIFT);
        if (respond_success)
                d->eq.verb |= enqueue_response_always;
        else
                d->eq.verb |= enqueue_rejects_to_fq;
}

void qbman_eq_desc_set_orp(struct qbman_eq_desc *d, int respond_success,
                           uint16_t opr_id, uint16_t seqnum, int incomplete)
{
        d->eq.verb |= 1 << QB_ENQUEUE_CMD_ORP_ENABLE_SHIFT;
        if (respond_success)
                d->eq.verb |= enqueue_response_always;
        else
                d->eq.verb |= enqueue_rejects_to_fq;

        d->eq.orpid = opr_id;
        d->eq.seqnum = seqnum;
        if (incomplete)
                d->eq.seqnum |= 1 << QB_ENQUEUE_CMD_NLIS_SHIFT;
        else
                d->eq.seqnum &= ~(1 << QB_ENQUEUE_CMD_NLIS_SHIFT);
}

void qbman_eq_desc_set_orp_hole(struct qbman_eq_desc *d, uint16_t opr_id,
                                uint16_t seqnum)
{
        d->eq.verb |= 1 << QB_ENQUEUE_CMD_ORP_ENABLE_SHIFT;
        d->eq.verb &= ~QB_ENQUEUE_CMD_EC_OPTION_MASK;
        d->eq.orpid = opr_id;
        d->eq.seqnum = seqnum;
        d->eq.seqnum &= ~(1 << QB_ENQUEUE_CMD_NLIS_SHIFT);
        d->eq.seqnum &= ~(1 << QB_ENQUEUE_CMD_IS_NESN_SHIFT);
}

void qbman_eq_desc_set_orp_nesn(struct qbman_eq_desc *d, uint16_t opr_id,
                                uint16_t seqnum)
{
        d->eq.verb |= 1 << QB_ENQUEUE_CMD_ORP_ENABLE_SHIFT;
        d->eq.verb &= ~QB_ENQUEUE_CMD_EC_OPTION_MASK;
        d->eq.orpid = opr_id;
        d->eq.seqnum = seqnum;
        d->eq.seqnum &= ~(1 << QB_ENQUEUE_CMD_NLIS_SHIFT);
        d->eq.seqnum |= 1 << QB_ENQUEUE_CMD_IS_NESN_SHIFT;
}

void qbman_eq_desc_set_response(struct qbman_eq_desc *d,
                                dma_addr_t storage_phys,
                                int stash)
{
        d->eq.rsp_addr = storage_phys;
        d->eq.wae = stash;
}

void qbman_eq_desc_set_token(struct qbman_eq_desc *d, uint8_t token)
{
        d->eq.rspid = token;
}

void qbman_eq_desc_set_fq(struct qbman_eq_desc *d, uint32_t fqid)
{
        d->eq.verb &= ~(1 << QB_ENQUEUE_CMD_TARGET_TYPE_SHIFT);
        d->eq.tgtid = fqid;
}

void qbman_eq_desc_set_qd(struct qbman_eq_desc *d, uint32_t qdid,
                          uint16_t qd_bin, uint8_t qd_prio)
{
        d->eq.verb |= 1 << QB_ENQUEUE_CMD_TARGET_TYPE_SHIFT;
        d->eq.tgtid = qdid;
        d->eq.qdbin = qd_bin;
        d->eq.qpri = qd_prio;
}

void qbman_eq_desc_set_eqdi(struct qbman_eq_desc *d, int enable)
{
        if (enable)
                d->eq.verb |= 1 << QB_ENQUEUE_CMD_IRQ_ON_DISPATCH_SHIFT;
        else
                d->eq.verb &= ~(1 << QB_ENQUEUE_CMD_IRQ_ON_DISPATCH_SHIFT);
}

void qbman_eq_desc_set_dca(struct qbman_eq_desc *d, int enable,
                           uint8_t dqrr_idx, int park)
{
        if (enable) {
                d->eq.dca = dqrr_idx;
                if (park)
                        d->eq.dca |= 1 << QB_ENQUEUE_CMD_DCA_PK_SHIFT;
                else
                        d->eq.dca &= ~(1 << QB_ENQUEUE_CMD_DCA_PK_SHIFT);
                d->eq.dca |= 1 << QB_ENQUEUE_CMD_DCA_EN_SHIFT;
        } else {
                d->eq.dca &= ~(1 << QB_ENQUEUE_CMD_DCA_EN_SHIFT);
        }
}

#define EQAR_IDX(eqar)     ((eqar) & 0x1f)
#define EQAR_VB(eqar)      ((eqar) & 0x80)
#define EQAR_SUCCESS(eqar) ((eqar) & 0x100)

static inline void qbman_write_eqcr_am_rt_register(struct qbman_swp *p,
                                                   uint8_t idx)
{
        if (idx < 16)
                qbman_cinh_write(&p->sys, QBMAN_CINH_SWP_EQCR_AM_RT + idx * 4,
                                     QMAN_RT_MODE);
        else
                qbman_cinh_write(&p->sys, QBMAN_CINH_SWP_EQCR_AM_RT2 +
                                     (idx - 16) * 4,
                                     QMAN_RT_MODE);
}


static int qbman_swp_enqueue_array_mode_direct(struct qbman_swp *s,
                                               const struct qbman_eq_desc *d,
                                               const struct qbman_fd *fd)
{
        uint32_t *p;
        const uint32_t *cl = qb_cl(d);
        uint32_t eqar = qbman_cinh_read(&s->sys, QBMAN_CINH_SWP_EQAR);

        pr_debug("EQAR=%08x\n", eqar);
        if (!EQAR_SUCCESS(eqar))
                return -EBUSY;
        p = qbman_cena_write_start_wo_shadow(&s->sys,
                        QBMAN_CENA_SWP_EQCR(EQAR_IDX(eqar)));
        memcpy(&p[1], &cl[1], 28);
        memcpy(&p[8], fd, sizeof(*fd));

        /* Set the verb byte, have to substitute in the valid-bit */
        dma_wmb();
        p[0] = cl[0] | EQAR_VB(eqar);
        qbman_cena_write_complete_wo_shadow(&s->sys,
                                QBMAN_CENA_SWP_EQCR(EQAR_IDX(eqar)));
        return 0;
}
static int qbman_swp_enqueue_array_mode_mem_back(struct qbman_swp *s,
                                                 const struct qbman_eq_desc *d,
                                                 const struct qbman_fd *fd)
{
        uint32_t *p;
        const uint32_t *cl = qb_cl(d);
        uint32_t eqar = qbman_cinh_read(&s->sys, QBMAN_CINH_SWP_EQAR);

        pr_debug("EQAR=%08x\n", eqar);
        if (!EQAR_SUCCESS(eqar))
                return -EBUSY;
        p = qbman_cena_write_start_wo_shadow(&s->sys,
                        QBMAN_CENA_SWP_EQCR(EQAR_IDX(eqar)));
        memcpy(&p[1], &cl[1], 28);
        memcpy(&p[8], fd, sizeof(*fd));

        /* Set the verb byte, have to substitute in the valid-bit */
        p[0] = cl[0] | EQAR_VB(eqar);
        dma_wmb();
        qbman_write_eqcr_am_rt_register(s, EQAR_IDX(eqar));
        return 0;
}

static inline int qbman_swp_enqueue_array_mode(struct qbman_swp *s,
                                               const struct qbman_eq_desc *d,
                                               const struct qbman_fd *fd)
{
        return qbman_swp_enqueue_array_mode_ptr(s, d, fd);
}

static int qbman_swp_enqueue_ring_mode_direct(struct qbman_swp *s,
                                              const struct qbman_eq_desc *d,
                                              const struct qbman_fd *fd)
{
        uint32_t *p;
        const uint32_t *cl = qb_cl(d);
        uint32_t eqcr_ci, full_mask, half_mask;

        half_mask = (s->eqcr.pi_ci_mask>>1);
        full_mask = s->eqcr.pi_ci_mask;
        if (!s->eqcr.available) {
                eqcr_ci = s->eqcr.ci;
                s->eqcr.ci = qbman_cena_read_reg(&s->sys,
                                QBMAN_CENA_SWP_EQCR_CI) & full_mask;
                s->eqcr.available = qm_cyc_diff(s->eqcr.pi_ring_size,
                                eqcr_ci, s->eqcr.ci);
                if (!s->eqcr.available)
                        return -EBUSY;
        }

        p = qbman_cena_write_start_wo_shadow(&s->sys,
                        QBMAN_CENA_SWP_EQCR(s->eqcr.pi & half_mask));
        memcpy(&p[1], &cl[1], 28);
        memcpy(&p[8], fd, sizeof(*fd));
        lwsync();

        /* Set the verb byte, have to substitute in the valid-bit */
        p[0] = cl[0] | s->eqcr.pi_vb;
        qbman_cena_write_complete_wo_shadow(&s->sys,
                        QBMAN_CENA_SWP_EQCR(s->eqcr.pi & half_mask));
        s->eqcr.pi++;
        s->eqcr.pi &= full_mask;
        s->eqcr.available--;
        if (!(s->eqcr.pi & half_mask))
                s->eqcr.pi_vb ^= QB_VALID_BIT;

        return 0;
}

static int qbman_swp_enqueue_ring_mode_mem_back(struct qbman_swp *s,
                                                const struct qbman_eq_desc *d,
                                                const struct qbman_fd *fd)
{
        uint32_t *p;
        const uint32_t *cl = qb_cl(d);
        uint32_t eqcr_ci, full_mask, half_mask;

        half_mask = (s->eqcr.pi_ci_mask>>1);
        full_mask = s->eqcr.pi_ci_mask;
        if (!s->eqcr.available) {
                eqcr_ci = s->eqcr.ci;
                s->eqcr.ci = qbman_cena_read_reg(&s->sys,
                                QBMAN_CENA_SWP_EQCR_CI_MEMBACK) & full_mask;
                s->eqcr.available = qm_cyc_diff(s->eqcr.pi_ring_size,
                                eqcr_ci, s->eqcr.ci);
                if (!s->eqcr.available)
                        return -EBUSY;
        }

        p = qbman_cena_write_start_wo_shadow(&s->sys,
                        QBMAN_CENA_SWP_EQCR(s->eqcr.pi & half_mask));
        memcpy(&p[1], &cl[1], 28);
        memcpy(&p[8], fd, sizeof(*fd));

        /* Set the verb byte, have to substitute in the valid-bit */
        p[0] = cl[0] | s->eqcr.pi_vb;
        s->eqcr.pi++;
        s->eqcr.pi &= full_mask;
        s->eqcr.available--;
        if (!(s->eqcr.pi & half_mask))
                s->eqcr.pi_vb ^= QB_VALID_BIT;
        dma_wmb();
        qbman_cinh_write(&s->sys, QBMAN_CINH_SWP_EQCR_PI,
                                (QB_RT_BIT)|(s->eqcr.pi)|s->eqcr.pi_vb);
        return 0;
}

static int qbman_swp_enqueue_ring_mode(struct qbman_swp *s,
                                       const struct qbman_eq_desc *d,
                                       const struct qbman_fd *fd)
{
        return qbman_swp_enqueue_ring_mode_ptr(s, d, fd);
}

int qbman_swp_enqueue(struct qbman_swp *s, const struct qbman_eq_desc *d,
                      const struct qbman_fd *fd)
{
        if (s->sys.eqcr_mode == qman_eqcr_vb_array)
                return qbman_swp_enqueue_array_mode(s, d, fd);
        else    /* Use ring mode by default */
                return qbman_swp_enqueue_ring_mode(s, d, fd);
}
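
/* Illustrative only: a typical single-frame enqueue to a frame queue using
 * the descriptor setters above. The QBMAN_PORTAL_EXAMPLES guard is
 * hypothetical; the retry-on-EBUSY policy is the caller's choice.
 */
#ifdef QBMAN_PORTAL_EXAMPLES
static int qbman_enqueue_example(struct qbman_swp *s,
                                 const struct qbman_fd *fd, uint32_t fqid)
{
        struct qbman_eq_desc d;
        int ret;

        qbman_eq_desc_clear(&d);
        qbman_eq_desc_set_no_orp(&d, 0);        /* no enqueue response needed */
        qbman_eq_desc_set_fq(&d, fqid);
        do {
                ret = qbman_swp_enqueue(s, &d, fd);
        } while (ret == -EBUSY);                /* EQCR was full, try again */
        return ret;
}
#endif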

static int qbman_swp_enqueue_multiple_direct(struct qbman_swp *s,
                                             const struct qbman_eq_desc *d,
                                             const struct qbman_fd *fd,
                                             uint32_t *flags,
                                             int num_frames)
{
        uint32_t *p = NULL;
        const uint32_t *cl = qb_cl(d);
        uint32_t eqcr_ci, eqcr_pi, half_mask, full_mask;
        int i, num_enqueued = 0;
        uint64_t addr_cena;

        half_mask = (s->eqcr.pi_ci_mask>>1);
        full_mask = s->eqcr.pi_ci_mask;
        if (!s->eqcr.available) {
                eqcr_ci = s->eqcr.ci;
                s->eqcr.ci = qbman_cena_read_reg(&s->sys,
                                QBMAN_CENA_SWP_EQCR_CI) & full_mask;
                s->eqcr.available = qm_cyc_diff(s->eqcr.pi_ring_size,
                                eqcr_ci, s->eqcr.ci);
                if (!s->eqcr.available)
                        return 0;
        }

        eqcr_pi = s->eqcr.pi;
        num_enqueued = (s->eqcr.available < num_frames) ?
                        s->eqcr.available : num_frames;
        s->eqcr.available -= num_enqueued;
        /* Fill in the EQCR ring */
        for (i = 0; i < num_enqueued; i++) {
                p = qbman_cena_write_start_wo_shadow(&s->sys,
                                QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
                memcpy(&p[1], &cl[1], 28);
                memcpy(&p[8], &fd[i], sizeof(*fd));
                eqcr_pi++;
        }

        lwsync();

        /* Set the verb byte, have to substitute in the valid-bit */
        eqcr_pi = s->eqcr.pi;
        for (i = 0; i < num_enqueued; i++) {
                p = qbman_cena_write_start_wo_shadow(&s->sys,
                                QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
                p[0] = cl[0] | s->eqcr.pi_vb;
                if (flags && (flags[i] & QBMAN_ENQUEUE_FLAG_DCA)) {
                        struct qbman_eq_desc *d = (struct qbman_eq_desc *)p;

                        d->eq.dca = (1 << QB_ENQUEUE_CMD_DCA_EN_SHIFT) |
                                ((flags[i]) & QBMAN_EQCR_DCA_IDXMASK);
                }
                eqcr_pi++;
                if (!(eqcr_pi & half_mask))
                        s->eqcr.pi_vb ^= QB_VALID_BIT;
        }

        /* Flush all the cacheline without load/store in between */
        eqcr_pi = s->eqcr.pi;
        addr_cena = (size_t)s->sys.addr_cena;
        for (i = 0; i < num_enqueued; i++) {
                dcbf((uintptr_t)(addr_cena +
                        QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask)));
                eqcr_pi++;
        }
        s->eqcr.pi = eqcr_pi & full_mask;

        return num_enqueued;
}

static int qbman_swp_enqueue_multiple_mem_back(struct qbman_swp *s,
                                               const struct qbman_eq_desc *d,
                                               const struct qbman_fd *fd,
                                               uint32_t *flags,
                                               int num_frames)
{
        uint32_t *p = NULL;
        const uint32_t *cl = qb_cl(d);
        uint32_t eqcr_ci, eqcr_pi, half_mask, full_mask;
        int i, num_enqueued = 0;

        half_mask = (s->eqcr.pi_ci_mask>>1);
        full_mask = s->eqcr.pi_ci_mask;
        if (!s->eqcr.available) {
                eqcr_ci = s->eqcr.ci;
                s->eqcr.ci = qbman_cena_read_reg(&s->sys,
                                QBMAN_CENA_SWP_EQCR_CI_MEMBACK) & full_mask;
                s->eqcr.available = qm_cyc_diff(s->eqcr.pi_ring_size,
                                        eqcr_ci, s->eqcr.ci);
                if (!s->eqcr.available)
                        return 0;
        }

        eqcr_pi = s->eqcr.pi;
        num_enqueued = (s->eqcr.available < num_frames) ?
                        s->eqcr.available : num_frames;
        s->eqcr.available -= num_enqueued;
        /* Fill in the EQCR ring */
        for (i = 0; i < num_enqueued; i++) {
                p = qbman_cena_write_start_wo_shadow(&s->sys,
                                QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
                memcpy(&p[1], &cl[1], 28);
                memcpy(&p[8], &fd[i], sizeof(*fd));
                if (flags && (flags[i] & QBMAN_ENQUEUE_FLAG_DCA)) {
                        struct qbman_eq_desc *d = (struct qbman_eq_desc *)p;

                        d->eq.dca = (1 << QB_ENQUEUE_CMD_DCA_EN_SHIFT) |
                                ((flags[i]) & QBMAN_EQCR_DCA_IDXMASK);
                }
                eqcr_pi++;
                p[0] = cl[0] | s->eqcr.pi_vb;

                if (!(eqcr_pi & half_mask))
                        s->eqcr.pi_vb ^= QB_VALID_BIT;
        }
        s->eqcr.pi = eqcr_pi & full_mask;

        dma_wmb();
        qbman_cinh_write(&s->sys, QBMAN_CINH_SWP_EQCR_PI,
                                (QB_RT_BIT)|(s->eqcr.pi)|s->eqcr.pi_vb);
        return num_enqueued;
}

inline int qbman_swp_enqueue_multiple(struct qbman_swp *s,
                                      const struct qbman_eq_desc *d,
                                      const struct qbman_fd *fd,
                                      uint32_t *flags,
                                      int num_frames)
{
        return qbman_swp_enqueue_multiple_ptr(s, d, fd, flags, num_frames);
}
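
/* Illustrative only: a burst enqueue built on the multiple-frame entry
 * point above, resubmitting until the whole batch has been accepted. The
 * QBMAN_PORTAL_EXAMPLES guard is hypothetical; passing NULL flags means no
 * DCA is requested.
 */
#ifdef QBMAN_PORTAL_EXAMPLES
static int qbman_enqueue_burst_example(struct qbman_swp *s, uint32_t fqid,
                                       const struct qbman_fd *fd, int n)
{
        struct qbman_eq_desc d;
        int sent = 0;

        qbman_eq_desc_clear(&d);
        qbman_eq_desc_set_no_orp(&d, 0);
        qbman_eq_desc_set_fq(&d, fqid);
        while (sent < n) {
                /* returns how many of the remaining frames fit into EQCR */
                sent += qbman_swp_enqueue_multiple(s, &d, &fd[sent], NULL,
                                                   n - sent);
        }
        return sent;
}
#endif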

static int qbman_swp_enqueue_multiple_desc_direct(struct qbman_swp *s,
                                        const struct qbman_eq_desc *d,
                                        const struct qbman_fd *fd,
                                        int num_frames)
{
        uint32_t *p;
        const uint32_t *cl;
        uint32_t eqcr_ci, eqcr_pi, half_mask, full_mask;
        int i, num_enqueued = 0;
        uint64_t addr_cena;

        half_mask = (s->eqcr.pi_ci_mask>>1);
        full_mask = s->eqcr.pi_ci_mask;
        if (!s->eqcr.available) {
                eqcr_ci = s->eqcr.ci;
                s->eqcr.ci = qbman_cena_read_reg(&s->sys,
                                QBMAN_CENA_SWP_EQCR_CI) & full_mask;
                s->eqcr.available = qm_cyc_diff(s->eqcr.pi_ring_size,
                                        eqcr_ci, s->eqcr.ci);
                if (!s->eqcr.available)
                        return 0;
        }

        eqcr_pi = s->eqcr.pi;
        num_enqueued = (s->eqcr.available < num_frames) ?
                        s->eqcr.available : num_frames;
        s->eqcr.available -= num_enqueued;
        /* Fill in the EQCR ring */
        for (i = 0; i < num_enqueued; i++) {
                p = qbman_cena_write_start_wo_shadow(&s->sys,
                                QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
                cl = qb_cl(&d[i]);
                memcpy(&p[1], &cl[1], 28);
                memcpy(&p[8], &fd[i], sizeof(*fd));
                eqcr_pi++;
        }

        lwsync();

        /* Set the verb byte, have to substitute in the valid-bit */
        eqcr_pi = s->eqcr.pi;
        for (i = 0; i < num_enqueued; i++) {
                p = qbman_cena_write_start_wo_shadow(&s->sys,
                                QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
                cl = qb_cl(&d[i]);
                p[0] = cl[0] | s->eqcr.pi_vb;
                eqcr_pi++;
                if (!(eqcr_pi & half_mask))
                        s->eqcr.pi_vb ^= QB_VALID_BIT;
        }

        /* Flush all the cacheline without load/store in between */
        eqcr_pi = s->eqcr.pi;
        addr_cena = (size_t)s->sys.addr_cena;
        for (i = 0; i < num_enqueued; i++) {
                dcbf((uintptr_t)(addr_cena +
                        QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask)));
                eqcr_pi++;
        }
        s->eqcr.pi = eqcr_pi & full_mask;

        return num_enqueued;
}

static int qbman_swp_enqueue_multiple_desc_mem_back(struct qbman_swp *s,
                                        const struct qbman_eq_desc *d,
                                        const struct qbman_fd *fd,
                                        int num_frames)
{
        uint32_t *p;
        const uint32_t *cl;
        uint32_t eqcr_ci, eqcr_pi, half_mask, full_mask;
        int i, num_enqueued = 0;

        half_mask = (s->eqcr.pi_ci_mask>>1);
        full_mask = s->eqcr.pi_ci_mask;
        if (!s->eqcr.available) {
                eqcr_ci = s->eqcr.ci;
                s->eqcr.ci = qbman_cena_read_reg(&s->sys,
                                QBMAN_CENA_SWP_EQCR_CI_MEMBACK) & full_mask;
                s->eqcr.available = qm_cyc_diff(s->eqcr.pi_ring_size,
                                        eqcr_ci, s->eqcr.ci);
                if (!s->eqcr.available)
                        return 0;
        }

        eqcr_pi = s->eqcr.pi;
        num_enqueued = (s->eqcr.available < num_frames) ?
                        s->eqcr.available : num_frames;
        s->eqcr.available -= num_enqueued;
        /* Fill in the EQCR ring */
        for (i = 0; i < num_enqueued; i++) {
                p = qbman_cena_write_start_wo_shadow(&s->sys,
                                QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
                cl = qb_cl(&d[i]);
                memcpy(&p[1], &cl[1], 28);
                memcpy(&p[8], &fd[i], sizeof(*fd));
                eqcr_pi++;
        }

        /* Set the verb byte, have to substitute in the valid-bit */
        eqcr_pi = s->eqcr.pi;
        for (i = 0; i < num_enqueued; i++) {
                p = qbman_cena_write_start_wo_shadow(&s->sys,
                                QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
                cl = qb_cl(&d[i]);
                p[0] = cl[0] | s->eqcr.pi_vb;
                eqcr_pi++;
                if (!(eqcr_pi & half_mask))
                        s->eqcr.pi_vb ^= QB_VALID_BIT;
        }

        s->eqcr.pi = eqcr_pi & full_mask;

        dma_wmb();
        qbman_cinh_write(&s->sys, QBMAN_CINH_SWP_EQCR_PI,
                                (QB_RT_BIT)|(s->eqcr.pi)|s->eqcr.pi_vb);

        return num_enqueued;
}

inline int qbman_swp_enqueue_multiple_desc(struct qbman_swp *s,
                                           const struct qbman_eq_desc *d,
                                           const struct qbman_fd *fd,
                                           int num_frames)
{
        return qbman_swp_enqueue_multiple_desc_ptr(s, d, fd, num_frames);
}

/*************************/
/* Static (push) dequeue */
/*************************/

void qbman_swp_push_get(struct qbman_swp *s, uint8_t channel_idx, int *enabled)
{
        uint16_t src = (s->sdq >> QB_SDQCR_SRC_SHIFT) & QB_SDQCR_SRC_MASK;

        QBMAN_BUG_ON(channel_idx > 15);
        /* Report whether this channel's bit is set in the SDQCR src map */
        *enabled = !!(src & (1u << channel_idx));
}

void qbman_swp_push_set(struct qbman_swp *s, uint8_t channel_idx, int enable)
{
        uint16_t dqsrc;

        QBMAN_BUG_ON(channel_idx > 15);
        if (enable)
                s->sdq |= 1 << channel_idx;
        else
                s->sdq &= ~(1 << channel_idx);

        /* Recompute the complete src map.  If no channels are enabled
         * the SDQCR must be 0 or else QMan will assert errors
         */
        dqsrc = (s->sdq >> QB_SDQCR_SRC_SHIFT) & QB_SDQCR_SRC_MASK;
        if (dqsrc != 0)
                qbman_cinh_write(&s->sys, QBMAN_CINH_SWP_SDQCR, s->sdq);
        else
                qbman_cinh_write(&s->sys, QBMAN_CINH_SWP_SDQCR, 0);
}
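
/* Illustrative only: enable push dequeues from channel 0 and read the
 * setting back. The QBMAN_PORTAL_EXAMPLES guard and the channel index are
 * hypothetical.
 */
#ifdef QBMAN_PORTAL_EXAMPLES
static void qbman_push_example(struct qbman_swp *s)
{
        int enabled;

        qbman_swp_push_set(s, 0, 1);    /* enable channel 0 */
        qbman_swp_push_get(s, 0, &enabled);
        QBMAN_BUG_ON(!enabled);
}
#endif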

/***************************/
/* Volatile (pull) dequeue */
/***************************/

/* These should be const, eventually */
#define QB_VDQCR_VERB_DCT_SHIFT    0
#define QB_VDQCR_VERB_DT_SHIFT     2
#define QB_VDQCR_VERB_RLS_SHIFT    4
#define QB_VDQCR_VERB_WAE_SHIFT    5
#define QB_VDQCR_VERB_RAD_SHIFT    6

enum qb_pull_dt_e {
        qb_pull_dt_channel,
        qb_pull_dt_workqueue,
        qb_pull_dt_framequeue
};

void qbman_pull_desc_clear(struct qbman_pull_desc *d)
{
        memset(d, 0, sizeof(*d));
}

void qbman_pull_desc_set_storage(struct qbman_pull_desc *d,
                                 struct qbman_result *storage,
                                 dma_addr_t storage_phys,
                                 int stash)
{
        d->pull.rsp_addr_virt = (size_t)storage;

        if (!storage) {
                d->pull.verb &= ~(1 << QB_VDQCR_VERB_RLS_SHIFT);
                return;
        }
        d->pull.verb |= 1 << QB_VDQCR_VERB_RLS_SHIFT;
        if (stash)
                d->pull.verb |= 1 << QB_VDQCR_VERB_WAE_SHIFT;
        else
                d->pull.verb &= ~(1 << QB_VDQCR_VERB_WAE_SHIFT);

        d->pull.rsp_addr = storage_phys;
}

void qbman_pull_desc_set_numframes(struct qbman_pull_desc *d,
                                   uint8_t numframes)
{
        d->pull.numf = numframes - 1;
}

void qbman_pull_desc_set_token(struct qbman_pull_desc *d, uint8_t token)
{
        d->pull.tok = token;
}

void qbman_pull_desc_set_fq(struct qbman_pull_desc *d, uint32_t fqid)
{
        d->pull.verb |= 1 << QB_VDQCR_VERB_DCT_SHIFT;
        d->pull.verb |= qb_pull_dt_framequeue << QB_VDQCR_VERB_DT_SHIFT;
        d->pull.dq_src = fqid;
}

void qbman_pull_desc_set_wq(struct qbman_pull_desc *d, uint32_t wqid,
                            enum qbman_pull_type_e dct)
{
        d->pull.verb |= dct << QB_VDQCR_VERB_DCT_SHIFT;
        d->pull.verb |= qb_pull_dt_workqueue << QB_VDQCR_VERB_DT_SHIFT;
        d->pull.dq_src = wqid;
}

void qbman_pull_desc_set_channel(struct qbman_pull_desc *d, uint32_t chid,
                                 enum qbman_pull_type_e dct)
{
        d->pull.verb |= dct << QB_VDQCR_VERB_DCT_SHIFT;
        d->pull.verb |= qb_pull_dt_channel << QB_VDQCR_VERB_DT_SHIFT;
        d->pull.dq_src = chid;
}

void qbman_pull_desc_set_rad(struct qbman_pull_desc *d, int rad)
{
        if (d->pull.verb & (1 << QB_VDQCR_VERB_RLS_SHIFT)) {
                if (rad)
                        d->pull.verb |= 1 << QB_VDQCR_VERB_RAD_SHIFT;
                else
                        d->pull.verb &= ~(1 << QB_VDQCR_VERB_RAD_SHIFT);
        } else {
                printf("The RAD feature is not valid when RLS = 0\n");
        }
}

static int qbman_swp_pull_direct(struct qbman_swp *s,
                                 struct qbman_pull_desc *d)
{
        uint32_t *p;
        uint32_t *cl = qb_cl(d);

        if (!atomic_dec_and_test(&s->vdq.busy)) {
                atomic_inc(&s->vdq.busy);
                return -EBUSY;
        }

        d->pull.tok = s->sys.idx + 1;
        s->vdq.storage = (void *)(size_t)d->pull.rsp_addr_virt;
        p = qbman_cena_write_start_wo_shadow(&s->sys, QBMAN_CENA_SWP_VDQCR);
        memcpy(&p[1], &cl[1], 12);

        /* Set the verb byte, have to substitute in the valid-bit */
        lwsync();
        p[0] = cl[0] | s->vdq.valid_bit;
        s->vdq.valid_bit ^= QB_VALID_BIT;
        qbman_cena_write_complete_wo_shadow(&s->sys, QBMAN_CENA_SWP_VDQCR);

        return 0;
}

static int qbman_swp_pull_mem_back(struct qbman_swp *s,
                                   struct qbman_pull_desc *d)
{
        uint32_t *p;
        uint32_t *cl = qb_cl(d);

        if (!atomic_dec_and_test(&s->vdq.busy)) {
                atomic_inc(&s->vdq.busy);
                return -EBUSY;
        }

        d->pull.tok = s->sys.idx + 1;
        s->vdq.storage = (void *)(size_t)d->pull.rsp_addr_virt;
        p = qbman_cena_write_start_wo_shadow(&s->sys, QBMAN_CENA_SWP_VDQCR_MEM);
        memcpy(&p[1], &cl[1], 12);

        /* Set the verb byte, have to substitute in the valid-bit */
        p[0] = cl[0] | s->vdq.valid_bit;
        s->vdq.valid_bit ^= QB_VALID_BIT;
        dma_wmb();
        qbman_cinh_write(&s->sys, QBMAN_CINH_SWP_VDQCR_RT, QMAN_RT_MODE);

        return 0;
}

inline int qbman_swp_pull(struct qbman_swp *s, struct qbman_pull_desc *d)
{
        return qbman_swp_pull_ptr(s, d);
}
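
/* Illustrative only: a volatile dequeue of up to 4 frames from an FQ into
 * caller-provided storage, polled to completion with
 * qbman_check_new_result(). The QBMAN_PORTAL_EXAMPLES guard is
 * hypothetical, and a real caller must pass the IOVA of 'storage' as
 * storage_phys.
 */
#ifdef QBMAN_PORTAL_EXAMPLES
static int qbman_pull_example(struct qbman_swp *s, uint32_t fqid,
                              struct qbman_result *storage,
                              dma_addr_t storage_phys)
{
        struct qbman_pull_desc pd;

        qbman_pull_desc_clear(&pd);
        qbman_pull_desc_set_storage(&pd, storage, storage_phys, 1);
        qbman_pull_desc_set_numframes(&pd, 4);
        qbman_pull_desc_set_fq(&pd, fqid);
        if (qbman_swp_pull(s, &pd))
                return -EBUSY;          /* a previous pull is still active */
        while (!qbman_check_new_result(storage))
                ;                       /* spin until the first response lands */
        return 0;
}
#endif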

/****************/
/* Polling DQRR */
/****************/

#define QMAN_DQRR_PI_MASK              0xf

#define QBMAN_RESULT_DQ        0x60
#define QBMAN_RESULT_FQRN      0x21
#define QBMAN_RESULT_FQRNI     0x22
#define QBMAN_RESULT_FQPN      0x24
#define QBMAN_RESULT_FQDAN     0x25
#define QBMAN_RESULT_CDAN      0x26
#define QBMAN_RESULT_CSCN_MEM  0x27
#define QBMAN_RESULT_CGCU      0x28
#define QBMAN_RESULT_BPSCN     0x29
#define QBMAN_RESULT_CSCN_WQ   0x2a

#include <rte_prefetch.h>

void qbman_swp_prefetch_dqrr_next(struct qbman_swp *s)
{
        const struct qbman_result *p;

        p = qbman_cena_read_wo_shadow(&s->sys,
                QBMAN_CENA_SWP_DQRR(s->dqrr.next_idx));
        rte_prefetch0(p);
}

/* NULL return if there are no unconsumed DQRR entries. Returns a DQRR entry
 * only once, so repeated calls can return a sequence of DQRR entries, without
 * requiring they be consumed immediately or in any particular order.
 */
inline const struct qbman_result *qbman_swp_dqrr_next(struct qbman_swp *s)
{
        return qbman_swp_dqrr_next_ptr(s);
}

const struct qbman_result *qbman_swp_dqrr_next_direct(struct qbman_swp *s)
{
        uint32_t verb;
        uint32_t response_verb;
        uint32_t flags;
        const struct qbman_result *p;

        /* Before using valid-bit to detect if something is there, we have to
         * handle the case of the DQRR reset bug...
         */
        if (s->dqrr.reset_bug) {
                /* We pick up new entries by cache-inhibited producer index,
                 * which means that a non-coherent mapping would require us to
                 * invalidate and read *only* once that PI has indicated that
                 * there's an entry here. The first trip around the DQRR ring
                 * will be much less efficient than all subsequent trips around
                 * it...
                 */
                uint8_t pi = qbman_cinh_read(&s->sys, QBMAN_CINH_SWP_DQPI) &
                             QMAN_DQRR_PI_MASK;

                /* there are new entries if pi != next_idx */
                if (pi == s->dqrr.next_idx)
                        return NULL;

                /* if next_idx is/was the last ring index, and 'pi' is
                 * different, we can disable the workaround as all the ring
                 * entries have now been DMA'd to so valid-bit checking is
                 * repaired. Note: this logic needs to be based on next_idx
                 * (which increments one at a time), rather than on pi (which
                 * can burst and wrap-around between our snapshots of it).
                 */
                QBMAN_BUG_ON((s->dqrr.dqrr_size - 1) < 0);
                if (s->dqrr.next_idx == (s->dqrr.dqrr_size - 1u)) {
                        pr_debug("DEBUG: next_idx=%d, pi=%d, clear reset bug\n",
                                 s->dqrr.next_idx, pi);
                        s->dqrr.reset_bug = 0;
                }
                qbman_cena_invalidate_prefetch(&s->sys,
                                        QBMAN_CENA_SWP_DQRR(s->dqrr.next_idx));
        }
        p = qbman_cena_read_wo_shadow(&s->sys,
                        QBMAN_CENA_SWP_DQRR(s->dqrr.next_idx));

        verb = p->dq.verb;

        /* If the valid-bit isn't of the expected polarity, nothing is there.
         * Note that in the DQRR reset bug workaround we don't need to skip
         * this check, because we've already determined that a new entry is
         * available and we've invalidated the cacheline before reading it,
         * so the valid-bit behaviour is repaired and should tell us what we
         * already knew from reading PI.
         */
        if ((verb & QB_VALID_BIT) != s->dqrr.valid_bit)
                return NULL;

        /* There's something there. Move "next_idx" attention to the next ring
         * entry (and prefetch it) before returning what we found.
         */
        s->dqrr.next_idx++;
        if (s->dqrr.next_idx == s->dqrr.dqrr_size) {
                s->dqrr.next_idx = 0;
                s->dqrr.valid_bit ^= QB_VALID_BIT;
        }
        /* If this is the final response to a volatile dequeue command
         * indicate that the vdq is no longer busy
         */
        flags = p->dq.stat;
        response_verb = verb & QBMAN_RESPONSE_VERB_MASK;
        if ((response_verb == QBMAN_RESULT_DQ) &&
            (flags & QBMAN_DQ_STAT_VOLATILE) &&
            (flags & QBMAN_DQ_STAT_EXPIRED))
                atomic_inc(&s->vdq.busy);

        return p;
}

const struct qbman_result *qbman_swp_dqrr_next_mem_back(struct qbman_swp *s)
{
        uint32_t verb;
        uint32_t response_verb;
        uint32_t flags;
        const struct qbman_result *p;

        p = qbman_cena_read_wo_shadow(&s->sys,
                        QBMAN_CENA_SWP_DQRR_MEM(s->dqrr.next_idx));

        verb = p->dq.verb;

        /* If the valid-bit isn't of the expected polarity, nothing is there.
         * (The DQRR reset bug workaround does not apply to memory-backed
         * portals, so the valid-bit alone is authoritative here.)
         */
        if ((verb & QB_VALID_BIT) != s->dqrr.valid_bit)
                return NULL;

        /* There's something there. Move "next_idx" attention to the next ring
         * entry (and prefetch it) before returning what we found.
         */
        s->dqrr.next_idx++;
        if (s->dqrr.next_idx == s->dqrr.dqrr_size) {
                s->dqrr.next_idx = 0;
                s->dqrr.valid_bit ^= QB_VALID_BIT;
        }
        /* If this is the final response to a volatile dequeue command
         * indicate that the vdq is no longer busy
         */
        flags = p->dq.stat;
        response_verb = verb & QBMAN_RESPONSE_VERB_MASK;
        if ((response_verb == QBMAN_RESULT_DQ)
                        && (flags & QBMAN_DQ_STAT_VOLATILE)
                        && (flags & QBMAN_DQ_STAT_EXPIRED))
                atomic_inc(&s->vdq.busy);
        return p;
}

/* Consume DQRR entries previously returned from qbman_swp_dqrr_next(). */
void qbman_swp_dqrr_consume(struct qbman_swp *s,
                            const struct qbman_result *dq)
{
        qbman_cinh_write(&s->sys,
                        QBMAN_CINH_SWP_DCAP, QBMAN_IDX_FROM_DQRR(dq));
}

/* Consume a DQRR entry previously returned from qbman_swp_dqrr_next(),
 * identified by its ring index rather than by pointer.
 */
void qbman_swp_dqrr_idx_consume(struct qbman_swp *s,
                            uint8_t dqrr_index)
{
        qbman_cinh_write(&s->sys, QBMAN_CINH_SWP_DCAP, dqrr_index);
}
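
/* Illustrative only: the canonical DQRR polling loop built from the entry
 * points above. The QBMAN_PORTAL_EXAMPLES guard and the process_fd()
 * callback are hypothetical.
 */
#ifdef QBMAN_PORTAL_EXAMPLES
static void qbman_dqrr_poll_example(struct qbman_swp *s,
                                    void (*process_fd)(const struct qbman_fd *))
{
        const struct qbman_result *dq;

        while ((dq = qbman_swp_dqrr_next(s)) != NULL) {
                if (qbman_result_is_DQ(dq))
                        process_fd(qbman_result_DQ_fd(dq));
                qbman_swp_dqrr_consume(s, dq);  /* hand the entry back to HW */
        }
}
#endif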

/*********************************/
/* Polling user-provided storage */
/*********************************/

int qbman_result_has_new_result(struct qbman_swp *s,
                                struct qbman_result *dq)
{
        if (dq->dq.tok == 0)
                return 0;

        /*
         * Set the token to 0 so we detect the change back to non-zero the
         * next time this entry is polled. Const is cast away here as we want
         * users to treat the dequeue responses as read only.
         */
        ((struct qbman_result *)dq)->dq.tok = 0;

        /*
         * VDQCR "no longer busy" hook - not quite the same as DQRR, because
         * the fact "VDQCR" shows busy doesn't mean that we hold the result
         * that makes it available. Eg. we may be looking at our 10th dequeue
         * result, having released VDQCR after the 1st result and it is now
         * busy due to some other command!
         */
        if (s->vdq.storage == dq) {
                s->vdq.storage = NULL;
                atomic_inc(&s->vdq.busy);
        }

        return 1;
}

int qbman_check_new_result(struct qbman_result *dq)
{
        if (dq->dq.tok == 0)
                return 0;

        /*
         * Set the token to 0 so we detect the change back to non-zero the
         * next time this entry is polled. Const is cast away here as we want
         * users to treat the dequeue responses as read only.
         */
        ((struct qbman_result *)dq)->dq.tok = 0;
1382
1383         return 1;
1384 }
1385
1386 int qbman_check_command_complete(struct qbman_result *dq)
1387 {
1388         struct qbman_swp *s;
1389
1390         if (dq->dq.tok == 0)
1391                 return 0;
1392
1393         s = portal_idx_map[dq->dq.tok - 1];
1394         /*
1395          * VDQCR "no longer busy" hook - not quite the same as DQRR, because
1396          * the fact "VDQCR" shows busy doesn't mean that we hold the result
1397          * that makes it available. Eg. we may be looking at our 10th dequeue
1398          * result, having released VDQCR after the 1st result and it is now
1399          * busy due to some other command!
1400          */
1401         if (s->vdq.storage == dq) {
1402                 s->vdq.storage = NULL;
1403                 atomic_inc(&s->vdq.busy);
1404         }
1405
1406         return 1;
1407 }
1408
1409 /********************************/
1410 /* Categorising qbman results   */
1411 /********************************/
1412
1413 static inline int __qbman_result_is_x(const struct qbman_result *dq,
1414                                       uint8_t x)
1415 {
1416         uint8_t response_verb = dq->dq.verb & QBMAN_RESPONSE_VERB_MASK;
1417
1418         return (response_verb == x);
1419 }
1420
1421 int qbman_result_is_DQ(const struct qbman_result *dq)
1422 {
1423         return __qbman_result_is_x(dq, QBMAN_RESULT_DQ);
1424 }
1425
1426 int qbman_result_is_FQDAN(const struct qbman_result *dq)
1427 {
1428         return __qbman_result_is_x(dq, QBMAN_RESULT_FQDAN);
1429 }
1430
1431 int qbman_result_is_CDAN(const struct qbman_result *dq)
1432 {
1433         return __qbman_result_is_x(dq, QBMAN_RESULT_CDAN);
1434 }
1435
1436 int qbman_result_is_CSCN(const struct qbman_result *dq)
1437 {
1438         return __qbman_result_is_x(dq, QBMAN_RESULT_CSCN_MEM) ||
1439                 __qbman_result_is_x(dq, QBMAN_RESULT_CSCN_WQ);
1440 }
1441
1442 int qbman_result_is_BPSCN(const struct qbman_result *dq)
1443 {
1444         return __qbman_result_is_x(dq, QBMAN_RESULT_BPSCN);
1445 }
1446
1447 int qbman_result_is_CGCU(const struct qbman_result *dq)
1448 {
1449         return __qbman_result_is_x(dq, QBMAN_RESULT_CGCU);
1450 }
1451
1452 int qbman_result_is_FQRN(const struct qbman_result *dq)
1453 {
1454         return __qbman_result_is_x(dq, QBMAN_RESULT_FQRN);
1455 }
1456
1457 int qbman_result_is_FQRNI(const struct qbman_result *dq)
1458 {
1459         return __qbman_result_is_x(dq, QBMAN_RESULT_FQRNI);
1460 }
1461
1462 int qbman_result_is_FQPN(const struct qbman_result *dq)
1463 {
1464         return __qbman_result_is_x(dq, QBMAN_RESULT_FQPN);
1465 }
1466
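/*
 * Illustrative sketch, kept out of the build with #if 0: dispatching on the
 * result type with the predicates above. example_classify() is hypothetical.
 */
#if 0
static const char *example_classify(const struct qbman_result *dq)
{
	if (qbman_result_is_DQ(dq))
		return "frame dequeue";
	if (qbman_result_is_FQDAN(dq))
		return "FQ data availability notification";
	if (qbman_result_is_CDAN(dq))
		return "channel data availability notification";
	if (qbman_result_is_CSCN(dq))
		return "congestion state change notification";
	if (qbman_result_is_BPSCN(dq))
		return "buffer pool state change notification";
	if (qbman_result_is_CGCU(dq))
		return "congestion group count update";
	return "other";
}
#endif
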
1467 /*********************************/
1468 /* Parsing frame dequeue results */
1469 /*********************************/
1470
1471 /* These APIs assume qbman_result_is_DQ() is TRUE */
1472
1473 uint8_t qbman_result_DQ_flags(const struct qbman_result *dq)
1474 {
1475         return dq->dq.stat;
1476 }
1477
1478 uint16_t qbman_result_DQ_seqnum(const struct qbman_result *dq)
1479 {
1480         return dq->dq.seqnum;
1481 }
1482
1483 uint16_t qbman_result_DQ_odpid(const struct qbman_result *dq)
1484 {
1485         return dq->dq.oprid;
1486 }
1487
1488 uint32_t qbman_result_DQ_fqid(const struct qbman_result *dq)
1489 {
1490         return dq->dq.fqid;
1491 }
1492
1493 uint32_t qbman_result_DQ_byte_count(const struct qbman_result *dq)
1494 {
1495         return dq->dq.fq_byte_cnt;
1496 }
1497
1498 uint32_t qbman_result_DQ_frame_count(const struct qbman_result *dq)
1499 {
1500         return dq->dq.fq_frm_cnt;
1501 }
1502
1503 uint64_t qbman_result_DQ_fqd_ctx(const struct qbman_result *dq)
1504 {
1505         return dq->dq.fqd_ctx;
1506 }
1507
1508 const struct qbman_fd *qbman_result_DQ_fd(const struct qbman_result *dq)
1509 {
1510         return (const struct qbman_fd *)&dq->dq.fd[0];
1511 }
1512
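/*
 * Illustrative sketch, kept out of the build with #if 0: reading the common
 * fields of a frame-dequeue result. Only valid once qbman_result_is_DQ() has
 * returned true; the frame/byte counts are meaningful only when the response
 * carries FQ statistics. example_log_dq() is hypothetical.
 */
#if 0
static void example_log_dq(const struct qbman_result *dq)
{
	pr_debug("FQID=%u seqnum=%u frames=%u bytes=%u fd=%p\n",
		 qbman_result_DQ_fqid(dq),
		 qbman_result_DQ_seqnum(dq),
		 qbman_result_DQ_frame_count(dq),
		 qbman_result_DQ_byte_count(dq),
		 (const void *)qbman_result_DQ_fd(dq));
}
#endif
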
1513 /**************************************/
1514 /* Parsing state-change notifications */
1515 /**************************************/
1516 uint8_t qbman_result_SCN_state(const struct qbman_result *scn)
1517 {
1518         return scn->scn.state;
1519 }
1520
1521 uint32_t qbman_result_SCN_rid(const struct qbman_result *scn)
1522 {
1523         return scn->scn.rid_tok;
1524 }
1525
1526 uint64_t qbman_result_SCN_ctx(const struct qbman_result *scn)
1527 {
1528         return scn->scn.ctx;
1529 }
1530
1531 /*****************/
1532 /* Parsing BPSCN */
1533 /*****************/
1534 uint16_t qbman_result_bpscn_bpid(const struct qbman_result *scn)
1535 {
1536         return (uint16_t)qbman_result_SCN_rid(scn) & 0x3FFF;
1537 }
1538
1539 int qbman_result_bpscn_has_free_bufs(const struct qbman_result *scn)
1540 {
1541         return !(int)(qbman_result_SCN_state(scn) & 0x1);
1542 }
1543
1544 int qbman_result_bpscn_is_depleted(const struct qbman_result *scn)
1545 {
1546         return (int)(qbman_result_SCN_state(scn) & 0x2);
1547 }
1548
1549 int qbman_result_bpscn_is_surplus(const struct qbman_result *scn)
1550 {
1551         return (int)(qbman_result_SCN_state(scn) & 0x4);
1552 }
1553
1554 uint64_t qbman_result_bpscn_ctx(const struct qbman_result *scn)
1555 {
1556         return qbman_result_SCN_ctx(scn);
1557 }
1558
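/*
 * Illustrative sketch, kept out of the build with #if 0: interpreting a
 * buffer pool state change notification with the accessors above.
 * example_handle_bpscn() is hypothetical.
 */
#if 0
static void example_handle_bpscn(const struct qbman_result *scn)
{
	uint16_t bpid = qbman_result_bpscn_bpid(scn);

	if (qbman_result_bpscn_is_depleted(scn))
		pr_debug("BPID %u entered depletion\n", bpid);
	else if (qbman_result_bpscn_has_free_bufs(scn))
		pr_debug("BPID %u has free buffers again\n", bpid);
}
#endif
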
1559 /*****************/
1560 /* Parsing CGCU  */
1561 /*****************/
1562 uint16_t qbman_result_cgcu_cgid(const struct qbman_result *scn)
1563 {
1564         return (uint16_t)qbman_result_SCN_rid(scn) & 0xFFFF;
1565 }
1566
1567 uint64_t qbman_result_cgcu_icnt(const struct qbman_result *scn)
1568 {
1569         return qbman_result_SCN_ctx(scn);
1570 }
1571
1572 /******************/
1573 /* Buffer release */
1574 /******************/
1575 #define QB_BR_RC_VALID_SHIFT  5
1576 #define QB_BR_RCDI_SHIFT      6
1577
1578 void qbman_release_desc_clear(struct qbman_release_desc *d)
1579 {
1580         memset(d, 0, sizeof(*d));
1581         d->br.verb = 1 << QB_BR_RC_VALID_SHIFT;
1582 }
1583
1584 void qbman_release_desc_set_bpid(struct qbman_release_desc *d, uint16_t bpid)
1585 {
1586         d->br.bpid = bpid;
1587 }
1588
1589 void qbman_release_desc_set_rcdi(struct qbman_release_desc *d, int enable)
1590 {
1591         if (enable)
1592                 d->br.verb |= 1 << QB_BR_RCDI_SHIFT;
1593         else
1594                 d->br.verb &= ~(1 << QB_BR_RCDI_SHIFT);
1595 }
1596
1597 #define RAR_IDX(rar)     ((rar) & 0x7)
1598 #define RAR_VB(rar)      ((rar) & 0x80)
1599 #define RAR_SUCCESS(rar) ((rar) & 0x100)
1600
1601 static int qbman_swp_release_direct(struct qbman_swp *s,
1602                                     const struct qbman_release_desc *d,
1603                                     const uint64_t *buffers,
1604                                     unsigned int num_buffers)
1605 {
1606         uint32_t *p;
1607         const uint32_t *cl = qb_cl(d);
1608         uint32_t rar = qbman_cinh_read(&s->sys, QBMAN_CINH_SWP_RAR);
1609
1610         pr_debug("RAR=%08x\n", rar);
1611         if (!RAR_SUCCESS(rar))
1612                 return -EBUSY;
1613
1614         QBMAN_BUG_ON(!num_buffers || (num_buffers > 7));
1615
1616         /* Start the release command */
1617         p = qbman_cena_write_start_wo_shadow(&s->sys,
1618                                      QBMAN_CENA_SWP_RCR(RAR_IDX(rar)));
1619
1620         /* Copy the caller's buffer pointers to the command */
1621         u64_to_le32_copy(&p[2], buffers, num_buffers);
1622
1623         /* Set the verb byte, substituting in the valid bit and the
1624          * number of buffers.
1625          */
1626         lwsync();
1627         p[0] = cl[0] | RAR_VB(rar) | num_buffers;
1628         qbman_cena_write_complete_wo_shadow(&s->sys,
1629                                     QBMAN_CENA_SWP_RCR(RAR_IDX(rar)));
1630
1631         return 0;
1632 }
1633
1634 static int qbman_swp_release_mem_back(struct qbman_swp *s,
1635                                       const struct qbman_release_desc *d,
1636                                       const uint64_t *buffers,
1637                                       unsigned int num_buffers)
1638 {
1639         uint32_t *p;
1640         const uint32_t *cl = qb_cl(d);
1641         uint32_t rar = qbman_cinh_read(&s->sys, QBMAN_CINH_SWP_RAR);
1642
1643         pr_debug("RAR=%08x\n", rar);
1644         if (!RAR_SUCCESS(rar))
1645                 return -EBUSY;
1646
1647         QBMAN_BUG_ON(!num_buffers || (num_buffers > 7));
1648
1649         /* Start the release command */
1650         p = qbman_cena_write_start_wo_shadow(&s->sys,
1651                 QBMAN_CENA_SWP_RCR_MEM(RAR_IDX(rar)));
1652
1653         /* Copy the caller's buffer pointers to the command */
1654         u64_to_le32_copy(&p[2], buffers, num_buffers);
1655
1656         /* Set the verb byte, substituting in the valid bit and the
1657          * number of buffers.
1658          */
1659         p[0] = cl[0] | RAR_VB(rar) | num_buffers;
1660         lwsync();
1661         qbman_cinh_write(&s->sys, QBMAN_CINH_SWP_RCR_AM_RT +
1662                 RAR_IDX(rar) * 4, QMAN_RT_MODE);
1663
1664         return 0;
1665 }
1666
1667 inline int qbman_swp_release(struct qbman_swp *s,
1668                              const struct qbman_release_desc *d,
1669                              const uint64_t *buffers,
1670                              unsigned int num_buffers)
1671 {
1672         return qbman_swp_release_ptr(s, d, buffers, num_buffers);
1673 }
1674
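/*
 * Illustrative sketch, kept out of the build with #if 0: building a release
 * descriptor and retrying while the release command ring is full. The caller
 * must pass between 1 and 7 buffers per command.
 * example_release_buffers() is hypothetical.
 */
#if 0
static int example_release_buffers(struct qbman_swp *s, uint16_t bpid,
				   const uint64_t *bufs, unsigned int n)
{
	struct qbman_release_desc d;
	int ret;

	qbman_release_desc_clear(&d);
	qbman_release_desc_set_bpid(&d, bpid);

	/* -EBUSY means no RCR slot was available; try again */
	do {
		ret = qbman_swp_release(s, &d, bufs, n);
	} while (ret == -EBUSY);

	return ret;
}
#endif
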
1675 /*******************/
1676 /* Buffer acquires */
1677 /*******************/
1678 struct qbman_acquire_desc {
1679         uint8_t verb;
1680         uint8_t reserved;
1681         uint16_t bpid;
1682         uint8_t num;
1683         uint8_t reserved2[59];
1684 };
1685
1686 struct qbman_acquire_rslt {
1687         uint8_t verb;
1688         uint8_t rslt;
1689         uint16_t reserved;
1690         uint8_t num;
1691         uint8_t reserved2[3];
1692         uint64_t buf[7];
1693 };
1694
1695 int qbman_swp_acquire(struct qbman_swp *s, uint16_t bpid, uint64_t *buffers,
1696                       unsigned int num_buffers)
1697 {
1698         struct qbman_acquire_desc *p;
1699         struct qbman_acquire_rslt *r;
1700
1701         if (!num_buffers || (num_buffers > 7))
1702                 return -EINVAL;
1703
1704         /* Start the management command */
1705         p = qbman_swp_mc_start(s);
1706
1707         if (!p)
1708                 return -EBUSY;
1709
1710         /* Encode the caller-provided attributes */
1711         p->bpid = bpid;
1712         p->num = num_buffers;
1713
1714         /* Complete the management command */
1715         r = qbman_swp_mc_complete(s, p, QBMAN_MC_ACQUIRE);
1716         if (!r) {
1717                 pr_err("qbman: acquire from BPID %d failed, no response\n",
1718                        bpid);
1719                 return -EIO;
1720         }
1721
1722         /* Decode the outcome */
1723         QBMAN_BUG_ON((r->verb & QBMAN_RESPONSE_VERB_MASK) != QBMAN_MC_ACQUIRE);
1724
1725         /* Determine success or failure */
1726         if (r->rslt != QBMAN_MC_RSLT_OK) {
1727                 pr_err("Acquire buffers from BPID 0x%x failed, code=0x%02x\n",
1728                        bpid, r->rslt);
1729                 return -EIO;
1730         }
1731
1732         QBMAN_BUG_ON(r->num > num_buffers);
1733
1734         /* Copy the acquired buffers to the caller's array */
1735         u64_from_le32_copy(buffers, &r->buf[0], r->num);
1736
1737         return (int)r->num;
1738 }
1739
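/*
 * Illustrative sketch, kept out of the build with #if 0: acquiring more than
 * seven buffers by issuing repeated commands, stopping early when the pool
 * returns a short count. example_acquire_bulk() is hypothetical.
 */
#if 0
static int example_acquire_bulk(struct qbman_swp *s, uint16_t bpid,
				uint64_t *bufs, unsigned int wanted)
{
	unsigned int done = 0;

	while (done < wanted) {
		unsigned int chunk = wanted - done;
		int ret;

		if (chunk > 7)
			chunk = 7; /* hardware limit per acquire command */
		ret = qbman_swp_acquire(s, bpid, bufs + done, chunk);
		if (ret < 0)
			return done ? (int)done : ret;
		done += ret;
		if ((unsigned int)ret < chunk)
			break; /* pool could not supply a full chunk */
	}
	return (int)done;
}
#endif
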
1740 /*****************/
1741 /* FQ management */
1742 /*****************/
1743 struct qbman_alt_fq_state_desc {
1744         uint8_t verb;
1745         uint8_t reserved[3];
1746         uint32_t fqid;
1747         uint8_t reserved2[56];
1748 };
1749
1750 struct qbman_alt_fq_state_rslt {
1751         uint8_t verb;
1752         uint8_t rslt;
1753         uint8_t reserved[62];
1754 };
1755
1756 #define ALT_FQ_FQID_MASK 0x00FFFFFF
1757
1758 static int qbman_swp_alt_fq_state(struct qbman_swp *s, uint32_t fqid,
1759                                   uint8_t alt_fq_verb)
1760 {
1761         struct qbman_alt_fq_state_desc *p;
1762         struct qbman_alt_fq_state_rslt *r;
1763
1764         /* Start the management command */
1765         p = qbman_swp_mc_start(s);
1766         if (!p)
1767                 return -EBUSY;
1768
1769         p->fqid = fqid & ALT_FQ_FQID_MASK;
1770
1771         /* Complete the management command */
1772         r = qbman_swp_mc_complete(s, p, alt_fq_verb);
1773         if (!r) {
1774                 pr_err("qbman: mgmt cmd failed, no response (verb=0x%x)\n",
1775                        alt_fq_verb);
1776                 return -EIO;
1777         }
1778
1779         /* Decode the outcome */
1780         QBMAN_BUG_ON((r->verb & QBMAN_RESPONSE_VERB_MASK) != alt_fq_verb);
1781
1782         /* Determine success or failure */
1783         if (r->rslt != QBMAN_MC_RSLT_OK) {
1784         pr_err("ALT FQID %u failed: verb = 0x%02x, code = 0x%02x\n",
1785                        fqid, alt_fq_verb, r->rslt);
1786                 return -EIO;
1787         }
1788
1789         return 0;
1790 }
1791
1792 int qbman_swp_fq_schedule(struct qbman_swp *s, uint32_t fqid)
1793 {
1794         return qbman_swp_alt_fq_state(s, fqid, QBMAN_FQ_SCHEDULE);
1795 }
1796
1797 int qbman_swp_fq_force(struct qbman_swp *s, uint32_t fqid)
1798 {
1799         return qbman_swp_alt_fq_state(s, fqid, QBMAN_FQ_FORCE);
1800 }
1801
1802 int qbman_swp_fq_xon(struct qbman_swp *s, uint32_t fqid)
1803 {
1804         return qbman_swp_alt_fq_state(s, fqid, QBMAN_FQ_XON);
1805 }
1806
1807 int qbman_swp_fq_xoff(struct qbman_swp *s, uint32_t fqid)
1808 {
1809         return qbman_swp_alt_fq_state(s, fqid, QBMAN_FQ_XOFF);
1810 }
1811
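/*
 * Illustrative sketch, kept out of the build with #if 0: using XOFF/XON as a
 * software flow-control toggle for a frame queue. example_set_fq_paused() is
 * hypothetical.
 */
#if 0
static int example_set_fq_paused(struct qbman_swp *s, uint32_t fqid, int pause)
{
	/* XOFF stops the FQ from being scheduled; XON resumes it */
	return pause ? qbman_swp_fq_xoff(s, fqid) : qbman_swp_fq_xon(s, fqid);
}
#endif
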
1812 /**********************/
1813 /* Channel management */
1814 /**********************/
1815
1816 struct qbman_cdan_ctrl_desc {
1817         uint8_t verb;
1818         uint8_t reserved;
1819         uint16_t ch;
1820         uint8_t we;
1821         uint8_t ctrl;
1822         uint16_t reserved2;
1823         uint64_t cdan_ctx;
1824         uint8_t reserved3[48];
1826 };
1827
1828 struct qbman_cdan_ctrl_rslt {
1829         uint8_t verb;
1830         uint8_t rslt;
1831         uint16_t ch;
1832         uint8_t reserved[60];
1833 };
1834
1835 /* Hide "ICD" for now as we don't use it, don't set it, and don't test it, so it
1836  * would be irresponsible to expose it.
1837  */
1838 #define CODE_CDAN_WE_EN    0x1
1839 #define CODE_CDAN_WE_CTX   0x4
1840
1841 static int qbman_swp_CDAN_set(struct qbman_swp *s, uint16_t channelid,
1842                               uint8_t we_mask, uint8_t cdan_en,
1843                               uint64_t ctx)
1844 {
1845         struct qbman_cdan_ctrl_desc *p;
1846         struct qbman_cdan_ctrl_rslt *r;
1847
1848         /* Start the management command */
1849         p = qbman_swp_mc_start(s);
1850         if (!p)
1851                 return -EBUSY;
1852
1853         /* Encode the caller-provided attributes */
1854         p->ch = channelid;
1855         p->we = we_mask;
1856         if (cdan_en)
1857                 p->ctrl = 1;
1858         else
1859                 p->ctrl = 0;
1860         p->cdan_ctx = ctx;
1861
1862         /* Complete the management command */
1863         r = qbman_swp_mc_complete(s, p, QBMAN_WQCHAN_CONFIGURE);
1864         if (!r) {
1865                 pr_err("qbman: wqchan config failed, no response\n");
1866                 return -EIO;
1867         }
1868
1869         /* Decode the outcome */
1870         QBMAN_BUG_ON((r->verb & QBMAN_RESPONSE_VERB_MASK)
1871                      != QBMAN_WQCHAN_CONFIGURE);
1872
1873         /* Determine success or failure */
1874         if (r->rslt != QBMAN_MC_RSLT_OK) {
1875         pr_err("CDAN channel %d failed: code = 0x%02x\n",
1876                        channelid, r->rslt);
1877                 return -EIO;
1878         }
1879
1880         return 0;
1881 }
1882
1883 int qbman_swp_CDAN_set_context(struct qbman_swp *s, uint16_t channelid,
1884                                uint64_t ctx)
1885 {
1886         return qbman_swp_CDAN_set(s, channelid,
1887                                   CODE_CDAN_WE_CTX,
1888                                   0, ctx);
1889 }
1890
1891 int qbman_swp_CDAN_enable(struct qbman_swp *s, uint16_t channelid)
1892 {
1893         return qbman_swp_CDAN_set(s, channelid,
1894                                   CODE_CDAN_WE_EN,
1895                                   1, 0);
1896 }
1897
1898 int qbman_swp_CDAN_disable(struct qbman_swp *s, uint16_t channelid)
1899 {
1900         return qbman_swp_CDAN_set(s, channelid,
1901                                   CODE_CDAN_WE_EN,
1902                                   0, 0);
1903 }
1904
1905 int qbman_swp_CDAN_set_context_enable(struct qbman_swp *s, uint16_t channelid,
1906                                       uint64_t ctx)
1907 {
1908         return qbman_swp_CDAN_set(s, channelid,
1909                                   CODE_CDAN_WE_EN | CODE_CDAN_WE_CTX,
1910                                   1, ctx);
1911 }
1912
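/*
 * Illustrative sketch, kept out of the build with #if 0: programming a
 * channel's notification context and enabling CDAN, then re-arming after a
 * notification has been consumed. example_arm_channel() and
 * example_rearm_channel() are hypothetical, and the one-shot re-arm pattern
 * is an assumption about typical usage rather than something this file
 * mandates.
 */
#if 0
static int example_arm_channel(struct qbman_swp *s, uint16_t ch, uint64_t ctx)
{
	/* Set the context and enable notifications in a single command */
	return qbman_swp_CDAN_set_context_enable(s, ch, ctx);
}

static int example_rearm_channel(struct qbman_swp *s, uint16_t ch)
{
	/* After consuming a CDAN, enable again to receive the next one */
	return qbman_swp_CDAN_enable(s, ch);
}
#endif
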
1913 uint8_t qbman_get_dqrr_idx(const struct qbman_result *dqrr)
1914 {
1915         return QBMAN_IDX_FROM_DQRR(dqrr);
1916 }
1917
1918 struct qbman_result *qbman_get_dqrr_from_idx(struct qbman_swp *s, uint8_t idx)
1919 {
1920         struct qbman_result *dq;
1921
1922         dq = qbman_cena_read(&s->sys, QBMAN_CENA_SWP_DQRR(idx));
1923         return dq;
1924 }