bus/fslmc: use CINH read on LS1088 platform
drivers/bus/fslmc/qbman/qbman_portal.c
/* SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (C) 2014-2016 Freescale Semiconductor, Inc.
 * Copyright 2018 NXP
 *
 */

#include "qbman_sys.h"
#include "qbman_portal.h"

/* QBMan portal management command codes */
#define QBMAN_MC_ACQUIRE       0x30
#define QBMAN_WQCHAN_CONFIGURE 0x46

/* Reverse mapping of QBMAN_CENA_SWP_DQRR() */
#define QBMAN_IDX_FROM_DQRR(p) (((unsigned long)(p) & 0x1ff) >> 6)
17
/* QBMan FQ management command codes */
#define QBMAN_FQ_SCHEDULE       0x48
#define QBMAN_FQ_FORCE          0x49
#define QBMAN_FQ_XON            0x4d
#define QBMAN_FQ_XOFF           0x4e

/*******************************/
/* Pre-defined attribute codes */
/*******************************/

#define QBMAN_RESPONSE_VERB_MASK   0x7f

/*************************/
/* SDQCR attribute codes */
/*************************/
#define QB_SDQCR_FC_SHIFT   29
#define QB_SDQCR_FC_MASK    0x1
#define QB_SDQCR_DCT_SHIFT  24
#define QB_SDQCR_DCT_MASK   0x3
#define QB_SDQCR_TOK_SHIFT  16
#define QB_SDQCR_TOK_MASK   0xff
#define QB_SDQCR_SRC_SHIFT  0
#define QB_SDQCR_SRC_MASK   0xffff

/* opaque token for static dequeues */
#define QMAN_SDQCR_TOKEN    0xbb
44
enum qbman_sdqcr_dct {
	qbman_sdqcr_dct_null = 0,
	qbman_sdqcr_dct_prio_ics,
	qbman_sdqcr_dct_active_ics,
	qbman_sdqcr_dct_active
};

enum qbman_sdqcr_fc {
	qbman_sdqcr_fc_one = 0,
	qbman_sdqcr_fc_up_to_3 = 1
};
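
/*
 * The SDQCR fields above combine into the single register value that
 * qbman_swp_init() seeds into p->sdq. A minimal sketch of that composition
 * (illustrative only; the shifts and token are the ones defined above):
 *
 *	uint32_t sdq = 0;
 *
 *	sdq |= qbman_sdqcr_dct_prio_ics << QB_SDQCR_DCT_SHIFT;
 *	sdq |= qbman_sdqcr_fc_up_to_3 << QB_SDQCR_FC_SHIFT;
 *	sdq |= QMAN_SDQCR_TOKEN << QB_SDQCR_TOK_SHIFT;
 *
 * The low 16 bits (QB_SDQCR_SRC_*) hold the per-channel enable map and are
 * filled in later by qbman_swp_push_set().
 */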
56
/* We need to keep track of which SWP triggered a pull command, so we
 * keep an array of portal IDs and use the token field to find the
 * proper portal.
 */
#define MAX_QBMAN_PORTALS  64
static struct qbman_swp *portal_idx_map[MAX_QBMAN_PORTALS];
63
/* Internal function declarations */
static int
qbman_swp_enqueue_array_mode_direct(struct qbman_swp *s,
		const struct qbman_eq_desc *d,
		const struct qbman_fd *fd);
static int
qbman_swp_enqueue_array_mode_mem_back(struct qbman_swp *s,
		const struct qbman_eq_desc *d,
		const struct qbman_fd *fd);

static int
qbman_swp_enqueue_ring_mode_direct(struct qbman_swp *s,
		const struct qbman_eq_desc *d,
		const struct qbman_fd *fd);
static int
qbman_swp_enqueue_ring_mode_cinh_direct(struct qbman_swp *s,
		const struct qbman_eq_desc *d,
		const struct qbman_fd *fd);
static int
qbman_swp_enqueue_ring_mode_mem_back(struct qbman_swp *s,
		const struct qbman_eq_desc *d,
		const struct qbman_fd *fd);

static int
qbman_swp_enqueue_multiple_direct(struct qbman_swp *s,
		const struct qbman_eq_desc *d,
		const struct qbman_fd *fd,
		uint32_t *flags,
		int num_frames);
static int
qbman_swp_enqueue_multiple_cinh_direct(struct qbman_swp *s,
		const struct qbman_eq_desc *d,
		const struct qbman_fd *fd,
		uint32_t *flags,
		int num_frames);
static int
qbman_swp_enqueue_multiple_mem_back(struct qbman_swp *s,
		const struct qbman_eq_desc *d,
		const struct qbman_fd *fd,
		uint32_t *flags,
		int num_frames);

static int
qbman_swp_enqueue_multiple_fd_direct(struct qbman_swp *s,
		const struct qbman_eq_desc *d,
		struct qbman_fd **fd,
		uint32_t *flags,
		int num_frames);
static int
qbman_swp_enqueue_multiple_fd_cinh_direct(struct qbman_swp *s,
		const struct qbman_eq_desc *d,
		struct qbman_fd **fd,
		uint32_t *flags,
		int num_frames);
static int
qbman_swp_enqueue_multiple_fd_mem_back(struct qbman_swp *s,
		const struct qbman_eq_desc *d,
		struct qbman_fd **fd,
		uint32_t *flags,
		int num_frames);

static int
qbman_swp_enqueue_multiple_desc_direct(struct qbman_swp *s,
		const struct qbman_eq_desc *d,
		const struct qbman_fd *fd,
		int num_frames);
static int
qbman_swp_enqueue_multiple_desc_cinh_direct(struct qbman_swp *s,
		const struct qbman_eq_desc *d,
		const struct qbman_fd *fd,
		int num_frames);
static int
qbman_swp_enqueue_multiple_desc_mem_back(struct qbman_swp *s,
		const struct qbman_eq_desc *d,
		const struct qbman_fd *fd,
		int num_frames);

static int
qbman_swp_pull_direct(struct qbman_swp *s, struct qbman_pull_desc *d);
static int
qbman_swp_pull_mem_back(struct qbman_swp *s, struct qbman_pull_desc *d);

const struct qbman_result *qbman_swp_dqrr_next_direct(struct qbman_swp *s);
const struct qbman_result *qbman_swp_dqrr_next_mem_back(struct qbman_swp *s);

static int
qbman_swp_release_direct(struct qbman_swp *s,
		const struct qbman_release_desc *d,
		const uint64_t *buffers, unsigned int num_buffers);
static int
qbman_swp_release_mem_back(struct qbman_swp *s,
		const struct qbman_release_desc *d,
		const uint64_t *buffers, unsigned int num_buffers);

/* Function pointers */
static int (*qbman_swp_enqueue_array_mode_ptr)(struct qbman_swp *s,
		const struct qbman_eq_desc *d,
		const struct qbman_fd *fd)
	= qbman_swp_enqueue_array_mode_direct;

static int (*qbman_swp_enqueue_ring_mode_ptr)(struct qbman_swp *s,
		const struct qbman_eq_desc *d,
		const struct qbman_fd *fd)
	= qbman_swp_enqueue_ring_mode_direct;

static int (*qbman_swp_enqueue_multiple_ptr)(struct qbman_swp *s,
		const struct qbman_eq_desc *d,
		const struct qbman_fd *fd,
		uint32_t *flags,
		int num_frames)
	= qbman_swp_enqueue_multiple_direct;

static int (*qbman_swp_enqueue_multiple_fd_ptr)(struct qbman_swp *s,
		const struct qbman_eq_desc *d,
		struct qbman_fd **fd,
		uint32_t *flags,
		int num_frames)
	= qbman_swp_enqueue_multiple_fd_direct;

static int (*qbman_swp_enqueue_multiple_desc_ptr)(struct qbman_swp *s,
		const struct qbman_eq_desc *d,
		const struct qbman_fd *fd,
		int num_frames)
	= qbman_swp_enqueue_multiple_desc_direct;

static int (*qbman_swp_pull_ptr)(struct qbman_swp *s,
		struct qbman_pull_desc *d)
	= qbman_swp_pull_direct;

const struct qbman_result *(*qbman_swp_dqrr_next_ptr)(struct qbman_swp *s)
		= qbman_swp_dqrr_next_direct;

static int (*qbman_swp_release_ptr)(struct qbman_swp *s,
			const struct qbman_release_desc *d,
			const uint64_t *buffers, unsigned int num_buffers)
			= qbman_swp_release_direct;
200
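/*
 * The function pointers above default to the "direct" access variants and
 * are retargeted exactly once, in qbman_swp_init(): to the *_mem_back
 * variants on memory-backed portals (QMan rev >= 5000 with
 * qman_cena_fastest_access), or to the *_cinh_direct variants on the
 * LS1088 family. Resolving the access mode at init time keeps the
 * per-frame enqueue/dequeue paths free of runtime capability checks.
 */
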
/*********************************/
/* Portal constructor/destructor */
/*********************************/
204
/* Software portals should always be in the power-on state when we initialise,
 * due to the CCSR-based portal reset functionality that MC has.
 *
 * Erk! Turns out that QMan versions prior to 4.1 do not correctly reset DQRR
 * valid-bits, so we need to support a workaround where we don't trust
 * valid-bits when detecting new entries until any stale ring entries have been
 * overwritten at least once. The idea is that we read PI for the first few
 * entries, then switch to valid-bit after that. The trick is to clear the
 * bug-workaround boolean once the PI wraps around the ring for the first
 * time.
 *
 * Note: this still carries a slight additional cost once the decrementer hits
 * zero.
 */
struct qbman_swp *qbman_swp_init(const struct qbman_swp_desc *d)
{
	int ret;
	uint32_t eqcr_pi;
	uint32_t mask_size;
	struct qbman_swp *p = malloc(sizeof(*p));

	if (!p)
		return NULL;

	memset(p, 0, sizeof(struct qbman_swp));

	p->desc = *d;
#ifdef QBMAN_CHECKING
	p->mc.check = swp_mc_can_start;
#endif
	p->mc.valid_bit = QB_VALID_BIT;
	p->sdq |= qbman_sdqcr_dct_prio_ics << QB_SDQCR_DCT_SHIFT;
	p->sdq |= qbman_sdqcr_fc_up_to_3 << QB_SDQCR_FC_SHIFT;
	p->sdq |= QMAN_SDQCR_TOKEN << QB_SDQCR_TOK_SHIFT;
	if ((d->qman_version & QMAN_REV_MASK) >= QMAN_REV_5000
			&& (d->cena_access_mode == qman_cena_fastest_access))
		p->mr.valid_bit = QB_VALID_BIT;

	atomic_set(&p->vdq.busy, 1);
	p->vdq.valid_bit = QB_VALID_BIT;
	p->dqrr.valid_bit = QB_VALID_BIT;
	qman_version = p->desc.qman_version;
	if ((qman_version & QMAN_REV_MASK) < QMAN_REV_4100) {
		p->dqrr.dqrr_size = 4;
		p->dqrr.reset_bug = 1;
	} else {
		p->dqrr.dqrr_size = 8;
		p->dqrr.reset_bug = 0;
	}

	ret = qbman_swp_sys_init(&p->sys, d, p->dqrr.dqrr_size);
	if (ret) {
		free(p);
		pr_err("qbman_swp_sys_init() failed %d\n", ret);
		return NULL;
	}
260
	/* Verify that the DQRR PI is 0 - if it is not, the portal is not in
	 * its default state, which is an error.
	 */
	if (qbman_cinh_read(&p->sys, QBMAN_CINH_SWP_DQPI) & 0xF) {
		pr_err("qbman DQRR PI is not zero, portal is not clean\n");
		free(p);
		return NULL;
	}

	/* SDQCR needs to be initialized to 0 when no channels are
	 * being dequeued from or else the QMan HW will indicate an
	 * error.  The values that were calculated above will be
	 * applied when dequeues from a specific channel are enabled.
	 */
	qbman_cinh_write(&p->sys, QBMAN_CINH_SWP_SDQCR, 0);

	p->eqcr.pi_ring_size = 8;
	if ((qman_version & QMAN_REV_MASK) >= QMAN_REV_5000
			&& (d->cena_access_mode == qman_cena_fastest_access)) {
		p->eqcr.pi_ring_size = 32;
		qbman_swp_enqueue_array_mode_ptr =
				qbman_swp_enqueue_array_mode_mem_back;
		qbman_swp_enqueue_ring_mode_ptr =
				qbman_swp_enqueue_ring_mode_mem_back;
		qbman_swp_enqueue_multiple_ptr =
				qbman_swp_enqueue_multiple_mem_back;
		qbman_swp_enqueue_multiple_fd_ptr =
				qbman_swp_enqueue_multiple_fd_mem_back;
		qbman_swp_enqueue_multiple_desc_ptr =
				qbman_swp_enqueue_multiple_desc_mem_back;
		qbman_swp_pull_ptr = qbman_swp_pull_mem_back;
		qbman_swp_dqrr_next_ptr = qbman_swp_dqrr_next_mem_back;
		qbman_swp_release_ptr = qbman_swp_release_mem_back;
	}

	if (dpaa2_svr_family == SVR_LS1080A) {
		qbman_swp_enqueue_ring_mode_ptr =
				qbman_swp_enqueue_ring_mode_cinh_direct;
		qbman_swp_enqueue_multiple_ptr =
				qbman_swp_enqueue_multiple_cinh_direct;
		qbman_swp_enqueue_multiple_fd_ptr =
				qbman_swp_enqueue_multiple_fd_cinh_direct;
		qbman_swp_enqueue_multiple_desc_ptr =
				qbman_swp_enqueue_multiple_desc_cinh_direct;
	}

	for (mask_size = p->eqcr.pi_ring_size; mask_size > 0; mask_size >>= 1)
		p->eqcr.pi_ci_mask = (p->eqcr.pi_ci_mask<<1) + 1;
	eqcr_pi = qbman_cinh_read(&p->sys, QBMAN_CINH_SWP_EQCR_PI);
	p->eqcr.pi = eqcr_pi & p->eqcr.pi_ci_mask;
	p->eqcr.pi_vb = eqcr_pi & QB_VALID_BIT;
	if ((p->desc.qman_version & QMAN_REV_MASK) >= QMAN_REV_5000
			&& (d->cena_access_mode == qman_cena_fastest_access))
		p->eqcr.ci = qbman_cinh_read(&p->sys, QBMAN_CINH_SWP_EQCR_PI)
					     & p->eqcr.pi_ci_mask;
	else
		p->eqcr.ci = qbman_cinh_read(&p->sys, QBMAN_CINH_SWP_EQCR_CI)
					     & p->eqcr.pi_ci_mask;
	p->eqcr.available = p->eqcr.pi_ring_size -
				qm_cyc_diff(p->eqcr.pi_ring_size,
				p->eqcr.ci & (p->eqcr.pi_ci_mask<<1),
				p->eqcr.pi & (p->eqcr.pi_ci_mask<<1));

	portal_idx_map[p->desc.idx] = p;
	return p;
}
327
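/*
 * A minimal portal lifecycle sketch, assuming the caller has obtained a
 * populated qbman_swp_desc 'd' (e.g. from the MC firmware) beforehand:
 *
 *	struct qbman_swp *swp = qbman_swp_init(&d);
 *
 *	if (!swp)
 *		return -ENODEV;
 *	... enqueue/dequeue via the qbman_swp_*() APIs in this file ...
 *	qbman_swp_finish(swp);
 */
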
void qbman_swp_finish(struct qbman_swp *p)
{
#ifdef QBMAN_CHECKING
	QBMAN_BUG_ON(p->mc.check != swp_mc_can_start);
#endif
	qbman_swp_sys_finish(&p->sys);
	portal_idx_map[p->desc.idx] = NULL;
	free(p);
}

const struct qbman_swp_desc *qbman_swp_get_desc(struct qbman_swp *p)
{
	return &p->desc;
}

/**************/
/* Interrupts */
/**************/
346
uint32_t qbman_swp_interrupt_get_vanish(struct qbman_swp *p)
{
	return qbman_cinh_read(&p->sys, QBMAN_CINH_SWP_ISDR);
}

void qbman_swp_interrupt_set_vanish(struct qbman_swp *p, uint32_t mask)
{
	qbman_cinh_write(&p->sys, QBMAN_CINH_SWP_ISDR, mask);
}

uint32_t qbman_swp_interrupt_read_status(struct qbman_swp *p)
{
	return qbman_cinh_read(&p->sys, QBMAN_CINH_SWP_ISR);
}

void qbman_swp_interrupt_clear_status(struct qbman_swp *p, uint32_t mask)
{
	qbman_cinh_write(&p->sys, QBMAN_CINH_SWP_ISR, mask);
}

uint32_t qbman_swp_dqrr_thrshld_read_status(struct qbman_swp *p)
{
	return qbman_cinh_read(&p->sys, QBMAN_CINH_SWP_DQRR_ITR);
}

void qbman_swp_dqrr_thrshld_write(struct qbman_swp *p, uint32_t mask)
{
	qbman_cinh_write(&p->sys, QBMAN_CINH_SWP_DQRR_ITR, mask);
}

uint32_t qbman_swp_intr_timeout_read_status(struct qbman_swp *p)
{
	return qbman_cinh_read(&p->sys, QBMAN_CINH_SWP_ITPR);
}

void qbman_swp_intr_timeout_write(struct qbman_swp *p, uint32_t mask)
{
	qbman_cinh_write(&p->sys, QBMAN_CINH_SWP_ITPR, mask);
}

uint32_t qbman_swp_interrupt_get_trigger(struct qbman_swp *p)
{
	return qbman_cinh_read(&p->sys, QBMAN_CINH_SWP_IER);
}

void qbman_swp_interrupt_set_trigger(struct qbman_swp *p, uint32_t mask)
{
	qbman_cinh_write(&p->sys, QBMAN_CINH_SWP_IER, mask);
}

int qbman_swp_interrupt_get_inhibit(struct qbman_swp *p)
{
	return qbman_cinh_read(&p->sys, QBMAN_CINH_SWP_IIR);
}

void qbman_swp_interrupt_set_inhibit(struct qbman_swp *p, int inhibit)
{
	qbman_cinh_write(&p->sys, QBMAN_CINH_SWP_IIR,
			 inhibit ? 0xffffffff : 0);
}
407
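/*
 * The accessors above are thin wrappers over cache-inhibited (CINH)
 * register reads/writes. A minimal sketch for running a portal in pure
 * poll mode (illustrative only, for a portal 'p' from qbman_swp_init()):
 *
 *	qbman_swp_interrupt_set_inhibit(p, 1);	- mask the portal IRQ
 *	... poll for work ...
 *	qbman_swp_interrupt_clear_status(p,
 *			qbman_swp_interrupt_read_status(p));
 *	qbman_swp_interrupt_set_inhibit(p, 0);	- unmask again
 */
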
/***********************/
/* Management commands */
/***********************/

/*
 * Internal code common to all types of management commands.
 */

void *qbman_swp_mc_start(struct qbman_swp *p)
{
	void *ret;
#ifdef QBMAN_CHECKING
	QBMAN_BUG_ON(p->mc.check != swp_mc_can_start);
#endif
	if ((p->desc.qman_version & QMAN_REV_MASK) >= QMAN_REV_5000
		    && (p->desc.cena_access_mode == qman_cena_fastest_access))
		ret = qbman_cena_write_start(&p->sys, QBMAN_CENA_SWP_CR_MEM);
	else
		ret = qbman_cena_write_start(&p->sys, QBMAN_CENA_SWP_CR);
#ifdef QBMAN_CHECKING
	if (!ret)
		p->mc.check = swp_mc_can_submit;
#endif
	return ret;
}
433
void qbman_swp_mc_submit(struct qbman_swp *p, void *cmd, uint8_t cmd_verb)
{
	uint8_t *v = cmd;
#ifdef QBMAN_CHECKING
	QBMAN_BUG_ON(p->mc.check != swp_mc_can_submit);
#endif
	/* TBD: "|=" is going to hurt performance. Need to move as many fields
	 * out of word zero, and for those that remain, the "OR" needs to occur
	 * at the caller side. This debug check helps to catch cases where the
	 * caller wants to OR but has forgotten to do so.
	 */
	QBMAN_BUG_ON((*v & cmd_verb) != *v);
	if ((p->desc.qman_version & QMAN_REV_MASK) >= QMAN_REV_5000
		    && (p->desc.cena_access_mode == qman_cena_fastest_access)) {
		*v = cmd_verb | p->mr.valid_bit;
		qbman_cena_write_complete(&p->sys, QBMAN_CENA_SWP_CR_MEM, cmd);
		dma_wmb();
		qbman_cinh_write(&p->sys, QBMAN_CINH_SWP_CR_RT, QMAN_RT_MODE);
	} else {
		dma_wmb();
		*v = cmd_verb | p->mc.valid_bit;
		qbman_cena_write_complete(&p->sys, QBMAN_CENA_SWP_CR, cmd);
		clean(cmd);
	}
#ifdef QBMAN_CHECKING
	p->mc.check = swp_mc_can_poll;
#endif
}
462
void *qbman_swp_mc_result(struct qbman_swp *p)
{
	uint32_t *ret, verb;
#ifdef QBMAN_CHECKING
	QBMAN_BUG_ON(p->mc.check != swp_mc_can_poll);
#endif
	if ((p->desc.qman_version & QMAN_REV_MASK) >= QMAN_REV_5000
		&& (p->desc.cena_access_mode == qman_cena_fastest_access)) {
		ret = qbman_cena_read(&p->sys, QBMAN_CENA_SWP_RR_MEM);
		/* Command completed if the valid bit is toggled */
		if (p->mr.valid_bit != (ret[0] & QB_VALID_BIT))
			return NULL;
		/* Remove the valid-bit -
		 * command completed iff the rest is non-zero
		 */
		verb = ret[0] & ~QB_VALID_BIT;
		if (!verb)
			return NULL;
		p->mr.valid_bit ^= QB_VALID_BIT;
	} else {
		qbman_cena_invalidate_prefetch(&p->sys,
			QBMAN_CENA_SWP_RR(p->mc.valid_bit));
		ret = qbman_cena_read(&p->sys,
				      QBMAN_CENA_SWP_RR(p->mc.valid_bit));
		/* Remove the valid-bit -
		 * command completed iff the rest is non-zero
		 */
		verb = ret[0] & ~QB_VALID_BIT;
		if (!verb)
			return NULL;
		p->mc.valid_bit ^= QB_VALID_BIT;
	}
#ifdef QBMAN_CHECKING
	p->mc.check = swp_mc_can_start;
#endif
	return ret;
}
500
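/*
 * Together, the three helpers above implement one management command
 * exchange. A minimal polling sketch (illustrative only; real callers
 * also bound the poll loop):
 *
 *	void *cmd = qbman_swp_mc_start(p);
 *	void *resp;
 *
 *	if (!cmd)
 *		return -EBUSY;
 *	... fill in the command body, leaving word 0 for the verb ...
 *	qbman_swp_mc_submit(p, cmd, QBMAN_MC_ACQUIRE);
 *	do {
 *		resp = qbman_swp_mc_result(p);
 *	} while (!resp);
 */
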
/***********/
/* Enqueue */
/***********/

#define QB_ENQUEUE_CMD_OPTIONS_SHIFT    0
enum qb_enqueue_commands {
	enqueue_empty = 0,
	enqueue_response_always = 1,
	enqueue_rejects_to_fq = 2
};

#define QB_ENQUEUE_CMD_EC_OPTION_MASK        0x3
#define QB_ENQUEUE_CMD_ORP_ENABLE_SHIFT      2
#define QB_ENQUEUE_CMD_IRQ_ON_DISPATCH_SHIFT 3
#define QB_ENQUEUE_CMD_TARGET_TYPE_SHIFT     4
#define QB_ENQUEUE_CMD_DCA_PK_SHIFT          6
#define QB_ENQUEUE_CMD_DCA_EN_SHIFT          7
#define QB_ENQUEUE_CMD_NLIS_SHIFT            14
#define QB_ENQUEUE_CMD_IS_NESN_SHIFT         15

void qbman_eq_desc_clear(struct qbman_eq_desc *d)
{
	memset(d, 0, sizeof(*d));
}
525
void qbman_eq_desc_set_no_orp(struct qbman_eq_desc *d, int respond_success)
{
	d->eq.verb &= ~(1 << QB_ENQUEUE_CMD_ORP_ENABLE_SHIFT);
	if (respond_success)
		d->eq.verb |= enqueue_response_always;
	else
		d->eq.verb |= enqueue_rejects_to_fq;
}

void qbman_eq_desc_set_orp(struct qbman_eq_desc *d, int respond_success,
			   uint16_t opr_id, uint16_t seqnum, int incomplete)
{
	d->eq.verb |= 1 << QB_ENQUEUE_CMD_ORP_ENABLE_SHIFT;
	if (respond_success)
		d->eq.verb |= enqueue_response_always;
	else
		d->eq.verb |= enqueue_rejects_to_fq;

	d->eq.orpid = opr_id;
	d->eq.seqnum = seqnum;
	if (incomplete)
		d->eq.seqnum |= 1 << QB_ENQUEUE_CMD_NLIS_SHIFT;
	else
		d->eq.seqnum &= ~(1 << QB_ENQUEUE_CMD_NLIS_SHIFT);
}

void qbman_eq_desc_set_orp_hole(struct qbman_eq_desc *d, uint16_t opr_id,
				uint16_t seqnum)
{
	d->eq.verb |= 1 << QB_ENQUEUE_CMD_ORP_ENABLE_SHIFT;
	d->eq.verb &= ~QB_ENQUEUE_CMD_EC_OPTION_MASK;
	d->eq.orpid = opr_id;
	d->eq.seqnum = seqnum;
	d->eq.seqnum &= ~(1 << QB_ENQUEUE_CMD_NLIS_SHIFT);
	d->eq.seqnum &= ~(1 << QB_ENQUEUE_CMD_IS_NESN_SHIFT);
}

void qbman_eq_desc_set_orp_nesn(struct qbman_eq_desc *d, uint16_t opr_id,
				uint16_t seqnum)
{
	d->eq.verb |= 1 << QB_ENQUEUE_CMD_ORP_ENABLE_SHIFT;
	d->eq.verb &= ~QB_ENQUEUE_CMD_EC_OPTION_MASK;
	d->eq.orpid = opr_id;
	d->eq.seqnum = seqnum;
	d->eq.seqnum &= ~(1 << QB_ENQUEUE_CMD_NLIS_SHIFT);
	d->eq.seqnum |= 1 << QB_ENQUEUE_CMD_IS_NESN_SHIFT;
}

void qbman_eq_desc_set_response(struct qbman_eq_desc *d,
				dma_addr_t storage_phys,
				int stash)
{
	d->eq.rsp_addr = storage_phys;
	d->eq.wae = stash;
}

void qbman_eq_desc_set_token(struct qbman_eq_desc *d, uint8_t token)
{
	d->eq.rspid = token;
}

void qbman_eq_desc_set_fq(struct qbman_eq_desc *d, uint32_t fqid)
{
	d->eq.verb &= ~(1 << QB_ENQUEUE_CMD_TARGET_TYPE_SHIFT);
	d->eq.tgtid = fqid;
}

void qbman_eq_desc_set_qd(struct qbman_eq_desc *d, uint32_t qdid,
			  uint16_t qd_bin, uint8_t qd_prio)
{
	d->eq.verb |= 1 << QB_ENQUEUE_CMD_TARGET_TYPE_SHIFT;
	d->eq.tgtid = qdid;
	d->eq.qdbin = qd_bin;
	d->eq.qpri = qd_prio;
}

void qbman_eq_desc_set_eqdi(struct qbman_eq_desc *d, int enable)
{
	if (enable)
		d->eq.verb |= 1 << QB_ENQUEUE_CMD_IRQ_ON_DISPATCH_SHIFT;
	else
		d->eq.verb &= ~(1 << QB_ENQUEUE_CMD_IRQ_ON_DISPATCH_SHIFT);
}

void qbman_eq_desc_set_dca(struct qbman_eq_desc *d, int enable,
			   uint8_t dqrr_idx, int park)
{
	if (enable) {
		d->eq.dca = dqrr_idx;
		if (park)
			d->eq.dca |= 1 << QB_ENQUEUE_CMD_DCA_PK_SHIFT;
		else
			d->eq.dca &= ~(1 << QB_ENQUEUE_CMD_DCA_PK_SHIFT);
		d->eq.dca |= 1 << QB_ENQUEUE_CMD_DCA_EN_SHIFT;
	} else {
		d->eq.dca &= ~(1 << QB_ENQUEUE_CMD_DCA_EN_SHIFT);
	}
}
624
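/*
 * The setters above compose one enqueue descriptor. A minimal sketch for
 * a plain enqueue to a frame queue (the 'fqid' value is illustrative):
 *
 *	struct qbman_eq_desc d;
 *
 *	qbman_eq_desc_clear(&d);
 *	qbman_eq_desc_set_no_orp(&d, 0);
 *	qbman_eq_desc_set_fq(&d, fqid);
 *	ret = qbman_swp_enqueue(s, &d, fd);
 *
 * Clearing first matters: the setters OR bits into verb/seqnum and assume
 * a zeroed descriptor as their starting point.
 */
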
#define EQAR_IDX(eqar)     ((eqar) & 0x1f)
#define EQAR_VB(eqar)      ((eqar) & 0x80)
#define EQAR_SUCCESS(eqar) ((eqar) & 0x100)

static inline void qbman_write_eqcr_am_rt_register(struct qbman_swp *p,
						   uint8_t idx)
{
	if (idx < 16)
		qbman_cinh_write(&p->sys, QBMAN_CINH_SWP_EQCR_AM_RT + idx * 4,
				     QMAN_RT_MODE);
	else
		qbman_cinh_write(&p->sys, QBMAN_CINH_SWP_EQCR_AM_RT2 +
				     (idx - 16) * 4,
				     QMAN_RT_MODE);
}

static int qbman_swp_enqueue_array_mode_direct(struct qbman_swp *s,
					       const struct qbman_eq_desc *d,
					       const struct qbman_fd *fd)
{
	uint32_t *p;
	const uint32_t *cl = qb_cl(d);
	uint32_t eqar = qbman_cinh_read(&s->sys, QBMAN_CINH_SWP_EQAR);

	pr_debug("EQAR=%08x\n", eqar);
	if (!EQAR_SUCCESS(eqar))
		return -EBUSY;
	p = qbman_cena_write_start_wo_shadow(&s->sys,
			QBMAN_CENA_SWP_EQCR(EQAR_IDX(eqar)));
	memcpy(&p[1], &cl[1], 28);
	memcpy(&p[8], fd, sizeof(*fd));

	/* Set the verb byte, have to substitute in the valid-bit */
	dma_wmb();
	p[0] = cl[0] | EQAR_VB(eqar);
	qbman_cena_write_complete_wo_shadow(&s->sys,
				QBMAN_CENA_SWP_EQCR(EQAR_IDX(eqar)));
	return 0;
}

static int qbman_swp_enqueue_array_mode_mem_back(struct qbman_swp *s,
						 const struct qbman_eq_desc *d,
						 const struct qbman_fd *fd)
{
	uint32_t *p;
	const uint32_t *cl = qb_cl(d);
	uint32_t eqar = qbman_cinh_read(&s->sys, QBMAN_CINH_SWP_EQAR);

	pr_debug("EQAR=%08x\n", eqar);
	if (!EQAR_SUCCESS(eqar))
		return -EBUSY;
	p = qbman_cena_write_start_wo_shadow(&s->sys,
			QBMAN_CENA_SWP_EQCR(EQAR_IDX(eqar)));
	memcpy(&p[1], &cl[1], 28);
	memcpy(&p[8], fd, sizeof(*fd));

	/* Set the verb byte, have to substitute in the valid-bit */
	p[0] = cl[0] | EQAR_VB(eqar);
	dma_wmb();
	qbman_write_eqcr_am_rt_register(s, EQAR_IDX(eqar));
	return 0;
}

static inline int qbman_swp_enqueue_array_mode(struct qbman_swp *s,
					       const struct qbman_eq_desc *d,
					       const struct qbman_fd *fd)
{
	return qbman_swp_enqueue_array_mode_ptr(s, d, fd);
}
694
static int qbman_swp_enqueue_ring_mode_direct(struct qbman_swp *s,
					      const struct qbman_eq_desc *d,
					      const struct qbman_fd *fd)
{
	uint32_t *p;
	const uint32_t *cl = qb_cl(d);
	uint32_t eqcr_ci, full_mask, half_mask;

	half_mask = (s->eqcr.pi_ci_mask>>1);
	full_mask = s->eqcr.pi_ci_mask;
	if (!s->eqcr.available) {
		eqcr_ci = s->eqcr.ci;
		s->eqcr.ci = qbman_cena_read_reg(&s->sys,
				QBMAN_CENA_SWP_EQCR_CI) & full_mask;
		s->eqcr.available = qm_cyc_diff(s->eqcr.pi_ring_size,
				eqcr_ci, s->eqcr.ci);
		if (!s->eqcr.available)
			return -EBUSY;
	}

	p = qbman_cena_write_start_wo_shadow(&s->sys,
			QBMAN_CENA_SWP_EQCR(s->eqcr.pi & half_mask));
	memcpy(&p[1], &cl[1], 28);
	memcpy(&p[8], fd, sizeof(*fd));
	lwsync();

	/* Set the verb byte, have to substitute in the valid-bit */
	p[0] = cl[0] | s->eqcr.pi_vb;
	qbman_cena_write_complete_wo_shadow(&s->sys,
			QBMAN_CENA_SWP_EQCR(s->eqcr.pi & half_mask));
	s->eqcr.pi++;
	s->eqcr.pi &= full_mask;
	s->eqcr.available--;
	if (!(s->eqcr.pi & half_mask))
		s->eqcr.pi_vb ^= QB_VALID_BIT;

	return 0;
}
733
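/*
 * The _cinh_direct variant below is selected in qbman_swp_init() for the
 * LS1088 family (dpaa2_svr_family == SVR_LS1080A). It differs from the
 * plain direct variant only in how the EQCR consumer index is refreshed:
 * via a cache-inhibited (CINH) register read, qbman_cinh_read(), instead
 * of a cache-enabled read through qbman_cena_read_reg(). The command
 * writes themselves still go through the cache-enabled area.
 */
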
static int qbman_swp_enqueue_ring_mode_cinh_direct(
		struct qbman_swp *s,
		const struct qbman_eq_desc *d,
		const struct qbman_fd *fd)
{
	uint32_t *p;
	const uint32_t *cl = qb_cl(d);
	uint32_t eqcr_ci, full_mask, half_mask;

	half_mask = (s->eqcr.pi_ci_mask>>1);
	full_mask = s->eqcr.pi_ci_mask;
	if (!s->eqcr.available) {
		eqcr_ci = s->eqcr.ci;
		s->eqcr.ci = qbman_cinh_read(&s->sys,
				QBMAN_CINH_SWP_EQCR_CI) & full_mask;
		s->eqcr.available = qm_cyc_diff(s->eqcr.pi_ring_size,
				eqcr_ci, s->eqcr.ci);
		if (!s->eqcr.available)
			return -EBUSY;
	}

	p = qbman_cena_write_start_wo_shadow(&s->sys,
			QBMAN_CENA_SWP_EQCR(s->eqcr.pi & half_mask));
	memcpy(&p[1], &cl[1], 28);
	memcpy(&p[8], fd, sizeof(*fd));
	lwsync();

	/* Set the verb byte, have to substitute in the valid-bit */
	p[0] = cl[0] | s->eqcr.pi_vb;
	qbman_cena_write_complete_wo_shadow(&s->sys,
			QBMAN_CENA_SWP_EQCR(s->eqcr.pi & half_mask));
	s->eqcr.pi++;
	s->eqcr.pi &= full_mask;
	s->eqcr.available--;
	if (!(s->eqcr.pi & half_mask))
		s->eqcr.pi_vb ^= QB_VALID_BIT;

	return 0;
}
773
static int qbman_swp_enqueue_ring_mode_mem_back(struct qbman_swp *s,
						const struct qbman_eq_desc *d,
						const struct qbman_fd *fd)
{
	uint32_t *p;
	const uint32_t *cl = qb_cl(d);
	uint32_t eqcr_ci, full_mask, half_mask;

	half_mask = (s->eqcr.pi_ci_mask>>1);
	full_mask = s->eqcr.pi_ci_mask;
	if (!s->eqcr.available) {
		eqcr_ci = s->eqcr.ci;
		s->eqcr.ci = qbman_cena_read_reg(&s->sys,
				QBMAN_CENA_SWP_EQCR_CI_MEMBACK) & full_mask;
		s->eqcr.available = qm_cyc_diff(s->eqcr.pi_ring_size,
				eqcr_ci, s->eqcr.ci);
		if (!s->eqcr.available)
			return -EBUSY;
	}

	p = qbman_cena_write_start_wo_shadow(&s->sys,
			QBMAN_CENA_SWP_EQCR(s->eqcr.pi & half_mask));
	memcpy(&p[1], &cl[1], 28);
	memcpy(&p[8], fd, sizeof(*fd));

	/* Set the verb byte, have to substitute in the valid-bit */
	p[0] = cl[0] | s->eqcr.pi_vb;
	s->eqcr.pi++;
	s->eqcr.pi &= full_mask;
	s->eqcr.available--;
	if (!(s->eqcr.pi & half_mask))
		s->eqcr.pi_vb ^= QB_VALID_BIT;
	dma_wmb();
	qbman_cinh_write(&s->sys, QBMAN_CINH_SWP_EQCR_PI,
				(QB_RT_BIT)|(s->eqcr.pi)|s->eqcr.pi_vb);
	return 0;
}

static int qbman_swp_enqueue_ring_mode(struct qbman_swp *s,
				       const struct qbman_eq_desc *d,
				       const struct qbman_fd *fd)
{
	return qbman_swp_enqueue_ring_mode_ptr(s, d, fd);
}
818
int qbman_swp_enqueue(struct qbman_swp *s, const struct qbman_eq_desc *d,
		      const struct qbman_fd *fd)
{
	if (s->sys.eqcr_mode == qman_eqcr_vb_array)
		return qbman_swp_enqueue_array_mode(s, d, fd);
	else	/* Use ring mode by default */
		return qbman_swp_enqueue_ring_mode(s, d, fd);
}
827
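/*
 * qbman_swp_enqueue() returns -EBUSY when no EQCR slot is currently free.
 * A minimal retry sketch (illustrative only; production callers usually
 * bound the number of attempts):
 *
 *	do {
 *		ret = qbman_swp_enqueue(s, &d, fd);
 *	} while (ret == -EBUSY);
 */
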
static int qbman_swp_enqueue_multiple_direct(struct qbman_swp *s,
					     const struct qbman_eq_desc *d,
					     const struct qbman_fd *fd,
					     uint32_t *flags,
					     int num_frames)
{
	uint32_t *p = NULL;
	const uint32_t *cl = qb_cl(d);
	uint32_t eqcr_ci, eqcr_pi, half_mask, full_mask;
	int i, num_enqueued = 0;
	uint64_t addr_cena;

	half_mask = (s->eqcr.pi_ci_mask>>1);
	full_mask = s->eqcr.pi_ci_mask;
	if (!s->eqcr.available) {
		eqcr_ci = s->eqcr.ci;
		s->eqcr.ci = qbman_cena_read_reg(&s->sys,
				QBMAN_CENA_SWP_EQCR_CI) & full_mask;
		s->eqcr.available = qm_cyc_diff(s->eqcr.pi_ring_size,
				eqcr_ci, s->eqcr.ci);
		if (!s->eqcr.available)
			return 0;
	}

	eqcr_pi = s->eqcr.pi;
	num_enqueued = (s->eqcr.available < num_frames) ?
			s->eqcr.available : num_frames;
	s->eqcr.available -= num_enqueued;
	/* Fill in the EQCR ring */
	for (i = 0; i < num_enqueued; i++) {
		p = qbman_cena_write_start_wo_shadow(&s->sys,
				QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
		memcpy(&p[1], &cl[1], 28);
		memcpy(&p[8], &fd[i], sizeof(*fd));
		eqcr_pi++;
	}

	lwsync();

	/* Set the verb byte, have to substitute in the valid-bit */
	eqcr_pi = s->eqcr.pi;
	for (i = 0; i < num_enqueued; i++) {
		p = qbman_cena_write_start_wo_shadow(&s->sys,
				QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
		p[0] = cl[0] | s->eqcr.pi_vb;
		if (flags && (flags[i] & QBMAN_ENQUEUE_FLAG_DCA)) {
			struct qbman_eq_desc *d = (struct qbman_eq_desc *)p;

			d->eq.dca = (1 << QB_ENQUEUE_CMD_DCA_EN_SHIFT) |
				((flags[i]) & QBMAN_EQCR_DCA_IDXMASK);
		}
		eqcr_pi++;
		if (!(eqcr_pi & half_mask))
			s->eqcr.pi_vb ^= QB_VALID_BIT;
	}

	/* Flush all the cachelines without load/store in between */
	eqcr_pi = s->eqcr.pi;
	addr_cena = (size_t)s->sys.addr_cena;
	for (i = 0; i < num_enqueued; i++) {
		dcbf((uintptr_t)(addr_cena +
			QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask)));
		eqcr_pi++;
	}
	s->eqcr.pi = eqcr_pi & full_mask;

	return num_enqueued;
}
896
static int qbman_swp_enqueue_multiple_cinh_direct(
		struct qbman_swp *s,
		const struct qbman_eq_desc *d,
		const struct qbman_fd *fd,
		uint32_t *flags,
		int num_frames)
{
	uint32_t *p = NULL;
	const uint32_t *cl = qb_cl(d);
	uint32_t eqcr_ci, eqcr_pi, half_mask, full_mask;
	int i, num_enqueued = 0;
	uint64_t addr_cena;

	half_mask = (s->eqcr.pi_ci_mask>>1);
	full_mask = s->eqcr.pi_ci_mask;
	if (!s->eqcr.available) {
		eqcr_ci = s->eqcr.ci;
		s->eqcr.ci = qbman_cinh_read(&s->sys,
				QBMAN_CINH_SWP_EQCR_CI) & full_mask;
		s->eqcr.available = qm_cyc_diff(s->eqcr.pi_ring_size,
				eqcr_ci, s->eqcr.ci);
		if (!s->eqcr.available)
			return 0;
	}

	eqcr_pi = s->eqcr.pi;
	num_enqueued = (s->eqcr.available < num_frames) ?
			s->eqcr.available : num_frames;
	s->eqcr.available -= num_enqueued;
	/* Fill in the EQCR ring */
	for (i = 0; i < num_enqueued; i++) {
		p = qbman_cena_write_start_wo_shadow(&s->sys,
				QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
		memcpy(&p[1], &cl[1], 28);
		memcpy(&p[8], &fd[i], sizeof(*fd));
		eqcr_pi++;
	}

	lwsync();

	/* Set the verb byte, have to substitute in the valid-bit */
	eqcr_pi = s->eqcr.pi;
	for (i = 0; i < num_enqueued; i++) {
		p = qbman_cena_write_start_wo_shadow(&s->sys,
				QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
		p[0] = cl[0] | s->eqcr.pi_vb;
		if (flags && (flags[i] & QBMAN_ENQUEUE_FLAG_DCA)) {
			struct qbman_eq_desc *d = (struct qbman_eq_desc *)p;

			d->eq.dca = (1 << QB_ENQUEUE_CMD_DCA_EN_SHIFT) |
				((flags[i]) & QBMAN_EQCR_DCA_IDXMASK);
		}
		eqcr_pi++;
		if (!(eqcr_pi & half_mask))
			s->eqcr.pi_vb ^= QB_VALID_BIT;
	}

	/* Flush all the cachelines without load/store in between */
	eqcr_pi = s->eqcr.pi;
	addr_cena = (size_t)s->sys.addr_cena;
	for (i = 0; i < num_enqueued; i++) {
		dcbf(addr_cena +
			QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
		eqcr_pi++;
	}
	s->eqcr.pi = eqcr_pi & full_mask;

	return num_enqueued;
}
966
static int qbman_swp_enqueue_multiple_mem_back(struct qbman_swp *s,
					       const struct qbman_eq_desc *d,
					       const struct qbman_fd *fd,
					       uint32_t *flags,
					       int num_frames)
{
	uint32_t *p = NULL;
	const uint32_t *cl = qb_cl(d);
	uint32_t eqcr_ci, eqcr_pi, half_mask, full_mask;
	int i, num_enqueued = 0;

	half_mask = (s->eqcr.pi_ci_mask>>1);
	full_mask = s->eqcr.pi_ci_mask;
	if (!s->eqcr.available) {
		eqcr_ci = s->eqcr.ci;
		s->eqcr.ci = qbman_cena_read_reg(&s->sys,
				QBMAN_CENA_SWP_EQCR_CI_MEMBACK) & full_mask;
		s->eqcr.available = qm_cyc_diff(s->eqcr.pi_ring_size,
					eqcr_ci, s->eqcr.ci);
		if (!s->eqcr.available)
			return 0;
	}

	eqcr_pi = s->eqcr.pi;
	num_enqueued = (s->eqcr.available < num_frames) ?
			s->eqcr.available : num_frames;
	s->eqcr.available -= num_enqueued;
	/* Fill in the EQCR ring */
	for (i = 0; i < num_enqueued; i++) {
		p = qbman_cena_write_start_wo_shadow(&s->sys,
				QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
		memcpy(&p[1], &cl[1], 28);
		memcpy(&p[8], &fd[i], sizeof(*fd));
		if (flags && (flags[i] & QBMAN_ENQUEUE_FLAG_DCA)) {
			struct qbman_eq_desc *d = (struct qbman_eq_desc *)p;

			d->eq.dca = (1 << QB_ENQUEUE_CMD_DCA_EN_SHIFT) |
				((flags[i]) & QBMAN_EQCR_DCA_IDXMASK);
		}
		eqcr_pi++;
		p[0] = cl[0] | s->eqcr.pi_vb;

		if (!(eqcr_pi & half_mask))
			s->eqcr.pi_vb ^= QB_VALID_BIT;
	}
	s->eqcr.pi = eqcr_pi & full_mask;

	dma_wmb();
	qbman_cinh_write(&s->sys, QBMAN_CINH_SWP_EQCR_PI,
				(QB_RT_BIT)|(s->eqcr.pi)|s->eqcr.pi_vb);
	return num_enqueued;
}
1019
inline int qbman_swp_enqueue_multiple(struct qbman_swp *s,
				      const struct qbman_eq_desc *d,
				      const struct qbman_fd *fd,
				      uint32_t *flags,
				      int num_frames)
{
	return qbman_swp_enqueue_multiple_ptr(s, d, fd, flags, num_frames);
}
1028
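/*
 * qbman_swp_enqueue_multiple() returns how many of 'num_frames' frames
 * were actually queued, which may be fewer when EQCR space runs out.
 * A minimal burst sketch (illustrative only):
 *
 *	int sent = 0;
 *
 *	while (sent < n)
 *		sent += qbman_swp_enqueue_multiple(s, &d, &fds[sent],
 *						   NULL, n - sent);
 *
 * Passing NULL for 'flags' simply skips the per-frame DCA handling.
 */
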
static int qbman_swp_enqueue_multiple_fd_direct(struct qbman_swp *s,
						const struct qbman_eq_desc *d,
						struct qbman_fd **fd,
						uint32_t *flags,
						int num_frames)
{
	uint32_t *p = NULL;
	const uint32_t *cl = qb_cl(d);
	uint32_t eqcr_ci, eqcr_pi, half_mask, full_mask;
	int i, num_enqueued = 0;
	uint64_t addr_cena;

	half_mask = (s->eqcr.pi_ci_mask>>1);
	full_mask = s->eqcr.pi_ci_mask;
	if (!s->eqcr.available) {
		eqcr_ci = s->eqcr.ci;
		s->eqcr.ci = qbman_cena_read_reg(&s->sys,
				QBMAN_CENA_SWP_EQCR_CI) & full_mask;
		s->eqcr.available = qm_cyc_diff(s->eqcr.pi_ring_size,
				eqcr_ci, s->eqcr.ci);
		if (!s->eqcr.available)
			return 0;
	}

	eqcr_pi = s->eqcr.pi;
	num_enqueued = (s->eqcr.available < num_frames) ?
			s->eqcr.available : num_frames;
	s->eqcr.available -= num_enqueued;
	/* Fill in the EQCR ring */
	for (i = 0; i < num_enqueued; i++) {
		p = qbman_cena_write_start_wo_shadow(&s->sys,
				QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
		memcpy(&p[1], &cl[1], 28);
		memcpy(&p[8], fd[i], sizeof(struct qbman_fd));
		eqcr_pi++;
	}

	lwsync();

	/* Set the verb byte, have to substitute in the valid-bit */
	eqcr_pi = s->eqcr.pi;
	for (i = 0; i < num_enqueued; i++) {
		p = qbman_cena_write_start_wo_shadow(&s->sys,
				QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
		p[0] = cl[0] | s->eqcr.pi_vb;
		if (flags && (flags[i] & QBMAN_ENQUEUE_FLAG_DCA)) {
			struct qbman_eq_desc *d = (struct qbman_eq_desc *)p;

			d->eq.dca = (1 << QB_ENQUEUE_CMD_DCA_EN_SHIFT) |
				((flags[i]) & QBMAN_EQCR_DCA_IDXMASK);
		}
		eqcr_pi++;
		if (!(eqcr_pi & half_mask))
			s->eqcr.pi_vb ^= QB_VALID_BIT;
	}

	/* Flush all the cachelines without load/store in between */
	eqcr_pi = s->eqcr.pi;
	addr_cena = (size_t)s->sys.addr_cena;
	for (i = 0; i < num_enqueued; i++) {
		dcbf(addr_cena +
			QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
		eqcr_pi++;
	}
	s->eqcr.pi = eqcr_pi & full_mask;

	return num_enqueued;
}
1097
static int qbman_swp_enqueue_multiple_fd_cinh_direct(
		struct qbman_swp *s,
		const struct qbman_eq_desc *d,
		struct qbman_fd **fd,
		uint32_t *flags,
		int num_frames)
{
	uint32_t *p = NULL;
	const uint32_t *cl = qb_cl(d);
	uint32_t eqcr_ci, eqcr_pi, half_mask, full_mask;
	int i, num_enqueued = 0;
	uint64_t addr_cena;

	half_mask = (s->eqcr.pi_ci_mask>>1);
	full_mask = s->eqcr.pi_ci_mask;
	if (!s->eqcr.available) {
		eqcr_ci = s->eqcr.ci;
		s->eqcr.ci = qbman_cinh_read(&s->sys,
				QBMAN_CINH_SWP_EQCR_CI) & full_mask;
		s->eqcr.available = qm_cyc_diff(s->eqcr.pi_ring_size,
				eqcr_ci, s->eqcr.ci);
		if (!s->eqcr.available)
			return 0;
	}

	eqcr_pi = s->eqcr.pi;
	num_enqueued = (s->eqcr.available < num_frames) ?
			s->eqcr.available : num_frames;
	s->eqcr.available -= num_enqueued;
	/* Fill in the EQCR ring */
	for (i = 0; i < num_enqueued; i++) {
		p = qbman_cena_write_start_wo_shadow(&s->sys,
				QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
		memcpy(&p[1], &cl[1], 28);
		memcpy(&p[8], fd[i], sizeof(struct qbman_fd));
		eqcr_pi++;
	}

	lwsync();

	/* Set the verb byte, have to substitute in the valid-bit */
	eqcr_pi = s->eqcr.pi;
	for (i = 0; i < num_enqueued; i++) {
		p = qbman_cena_write_start_wo_shadow(&s->sys,
				QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
		p[0] = cl[0] | s->eqcr.pi_vb;
		if (flags && (flags[i] & QBMAN_ENQUEUE_FLAG_DCA)) {
			struct qbman_eq_desc *d = (struct qbman_eq_desc *)p;

			d->eq.dca = (1 << QB_ENQUEUE_CMD_DCA_EN_SHIFT) |
				((flags[i]) & QBMAN_EQCR_DCA_IDXMASK);
		}
		eqcr_pi++;
		if (!(eqcr_pi & half_mask))
			s->eqcr.pi_vb ^= QB_VALID_BIT;
	}

	/* Flush all the cachelines without load/store in between */
	eqcr_pi = s->eqcr.pi;
	addr_cena = (size_t)s->sys.addr_cena;
	for (i = 0; i < num_enqueued; i++) {
		dcbf(addr_cena +
			QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
		eqcr_pi++;
	}
	s->eqcr.pi = eqcr_pi & full_mask;

	return num_enqueued;
}
1167
static int qbman_swp_enqueue_multiple_fd_mem_back(struct qbman_swp *s,
						  const struct qbman_eq_desc *d,
						  struct qbman_fd **fd,
						  uint32_t *flags,
						  int num_frames)
{
	uint32_t *p = NULL;
	const uint32_t *cl = qb_cl(d);
	uint32_t eqcr_ci, eqcr_pi, half_mask, full_mask;
	int i, num_enqueued = 0;

	half_mask = (s->eqcr.pi_ci_mask>>1);
	full_mask = s->eqcr.pi_ci_mask;
	if (!s->eqcr.available) {
		eqcr_ci = s->eqcr.ci;
		s->eqcr.ci = qbman_cena_read_reg(&s->sys,
				QBMAN_CENA_SWP_EQCR_CI_MEMBACK) & full_mask;
		s->eqcr.available = qm_cyc_diff(s->eqcr.pi_ring_size,
					eqcr_ci, s->eqcr.ci);
		if (!s->eqcr.available)
			return 0;
	}

	eqcr_pi = s->eqcr.pi;
	num_enqueued = (s->eqcr.available < num_frames) ?
			s->eqcr.available : num_frames;
	s->eqcr.available -= num_enqueued;
	/* Fill in the EQCR ring */
	for (i = 0; i < num_enqueued; i++) {
		p = qbman_cena_write_start_wo_shadow(&s->sys,
				QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
		memcpy(&p[1], &cl[1], 28);
		memcpy(&p[8], fd[i], sizeof(struct qbman_fd));
		eqcr_pi++;
	}

	/* Set the verb byte, have to substitute in the valid-bit */
	eqcr_pi = s->eqcr.pi;
	for (i = 0; i < num_enqueued; i++) {
		p = qbman_cena_write_start_wo_shadow(&s->sys,
				QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
		p[0] = cl[0] | s->eqcr.pi_vb;
		if (flags && (flags[i] & QBMAN_ENQUEUE_FLAG_DCA)) {
			struct qbman_eq_desc *d = (struct qbman_eq_desc *)p;

			d->eq.dca = (1 << QB_ENQUEUE_CMD_DCA_EN_SHIFT) |
				((flags[i]) & QBMAN_EQCR_DCA_IDXMASK);
		}
		eqcr_pi++;
		if (!(eqcr_pi & half_mask))
			s->eqcr.pi_vb ^= QB_VALID_BIT;
	}
	s->eqcr.pi = eqcr_pi & full_mask;

	dma_wmb();
	qbman_cinh_write(&s->sys, QBMAN_CINH_SWP_EQCR_PI,
				(QB_RT_BIT)|(s->eqcr.pi)|s->eqcr.pi_vb);
	return num_enqueued;
}
1227
inline int qbman_swp_enqueue_multiple_fd(struct qbman_swp *s,
					 const struct qbman_eq_desc *d,
					 struct qbman_fd **fd,
					 uint32_t *flags,
					 int num_frames)
{
	return qbman_swp_enqueue_multiple_fd_ptr(s, d, fd, flags, num_frames);
}
1236
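/*
 * qbman_swp_enqueue_multiple_fd() behaves like qbman_swp_enqueue_multiple()
 * but takes an array of frame-descriptor pointers (struct qbman_fd **), so
 * callers can enqueue FDs that are scattered in memory without first
 * packing them into one contiguous array.
 */
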
static int qbman_swp_enqueue_multiple_desc_direct(struct qbman_swp *s,
					const struct qbman_eq_desc *d,
					const struct qbman_fd *fd,
					int num_frames)
{
	uint32_t *p;
	const uint32_t *cl;
	uint32_t eqcr_ci, eqcr_pi, half_mask, full_mask;
	int i, num_enqueued = 0;
	uint64_t addr_cena;

	half_mask = (s->eqcr.pi_ci_mask>>1);
	full_mask = s->eqcr.pi_ci_mask;
	if (!s->eqcr.available) {
		eqcr_ci = s->eqcr.ci;
		s->eqcr.ci = qbman_cena_read_reg(&s->sys,
				QBMAN_CENA_SWP_EQCR_CI) & full_mask;
		s->eqcr.available = qm_cyc_diff(s->eqcr.pi_ring_size,
					eqcr_ci, s->eqcr.ci);
		if (!s->eqcr.available)
			return 0;
	}

	eqcr_pi = s->eqcr.pi;
	num_enqueued = (s->eqcr.available < num_frames) ?
			s->eqcr.available : num_frames;
	s->eqcr.available -= num_enqueued;
	/* Fill in the EQCR ring */
	for (i = 0; i < num_enqueued; i++) {
		p = qbman_cena_write_start_wo_shadow(&s->sys,
				QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
		cl = qb_cl(&d[i]);
		memcpy(&p[1], &cl[1], 28);
		memcpy(&p[8], &fd[i], sizeof(*fd));
		eqcr_pi++;
	}

	lwsync();

	/* Set the verb byte, have to substitute in the valid-bit */
	eqcr_pi = s->eqcr.pi;
	for (i = 0; i < num_enqueued; i++) {
		p = qbman_cena_write_start_wo_shadow(&s->sys,
				QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
		cl = qb_cl(&d[i]);
		p[0] = cl[0] | s->eqcr.pi_vb;
		eqcr_pi++;
		if (!(eqcr_pi & half_mask))
			s->eqcr.pi_vb ^= QB_VALID_BIT;
	}

	/* Flush all the cachelines without load/store in between */
	eqcr_pi = s->eqcr.pi;
	addr_cena = (size_t)s->sys.addr_cena;
	for (i = 0; i < num_enqueued; i++) {
		dcbf((uintptr_t)(addr_cena +
			QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask)));
		eqcr_pi++;
	}
	s->eqcr.pi = eqcr_pi & full_mask;

	return num_enqueued;
}
1300
static int qbman_swp_enqueue_multiple_desc_cinh_direct(
		struct qbman_swp *s,
		const struct qbman_eq_desc *d,
		const struct qbman_fd *fd,
		int num_frames)
{
	uint32_t *p;
	const uint32_t *cl;
	uint32_t eqcr_ci, eqcr_pi, half_mask, full_mask;
	int i, num_enqueued = 0;
	uint64_t addr_cena;

	half_mask = (s->eqcr.pi_ci_mask>>1);
	full_mask = s->eqcr.pi_ci_mask;
	if (!s->eqcr.available) {
		eqcr_ci = s->eqcr.ci;
		s->eqcr.ci = qbman_cinh_read(&s->sys,
				QBMAN_CINH_SWP_EQCR_CI) & full_mask;
		s->eqcr.available = qm_cyc_diff(s->eqcr.pi_ring_size,
					eqcr_ci, s->eqcr.ci);
		if (!s->eqcr.available)
			return 0;
	}

	eqcr_pi = s->eqcr.pi;
	num_enqueued = (s->eqcr.available < num_frames) ?
			s->eqcr.available : num_frames;
	s->eqcr.available -= num_enqueued;
	/* Fill in the EQCR ring */
	for (i = 0; i < num_enqueued; i++) {
		p = qbman_cena_write_start_wo_shadow(&s->sys,
				QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
		cl = qb_cl(&d[i]);
		memcpy(&p[1], &cl[1], 28);
		memcpy(&p[8], &fd[i], sizeof(*fd));
		eqcr_pi++;
	}

	lwsync();

	/* Set the verb byte, have to substitute in the valid-bit */
	eqcr_pi = s->eqcr.pi;
	for (i = 0; i < num_enqueued; i++) {
		p = qbman_cena_write_start_wo_shadow(&s->sys,
				QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
		cl = qb_cl(&d[i]);
		p[0] = cl[0] | s->eqcr.pi_vb;
		eqcr_pi++;
		if (!(eqcr_pi & half_mask))
			s->eqcr.pi_vb ^= QB_VALID_BIT;
	}

	/* Flush all the cachelines without load/store in between */
	eqcr_pi = s->eqcr.pi;
	addr_cena = (size_t)s->sys.addr_cena;
	for (i = 0; i < num_enqueued; i++) {
		dcbf(addr_cena +
			QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
		eqcr_pi++;
	}
	s->eqcr.pi = eqcr_pi & full_mask;

	return num_enqueued;
}
1365
1366 static int qbman_swp_enqueue_multiple_desc_mem_back(struct qbman_swp *s,
1367                                         const struct qbman_eq_desc *d,
1368                                         const struct qbman_fd *fd,
1369                                         int num_frames)
1370 {
1371         uint32_t *p;
1372         const uint32_t *cl;
1373         uint32_t eqcr_ci, eqcr_pi, half_mask, full_mask;
1374         int i, num_enqueued = 0;
1375
1376         half_mask = (s->eqcr.pi_ci_mask>>1);
1377         full_mask = s->eqcr.pi_ci_mask;
1378         if (!s->eqcr.available) {
1379                 eqcr_ci = s->eqcr.ci;
1380                 s->eqcr.ci = qbman_cena_read_reg(&s->sys,
1381                                 QBMAN_CENA_SWP_EQCR_CI_MEMBACK) & full_mask;
1382                 s->eqcr.available = qm_cyc_diff(s->eqcr.pi_ring_size,
1383                                         eqcr_ci, s->eqcr.ci);
1384                 if (!s->eqcr.available)
1385                         return 0;
1386         }
1387
1388         eqcr_pi = s->eqcr.pi;
1389         num_enqueued = (s->eqcr.available < num_frames) ?
1390                         s->eqcr.available : num_frames;
1391         s->eqcr.available -= num_enqueued;
1392         /* Fill in the EQCR ring */
1393         for (i = 0; i < num_enqueued; i++) {
1394                 p = qbman_cena_write_start_wo_shadow(&s->sys,
1395                                 QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
1396                 cl = qb_cl(&d[i]);
1397                 memcpy(&p[1], &cl[1], 28);
1398                 memcpy(&p[8], &fd[i], sizeof(*fd));
1399                 eqcr_pi++;
1400         }
1401
1402         /* Set the verb byte, have to substitute in the valid-bit */
1403         eqcr_pi = s->eqcr.pi;
1404         for (i = 0; i < num_enqueued; i++) {
1405                 p = qbman_cena_write_start_wo_shadow(&s->sys,
1406                                 QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
1407                 cl = qb_cl(&d[i]);
1408                 p[0] = cl[0] | s->eqcr.pi_vb;
1409                 eqcr_pi++;
1410                 if (!(eqcr_pi & half_mask))
1411                         s->eqcr.pi_vb ^= QB_VALID_BIT;
1412         }
1413
1414         s->eqcr.pi = eqcr_pi & full_mask;
1415
1416         dma_wmb();
1417         qbman_cinh_write(&s->sys, QBMAN_CINH_SWP_EQCR_PI,
1418                                 (QB_RT_BIT)|(s->eqcr.pi)|s->eqcr.pi_vb);
1419
1420         return num_enqueued;
1421 }
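
/* A small sketch of the memory-backed doorbell value written just above;
 * the field composition (ring-transaction bit | producer index | valid
 * bit) is taken from this file, while the helper itself is illustrative
 * and not part of the driver.
 */
static inline uint32_t example_eqcr_pi_doorbell(uint32_t pi, uint32_t vb)
{
        return QB_RT_BIT | pi | vb;
}
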
1422 inline int qbman_swp_enqueue_multiple_desc(struct qbman_swp *s,
1423                                            const struct qbman_eq_desc *d,
1424                                            const struct qbman_fd *fd,
1425                                            int num_frames)
1426 {
1427         return qbman_swp_enqueue_multiple_desc_ptr(s, d, fd, num_frames);
1428 }
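
/* Illustrative caller of the wrapper above (a hypothetical helper, not
 * part of the driver): enqueue a burst and report how many frames still
 * need a retry.
 */
static inline int example_enqueue_burst(struct qbman_swp *s,
                                        const struct qbman_eq_desc *eqd,
                                        const struct qbman_fd *fds, int num)
{
        int sent = qbman_swp_enqueue_multiple_desc(s, eqd, fds, num);

        /* A short count is not an error: the EQCR was simply full */
        return (sent < 0) ? sent : num - sent;
}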
1429
1430 /*************************/
1431 /* Static (push) dequeue */
1432 /*************************/
1433
1434 void qbman_swp_push_get(struct qbman_swp *s, uint8_t channel_idx, int *enabled)
1435 {
1436         uint16_t src = (s->sdq >> QB_SDQCR_SRC_SHIFT) & QB_SDQCR_SRC_MASK;
1437
1438         QBMAN_BUG_ON(channel_idx > 15);
1439         *enabled = !!(src & (1 << channel_idx));
1440 }
1441
1442 void qbman_swp_push_set(struct qbman_swp *s, uint8_t channel_idx, int enable)
1443 {
1444         uint16_t dqsrc;
1445
1446         QBMAN_BUG_ON(channel_idx > 15);
1447         if (enable)
1448                 s->sdq |= 1 << channel_idx;
1449         else
1450                 s->sdq &= ~(1 << channel_idx);
1451
1452         /* Recompute the complete src map.  If no channels are enabled,
1453          * the SDQCR must be written as 0 or else QMan will assert errors
1454          */
1455         dqsrc = (s->sdq >> QB_SDQCR_SRC_SHIFT) & QB_SDQCR_SRC_MASK;
1456         if (dqsrc != 0)
1457                 qbman_cinh_write(&s->sys, QBMAN_CINH_SWP_SDQCR, s->sdq);
1458         else
1459                 qbman_cinh_write(&s->sys, QBMAN_CINH_SWP_SDQCR, 0);
1460 }
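
/* Illustrative usage of the push-dequeue accessors above (a hypothetical
 * helper, not part of the driver): enable channel 'ch' and read the
 * setting back.
 */
static inline int example_enable_push_dequeue(struct qbman_swp *s,
                                              uint8_t ch)
{
        int enabled;

        qbman_swp_push_set(s, ch, 1);
        qbman_swp_push_get(s, ch, &enabled);
        return enabled;
}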
1461
1462 /***************************/
1463 /* Volatile (pull) dequeue */
1464 /***************************/
1465
1466 /* These should be const, eventually */
1467 #define QB_VDQCR_VERB_DCT_SHIFT    0
1468 #define QB_VDQCR_VERB_DT_SHIFT     2
1469 #define QB_VDQCR_VERB_RLS_SHIFT    4
1470 #define QB_VDQCR_VERB_WAE_SHIFT    5
1471 #define QB_VDQCR_VERB_RAD_SHIFT    6
1472
1473 enum qb_pull_dt_e {
1474         qb_pull_dt_channel,
1475         qb_pull_dt_workqueue,
1476         qb_pull_dt_framequeue
1477 };
1478
1479 void qbman_pull_desc_clear(struct qbman_pull_desc *d)
1480 {
1481         memset(d, 0, sizeof(*d));
1482 }
1483
1484 void qbman_pull_desc_set_storage(struct qbman_pull_desc *d,
1485                                  struct qbman_result *storage,
1486                                  dma_addr_t storage_phys,
1487                                  int stash)
1488 {
1489         d->pull.rsp_addr_virt = (size_t)storage;
1490
1491         if (!storage) {
1492                 d->pull.verb &= ~(1 << QB_VDQCR_VERB_RLS_SHIFT);
1493                 return;
1494         }
1495         d->pull.verb |= 1 << QB_VDQCR_VERB_RLS_SHIFT;
1496         if (stash)
1497                 d->pull.verb |= 1 << QB_VDQCR_VERB_WAE_SHIFT;
1498         else
1499                 d->pull.verb &= ~(1 << QB_VDQCR_VERB_WAE_SHIFT);
1500
1501         d->pull.rsp_addr = storage_phys;
1502 }
1503
1504 void qbman_pull_desc_set_numframes(struct qbman_pull_desc *d,
1505                                    uint8_t numframes)
1506 {
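        /* The hardware field encodes the count as N-1 (0 means one frame) */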
1507         d->pull.numf = numframes - 1;
1508 }
1509
1510 void qbman_pull_desc_set_token(struct qbman_pull_desc *d, uint8_t token)
1511 {
1512         d->pull.tok = token;
1513 }
1514
1515 void qbman_pull_desc_set_fq(struct qbman_pull_desc *d, uint32_t fqid)
1516 {
1517         d->pull.verb |= 1 << QB_VDQCR_VERB_DCT_SHIFT;
1518         d->pull.verb |= qb_pull_dt_framequeue << QB_VDQCR_VERB_DT_SHIFT;
1519         d->pull.dq_src = fqid;
1520 }
1521
1522 void qbman_pull_desc_set_wq(struct qbman_pull_desc *d, uint32_t wqid,
1523                             enum qbman_pull_type_e dct)
1524 {
1525         d->pull.verb |= dct << QB_VDQCR_VERB_DCT_SHIFT;
1526         d->pull.verb |= qb_pull_dt_workqueue << QB_VDQCR_VERB_DT_SHIFT;
1527         d->pull.dq_src = wqid;
1528 }
1529
1530 void qbman_pull_desc_set_channel(struct qbman_pull_desc *d, uint32_t chid,
1531                                  enum qbman_pull_type_e dct)
1532 {
1533         d->pull.verb |= dct << QB_VDQCR_VERB_DCT_SHIFT;
1534         d->pull.verb |= qb_pull_dt_channel << QB_VDQCR_VERB_DT_SHIFT;
1535         d->pull.dq_src = chid;
1536 }
1537
1538 void qbman_pull_desc_set_rad(struct qbman_pull_desc *d, int rad)
1539 {
1540         if (d->pull.verb & (1 << QB_VDQCR_VERB_RLS_SHIFT)) {
1541                 if (rad)
1542                         d->pull.verb |= 1 << QB_VDQCR_VERB_RAD_SHIFT;
1543                 else
1544                         d->pull.verb &= ~(1 << QB_VDQCR_VERB_RAD_SHIFT);
1545         } else {
1546                 printf("The RAD feature is not valid when RLS = 0\n");
1547         }
1548 }
1549
1550 static int qbman_swp_pull_direct(struct qbman_swp *s,
1551                                  struct qbman_pull_desc *d)
1552 {
1553         uint32_t *p;
1554         uint32_t *cl = qb_cl(d);
1555
1556         if (!atomic_dec_and_test(&s->vdq.busy)) {
1557                 atomic_inc(&s->vdq.busy);
1558                 return -EBUSY;
1559         }
1560
1561         d->pull.tok = s->sys.idx + 1;
1562         s->vdq.storage = (void *)(size_t)d->pull.rsp_addr_virt;
1563         p = qbman_cena_write_start_wo_shadow(&s->sys, QBMAN_CENA_SWP_VDQCR);
1564         memcpy(&p[1], &cl[1], 12);
1565
1566         /* Set the verb byte, have to substitute in the valid-bit */
1567         lwsync();
1568         p[0] = cl[0] | s->vdq.valid_bit;
1569         s->vdq.valid_bit ^= QB_VALID_BIT;
1570         qbman_cena_write_complete_wo_shadow(&s->sys, QBMAN_CENA_SWP_VDQCR);
1571
1572         return 0;
1573 }
1574
1575 static int qbman_swp_pull_mem_back(struct qbman_swp *s,
1576                                    struct qbman_pull_desc *d)
1577 {
1578         uint32_t *p;
1579         uint32_t *cl = qb_cl(d);
1580
1581         if (!atomic_dec_and_test(&s->vdq.busy)) {
1582                 atomic_inc(&s->vdq.busy);
1583                 return -EBUSY;
1584         }
1585
1586         d->pull.tok = s->sys.idx + 1;
1587         s->vdq.storage = (void *)(size_t)d->pull.rsp_addr_virt;
1588         p = qbman_cena_write_start_wo_shadow(&s->sys, QBMAN_CENA_SWP_VDQCR_MEM);
1589         memcpy(&p[1], &cl[1], 12);
1590
1591         /* Set the verb byte, have to substitute in the valid-bit */
1592         p[0] = cl[0] | s->vdq.valid_bit;
1593         s->vdq.valid_bit ^= QB_VALID_BIT;
1594         dma_wmb();
1595         qbman_cinh_write(&s->sys, QBMAN_CINH_SWP_VDQCR_RT, QMAN_RT_MODE);
1596
1597         return 0;
1598 }
1599
1600 inline int qbman_swp_pull(struct qbman_swp *s, struct qbman_pull_desc *d)
1601 {
1602         return qbman_swp_pull_ptr(s, d);
1603 }
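
/* Illustrative end-to-end use of the pull API above (a hypothetical
 * helper, not part of the driver): issue a volatile dequeue of up to 4
 * frames from 'fqid' into caller-provided storage, then busy-poll for
 * the response.
 */
static inline int example_volatile_dequeue(struct qbman_swp *s,
                                           struct qbman_result *storage,
                                           dma_addr_t storage_phys,
                                           uint32_t fqid)
{
        struct qbman_pull_desc pd;

        qbman_pull_desc_clear(&pd);
        qbman_pull_desc_set_storage(&pd, storage, storage_phys, 0);
        qbman_pull_desc_set_numframes(&pd, 4);
        qbman_pull_desc_set_fq(&pd, fqid);
        if (qbman_swp_pull(s, &pd))
                return -EBUSY;

        /* The response token flips to non-zero when the result lands */
        while (!qbman_check_new_result(storage))
                ;
        return 0;
}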
1604
1605 /****************/
1606 /* Polling DQRR */
1607 /****************/
1608
1609 #define QMAN_DQRR_PI_MASK              0xf
1610
1611 #define QBMAN_RESULT_DQ        0x60
1612 #define QBMAN_RESULT_FQRN      0x21
1613 #define QBMAN_RESULT_FQRNI     0x22
1614 #define QBMAN_RESULT_FQPN      0x24
1615 #define QBMAN_RESULT_FQDAN     0x25
1616 #define QBMAN_RESULT_CDAN      0x26
1617 #define QBMAN_RESULT_CSCN_MEM  0x27
1618 #define QBMAN_RESULT_CGCU      0x28
1619 #define QBMAN_RESULT_BPSCN     0x29
1620 #define QBMAN_RESULT_CSCN_WQ   0x2a
1621
1622 #include <rte_prefetch.h>
1623
1624 void qbman_swp_prefetch_dqrr_next(struct qbman_swp *s)
1625 {
1626         const struct qbman_result *p;
1627
1628         p = qbman_cena_read_wo_shadow(&s->sys,
1629                 QBMAN_CENA_SWP_DQRR(s->dqrr.next_idx));
1630         rte_prefetch0(p);
1631 }
1632
1633 /* Returns NULL if there are no unconsumed DQRR entries. Each DQRR entry is
1634  * returned only once, so repeated calls yield a sequence of entries without
1635  * requiring that they be consumed immediately or in any particular order.
1636  */
1637 inline const struct qbman_result *qbman_swp_dqrr_next(struct qbman_swp *s)
1638 {
1639         return qbman_swp_dqrr_next_ptr(s);
1640 }
1641
1642 const struct qbman_result *qbman_swp_dqrr_next_direct(struct qbman_swp *s)
1643 {
1644         uint32_t verb;
1645         uint32_t response_verb;
1646         uint32_t flags;
1647         const struct qbman_result *p;
1648
1649         /* Before using valid-bit to detect if something is there, we have to
1650          * handle the case of the DQRR reset bug...
1651          */
1652         if (s->dqrr.reset_bug) {
1653                 /* We pick up new entries by cache-inhibited producer index,
1654                  * which means that a non-coherent mapping would require us to
1655                  * invalidate and read *only* once that PI has indicated that
1656                  * there's an entry here. The first trip around the DQRR ring
1657                  * will be much less efficient than all subsequent trips around
1658                  * it...
1659                  */
1660                 uint8_t pi = qbman_cinh_read(&s->sys, QBMAN_CINH_SWP_DQPI) &
1661                              QMAN_DQRR_PI_MASK;
1662
1663                 /* there are new entries if pi != next_idx */
1664                 if (pi == s->dqrr.next_idx)
1665                         return NULL;
1666
1667                 /* if next_idx is/was the last ring index, and 'pi' is
1668                  * different, we can disable the workaround as all the ring
1669                  * entries have now been DMA'd to so valid-bit checking is
1670                  * repaired. Note: this logic needs to be based on next_idx
1671                  * (which increments one at a time), rather than on pi (which
1672                  * can burst and wrap-around between our snapshots of it).
1673                  */
1674                 QBMAN_BUG_ON((s->dqrr.dqrr_size - 1) < 0);
1675                 if (s->dqrr.next_idx == (s->dqrr.dqrr_size - 1u)) {
1676                         pr_debug("DEBUG: next_idx=%d, pi=%d, clear reset bug\n",
1677                                  s->dqrr.next_idx, pi);
1678                         s->dqrr.reset_bug = 0;
1679                 }
1680                 qbman_cena_invalidate_prefetch(&s->sys,
1681                                         QBMAN_CENA_SWP_DQRR(s->dqrr.next_idx));
1682         }
1683         p = qbman_cena_read_wo_shadow(&s->sys,
1684                         QBMAN_CENA_SWP_DQRR(s->dqrr.next_idx));
1685
1686         verb = p->dq.verb;
1687
1688         /* If the valid-bit isn't of the expected polarity, nothing is there.
1689          * Note, in the DQRR reset bug workaround, we shouldn't need to skip
1690          * this check, because we've already determined that a new entry is
1691          * available and we've invalidated the cacheline before reading it,
1692          * so the valid-bit behaviour is repaired and should tell us what we
1693          * already knew from reading PI.
1694          */
1695         if ((verb & QB_VALID_BIT) != s->dqrr.valid_bit)
1696                 return NULL;
1697
1698         /* There's something there. Move "next_idx" on to the next ring
1699          * entry before returning what we found.
1700          */
1701         s->dqrr.next_idx++;
1702         if (s->dqrr.next_idx == s->dqrr.dqrr_size) {
1703                 s->dqrr.next_idx = 0;
1704                 s->dqrr.valid_bit ^= QB_VALID_BIT;
1705         }
1706         /* If this is the final response to a volatile dequeue command
1707          * indicate that the vdq is no longer busy
1708          */
1709         flags = p->dq.stat;
1710         response_verb = verb & QBMAN_RESPONSE_VERB_MASK;
1711         if ((response_verb == QBMAN_RESULT_DQ) &&
1712             (flags & QBMAN_DQ_STAT_VOLATILE) &&
1713             (flags & QBMAN_DQ_STAT_EXPIRED))
1714                 atomic_inc(&s->vdq.busy);
1715
1716         return p;
1717 }
1718
1719 const struct qbman_result *qbman_swp_dqrr_next_mem_back(struct qbman_swp *s)
1720 {
1721         uint32_t verb;
1722         uint32_t response_verb;
1723         uint32_t flags;
1724         const struct qbman_result *p;
1725
1726         p = qbman_cena_read_wo_shadow(&s->sys,
1727                         QBMAN_CENA_SWP_DQRR_MEM(s->dqrr.next_idx));
1728
1729         verb = p->dq.verb;
1730
1731         /* If the valid-bit isn't of the expected polarity, nothing is there.
1732          * Note, unlike the direct flavour above, this memory-backed path
1733          * carries no DQRR reset bug workaround: no producer index has been
1734          * read and no cacheline invalidated first, so the valid-bit alone
1735          * decides whether a new entry has landed.
1736          */
1738         if ((verb & QB_VALID_BIT) != s->dqrr.valid_bit)
1739                 return NULL;
1740
1741         /* There's something there. Move "next_idx" on to the next ring
1742          * entry before returning what we found.
1743          */
1744         s->dqrr.next_idx++;
1745         if (s->dqrr.next_idx == s->dqrr.dqrr_size) {
1746                 s->dqrr.next_idx = 0;
1747                 s->dqrr.valid_bit ^= QB_VALID_BIT;
1748         }
1749         /* If this is the final response to a volatile dequeue command
1750          * indicate that the vdq is no longer busy
1751          */
1752         flags = p->dq.stat;
1753         response_verb = verb & QBMAN_RESPONSE_VERB_MASK;
1754         if ((response_verb == QBMAN_RESULT_DQ)
1755                         && (flags & QBMAN_DQ_STAT_VOLATILE)
1756                         && (flags & QBMAN_DQ_STAT_EXPIRED))
1757                 atomic_inc(&s->vdq.busy);
1758         return p;
1759 }
1760
1761 /* Consume DQRR entries previously returned from qbman_swp_dqrr_next(). */
1762 void qbman_swp_dqrr_consume(struct qbman_swp *s,
1763                             const struct qbman_result *dq)
1764 {
1765         qbman_cinh_write(&s->sys,
1766                         QBMAN_CINH_SWP_DCAP, QBMAN_IDX_FROM_DQRR(dq));
1767 }
1768
1769 /* Consume DQRR entries previously returned from qbman_swp_dqrr_next(). */
1770 void qbman_swp_dqrr_idx_consume(struct qbman_swp *s,
1771                             uint8_t dqrr_index)
1772 {
1773         qbman_cinh_write(&s->sys, QBMAN_CINH_SWP_DCAP, dqrr_index);
1774 }
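
/* Illustrative DQRR polling loop combining the entry/consume APIs above
 * (a hypothetical helper, not part of the driver).
 */
static inline void example_poll_dqrr(struct qbman_swp *s)
{
        const struct qbman_result *dq;

        while ((dq = qbman_swp_dqrr_next(s)) != NULL) {
                if (qbman_result_is_DQ(dq)) {
                        const struct qbman_fd *fd = qbman_result_DQ_fd(dq);

                        (void)fd; /* hand the frame to the application */
                }
                qbman_swp_dqrr_consume(s, dq);
        }
}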
1775
1776 /*********************************/
1777 /* Polling user-provided storage */
1778 /*********************************/
1779
1780 int qbman_result_has_new_result(struct qbman_swp *s,
1781                                 struct qbman_result *dq)
1782 {
1783         if (dq->dq.tok == 0)
1784                 return 0;
1785
1786         /*
1787          * Set the token to 0 so that we will detect the change back to
1788          * non-zero the next time this polling loop is traversed; the
1789          * dequeue response is otherwise treated as read-only.
1790          */
1791         dq->dq.tok = 0;
1792
1793         /*
1794          * VDQCR "no longer busy" hook - not quite the same as DQRR, because
1795          * the fact "VDQCR" shows busy doesn't mean that we hold the result
1796          * that makes it available. Eg. we may be looking at our 10th dequeue
1797          * result, having released VDQCR after the 1st result and it is now
1798          * busy due to some other command!
1799          */
1800         if (s->vdq.storage == dq) {
1801                 s->vdq.storage = NULL;
1802                 atomic_inc(&s->vdq.busy);
1803         }
1804
1805         return 1;
1806 }
1807
1808 int qbman_check_new_result(struct qbman_result *dq)
1809 {
1810         if (dq->dq.tok == 0)
1811                 return 0;
1812
1813         /*
1814          * Set the token to 0 so that we will detect the change back to
1815          * non-zero the next time this polling loop is traversed; the
1816          * dequeue response is otherwise treated as read-only.
1817          */
1818         dq->dq.tok = 0;
1819
1820         return 1;
1821 }
1822
1823 int qbman_check_command_complete(struct qbman_result *dq)
1824 {
1825         struct qbman_swp *s;
1826
1827         if (dq->dq.tok == 0)
1828                 return 0;
1829
1830         s = portal_idx_map[dq->dq.tok - 1];
1831         /*
1832          * VDQCR "no longer busy" hook - not quite the same as DQRR, because
1833          * the fact "VDQCR" shows busy doesn't mean that we hold the result
1834          * that makes it available. Eg. we may be looking at our 10th dequeue
1835          * result, having released VDQCR after the 1st result and it is now
1836          * busy due to some other command!
1837          */
1838         if (s->vdq.storage == dq) {
1839                 s->vdq.storage = NULL;
1840                 atomic_inc(&s->vdq.busy);
1841         }
1842
1843         return 1;
1844 }
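
/* A sketch of the token round-trip used above (illustrative, not part of
 * the driver): qbman_swp_pull_*() stores the portal index plus one in the
 * descriptor's 'tok' field, responses echo it back, and 'tok - 1' then
 * recovers the issuing portal from portal_idx_map[].
 */
static inline struct qbman_swp *example_portal_from_token(uint8_t tok)
{
        return tok ? portal_idx_map[tok - 1] : NULL;
}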
1845
1846 /********************************/
1847 /* Categorising qbman results   */
1848 /********************************/
1849
1850 static inline int __qbman_result_is_x(const struct qbman_result *dq,
1851                                       uint8_t x)
1852 {
1853         uint8_t response_verb = dq->dq.verb & QBMAN_RESPONSE_VERB_MASK;
1854
1855         return (response_verb == x);
1856 }
1857
1858 int qbman_result_is_DQ(const struct qbman_result *dq)
1859 {
1860         return __qbman_result_is_x(dq, QBMAN_RESULT_DQ);
1861 }
1862
1863 int qbman_result_is_FQDAN(const struct qbman_result *dq)
1864 {
1865         return __qbman_result_is_x(dq, QBMAN_RESULT_FQDAN);
1866 }
1867
1868 int qbman_result_is_CDAN(const struct qbman_result *dq)
1869 {
1870         return __qbman_result_is_x(dq, QBMAN_RESULT_CDAN);
1871 }
1872
1873 int qbman_result_is_CSCN(const struct qbman_result *dq)
1874 {
1875         return __qbman_result_is_x(dq, QBMAN_RESULT_CSCN_MEM) ||
1876                 __qbman_result_is_x(dq, QBMAN_RESULT_CSCN_WQ);
1877 }
1878
1879 int qbman_result_is_BPSCN(const struct qbman_result *dq)
1880 {
1881         return __qbman_result_is_x(dq, QBMAN_RESULT_BPSCN);
1882 }
1883
1884 int qbman_result_is_CGCU(const struct qbman_result *dq)
1885 {
1886         return __qbman_result_is_x(dq, QBMAN_RESULT_CGCU);
1887 }
1888
1889 int qbman_result_is_FQRN(const struct qbman_result *dq)
1890 {
1891         return __qbman_result_is_x(dq, QBMAN_RESULT_FQRN);
1892 }
1893
1894 int qbman_result_is_FQRNI(const struct qbman_result *dq)
1895 {
1896         return __qbman_result_is_x(dq, QBMAN_RESULT_FQRNI);
1897 }
1898
1899 int qbman_result_is_FQPN(const struct qbman_result *dq)
1900 {
1901         return __qbman_result_is_x(dq, QBMAN_RESULT_FQPN);
1902 }
1903
1904 /*********************************/
1905 /* Parsing frame dequeue results */
1906 /*********************************/
1907
1908 /* These APIs assume qbman_result_is_DQ() is TRUE */
1909
1910 uint8_t qbman_result_DQ_flags(const struct qbman_result *dq)
1911 {
1912         return dq->dq.stat;
1913 }
1914
1915 uint16_t qbman_result_DQ_seqnum(const struct qbman_result *dq)
1916 {
1917         return dq->dq.seqnum;
1918 }
1919
1920 uint16_t qbman_result_DQ_odpid(const struct qbman_result *dq)
1921 {
1922         return dq->dq.oprid;
1923 }
1924
1925 uint32_t qbman_result_DQ_fqid(const struct qbman_result *dq)
1926 {
1927         return dq->dq.fqid;
1928 }
1929
1930 uint32_t qbman_result_DQ_byte_count(const struct qbman_result *dq)
1931 {
1932         return dq->dq.fq_byte_cnt;
1933 }
1934
1935 uint32_t qbman_result_DQ_frame_count(const struct qbman_result *dq)
1936 {
1937         return dq->dq.fq_frm_cnt;
1938 }
1939
1940 uint64_t qbman_result_DQ_fqd_ctx(const struct qbman_result *dq)
1941 {
1942         return dq->dq.fqd_ctx;
1943 }
1944
1945 const struct qbman_fd *qbman_result_DQ_fd(const struct qbman_result *dq)
1946 {
1947         return (const struct qbman_fd *)&dq->dq.fd[0];
1948 }
1949
1950 /**************************************/
1951 /* Parsing state-change notifications */
1952 /**************************************/
1953 uint8_t qbman_result_SCN_state(const struct qbman_result *scn)
1954 {
1955         return scn->scn.state;
1956 }
1957
1958 uint32_t qbman_result_SCN_rid(const struct qbman_result *scn)
1959 {
1960         return scn->scn.rid_tok;
1961 }
1962
1963 uint64_t qbman_result_SCN_ctx(const struct qbman_result *scn)
1964 {
1965         return scn->scn.ctx;
1966 }
1967
1968 /*****************/
1969 /* Parsing BPSCN */
1970 /*****************/
1971 uint16_t qbman_result_bpscn_bpid(const struct qbman_result *scn)
1972 {
1973         return (uint16_t)qbman_result_SCN_rid(scn) & 0x3FFF;
1974 }
1975
1976 int qbman_result_bpscn_has_free_bufs(const struct qbman_result *scn)
1977 {
1978         return !(int)(qbman_result_SCN_state(scn) & 0x1);
1979 }
1980
1981 int qbman_result_bpscn_is_depleted(const struct qbman_result *scn)
1982 {
1983         return (int)(qbman_result_SCN_state(scn) & 0x2);
1984 }
1985
1986 int qbman_result_bpscn_is_surplus(const struct qbman_result *scn)
1987 {
1988         return (int)(qbman_result_SCN_state(scn) & 0x4);
1989 }
1990
1991 uint64_t qbman_result_bpscn_ctx(const struct qbman_result *scn)
1992 {
1993         return qbman_result_SCN_ctx(scn);
1994 }
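
/* Illustrative consumer of the BPSCN parsers above (a hypothetical
 * helper, not part of the driver): a pool needs an immediate refill when
 * it is depleted and has no free buffers left.
 */
static inline int example_bpscn_needs_refill(const struct qbman_result *scn)
{
        return qbman_result_bpscn_is_depleted(scn) &&
                !qbman_result_bpscn_has_free_bufs(scn);
}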
1995
1996 /*****************/
1997 /* Parsing CGCU  */
1998 /*****************/
1999 uint16_t qbman_result_cgcu_cgid(const struct qbman_result *scn)
2000 {
2001         return (uint16_t)qbman_result_SCN_rid(scn) & 0xFFFF;
2002 }
2003
2004 uint64_t qbman_result_cgcu_icnt(const struct qbman_result *scn)
2005 {
2006         return qbman_result_SCN_ctx(scn);
2007 }
2008
2009 /********************/
2010 /* Parsing EQ RESP  */
2011 /********************/
2012 struct qbman_fd *qbman_result_eqresp_fd(struct qbman_result *eqresp)
2013 {
2014         return (struct qbman_fd *)&eqresp->eq_resp.fd[0];
2015 }
2016
2017 void qbman_result_eqresp_set_rspid(struct qbman_result *eqresp, uint8_t val)
2018 {
2019         eqresp->eq_resp.rspid = val;
2020 }
2021
2022 uint8_t qbman_result_eqresp_rspid(struct qbman_result *eqresp)
2023 {
2024         return eqresp->eq_resp.rspid;
2025 }
2026
2027 uint8_t qbman_result_eqresp_rc(struct qbman_result *eqresp)
2028 {
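        /* Response code 0xE indicates success; anything else is treated as
         * a failure. Note that -1 is truncated to 0xFF by the uint8_t
         * return type, so callers should only test zero vs. non-zero.
         */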
2029         if (eqresp->eq_resp.rc == 0xE)
2030                 return 0;
2031         else
2032                 return -1;
2033 }
2034
2035 /******************/
2036 /* Buffer release */
2037 /******************/
2038 #define QB_BR_RC_VALID_SHIFT  5
2039 #define QB_BR_RCDI_SHIFT      6
2040
2041 void qbman_release_desc_clear(struct qbman_release_desc *d)
2042 {
2043         memset(d, 0, sizeof(*d));
2044         d->br.verb = 1 << QB_BR_RC_VALID_SHIFT;
2045 }
2046
2047 void qbman_release_desc_set_bpid(struct qbman_release_desc *d, uint16_t bpid)
2048 {
2049         d->br.bpid = bpid;
2050 }
2051
2052 void qbman_release_desc_set_rcdi(struct qbman_release_desc *d, int enable)
2053 {
2054         if (enable)
2055                 d->br.verb |= 1 << QB_BR_RCDI_SHIFT;
2056         else
2057                 d->br.verb &= ~(1 << QB_BR_RCDI_SHIFT);
2058 }
2059
2060 #define RAR_IDX(rar)     ((rar) & 0x7)
2061 #define RAR_VB(rar)      ((rar) & 0x80)
2062 #define RAR_SUCCESS(rar) ((rar) & 0x100)
2063
2064 static int qbman_swp_release_direct(struct qbman_swp *s,
2065                                     const struct qbman_release_desc *d,
2066                                     const uint64_t *buffers,
2067                                     unsigned int num_buffers)
2068 {
2069         uint32_t *p;
2070         const uint32_t *cl = qb_cl(d);
2071         uint32_t rar = qbman_cinh_read(&s->sys, QBMAN_CINH_SWP_RAR);
2072
2073         pr_debug("RAR=%08x\n", rar);
2074         if (!RAR_SUCCESS(rar))
2075                 return -EBUSY;
2076
2077         QBMAN_BUG_ON(!num_buffers || (num_buffers > 7));
2078
2079         /* Start the release command */
2080         p = qbman_cena_write_start_wo_shadow(&s->sys,
2081                                      QBMAN_CENA_SWP_RCR(RAR_IDX(rar)));
2082
2083         /* Copy the caller's buffer pointers to the command */
2084         u64_to_le32_copy(&p[2], buffers, num_buffers);
2085
2086         /* Set the verb byte, have to substitute in the valid-bit and the
2087          * number of buffers.
2088          */
2089         lwsync();
2090         p[0] = cl[0] | RAR_VB(rar) | num_buffers;
2091         qbman_cena_write_complete_wo_shadow(&s->sys,
2092                                     QBMAN_CENA_SWP_RCR(RAR_IDX(rar)));
2093
2094         return 0;
2095 }
2096
2097 static int qbman_swp_release_mem_back(struct qbman_swp *s,
2098                                       const struct qbman_release_desc *d,
2099                                       const uint64_t *buffers,
2100                                       unsigned int num_buffers)
2101 {
2102         uint32_t *p;
2103         const uint32_t *cl = qb_cl(d);
2104         uint32_t rar = qbman_cinh_read(&s->sys, QBMAN_CINH_SWP_RAR);
2105
2106         pr_debug("RAR=%08x\n", rar);
2107         if (!RAR_SUCCESS(rar))
2108                 return -EBUSY;
2109
2110         QBMAN_BUG_ON(!num_buffers || (num_buffers > 7));
2111
2112         /* Start the release command */
2113         p = qbman_cena_write_start_wo_shadow(&s->sys,
2114                 QBMAN_CENA_SWP_RCR_MEM(RAR_IDX(rar)));
2115
2116         /* Copy the caller's buffer pointers to the command */
2117         u64_to_le32_copy(&p[2], buffers, num_buffers);
2118
2119         /* Set the verb byte, have to substitute in the valid-bit and the
2120          * number of buffers.
2121          */
2122         p[0] = cl[0] | RAR_VB(rar) | num_buffers;
2123         lwsync();
2124         qbman_cinh_write(&s->sys, QBMAN_CINH_SWP_RCR_AM_RT +
2125                 RAR_IDX(rar) * 4, QMAN_RT_MODE);
2126
2127         return 0;
2128 }
2129
2130 inline int qbman_swp_release(struct qbman_swp *s,
2131                              const struct qbman_release_desc *d,
2132                              const uint64_t *buffers,
2133                              unsigned int num_buffers)
2134 {
2135         return qbman_swp_release_ptr(s, d, buffers, num_buffers);
2136 }
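
/* Illustrative caller of the release API above (a hypothetical helper,
 * not part of the driver): release up to 7 buffers to pool 'bpid' in one
 * RCR command.
 */
static inline int example_release_buffers(struct qbman_swp *s, uint16_t bpid,
                                          const uint64_t *bufs,
                                          unsigned int num)
{
        struct qbman_release_desc rd;

        if (!num || num > 7)
                return -EINVAL;
        qbman_release_desc_clear(&rd);
        qbman_release_desc_set_bpid(&rd, bpid);
        return qbman_swp_release(s, &rd, bufs, num);
}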
2137
2138 /*******************/
2139 /* Buffer acquires */
2140 /*******************/
2141 struct qbman_acquire_desc {
2142         uint8_t verb;
2143         uint8_t reserved;
2144         uint16_t bpid;
2145         uint8_t num;
2146         uint8_t reserved2[59];
2147 };
2148
2149 struct qbman_acquire_rslt {
2150         uint8_t verb;
2151         uint8_t rslt;
2152         uint16_t reserved;
2153         uint8_t num;
2154         uint8_t reserved2[3];
2155         uint64_t buf[7];
2156 };
2157
2158 int qbman_swp_acquire(struct qbman_swp *s, uint16_t bpid, uint64_t *buffers,
2159                       unsigned int num_buffers)
2160 {
2161         struct qbman_acquire_desc *p;
2162         struct qbman_acquire_rslt *r;
2163
2164         if (!num_buffers || (num_buffers > 7))
2165                 return -EINVAL;
2166
2167         /* Start the management command */
2168         p = qbman_swp_mc_start(s);
2169
2170         if (!p)
2171                 return -EBUSY;
2172
2173         /* Encode the caller-provided attributes */
2174         p->bpid = bpid;
2175         p->num = num_buffers;
2176
2177         /* Complete the management command */
2178         r = qbman_swp_mc_complete(s, p, QBMAN_MC_ACQUIRE);
2179         if (!r) {
2180                 pr_err("qbman: acquire from BPID %d failed, no response\n",
2181                        bpid);
2182                 return -EIO;
2183         }
2184
2185         /* Decode the outcome */
2186         QBMAN_BUG_ON((r->verb & QBMAN_RESPONSE_VERB_MASK) != QBMAN_MC_ACQUIRE);
2187
2188         /* Determine success or failure */
2189         if (r->rslt != QBMAN_MC_RSLT_OK) {
2190                 pr_err("Acquire buffers from BPID 0x%x failed, code=0x%02x\n",
2191                        bpid, r->rslt);
2192                 return -EIO;
2193         }
2194
2195         QBMAN_BUG_ON(r->num > num_buffers);
2196
2197         /* Copy the acquired buffers to the caller's array */
2198         u64_from_le32_copy(buffers, &r->buf[0], r->num);
2199
2200         return (int)r->num;
2201 }
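
/* Illustrative caller of qbman_swp_acquire() (a hypothetical helper, not
 * part of the driver): request a full command's worth of buffers; the
 * pool may grant fewer.
 */
static inline int example_acquire_buffers(struct qbman_swp *s, uint16_t bpid,
                                          uint64_t bufs[7])
{
        /* Returns the number of buffers granted, or a negative errno */
        return qbman_swp_acquire(s, bpid, bufs, 7);
}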
2202
2203 /*****************/
2204 /* FQ management */
2205 /*****************/
2206 struct qbman_alt_fq_state_desc {
2207         uint8_t verb;
2208         uint8_t reserved[3];
2209         uint32_t fqid;
2210         uint8_t reserved2[56];
2211 };
2212
2213 struct qbman_alt_fq_state_rslt {
2214         uint8_t verb;
2215         uint8_t rslt;
2216         uint8_t reserved[62];
2217 };
2218
2219 #define ALT_FQ_FQID_MASK 0x00FFFFFF
2220
2221 static int qbman_swp_alt_fq_state(struct qbman_swp *s, uint32_t fqid,
2222                                   uint8_t alt_fq_verb)
2223 {
2224         struct qbman_alt_fq_state_desc *p;
2225         struct qbman_alt_fq_state_rslt *r;
2226
2227         /* Start the management command */
2228         p = qbman_swp_mc_start(s);
2229         if (!p)
2230                 return -EBUSY;
2231
2232         p->fqid = fqid & ALT_FQ_FQID_MASK;
2233
2234         /* Complete the management command */
2235         r = qbman_swp_mc_complete(s, p, alt_fq_verb);
2236         if (!r) {
2237                 pr_err("qbman: mgmt cmd failed, no response (verb=0x%x)\n",
2238                        alt_fq_verb);
2239                 return -EIO;
2240         }
2241
2242         /* Decode the outcome */
2243         QBMAN_BUG_ON((r->verb & QBMAN_RESPONSE_VERB_MASK) != alt_fq_verb);
2244
2245         /* Determine success or failure */
2246         if (r->rslt != QBMAN_MC_RSLT_OK) {
2247                 pr_err("ALT FQID %d failed: verb = 0x%08x, code = 0x%02x\n",
2248                        fqid, alt_fq_verb, r->rslt);
2249                 return -EIO;
2250         }
2251
2252         return 0;
2253 }
2254
2255 int qbman_swp_fq_schedule(struct qbman_swp *s, uint32_t fqid)
2256 {
2257         return qbman_swp_alt_fq_state(s, fqid, QBMAN_FQ_SCHEDULE);
2258 }
2259
2260 int qbman_swp_fq_force(struct qbman_swp *s, uint32_t fqid)
2261 {
2262         return qbman_swp_alt_fq_state(s, fqid, QBMAN_FQ_FORCE);
2263 }
2264
2265 int qbman_swp_fq_xon(struct qbman_swp *s, uint32_t fqid)
2266 {
2267         return qbman_swp_alt_fq_state(s, fqid, QBMAN_FQ_XON);
2268 }
2269
2270 int qbman_swp_fq_xoff(struct qbman_swp *s, uint32_t fqid)
2271 {
2272         return qbman_swp_alt_fq_state(s, fqid, QBMAN_FQ_XOFF);
2273 }
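
/* Illustrative flow-control wrapper over the ALT-FQ commands above (a
 * hypothetical helper, not part of the driver).
 */
static inline int example_set_fq_paused(struct qbman_swp *s, uint32_t fqid,
                                        int paused)
{
        return paused ? qbman_swp_fq_xoff(s, fqid) :
                        qbman_swp_fq_xon(s, fqid);
}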
2274
2275 /**********************/
2276 /* Channel management */
2277 /**********************/
2278
2279 struct qbman_cdan_ctrl_desc {
2280         uint8_t verb;
2281         uint8_t reserved;
2282         uint16_t ch;
2283         uint8_t we;
2284         uint8_t ctrl;
2285         uint16_t reserved2;
2286         uint64_t cdan_ctx;
2287         uint8_t reserved3[48];
2289 };
2290
2291 struct qbman_cdan_ctrl_rslt {
2292         uint8_t verb;
2293         uint8_t rslt;
2294         uint16_t ch;
2295         uint8_t reserved[60];
2296 };
2297
2298 /* Hide "ICD" for now as we don't use it, don't set it, and don't test it, so it
2299  * would be irresponsible to expose it.
2300  */
2301 #define CODE_CDAN_WE_EN    0x1
2302 #define CODE_CDAN_WE_CTX   0x4
2303
2304 static int qbman_swp_CDAN_set(struct qbman_swp *s, uint16_t channelid,
2305                               uint8_t we_mask, uint8_t cdan_en,
2306                               uint64_t ctx)
2307 {
2308         struct qbman_cdan_ctrl_desc *p;
2309         struct qbman_cdan_ctrl_rslt *r;
2310
2311         /* Start the management command */
2312         p = qbman_swp_mc_start(s);
2313         if (!p)
2314                 return -EBUSY;
2315
2316         /* Encode the caller-provided attributes */
2317         p->ch = channelid;
2318         p->we = we_mask;
2319         if (cdan_en)
2320                 p->ctrl = 1;
2321         else
2322                 p->ctrl = 0;
2323         p->cdan_ctx = ctx;
2324
2325         /* Complete the management command */
2326         r = qbman_swp_mc_complete(s, p, QBMAN_WQCHAN_CONFIGURE);
2327         if (!r) {
2328                 pr_err("qbman: wqchan config failed, no response\n");
2329                 return -EIO;
2330         }
2331
2332         /* Decode the outcome */
2333         QBMAN_BUG_ON((r->verb & QBMAN_RESPONSE_VERB_MASK)
2334                      != QBMAN_WQCHAN_CONFIGURE);
2335
2336         /* Determine success or failure */
2337         if (r->rslt != QBMAN_MC_RSLT_OK) {
2338                 pr_err("CDAN cQID %d failed: code = 0x%02x\n",
2339                        channelid, r->rslt);
2340                 return -EIO;
2341         }
2342
2343         return 0;
2344 }
2345
2346 int qbman_swp_CDAN_set_context(struct qbman_swp *s, uint16_t channelid,
2347                                uint64_t ctx)
2348 {
2349         return qbman_swp_CDAN_set(s, channelid,
2350                                   CODE_CDAN_WE_CTX,
2351                                   0, ctx);
2352 }
2353
2354 int qbman_swp_CDAN_enable(struct qbman_swp *s, uint16_t channelid)
2355 {
2356         return qbman_swp_CDAN_set(s, channelid,
2357                                   CODE_CDAN_WE_EN,
2358                                   1, 0);
2359 }
2360
2361 int qbman_swp_CDAN_disable(struct qbman_swp *s, uint16_t channelid)
2362 {
2363         return qbman_swp_CDAN_set(s, channelid,
2364                                   CODE_CDAN_WE_EN,
2365                                   0, 0);
2366 }
2367
2368 int qbman_swp_CDAN_set_context_enable(struct qbman_swp *s, uint16_t channelid,
2369                                       uint64_t ctx)
2370 {
2371         return qbman_swp_CDAN_set(s, channelid,
2372                                   CODE_CDAN_WE_EN | CODE_CDAN_WE_CTX,
2373                                   1, ctx);
2374 }
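
/* Illustrative use of the CDAN helpers above (a hypothetical helper, not
 * part of the driver): program an application context and arm the
 * notification in a single management command.
 */
static inline int example_arm_channel(struct qbman_swp *s, uint16_t ch,
                                      uint64_t app_ctx)
{
        return qbman_swp_CDAN_set_context_enable(s, ch, app_ctx);
}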
2375
2376 uint8_t qbman_get_dqrr_idx(const struct qbman_result *dqrr)
2377 {
2378         return QBMAN_IDX_FROM_DQRR(dqrr);
2379 }
2380
2381 struct qbman_result *qbman_get_dqrr_from_idx(struct qbman_swp *s, uint8_t idx)
2382 {
2383         struct qbman_result *dq;
2384
2385         dq = qbman_cena_read(&s->sys, QBMAN_CENA_SWP_DQRR(idx));
2386         return dq;
2387 }