/* SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (C) 2014-2016 Freescale Semiconductor, Inc.
 * Copyright 2018-2020 NXP
 *
 */

#include "qbman_sys.h"
#include "qbman_portal.h"

/* QBMan portal management command codes */
#define QBMAN_MC_ACQUIRE       0x30
#define QBMAN_WQCHAN_CONFIGURE 0x46

/* Reverse mapping of QBMAN_CENA_SWP_DQRR() */
#define QBMAN_IDX_FROM_DQRR(p) (((unsigned long)p & 0x1ff) >> 6)

/* QBMan FQ management command codes */
#define QBMAN_FQ_SCHEDULE       0x48
#define QBMAN_FQ_FORCE          0x49
#define QBMAN_FQ_XON            0x4d
#define QBMAN_FQ_XOFF           0x4e

/*******************************/
/* Pre-defined attribute codes */
/*******************************/

#define QBMAN_RESPONSE_VERB_MASK   0x7f

/*************************/
/* SDQCR attribute codes */
/*************************/
#define QB_SDQCR_FC_SHIFT   29
#define QB_SDQCR_FC_MASK    0x1
#define QB_SDQCR_DCT_SHIFT  24
#define QB_SDQCR_DCT_MASK   0x3
#define QB_SDQCR_TOK_SHIFT  16
#define QB_SDQCR_TOK_MASK   0xff
#define QB_SDQCR_SRC_SHIFT  0
#define QB_SDQCR_SRC_MASK   0xffff

/* opaque token for static dequeues */
#define QMAN_SDQCR_TOKEN    0xbb

enum qbman_sdqcr_dct {
        qbman_sdqcr_dct_null = 0,
        qbman_sdqcr_dct_prio_ics,
        qbman_sdqcr_dct_active_ics,
        qbman_sdqcr_dct_active
};

enum qbman_sdqcr_fc {
        qbman_sdqcr_fc_one = 0,
        qbman_sdqcr_fc_up_to_3 = 1
};
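
/* For reference, a worked example (not used directly by the code):
 * qbman_swp_init() below composes the static-dequeue word as
 *      (qbman_sdqcr_fc_up_to_3 << QB_SDQCR_FC_SHIFT) |
 *      (qbman_sdqcr_dct_prio_ics << QB_SDQCR_DCT_SHIFT) |
 *      (QMAN_SDQCR_TOKEN << QB_SDQCR_TOK_SHIFT)
 * which is 0x20000000 | 0x01000000 | 0x00bb0000 = 0x21bb0000; the channel
 * source bits are OR'ed in later, when dequeues from specific channels are
 * enabled.
 */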

/* We need to keep track of which SWP triggered a pull command, so keep
 * an array of portals indexed by portal ID and use the token field to
 * find the proper portal.
 */
#define MAX_QBMAN_PORTALS  64
static struct qbman_swp *portal_idx_map[MAX_QBMAN_PORTALS];

uint32_t qman_version;

/* Internal Function declaration */
static int
qbman_swp_enqueue_array_mode_direct(struct qbman_swp *s,
                const struct qbman_eq_desc *d,
                const struct qbman_fd *fd);
static int
qbman_swp_enqueue_array_mode_mem_back(struct qbman_swp *s,
                const struct qbman_eq_desc *d,
                const struct qbman_fd *fd);

static int
qbman_swp_enqueue_ring_mode_direct(struct qbman_swp *s,
                const struct qbman_eq_desc *d,
                const struct qbman_fd *fd);
static int
qbman_swp_enqueue_ring_mode_cinh_read_direct(struct qbman_swp *s,
                const struct qbman_eq_desc *d,
                const struct qbman_fd *fd);
static int
qbman_swp_enqueue_ring_mode_cinh_direct(struct qbman_swp *s,
                const struct qbman_eq_desc *d,
                const struct qbman_fd *fd);
static int
qbman_swp_enqueue_ring_mode_mem_back(struct qbman_swp *s,
                const struct qbman_eq_desc *d,
                const struct qbman_fd *fd);

static int
qbman_swp_enqueue_multiple_direct(struct qbman_swp *s,
                const struct qbman_eq_desc *d,
                const struct qbman_fd *fd,
                uint32_t *flags,
                int num_frames);
static int
qbman_swp_enqueue_multiple_cinh_read_direct(struct qbman_swp *s,
                const struct qbman_eq_desc *d,
                const struct qbman_fd *fd,
                uint32_t *flags,
                int num_frames);
static int
qbman_swp_enqueue_multiple_cinh_direct(struct qbman_swp *s,
                const struct qbman_eq_desc *d,
                const struct qbman_fd *fd,
                uint32_t *flags,
                int num_frames);
static int
qbman_swp_enqueue_multiple_mem_back(struct qbman_swp *s,
                const struct qbman_eq_desc *d,
                const struct qbman_fd *fd,
                uint32_t *flags,
                int num_frames);

static int
qbman_swp_enqueue_multiple_fd_direct(struct qbman_swp *s,
                const struct qbman_eq_desc *d,
                struct qbman_fd **fd,
                uint32_t *flags,
                int num_frames);
static int
qbman_swp_enqueue_multiple_fd_cinh_read_direct(struct qbman_swp *s,
                const struct qbman_eq_desc *d,
                struct qbman_fd **fd,
                uint32_t *flags,
                int num_frames);
static int
qbman_swp_enqueue_multiple_fd_cinh_direct(struct qbman_swp *s,
                const struct qbman_eq_desc *d,
                struct qbman_fd **fd,
                uint32_t *flags,
                int num_frames);
static int
qbman_swp_enqueue_multiple_fd_mem_back(struct qbman_swp *s,
                const struct qbman_eq_desc *d,
                struct qbman_fd **fd,
                uint32_t *flags,
                int num_frames);

static int
qbman_swp_enqueue_multiple_desc_direct(struct qbman_swp *s,
                const struct qbman_eq_desc *d,
                const struct qbman_fd *fd,
                int num_frames);
static int
qbman_swp_enqueue_multiple_desc_cinh_read_direct(struct qbman_swp *s,
                const struct qbman_eq_desc *d,
                const struct qbman_fd *fd,
                int num_frames);
static int
qbman_swp_enqueue_multiple_desc_cinh_direct(struct qbman_swp *s,
                const struct qbman_eq_desc *d,
                const struct qbman_fd *fd,
                int num_frames);
static int
qbman_swp_enqueue_multiple_desc_mem_back(struct qbman_swp *s,
                const struct qbman_eq_desc *d,
                const struct qbman_fd *fd,
                int num_frames);

static int
qbman_swp_pull_direct(struct qbman_swp *s, struct qbman_pull_desc *d);
static int
qbman_swp_pull_cinh_direct(struct qbman_swp *s, struct qbman_pull_desc *d);
static int
qbman_swp_pull_mem_back(struct qbman_swp *s, struct qbman_pull_desc *d);

const struct qbman_result *qbman_swp_dqrr_next_direct(struct qbman_swp *s);
const struct qbman_result *qbman_swp_dqrr_next_cinh_direct(struct qbman_swp *s);
const struct qbman_result *qbman_swp_dqrr_next_mem_back(struct qbman_swp *s);

static int
qbman_swp_release_direct(struct qbman_swp *s,
                const struct qbman_release_desc *d,
                const uint64_t *buffers, unsigned int num_buffers);
static int
qbman_swp_release_cinh_direct(struct qbman_swp *s,
                const struct qbman_release_desc *d,
                const uint64_t *buffers, unsigned int num_buffers);
static int
qbman_swp_release_mem_back(struct qbman_swp *s,
                const struct qbman_release_desc *d,
                const uint64_t *buffers, unsigned int num_buffers);

/* Function pointers */
static int (*qbman_swp_enqueue_array_mode_ptr)(struct qbman_swp *s,
                const struct qbman_eq_desc *d,
                const struct qbman_fd *fd)
        = qbman_swp_enqueue_array_mode_direct;

static int (*qbman_swp_enqueue_ring_mode_ptr)(struct qbman_swp *s,
                const struct qbman_eq_desc *d,
                const struct qbman_fd *fd)
        = qbman_swp_enqueue_ring_mode_direct;

static int (*qbman_swp_enqueue_multiple_ptr)(struct qbman_swp *s,
                const struct qbman_eq_desc *d,
                const struct qbman_fd *fd,
                uint32_t *flags,
                int num_frames)
        = qbman_swp_enqueue_multiple_direct;

static int (*qbman_swp_enqueue_multiple_fd_ptr)(struct qbman_swp *s,
                const struct qbman_eq_desc *d,
                struct qbman_fd **fd,
                uint32_t *flags,
                int num_frames)
        = qbman_swp_enqueue_multiple_fd_direct;

static int (*qbman_swp_enqueue_multiple_desc_ptr)(struct qbman_swp *s,
                const struct qbman_eq_desc *d,
                const struct qbman_fd *fd,
                int num_frames)
        = qbman_swp_enqueue_multiple_desc_direct;

static int (*qbman_swp_pull_ptr)(struct qbman_swp *s,
                struct qbman_pull_desc *d)
        = qbman_swp_pull_direct;

const struct qbman_result *(*qbman_swp_dqrr_next_ptr)(struct qbman_swp *s)
                = qbman_swp_dqrr_next_direct;

static int (*qbman_swp_release_ptr)(struct qbman_swp *s,
                        const struct qbman_release_desc *d,
                        const uint64_t *buffers, unsigned int num_buffers)
                        = qbman_swp_release_direct;

/*********************************/
/* Portal constructor/destructor */
/*********************************/

/* Software portals should always be in the power-on state when we initialise,
 * due to the CCSR-based portal reset functionality that MC has.
 *
 * Erk! Turns out that QMan versions prior to 4.1 do not correctly reset DQRR
 * valid-bits, so we need to support a workaround where we don't trust
 * valid-bits when detecting new entries until any stale ring entries have been
 * overwritten at least once. The idea is that we read PI for the first few
 * entries, then switch to valid-bit after that. The trick is to clear the
 * bug-work-around boolean once the PI wraps around the ring for the first
 * time.
 *
 * Note: this still carries a slight additional cost once the decrementer hits
 * zero.
 */
struct qbman_swp *qbman_swp_init(const struct qbman_swp_desc *d)
{
        int ret;
        uint32_t eqcr_pi;
        uint32_t mask_size;
        struct qbman_swp *p = malloc(sizeof(*p));

        if (!p)
                return NULL;

        memset(p, 0, sizeof(struct qbman_swp));

        p->desc = *d;
#ifdef QBMAN_CHECKING
        p->mc.check = swp_mc_can_start;
#endif
        p->mc.valid_bit = QB_VALID_BIT;
        p->sdq |= qbman_sdqcr_dct_prio_ics << QB_SDQCR_DCT_SHIFT;
        p->sdq |= qbman_sdqcr_fc_up_to_3 << QB_SDQCR_FC_SHIFT;
        p->sdq |= QMAN_SDQCR_TOKEN << QB_SDQCR_TOK_SHIFT;
        if ((d->qman_version & QMAN_REV_MASK) >= QMAN_REV_5000
                        && (d->cena_access_mode == qman_cena_fastest_access))
                p->mr.valid_bit = QB_VALID_BIT;

        atomic_set(&p->vdq.busy, 1);
        p->vdq.valid_bit = QB_VALID_BIT;
        p->dqrr.valid_bit = QB_VALID_BIT;
        qman_version = p->desc.qman_version;
        if ((qman_version & QMAN_REV_MASK) < QMAN_REV_4100) {
                p->dqrr.dqrr_size = 4;
                p->dqrr.reset_bug = 1;
        } else {
                p->dqrr.dqrr_size = 8;
                p->dqrr.reset_bug = 0;
        }

        ret = qbman_swp_sys_init(&p->sys, d, p->dqrr.dqrr_size);
        if (ret) {
                free(p);
                pr_err("qbman_swp_sys_init() failed %d\n", ret);
                return NULL;
        }

        /* Verify that the DQRR PI is 0 - if it is not, the portal isn't
         * in its default state, which is an error
         */
        if (qbman_cinh_read(&p->sys, QBMAN_CINH_SWP_DQPI) & 0xF) {
                pr_err("qbman DQRR PI is not zero, portal is not clean\n");
                free(p);
                return NULL;
        }

        /* SDQCR needs to be initialized to 0 when no channels are
         * being dequeued from or else the QMan HW will indicate an
         * error.  The values that were calculated above will be
         * applied when dequeues from a specific channel are enabled.
         */
        qbman_cinh_write(&p->sys, QBMAN_CINH_SWP_SDQCR, 0);

        p->eqcr.pi_ring_size = 8;
        if ((qman_version & QMAN_REV_MASK) >= QMAN_REV_5000
                        && (d->cena_access_mode == qman_cena_fastest_access)) {
                p->eqcr.pi_ring_size = 32;
                qbman_swp_enqueue_array_mode_ptr =
                        qbman_swp_enqueue_array_mode_mem_back;
                qbman_swp_enqueue_ring_mode_ptr =
                        qbman_swp_enqueue_ring_mode_mem_back;
                qbman_swp_enqueue_multiple_ptr =
                        qbman_swp_enqueue_multiple_mem_back;
                qbman_swp_enqueue_multiple_fd_ptr =
                        qbman_swp_enqueue_multiple_fd_mem_back;
                qbman_swp_enqueue_multiple_desc_ptr =
                        qbman_swp_enqueue_multiple_desc_mem_back;
                qbman_swp_pull_ptr = qbman_swp_pull_mem_back;
                qbman_swp_dqrr_next_ptr = qbman_swp_dqrr_next_mem_back;
                qbman_swp_release_ptr = qbman_swp_release_mem_back;
        }

        if (dpaa2_svr_family == SVR_LS1080A) {
                qbman_swp_enqueue_ring_mode_ptr =
                        qbman_swp_enqueue_ring_mode_cinh_read_direct;
                qbman_swp_enqueue_multiple_ptr =
                        qbman_swp_enqueue_multiple_cinh_read_direct;
                qbman_swp_enqueue_multiple_fd_ptr =
                        qbman_swp_enqueue_multiple_fd_cinh_read_direct;
                qbman_swp_enqueue_multiple_desc_ptr =
                        qbman_swp_enqueue_multiple_desc_cinh_read_direct;
        }

        for (mask_size = p->eqcr.pi_ring_size; mask_size > 0; mask_size >>= 1)
                p->eqcr.pi_ci_mask = (p->eqcr.pi_ci_mask<<1) + 1;
        eqcr_pi = qbman_cinh_read(&p->sys, QBMAN_CINH_SWP_EQCR_PI);
        p->eqcr.pi = eqcr_pi & p->eqcr.pi_ci_mask;
        p->eqcr.pi_vb = eqcr_pi & QB_VALID_BIT;
        p->eqcr.ci = qbman_cinh_read(&p->sys, QBMAN_CINH_SWP_EQCR_CI)
                        & p->eqcr.pi_ci_mask;
        p->eqcr.available = p->eqcr.pi_ring_size;

        portal_idx_map[p->desc.idx] = p;
        return p;
}

int qbman_swp_update(struct qbman_swp *p, int stash_off)
{
        const struct qbman_swp_desc *d = &p->desc;
        struct qbman_swp_sys *s = &p->sys;
        int ret;

        /* Nothing needs to be done for QBMAN rev > 5000 with fast access */
        if ((qman_version & QMAN_REV_MASK) >= QMAN_REV_5000
                        && (d->cena_access_mode == qman_cena_fastest_access))
                return 0;

        ret = qbman_swp_sys_update(s, d, p->dqrr.dqrr_size, stash_off);
        if (ret) {
                pr_err("qbman_swp_sys_update() failed %d\n", ret);
                return ret;
        }

        p->stash_off = stash_off;

        return 0;
}

void qbman_swp_finish(struct qbman_swp *p)
{
#ifdef QBMAN_CHECKING
        QBMAN_BUG_ON(p->mc.check != swp_mc_can_start);
#endif
        qbman_swp_sys_finish(&p->sys);
        portal_idx_map[p->desc.idx] = NULL;
        free(p);
}

const struct qbman_swp_desc *qbman_swp_get_desc(struct qbman_swp *p)
{
        return &p->desc;
}

/**************/
/* Interrupts */
/**************/

uint32_t qbman_swp_interrupt_get_vanish(struct qbman_swp *p)
{
        return qbman_cinh_read(&p->sys, QBMAN_CINH_SWP_ISDR);
}

void qbman_swp_interrupt_set_vanish(struct qbman_swp *p, uint32_t mask)
{
        qbman_cinh_write(&p->sys, QBMAN_CINH_SWP_ISDR, mask);
}

uint32_t qbman_swp_interrupt_read_status(struct qbman_swp *p)
{
        return qbman_cinh_read(&p->sys, QBMAN_CINH_SWP_ISR);
}

void qbman_swp_interrupt_clear_status(struct qbman_swp *p, uint32_t mask)
{
        qbman_cinh_write(&p->sys, QBMAN_CINH_SWP_ISR, mask);
}

uint32_t qbman_swp_dqrr_thrshld_read_status(struct qbman_swp *p)
{
        return qbman_cinh_read(&p->sys, QBMAN_CINH_SWP_DQRR_ITR);
}

void qbman_swp_dqrr_thrshld_write(struct qbman_swp *p, uint32_t mask)
{
        qbman_cinh_write(&p->sys, QBMAN_CINH_SWP_DQRR_ITR, mask);
}

uint32_t qbman_swp_intr_timeout_read_status(struct qbman_swp *p)
{
        return qbman_cinh_read(&p->sys, QBMAN_CINH_SWP_ITPR);
}

void qbman_swp_intr_timeout_write(struct qbman_swp *p, uint32_t mask)
{
        qbman_cinh_write(&p->sys, QBMAN_CINH_SWP_ITPR, mask);
}

uint32_t qbman_swp_interrupt_get_trigger(struct qbman_swp *p)
{
        return qbman_cinh_read(&p->sys, QBMAN_CINH_SWP_IER);
}

void qbman_swp_interrupt_set_trigger(struct qbman_swp *p, uint32_t mask)
{
        qbman_cinh_write(&p->sys, QBMAN_CINH_SWP_IER, mask);
}

int qbman_swp_interrupt_get_inhibit(struct qbman_swp *p)
{
        return qbman_cinh_read(&p->sys, QBMAN_CINH_SWP_IIR);
}

void qbman_swp_interrupt_set_inhibit(struct qbman_swp *p, int inhibit)
{
        qbman_cinh_write(&p->sys, QBMAN_CINH_SWP_IIR,
                         inhibit ? 0xffffffff : 0);
}

/***********************/
/* Management commands */
/***********************/

/*
 * Internal code common to all types of management commands.
 */
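
/* A typical caller sequence, for reference (a sketch only; the real callers
 * live elsewhere in this driver):
 *      cmd = qbman_swp_mc_start(p);
 *      if (!cmd)
 *              return -EBUSY;
 *      ... fill in the command payload ...
 *      qbman_swp_mc_submit(p, cmd, verb);
 *      do {
 *              result = qbman_swp_mc_result(p);
 *      } while (!result);
 */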

void *qbman_swp_mc_start(struct qbman_swp *p)
{
        void *ret;
#ifdef QBMAN_CHECKING
        QBMAN_BUG_ON(p->mc.check != swp_mc_can_start);
#endif
        if ((p->desc.qman_version & QMAN_REV_MASK) >= QMAN_REV_5000
                    && (p->desc.cena_access_mode == qman_cena_fastest_access))
                ret = qbman_cena_write_start(&p->sys, QBMAN_CENA_SWP_CR_MEM);
        else
                ret = qbman_cena_write_start(&p->sys, QBMAN_CENA_SWP_CR);
#ifdef QBMAN_CHECKING
        if (!ret)
                p->mc.check = swp_mc_can_submit;
#endif
        return ret;
}

void qbman_swp_mc_submit(struct qbman_swp *p, void *cmd, uint8_t cmd_verb)
{
        uint8_t *v = cmd;
#ifdef QBMAN_CHECKING
        QBMAN_BUG_ON(p->mc.check != swp_mc_can_submit);
#endif
        /* TBD: "|=" is going to hurt performance. Need to move as many fields
         * out of word zero, and for those that remain, the "OR" needs to occur
         * at the caller side. This debug check helps to catch cases where the
         * caller wants to OR but has forgotten to do so.
         */
        QBMAN_BUG_ON((*v & cmd_verb) != *v);
        if ((p->desc.qman_version & QMAN_REV_MASK) >= QMAN_REV_5000
                    && (p->desc.cena_access_mode == qman_cena_fastest_access)) {
                *v = cmd_verb | p->mr.valid_bit;
                qbman_cena_write_complete(&p->sys, QBMAN_CENA_SWP_CR_MEM, cmd);
                dma_wmb();
                qbman_cinh_write(&p->sys, QBMAN_CINH_SWP_CR_RT, QMAN_RT_MODE);
        } else {
                dma_wmb();
                *v = cmd_verb | p->mc.valid_bit;
                qbman_cena_write_complete(&p->sys, QBMAN_CENA_SWP_CR, cmd);
                clean(cmd);
        }
#ifdef QBMAN_CHECKING
        p->mc.check = swp_mc_can_poll;
#endif
}

void qbman_swp_mc_submit_cinh(struct qbman_swp *p, void *cmd, uint8_t cmd_verb)
{
        uint8_t *v = cmd;
#ifdef QBMAN_CHECKING
        QBMAN_BUG_ON(p->mc.check != swp_mc_can_submit);
#endif
        /* TBD: "|=" is going to hurt performance. Need to move as many fields
         * out of word zero, and for those that remain, the "OR" needs to occur
         * at the caller side. This debug check helps to catch cases where the
         * caller wants to OR but has forgotten to do so.
         */
        QBMAN_BUG_ON((*v & cmd_verb) != *v);
        dma_wmb();
        *v = cmd_verb | p->mc.valid_bit;
        qbman_cinh_write_complete(&p->sys, QBMAN_CENA_SWP_CR, cmd);
        clean(cmd);
#ifdef QBMAN_CHECKING
        p->mc.check = swp_mc_can_poll;
#endif
}

void *qbman_swp_mc_result(struct qbman_swp *p)
{
        uint32_t *ret, verb;
#ifdef QBMAN_CHECKING
        QBMAN_BUG_ON(p->mc.check != swp_mc_can_poll);
#endif
        if ((p->desc.qman_version & QMAN_REV_MASK) >= QMAN_REV_5000
                && (p->desc.cena_access_mode == qman_cena_fastest_access)) {
                ret = qbman_cena_read(&p->sys, QBMAN_CENA_SWP_RR_MEM);
                /* Command completed if the valid bit is toggled */
                if (p->mr.valid_bit != (ret[0] & QB_VALID_BIT))
                        return NULL;
                /* Remove the valid-bit -
                 * command completed iff the rest is non-zero
                 */
                verb = ret[0] & ~QB_VALID_BIT;
                if (!verb)
                        return NULL;
                p->mr.valid_bit ^= QB_VALID_BIT;
        } else {
                qbman_cena_invalidate_prefetch(&p->sys,
                        QBMAN_CENA_SWP_RR(p->mc.valid_bit));
                ret = qbman_cena_read(&p->sys,
                                      QBMAN_CENA_SWP_RR(p->mc.valid_bit));
                /* Remove the valid-bit -
                 * command completed iff the rest is non-zero
                 */
                verb = ret[0] & ~QB_VALID_BIT;
                if (!verb)
                        return NULL;
                p->mc.valid_bit ^= QB_VALID_BIT;
        }
#ifdef QBMAN_CHECKING
        p->mc.check = swp_mc_can_start;
#endif
        return ret;
}

void *qbman_swp_mc_result_cinh(struct qbman_swp *p)
{
        uint32_t *ret, verb;
#ifdef QBMAN_CHECKING
        QBMAN_BUG_ON(p->mc.check != swp_mc_can_poll);
#endif
        ret = qbman_cinh_read_shadow(&p->sys,
                              QBMAN_CENA_SWP_RR(p->mc.valid_bit));
        /* Remove the valid-bit -
         * command completed iff the rest is non-zero
         */
        verb = ret[0] & ~QB_VALID_BIT;
        if (!verb)
                return NULL;
        p->mc.valid_bit ^= QB_VALID_BIT;
#ifdef QBMAN_CHECKING
        p->mc.check = swp_mc_can_start;
#endif
        return ret;
}

/***********/
/* Enqueue */
/***********/

#define QB_ENQUEUE_CMD_OPTIONS_SHIFT    0
enum qb_enqueue_commands {
        enqueue_empty = 0,
        enqueue_response_always = 1,
        enqueue_rejects_to_fq = 2
};

#define QB_ENQUEUE_CMD_EC_OPTION_MASK        0x3
#define QB_ENQUEUE_CMD_ORP_ENABLE_SHIFT      2
#define QB_ENQUEUE_CMD_IRQ_ON_DISPATCH_SHIFT 3
#define QB_ENQUEUE_CMD_TARGET_TYPE_SHIFT     4
#define QB_ENQUEUE_CMD_DCA_PK_SHIFT          6
#define QB_ENQUEUE_CMD_DCA_EN_SHIFT          7
#define QB_ENQUEUE_CMD_NLIS_SHIFT            14
#define QB_ENQUEUE_CMD_IS_NESN_SHIFT         15

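/* Typical enqueue-descriptor setup, for reference (a sketch only; "fqid" and
 * "fd" are the caller's):
 *      struct qbman_eq_desc d;
 *      qbman_eq_desc_clear(&d);
 *      qbman_eq_desc_set_no_orp(&d, 0);
 *      qbman_eq_desc_set_fq(&d, fqid);
 *      ret = qbman_swp_enqueue(s, &d, fd);
 */
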
void qbman_eq_desc_clear(struct qbman_eq_desc *d)
{
        memset(d, 0, sizeof(*d));
}

void qbman_eq_desc_set_no_orp(struct qbman_eq_desc *d, int respond_success)
{
        d->eq.verb &= ~(1 << QB_ENQUEUE_CMD_ORP_ENABLE_SHIFT);
        if (respond_success)
                d->eq.verb |= enqueue_response_always;
        else
                d->eq.verb |= enqueue_rejects_to_fq;
}

void qbman_eq_desc_set_orp(struct qbman_eq_desc *d, int respond_success,
                           uint16_t opr_id, uint16_t seqnum, int incomplete)
{
        d->eq.verb |= 1 << QB_ENQUEUE_CMD_ORP_ENABLE_SHIFT;
        if (respond_success)
                d->eq.verb |= enqueue_response_always;
        else
                d->eq.verb |= enqueue_rejects_to_fq;

        d->eq.orpid = opr_id;
        d->eq.seqnum = seqnum;
        if (incomplete)
                d->eq.seqnum |= 1 << QB_ENQUEUE_CMD_NLIS_SHIFT;
        else
                d->eq.seqnum &= ~(1 << QB_ENQUEUE_CMD_NLIS_SHIFT);
}

void qbman_eq_desc_set_orp_hole(struct qbman_eq_desc *d, uint16_t opr_id,
                                uint16_t seqnum)
{
        d->eq.verb |= 1 << QB_ENQUEUE_CMD_ORP_ENABLE_SHIFT;
        d->eq.verb &= ~QB_ENQUEUE_CMD_EC_OPTION_MASK;
        d->eq.orpid = opr_id;
        d->eq.seqnum = seqnum;
        d->eq.seqnum &= ~(1 << QB_ENQUEUE_CMD_NLIS_SHIFT);
        d->eq.seqnum &= ~(1 << QB_ENQUEUE_CMD_IS_NESN_SHIFT);
}

void qbman_eq_desc_set_orp_nesn(struct qbman_eq_desc *d, uint16_t opr_id,
                                uint16_t seqnum)
{
        d->eq.verb |= 1 << QB_ENQUEUE_CMD_ORP_ENABLE_SHIFT;
        d->eq.verb &= ~QB_ENQUEUE_CMD_EC_OPTION_MASK;
        d->eq.orpid = opr_id;
        d->eq.seqnum = seqnum;
        d->eq.seqnum &= ~(1 << QB_ENQUEUE_CMD_NLIS_SHIFT);
        d->eq.seqnum |= 1 << QB_ENQUEUE_CMD_IS_NESN_SHIFT;
}

void qbman_eq_desc_set_response(struct qbman_eq_desc *d,
                                dma_addr_t storage_phys,
                                int stash)
{
        d->eq.rsp_addr = storage_phys;
        d->eq.wae = stash;
}

void qbman_eq_desc_set_token(struct qbman_eq_desc *d, uint8_t token)
{
        d->eq.rspid = token;
}

void qbman_eq_desc_set_fq(struct qbman_eq_desc *d, uint32_t fqid)
{
        d->eq.verb &= ~(1 << QB_ENQUEUE_CMD_TARGET_TYPE_SHIFT);
        d->eq.tgtid = fqid;
}

void qbman_eq_desc_set_qd(struct qbman_eq_desc *d, uint32_t qdid,
                          uint16_t qd_bin, uint8_t qd_prio)
{
        d->eq.verb |= 1 << QB_ENQUEUE_CMD_TARGET_TYPE_SHIFT;
        d->eq.tgtid = qdid;
        d->eq.qdbin = qd_bin;
        d->eq.qpri = qd_prio;
}

void qbman_eq_desc_set_eqdi(struct qbman_eq_desc *d, int enable)
{
        if (enable)
                d->eq.verb |= 1 << QB_ENQUEUE_CMD_IRQ_ON_DISPATCH_SHIFT;
        else
                d->eq.verb &= ~(1 << QB_ENQUEUE_CMD_IRQ_ON_DISPATCH_SHIFT);
}

void qbman_eq_desc_set_dca(struct qbman_eq_desc *d, int enable,
                           uint8_t dqrr_idx, int park)
{
        if (enable) {
                d->eq.dca = dqrr_idx;
                if (park)
                        d->eq.dca |= 1 << QB_ENQUEUE_CMD_DCA_PK_SHIFT;
                else
                        d->eq.dca &= ~(1 << QB_ENQUEUE_CMD_DCA_PK_SHIFT);
                d->eq.dca |= 1 << QB_ENQUEUE_CMD_DCA_EN_SHIFT;
        } else {
                d->eq.dca &= ~(1 << QB_ENQUEUE_CMD_DCA_EN_SHIFT);
        }
}

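/* The EQAR register hands out EQCR entries in array mode: a single read
 * returns the index of the granted entry, its valid bit, and whether the
 * allocation succeeded at all.
 */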
#define EQAR_IDX(eqar)     ((eqar) & 0x1f)
#define EQAR_VB(eqar)      ((eqar) & 0x80)
#define EQAR_SUCCESS(eqar) ((eqar) & 0x100)

static inline void qbman_write_eqcr_am_rt_register(struct qbman_swp *p,
                                                   uint8_t idx)
{
        if (idx < 16)
                qbman_cinh_write(&p->sys, QBMAN_CINH_SWP_EQCR_AM_RT + idx * 4,
                                     QMAN_RT_MODE);
        else
                qbman_cinh_write(&p->sys, QBMAN_CINH_SWP_EQCR_AM_RT2 +
                                     (idx - 16) * 4,
                                     QMAN_RT_MODE);
}

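/* Byte-by-byte copy through a volatile pointer, used for the cache-inhibited
 * portal mapping. The assumption here is that a regular memcpy() may issue
 * wide or unaligned accesses that device memory does not tolerate; the
 * volatile qualifier also keeps the compiler from widening the stores again.
 */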
static void memcpy_byte_by_byte(void *to, const void *from, size_t n)
{
        const uint8_t *src = from;
        volatile uint8_t *dest = to;
        size_t i;

        for (i = 0; i < n; i++)
                dest[i] = src[i];
}

static int qbman_swp_enqueue_array_mode_direct(struct qbman_swp *s,
                                               const struct qbman_eq_desc *d,
                                               const struct qbman_fd *fd)
{
        uint32_t *p;
        const uint32_t *cl = qb_cl(d);
        uint32_t eqar = qbman_cinh_read(&s->sys, QBMAN_CINH_SWP_EQAR);

        pr_debug("EQAR=%08x\n", eqar);
        if (!EQAR_SUCCESS(eqar))
                return -EBUSY;
        p = qbman_cena_write_start_wo_shadow(&s->sys,
                        QBMAN_CENA_SWP_EQCR(EQAR_IDX(eqar)));
        memcpy(&p[1], &cl[1], 28);
        memcpy(&p[8], fd, sizeof(*fd));

        /* Set the verb byte, have to substitute in the valid-bit */
        dma_wmb();
        p[0] = cl[0] | EQAR_VB(eqar);
        qbman_cena_write_complete_wo_shadow(&s->sys,
                                QBMAN_CENA_SWP_EQCR(EQAR_IDX(eqar)));
        return 0;
}

static int qbman_swp_enqueue_array_mode_mem_back(struct qbman_swp *s,
                                                 const struct qbman_eq_desc *d,
                                                 const struct qbman_fd *fd)
{
        uint32_t *p;
        const uint32_t *cl = qb_cl(d);
        uint32_t eqar = qbman_cinh_read(&s->sys, QBMAN_CINH_SWP_EQAR);

        pr_debug("EQAR=%08x\n", eqar);
        if (!EQAR_SUCCESS(eqar))
                return -EBUSY;
        p = qbman_cena_write_start_wo_shadow(&s->sys,
                        QBMAN_CENA_SWP_EQCR(EQAR_IDX(eqar)));
        memcpy(&p[1], &cl[1], 28);
        memcpy(&p[8], fd, sizeof(*fd));

        /* Set the verb byte, have to substitute in the valid-bit */
        p[0] = cl[0] | EQAR_VB(eqar);
        dma_wmb();
        qbman_write_eqcr_am_rt_register(s, EQAR_IDX(eqar));
        return 0;
}

static inline int qbman_swp_enqueue_array_mode(struct qbman_swp *s,
                                               const struct qbman_eq_desc *d,
                                               const struct qbman_fd *fd)
{
        return qbman_swp_enqueue_array_mode_ptr(s, d, fd);
}

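/* A note on the PI/CI arithmetic shared by the ring-mode variants below:
 * the producer index counts over twice the ring size, so the low bits
 * (half_mask) select an EQCR entry while the extra bit tracks wrap parity.
 * The valid bit handed to hardware toggles on every ring wrap, and
 * "available" is replenished lazily, by re-reading the consumer index only
 * once the cached count reaches zero.
 */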
static int qbman_swp_enqueue_ring_mode_direct(struct qbman_swp *s,
                                              const struct qbman_eq_desc *d,
                                              const struct qbman_fd *fd)
{
        uint32_t *p;
        const uint32_t *cl = qb_cl(d);
        uint32_t eqcr_ci, full_mask, half_mask;

        half_mask = (s->eqcr.pi_ci_mask>>1);
        full_mask = s->eqcr.pi_ci_mask;
        if (!s->eqcr.available) {
                eqcr_ci = s->eqcr.ci;
                s->eqcr.ci = qbman_cena_read_reg(&s->sys,
                                QBMAN_CENA_SWP_EQCR_CI) & full_mask;
                s->eqcr.available = qm_cyc_diff(s->eqcr.pi_ring_size,
                                eqcr_ci, s->eqcr.ci);
                if (!s->eqcr.available)
                        return -EBUSY;
        }

        p = qbman_cena_write_start_wo_shadow(&s->sys,
                        QBMAN_CENA_SWP_EQCR(s->eqcr.pi & half_mask));
        memcpy(&p[1], &cl[1], 28);
        memcpy(&p[8], fd, sizeof(*fd));
        lwsync();

        /* Set the verb byte, have to substitute in the valid-bit */
        p[0] = cl[0] | s->eqcr.pi_vb;
        qbman_cena_write_complete_wo_shadow(&s->sys,
                        QBMAN_CENA_SWP_EQCR(s->eqcr.pi & half_mask));
        s->eqcr.pi++;
        s->eqcr.pi &= full_mask;
        s->eqcr.available--;
        if (!(s->eqcr.pi & half_mask))
                s->eqcr.pi_vb ^= QB_VALID_BIT;

        return 0;
}

static int qbman_swp_enqueue_ring_mode_cinh_read_direct(
                struct qbman_swp *s,
                const struct qbman_eq_desc *d,
                const struct qbman_fd *fd)
{
        uint32_t *p;
        const uint32_t *cl = qb_cl(d);
        uint32_t eqcr_ci, full_mask, half_mask;

        half_mask = (s->eqcr.pi_ci_mask>>1);
        full_mask = s->eqcr.pi_ci_mask;
        if (!s->eqcr.available) {
                eqcr_ci = s->eqcr.ci;
                s->eqcr.ci = qbman_cinh_read(&s->sys,
                                QBMAN_CINH_SWP_EQCR_CI) & full_mask;
                s->eqcr.available = qm_cyc_diff(s->eqcr.pi_ring_size,
                                eqcr_ci, s->eqcr.ci);
                if (!s->eqcr.available)
                        return -EBUSY;
        }

        p = qbman_cinh_write_start_wo_shadow(&s->sys,
                        QBMAN_CENA_SWP_EQCR(s->eqcr.pi & half_mask));
        memcpy(&p[1], &cl[1], 28);
        memcpy(&p[8], fd, sizeof(*fd));
        lwsync();

        /* Set the verb byte, have to substitute in the valid-bit */
        p[0] = cl[0] | s->eqcr.pi_vb;
        s->eqcr.pi++;
        s->eqcr.pi &= full_mask;
        s->eqcr.available--;
        if (!(s->eqcr.pi & half_mask))
                s->eqcr.pi_vb ^= QB_VALID_BIT;

        return 0;
}

static int qbman_swp_enqueue_ring_mode_cinh_direct(
                struct qbman_swp *s,
                const struct qbman_eq_desc *d,
                const struct qbman_fd *fd)
{
        uint32_t *p;
        const uint32_t *cl = qb_cl(d);
        uint32_t eqcr_ci, full_mask, half_mask;

        half_mask = (s->eqcr.pi_ci_mask>>1);
        full_mask = s->eqcr.pi_ci_mask;
        if (!s->eqcr.available) {
                eqcr_ci = s->eqcr.ci;
                s->eqcr.ci = qbman_cinh_read(&s->sys,
                                QBMAN_CINH_SWP_EQCR_CI) & full_mask;
                s->eqcr.available = qm_cyc_diff(s->eqcr.pi_ring_size,
                                eqcr_ci, s->eqcr.ci);
                if (!s->eqcr.available)
                        return -EBUSY;
        }

        p = qbman_cinh_write_start_wo_shadow(&s->sys,
                        QBMAN_CENA_SWP_EQCR(s->eqcr.pi & half_mask));
        memcpy_byte_by_byte(&p[1], &cl[1], 28);
        memcpy_byte_by_byte(&p[8], fd, sizeof(*fd));
        lwsync();

        /* Set the verb byte, have to substitute in the valid-bit */
        p[0] = cl[0] | s->eqcr.pi_vb;
        s->eqcr.pi++;
        s->eqcr.pi &= full_mask;
        s->eqcr.available--;
        if (!(s->eqcr.pi & half_mask))
                s->eqcr.pi_vb ^= QB_VALID_BIT;

        return 0;
}

static int qbman_swp_enqueue_ring_mode_mem_back(struct qbman_swp *s,
                                                const struct qbman_eq_desc *d,
                                                const struct qbman_fd *fd)
{
        uint32_t *p;
        const uint32_t *cl = qb_cl(d);
        uint32_t eqcr_ci, full_mask, half_mask;

        half_mask = (s->eqcr.pi_ci_mask>>1);
        full_mask = s->eqcr.pi_ci_mask;
        if (!s->eqcr.available) {
                eqcr_ci = s->eqcr.ci;
                s->eqcr.ci = qbman_cena_read_reg(&s->sys,
                                QBMAN_CENA_SWP_EQCR_CI_MEMBACK) & full_mask;
                s->eqcr.available = qm_cyc_diff(s->eqcr.pi_ring_size,
                                eqcr_ci, s->eqcr.ci);
                if (!s->eqcr.available)
                        return -EBUSY;
        }

        p = qbman_cena_write_start_wo_shadow(&s->sys,
                        QBMAN_CENA_SWP_EQCR(s->eqcr.pi & half_mask));
        memcpy(&p[1], &cl[1], 28);
        memcpy(&p[8], fd, sizeof(*fd));

        /* Set the verb byte, have to substitute in the valid-bit */
        p[0] = cl[0] | s->eqcr.pi_vb;
        s->eqcr.pi++;
        s->eqcr.pi &= full_mask;
        s->eqcr.available--;
        if (!(s->eqcr.pi & half_mask))
                s->eqcr.pi_vb ^= QB_VALID_BIT;
        dma_wmb();
        qbman_cinh_write(&s->sys, QBMAN_CINH_SWP_EQCR_PI,
                                (QB_RT_BIT)|(s->eqcr.pi)|s->eqcr.pi_vb);
        return 0;
}

static int qbman_swp_enqueue_ring_mode(struct qbman_swp *s,
                                       const struct qbman_eq_desc *d,
                                       const struct qbman_fd *fd)
{
        if (!s->stash_off)
                return qbman_swp_enqueue_ring_mode_ptr(s, d, fd);
        else
                return qbman_swp_enqueue_ring_mode_cinh_direct(s, d, fd);
}

int qbman_swp_enqueue(struct qbman_swp *s, const struct qbman_eq_desc *d,
                      const struct qbman_fd *fd)
{
        if (s->sys.eqcr_mode == qman_eqcr_vb_array)
                return qbman_swp_enqueue_array_mode(s, d, fd);
        else    /* Use ring mode by default */
                return qbman_swp_enqueue_ring_mode(s, d, fd);
}

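/* The multi-frame enqueue variants below fill in all the command bodies
 * first and only set the verb bytes after a barrier, so hardware never sees
 * a valid verb in front of an incomplete command; the memory-backed variant
 * instead publishes the whole batch with a final doorbell write to EQCR_PI.
 */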
static int qbman_swp_enqueue_multiple_direct(struct qbman_swp *s,
                                             const struct qbman_eq_desc *d,
                                             const struct qbman_fd *fd,
                                             uint32_t *flags,
                                             int num_frames)
{
        uint32_t *p = NULL;
        const uint32_t *cl = qb_cl(d);
        uint32_t eqcr_ci, eqcr_pi, half_mask, full_mask;
        int i, num_enqueued = 0;
        uint64_t addr_cena;

        half_mask = (s->eqcr.pi_ci_mask>>1);
        full_mask = s->eqcr.pi_ci_mask;
        if (!s->eqcr.available) {
                eqcr_ci = s->eqcr.ci;
                s->eqcr.ci = qbman_cena_read_reg(&s->sys,
                                QBMAN_CENA_SWP_EQCR_CI) & full_mask;
                s->eqcr.available = qm_cyc_diff(s->eqcr.pi_ring_size,
                                eqcr_ci, s->eqcr.ci);
                if (!s->eqcr.available)
                        return 0;
        }

        eqcr_pi = s->eqcr.pi;
        num_enqueued = (s->eqcr.available < num_frames) ?
                        s->eqcr.available : num_frames;
        s->eqcr.available -= num_enqueued;
        /* Fill in the EQCR ring */
        for (i = 0; i < num_enqueued; i++) {
                p = qbman_cena_write_start_wo_shadow(&s->sys,
                                QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
                memcpy(&p[1], &cl[1], 28);
                memcpy(&p[8], &fd[i], sizeof(*fd));
                eqcr_pi++;
        }

        lwsync();

        /* Set the verb byte, have to substitute in the valid-bit */
        eqcr_pi = s->eqcr.pi;
        for (i = 0; i < num_enqueued; i++) {
                p = qbman_cena_write_start_wo_shadow(&s->sys,
                                QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
                p[0] = cl[0] | s->eqcr.pi_vb;
                if (flags && (flags[i] & QBMAN_ENQUEUE_FLAG_DCA)) {
                        struct qbman_eq_desc *d = (struct qbman_eq_desc *)p;

                        d->eq.dca = (1 << QB_ENQUEUE_CMD_DCA_EN_SHIFT) |
                                ((flags[i]) & QBMAN_EQCR_DCA_IDXMASK);
                }
                eqcr_pi++;
                if (!(eqcr_pi & half_mask))
                        s->eqcr.pi_vb ^= QB_VALID_BIT;
        }

        /* Flush all the cachelines without load/store in between */
        eqcr_pi = s->eqcr.pi;
        addr_cena = (size_t)s->sys.addr_cena;
        for (i = 0; i < num_enqueued; i++) {
                dcbf((uintptr_t)(addr_cena +
                        QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask)));
                eqcr_pi++;
        }
        s->eqcr.pi = eqcr_pi & full_mask;

        return num_enqueued;
}

static int qbman_swp_enqueue_multiple_cinh_read_direct(
                struct qbman_swp *s,
                const struct qbman_eq_desc *d,
                const struct qbman_fd *fd,
                uint32_t *flags,
                int num_frames)
{
        uint32_t *p = NULL;
        const uint32_t *cl = qb_cl(d);
        uint32_t eqcr_ci, eqcr_pi, half_mask, full_mask;
        int i, num_enqueued = 0;
        uint64_t addr_cena;

        half_mask = (s->eqcr.pi_ci_mask>>1);
        full_mask = s->eqcr.pi_ci_mask;
        if (!s->eqcr.available) {
                eqcr_ci = s->eqcr.ci;
                s->eqcr.ci = qbman_cinh_read(&s->sys,
                                QBMAN_CINH_SWP_EQCR_CI) & full_mask;
                s->eqcr.available = qm_cyc_diff(s->eqcr.pi_ring_size,
                                eqcr_ci, s->eqcr.ci);
                if (!s->eqcr.available)
                        return 0;
        }

        eqcr_pi = s->eqcr.pi;
        num_enqueued = (s->eqcr.available < num_frames) ?
                        s->eqcr.available : num_frames;
        s->eqcr.available -= num_enqueued;
        /* Fill in the EQCR ring */
        for (i = 0; i < num_enqueued; i++) {
                p = qbman_cena_write_start_wo_shadow(&s->sys,
                                QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
                memcpy(&p[1], &cl[1], 28);
                memcpy(&p[8], &fd[i], sizeof(*fd));
                eqcr_pi++;
        }

        lwsync();

        /* Set the verb byte, have to substitute in the valid-bit */
        eqcr_pi = s->eqcr.pi;
        for (i = 0; i < num_enqueued; i++) {
                p = qbman_cena_write_start_wo_shadow(&s->sys,
                                QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
                p[0] = cl[0] | s->eqcr.pi_vb;
                if (flags && (flags[i] & QBMAN_ENQUEUE_FLAG_DCA)) {
                        struct qbman_eq_desc *d = (struct qbman_eq_desc *)p;

                        d->eq.dca = (1 << QB_ENQUEUE_CMD_DCA_EN_SHIFT) |
                                ((flags[i]) & QBMAN_EQCR_DCA_IDXMASK);
                }
                eqcr_pi++;
                if (!(eqcr_pi & half_mask))
                        s->eqcr.pi_vb ^= QB_VALID_BIT;
        }

        /* Flush all the cachelines without load/store in between */
        eqcr_pi = s->eqcr.pi;
        addr_cena = (size_t)s->sys.addr_cena;
        for (i = 0; i < num_enqueued; i++) {
                dcbf(addr_cena +
                        QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
                eqcr_pi++;
        }
        s->eqcr.pi = eqcr_pi & full_mask;

        return num_enqueued;
}

static int qbman_swp_enqueue_multiple_cinh_direct(
                struct qbman_swp *s,
                const struct qbman_eq_desc *d,
                const struct qbman_fd *fd,
                uint32_t *flags,
                int num_frames)
{
        uint32_t *p = NULL;
        const uint32_t *cl = qb_cl(d);
        uint32_t eqcr_ci, eqcr_pi, half_mask, full_mask;
        int i, num_enqueued = 0;

        half_mask = (s->eqcr.pi_ci_mask>>1);
        full_mask = s->eqcr.pi_ci_mask;
        if (!s->eqcr.available) {
                eqcr_ci = s->eqcr.ci;
                s->eqcr.ci = qbman_cinh_read(&s->sys,
                                QBMAN_CINH_SWP_EQCR_CI) & full_mask;
                s->eqcr.available = qm_cyc_diff(s->eqcr.pi_ring_size,
                                eqcr_ci, s->eqcr.ci);
                if (!s->eqcr.available)
                        return 0;
        }

        eqcr_pi = s->eqcr.pi;
        num_enqueued = (s->eqcr.available < num_frames) ?
                        s->eqcr.available : num_frames;
        s->eqcr.available -= num_enqueued;
        /* Fill in the EQCR ring */
        for (i = 0; i < num_enqueued; i++) {
                p = qbman_cinh_write_start_wo_shadow(&s->sys,
                                QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
                memcpy_byte_by_byte(&p[1], &cl[1], 28);
                memcpy_byte_by_byte(&p[8], &fd[i], sizeof(*fd));
                eqcr_pi++;
        }

        lwsync();

        /* Set the verb byte, have to substitute in the valid-bit */
        eqcr_pi = s->eqcr.pi;
        for (i = 0; i < num_enqueued; i++) {
                p = qbman_cinh_write_start_wo_shadow(&s->sys,
                                QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
                p[0] = cl[0] | s->eqcr.pi_vb;
                if (flags && (flags[i] & QBMAN_ENQUEUE_FLAG_DCA)) {
                        struct qbman_eq_desc *d = (struct qbman_eq_desc *)p;

                        d->eq.dca = (1 << QB_ENQUEUE_CMD_DCA_EN_SHIFT) |
                                ((flags[i]) & QBMAN_EQCR_DCA_IDXMASK);
                }
                eqcr_pi++;
                if (!(eqcr_pi & half_mask))
                        s->eqcr.pi_vb ^= QB_VALID_BIT;
        }

        s->eqcr.pi = eqcr_pi & full_mask;

        return num_enqueued;
}

static int qbman_swp_enqueue_multiple_mem_back(struct qbman_swp *s,
                                               const struct qbman_eq_desc *d,
                                               const struct qbman_fd *fd,
                                               uint32_t *flags,
                                               int num_frames)
{
        uint32_t *p = NULL;
        const uint32_t *cl = qb_cl(d);
        uint32_t eqcr_ci, eqcr_pi, half_mask, full_mask;
        int i, num_enqueued = 0;

        half_mask = (s->eqcr.pi_ci_mask>>1);
        full_mask = s->eqcr.pi_ci_mask;
        if (!s->eqcr.available) {
                eqcr_ci = s->eqcr.ci;
                s->eqcr.ci = qbman_cena_read_reg(&s->sys,
                                QBMAN_CENA_SWP_EQCR_CI_MEMBACK) & full_mask;
                s->eqcr.available = qm_cyc_diff(s->eqcr.pi_ring_size,
                                        eqcr_ci, s->eqcr.ci);
                if (!s->eqcr.available)
                        return 0;
        }

        eqcr_pi = s->eqcr.pi;
        num_enqueued = (s->eqcr.available < num_frames) ?
                        s->eqcr.available : num_frames;
        s->eqcr.available -= num_enqueued;
        /* Fill in the EQCR ring */
        for (i = 0; i < num_enqueued; i++) {
                p = qbman_cena_write_start_wo_shadow(&s->sys,
                                QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
                memcpy(&p[1], &cl[1], 28);
                memcpy(&p[8], &fd[i], sizeof(*fd));
                p[0] = cl[0] | s->eqcr.pi_vb;

                if (flags && (flags[i] & QBMAN_ENQUEUE_FLAG_DCA)) {
                        struct qbman_eq_desc *d = (struct qbman_eq_desc *)p;

                        d->eq.dca = (1 << QB_ENQUEUE_CMD_DCA_EN_SHIFT) |
                                ((flags[i]) & QBMAN_EQCR_DCA_IDXMASK);
                }
                eqcr_pi++;

                if (!(eqcr_pi & half_mask))
                        s->eqcr.pi_vb ^= QB_VALID_BIT;
        }
        s->eqcr.pi = eqcr_pi & full_mask;

        dma_wmb();
        qbman_cinh_write(&s->sys, QBMAN_CINH_SWP_EQCR_PI,
                                (QB_RT_BIT)|(s->eqcr.pi)|s->eqcr.pi_vb);
        return num_enqueued;
}

int qbman_swp_enqueue_multiple(struct qbman_swp *s,
                                      const struct qbman_eq_desc *d,
                                      const struct qbman_fd *fd,
                                      uint32_t *flags,
                                      int num_frames)
{
        if (!s->stash_off)
                return qbman_swp_enqueue_multiple_ptr(s, d, fd, flags,
                                                num_frames);
        else
                return qbman_swp_enqueue_multiple_cinh_direct(s, d, fd, flags,
                                                num_frames);
}

static int qbman_swp_enqueue_multiple_fd_direct(struct qbman_swp *s,
                                                const struct qbman_eq_desc *d,
                                                struct qbman_fd **fd,
                                                uint32_t *flags,
                                                int num_frames)
{
        uint32_t *p = NULL;
        const uint32_t *cl = qb_cl(d);
        uint32_t eqcr_ci, eqcr_pi, half_mask, full_mask;
        int i, num_enqueued = 0;
        uint64_t addr_cena;

        half_mask = (s->eqcr.pi_ci_mask>>1);
        full_mask = s->eqcr.pi_ci_mask;
        if (!s->eqcr.available) {
                eqcr_ci = s->eqcr.ci;
                s->eqcr.ci = qbman_cena_read_reg(&s->sys,
                                QBMAN_CENA_SWP_EQCR_CI) & full_mask;
                s->eqcr.available = qm_cyc_diff(s->eqcr.pi_ring_size,
                                eqcr_ci, s->eqcr.ci);
                if (!s->eqcr.available)
                        return 0;
        }

        eqcr_pi = s->eqcr.pi;
        num_enqueued = (s->eqcr.available < num_frames) ?
                        s->eqcr.available : num_frames;
        s->eqcr.available -= num_enqueued;
        /* Fill in the EQCR ring */
        for (i = 0; i < num_enqueued; i++) {
                p = qbman_cena_write_start_wo_shadow(&s->sys,
                                QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
                memcpy(&p[1], &cl[1], 28);
                memcpy(&p[8], fd[i], sizeof(struct qbman_fd));
                eqcr_pi++;
        }

        lwsync();

        /* Set the verb byte, have to substitute in the valid-bit */
        eqcr_pi = s->eqcr.pi;
        for (i = 0; i < num_enqueued; i++) {
                p = qbman_cena_write_start_wo_shadow(&s->sys,
                                QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
                p[0] = cl[0] | s->eqcr.pi_vb;
                if (flags && (flags[i] & QBMAN_ENQUEUE_FLAG_DCA)) {
                        struct qbman_eq_desc *d = (struct qbman_eq_desc *)p;

                        d->eq.dca = (1 << QB_ENQUEUE_CMD_DCA_EN_SHIFT) |
                                ((flags[i]) & QBMAN_EQCR_DCA_IDXMASK);
                }
                eqcr_pi++;
                if (!(eqcr_pi & half_mask))
                        s->eqcr.pi_vb ^= QB_VALID_BIT;
        }

        /* Flush all the cachelines without load/store in between */
        eqcr_pi = s->eqcr.pi;
        addr_cena = (size_t)s->sys.addr_cena;
        for (i = 0; i < num_enqueued; i++) {
                dcbf(addr_cena +
                        QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
                eqcr_pi++;
        }
        s->eqcr.pi = eqcr_pi & full_mask;

        return num_enqueued;
}

static int qbman_swp_enqueue_multiple_fd_cinh_read_direct(
                struct qbman_swp *s,
                const struct qbman_eq_desc *d,
                struct qbman_fd **fd,
                uint32_t *flags,
                int num_frames)
{
        uint32_t *p = NULL;
        const uint32_t *cl = qb_cl(d);
        uint32_t eqcr_ci, eqcr_pi, half_mask, full_mask;
        int i, num_enqueued = 0;
        uint64_t addr_cena;

        half_mask = (s->eqcr.pi_ci_mask>>1);
        full_mask = s->eqcr.pi_ci_mask;
        if (!s->eqcr.available) {
                eqcr_ci = s->eqcr.ci;
                s->eqcr.ci = qbman_cinh_read(&s->sys,
                                QBMAN_CINH_SWP_EQCR_CI) & full_mask;
                s->eqcr.available = qm_cyc_diff(s->eqcr.pi_ring_size,
                                eqcr_ci, s->eqcr.ci);
                if (!s->eqcr.available)
                        return 0;
        }

        eqcr_pi = s->eqcr.pi;
        num_enqueued = (s->eqcr.available < num_frames) ?
                        s->eqcr.available : num_frames;
        s->eqcr.available -= num_enqueued;
        /* Fill in the EQCR ring */
        for (i = 0; i < num_enqueued; i++) {
                p = qbman_cena_write_start_wo_shadow(&s->sys,
                                QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
                memcpy(&p[1], &cl[1], 28);
                memcpy(&p[8], fd[i], sizeof(struct qbman_fd));
                eqcr_pi++;
        }

        lwsync();

        /* Set the verb byte, have to substitute in the valid-bit */
        eqcr_pi = s->eqcr.pi;
        for (i = 0; i < num_enqueued; i++) {
                p = qbman_cena_write_start_wo_shadow(&s->sys,
                                QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
                p[0] = cl[0] | s->eqcr.pi_vb;
                if (flags && (flags[i] & QBMAN_ENQUEUE_FLAG_DCA)) {
                        struct qbman_eq_desc *d = (struct qbman_eq_desc *)p;

                        d->eq.dca = (1 << QB_ENQUEUE_CMD_DCA_EN_SHIFT) |
                                ((flags[i]) & QBMAN_EQCR_DCA_IDXMASK);
                }
                eqcr_pi++;
                if (!(eqcr_pi & half_mask))
                        s->eqcr.pi_vb ^= QB_VALID_BIT;
        }

        /* Flush all the cachelines without load/store in between */
        eqcr_pi = s->eqcr.pi;
        addr_cena = (size_t)s->sys.addr_cena;
        for (i = 0; i < num_enqueued; i++) {
                dcbf(addr_cena +
                        QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
                eqcr_pi++;
        }
        s->eqcr.pi = eqcr_pi & full_mask;

        return num_enqueued;
}

static int qbman_swp_enqueue_multiple_fd_cinh_direct(
                struct qbman_swp *s,
                const struct qbman_eq_desc *d,
                struct qbman_fd **fd,
                uint32_t *flags,
                int num_frames)
{
        uint32_t *p = NULL;
        const uint32_t *cl = qb_cl(d);
        uint32_t eqcr_ci, eqcr_pi, half_mask, full_mask;
        int i, num_enqueued = 0;

        half_mask = (s->eqcr.pi_ci_mask>>1);
        full_mask = s->eqcr.pi_ci_mask;
        if (!s->eqcr.available) {
                eqcr_ci = s->eqcr.ci;
                s->eqcr.ci = qbman_cinh_read(&s->sys,
                                QBMAN_CINH_SWP_EQCR_CI) & full_mask;
                s->eqcr.available = qm_cyc_diff(s->eqcr.pi_ring_size,
                                eqcr_ci, s->eqcr.ci);
                if (!s->eqcr.available)
                        return 0;
        }

        eqcr_pi = s->eqcr.pi;
        num_enqueued = (s->eqcr.available < num_frames) ?
                        s->eqcr.available : num_frames;
        s->eqcr.available -= num_enqueued;
        /* Fill in the EQCR ring */
        for (i = 0; i < num_enqueued; i++) {
                p = qbman_cinh_write_start_wo_shadow(&s->sys,
                                QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
                memcpy_byte_by_byte(&p[1], &cl[1], 28);
                memcpy_byte_by_byte(&p[8], fd[i], sizeof(struct qbman_fd));
                eqcr_pi++;
        }

        lwsync();

        /* Set the verb byte, have to substitute in the valid-bit */
        eqcr_pi = s->eqcr.pi;
        for (i = 0; i < num_enqueued; i++) {
                p = qbman_cinh_write_start_wo_shadow(&s->sys,
                                QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
                p[0] = cl[0] | s->eqcr.pi_vb;
                if (flags && (flags[i] & QBMAN_ENQUEUE_FLAG_DCA)) {
                        struct qbman_eq_desc *d = (struct qbman_eq_desc *)p;

                        d->eq.dca = (1 << QB_ENQUEUE_CMD_DCA_EN_SHIFT) |
                                ((flags[i]) & QBMAN_EQCR_DCA_IDXMASK);
                }
                eqcr_pi++;
                if (!(eqcr_pi & half_mask))
                        s->eqcr.pi_vb ^= QB_VALID_BIT;
        }

        s->eqcr.pi = eqcr_pi & full_mask;

        return num_enqueued;
}

static int qbman_swp_enqueue_multiple_fd_mem_back(struct qbman_swp *s,
                                                  const struct qbman_eq_desc *d,
                                                  struct qbman_fd **fd,
                                                  uint32_t *flags,
                                                  int num_frames)
{
        uint32_t *p = NULL;
        const uint32_t *cl = qb_cl(d);
        uint32_t eqcr_ci, eqcr_pi, half_mask, full_mask;
        int i, num_enqueued = 0;

        half_mask = (s->eqcr.pi_ci_mask>>1);
        full_mask = s->eqcr.pi_ci_mask;
        if (!s->eqcr.available) {
                eqcr_ci = s->eqcr.ci;
                s->eqcr.ci = qbman_cena_read_reg(&s->sys,
                                QBMAN_CENA_SWP_EQCR_CI_MEMBACK) & full_mask;
                s->eqcr.available = qm_cyc_diff(s->eqcr.pi_ring_size,
                                        eqcr_ci, s->eqcr.ci);
                if (!s->eqcr.available)
                        return 0;
        }

        eqcr_pi = s->eqcr.pi;
        num_enqueued = (s->eqcr.available < num_frames) ?
                        s->eqcr.available : num_frames;
        s->eqcr.available -= num_enqueued;
        /* Fill in the EQCR ring */
        for (i = 0; i < num_enqueued; i++) {
                p = qbman_cena_write_start_wo_shadow(&s->sys,
                                QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
                memcpy(&p[1], &cl[1], 28);
                memcpy(&p[8], fd[i], sizeof(struct qbman_fd));
                eqcr_pi++;
        }

        /* Set the verb byte, have to substitute in the valid-bit */
        eqcr_pi = s->eqcr.pi;
        for (i = 0; i < num_enqueued; i++) {
                p = qbman_cena_write_start_wo_shadow(&s->sys,
                                QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
                p[0] = cl[0] | s->eqcr.pi_vb;
                if (flags && (flags[i] & QBMAN_ENQUEUE_FLAG_DCA)) {
                        struct qbman_eq_desc *d = (struct qbman_eq_desc *)p;

                        d->eq.dca = (1 << QB_ENQUEUE_CMD_DCA_EN_SHIFT) |
                                ((flags[i]) & QBMAN_EQCR_DCA_IDXMASK);
                }
1479                 eqcr_pi++;
1480                 if (!(eqcr_pi & half_mask))
1481                         s->eqcr.pi_vb ^= QB_VALID_BIT;
1482         }
1483         s->eqcr.pi = eqcr_pi & full_mask;
1484
1485         dma_wmb();
1486         qbman_cinh_write(&s->sys, QBMAN_CINH_SWP_EQCR_PI,
1487                                 (QB_RT_BIT)|(s->eqcr.pi)|s->eqcr.pi_vb);
1488         return num_enqueued;
1489 }
1490
1491 int qbman_swp_enqueue_multiple_fd(struct qbman_swp *s,
1492                                          const struct qbman_eq_desc *d,
1493                                          struct qbman_fd **fd,
1494                                          uint32_t *flags,
1495                                          int num_frames)
1496 {
1497         if (!s->stash_off)
1498                 return qbman_swp_enqueue_multiple_fd_ptr(s, d, fd, flags,
1499                                         num_frames);
1500         else
1501                 return qbman_swp_enqueue_multiple_fd_cinh_direct(s, d, fd,
1502                                         flags, num_frames);
1503 }
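
/*
 * Illustrative usage sketch, not part of the driver: a caller-side burst
 * enqueue built on qbman_swp_enqueue_multiple_fd().  That function may
 * accept fewer frames than requested when EQCR space is short, so the
 * remainder is retried.  The descriptor 'd' is assumed to be initialised
 * already (via the qbman_eq_desc setters defined earlier in this file);
 * 'hypothetical_send_fd_burst' is a made-up name.
 */
static inline int hypothetical_send_fd_burst(struct qbman_swp *s,
                                             const struct qbman_eq_desc *d,
                                             struct qbman_fd **fd,
                                             int num_frames)
{
        int sent = 0, ret;

        while (sent < num_frames) {
                /* NULL flags: no DCA requested for any frame */
                ret = qbman_swp_enqueue_multiple_fd(s, d, &fd[sent], NULL,
                                                    num_frames - sent);
                if (ret < 0)
                        return ret;
                /* ret == 0 means the EQCR is full right now; this sketch
                 * busy-polls, a real caller would bound the retries.
                 */
                sent += ret;
        }
        return sent;
}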
1504
1505 static int qbman_swp_enqueue_multiple_desc_direct(struct qbman_swp *s,
1506                                         const struct qbman_eq_desc *d,
1507                                         const struct qbman_fd *fd,
1508                                         int num_frames)
1509 {
1510         uint32_t *p;
1511         const uint32_t *cl;
1512         uint32_t eqcr_ci, eqcr_pi, half_mask, full_mask;
1513         int i, num_enqueued = 0;
1514         uint64_t addr_cena;
1515
1516         half_mask = (s->eqcr.pi_ci_mask>>1);
1517         full_mask = s->eqcr.pi_ci_mask;
1518         if (!s->eqcr.available) {
1519                 eqcr_ci = s->eqcr.ci;
1520                 s->eqcr.ci = qbman_cena_read_reg(&s->sys,
1521                                 QBMAN_CENA_SWP_EQCR_CI) & full_mask;
1522                 s->eqcr.available = qm_cyc_diff(s->eqcr.pi_ring_size,
1523                                         eqcr_ci, s->eqcr.ci);
1524                 if (!s->eqcr.available)
1525                         return 0;
1526         }
1527
1528         eqcr_pi = s->eqcr.pi;
1529         num_enqueued = (s->eqcr.available < num_frames) ?
1530                         s->eqcr.available : num_frames;
1531         s->eqcr.available -= num_enqueued;
1532         /* Fill in the EQCR ring */
1533         for (i = 0; i < num_enqueued; i++) {
1534                 p = qbman_cena_write_start_wo_shadow(&s->sys,
1535                                 QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
1536                 cl = qb_cl(&d[i]);
1537                 memcpy(&p[1], &cl[1], 28);
1538                 memcpy(&p[8], &fd[i], sizeof(*fd));
1539                 eqcr_pi++;
1540         }
1541
1542         lwsync();
1543
1544         /* Set the verb byte, have to substitute in the valid-bit */
1545         eqcr_pi = s->eqcr.pi;
1546         for (i = 0; i < num_enqueued; i++) {
1547                 p = qbman_cena_write_start_wo_shadow(&s->sys,
1548                                 QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
1549                 cl = qb_cl(&d[i]);
1550                 p[0] = cl[0] | s->eqcr.pi_vb;
1551                 eqcr_pi++;
1552                 if (!(eqcr_pi & half_mask))
1553                         s->eqcr.pi_vb ^= QB_VALID_BIT;
1554         }
1555
1556         /* Flush all the cachelines, with no load/store in between */
1557         eqcr_pi = s->eqcr.pi;
1558         addr_cena = (size_t)s->sys.addr_cena;
1559         for (i = 0; i < num_enqueued; i++) {
1560                 dcbf((uintptr_t)(addr_cena +
1561                         QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask)));
1562                 eqcr_pi++;
1563         }
1564         s->eqcr.pi = eqcr_pi & full_mask;
1565
1566         return num_enqueued;
1567 }
1568
1569 static int qbman_swp_enqueue_multiple_desc_cinh_read_direct(
1570                 struct qbman_swp *s,
1571                 const struct qbman_eq_desc *d,
1572                 const struct qbman_fd *fd,
1573                 int num_frames)
1574 {
1575         uint32_t *p;
1576         const uint32_t *cl;
1577         uint32_t eqcr_ci, eqcr_pi, half_mask, full_mask;
1578         int i, num_enqueued = 0;
1579         uint64_t addr_cena;
1580
1581         half_mask = (s->eqcr.pi_ci_mask>>1);
1582         full_mask = s->eqcr.pi_ci_mask;
1583         if (!s->eqcr.available) {
1584                 eqcr_ci = s->eqcr.ci;
1585                 s->eqcr.ci = qbman_cinh_read(&s->sys,
1586                                 QBMAN_CINH_SWP_EQCR_CI) & full_mask;
1587                 s->eqcr.available = qm_cyc_diff(s->eqcr.pi_ring_size,
1588                                         eqcr_ci, s->eqcr.ci);
1589                 if (!s->eqcr.available)
1590                         return 0;
1591         }
1592
1593         eqcr_pi = s->eqcr.pi;
1594         num_enqueued = (s->eqcr.available < num_frames) ?
1595                         s->eqcr.available : num_frames;
1596         s->eqcr.available -= num_enqueued;
1597         /* Fill in the EQCR ring */
1598         for (i = 0; i < num_enqueued; i++) {
1599                 p = qbman_cena_write_start_wo_shadow(&s->sys,
1600                                 QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
1601                 cl = qb_cl(&d[i]);
1602                 memcpy(&p[1], &cl[1], 28);
1603                 memcpy(&p[8], &fd[i], sizeof(*fd));
1604                 eqcr_pi++;
1605         }
1606
1607         lwsync();
1608
1609         /* Set the verb byte, have to substitute in the valid-bit */
1610         eqcr_pi = s->eqcr.pi;
1611         for (i = 0; i < num_enqueued; i++) {
1612                 p = qbman_cena_write_start_wo_shadow(&s->sys,
1613                                 QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
1614                 cl = qb_cl(&d[i]);
1615                 p[0] = cl[0] | s->eqcr.pi_vb;
1616                 eqcr_pi++;
1617                 if (!(eqcr_pi & half_mask))
1618                         s->eqcr.pi_vb ^= QB_VALID_BIT;
1619         }
1620
1621         /* Flush all the cachelines, with no load/store in between */
1622         eqcr_pi = s->eqcr.pi;
1623         addr_cena = (size_t)s->sys.addr_cena;
1624         for (i = 0; i < num_enqueued; i++) {
1625                 dcbf(addr_cena +
1626                         QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
1627                 eqcr_pi++;
1628         }
1629         s->eqcr.pi = eqcr_pi & full_mask;
1630
1631         return num_enqueued;
1632 }
1633
1634 static int qbman_swp_enqueue_multiple_desc_cinh_direct(
1635                 struct qbman_swp *s,
1636                 const struct qbman_eq_desc *d,
1637                 const struct qbman_fd *fd,
1638                 int num_frames)
1639 {
1640         uint32_t *p;
1641         const uint32_t *cl;
1642         uint32_t eqcr_ci, eqcr_pi, half_mask, full_mask;
1643         int i, num_enqueued = 0;
1644
1645         half_mask = (s->eqcr.pi_ci_mask>>1);
1646         full_mask = s->eqcr.pi_ci_mask;
1647         if (!s->eqcr.available) {
1648                 eqcr_ci = s->eqcr.ci;
1649                 s->eqcr.ci = qbman_cinh_read(&s->sys,
1650                                 QBMAN_CINH_SWP_EQCR_CI) & full_mask;
1651                 s->eqcr.available = qm_cyc_diff(s->eqcr.pi_ring_size,
1652                                         eqcr_ci, s->eqcr.ci);
1653                 if (!s->eqcr.available)
1654                         return 0;
1655         }
1656
1657         eqcr_pi = s->eqcr.pi;
1658         num_enqueued = (s->eqcr.available < num_frames) ?
1659                         s->eqcr.available : num_frames;
1660         s->eqcr.available -= num_enqueued;
1661         /* Fill in the EQCR ring */
1662         for (i = 0; i < num_enqueued; i++) {
1663                 p = qbman_cinh_write_start_wo_shadow(&s->sys,
1664                                 QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
1665                 cl = qb_cl(&d[i]);
1666                 memcpy_byte_by_byte(&p[1], &cl[1], 28);
1667                 memcpy_byte_by_byte(&p[8], &fd[i], sizeof(*fd));
1668                 eqcr_pi++;
1669         }
1670
1671         lwsync();
1672
1673         /* Set the verb byte, have to substitute in the valid-bit */
1674         eqcr_pi = s->eqcr.pi;
1675         for (i = 0; i < num_enqueued; i++) {
1676                 p = qbman_cinh_write_start_wo_shadow(&s->sys,
1677                                 QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
1678                 cl = qb_cl(&d[i]);
1679                 p[0] = cl[0] | s->eqcr.pi_vb;
1680                 eqcr_pi++;
1681                 if (!(eqcr_pi & half_mask))
1682                         s->eqcr.pi_vb ^= QB_VALID_BIT;
1683         }
1684
1685         s->eqcr.pi = eqcr_pi & full_mask;
1686
1687         return num_enqueued;
1688 }
1689
1690 static int qbman_swp_enqueue_multiple_desc_mem_back(struct qbman_swp *s,
1691                                         const struct qbman_eq_desc *d,
1692                                         const struct qbman_fd *fd,
1693                                         int num_frames)
1694 {
1695         uint32_t *p;
1696         const uint32_t *cl;
1697         uint32_t eqcr_ci, eqcr_pi, half_mask, full_mask;
1698         int i, num_enqueued = 0;
1699
1700         half_mask = (s->eqcr.pi_ci_mask>>1);
1701         full_mask = s->eqcr.pi_ci_mask;
1702         if (!s->eqcr.available) {
1703                 eqcr_ci = s->eqcr.ci;
1704                 s->eqcr.ci = qbman_cena_read_reg(&s->sys,
1705                                 QBMAN_CENA_SWP_EQCR_CI_MEMBACK) & full_mask;
1706                 s->eqcr.available = qm_cyc_diff(s->eqcr.pi_ring_size,
1707                                         eqcr_ci, s->eqcr.ci);
1708                 if (!s->eqcr.available)
1709                         return 0;
1710         }
1711
1712         eqcr_pi = s->eqcr.pi;
1713         num_enqueued = (s->eqcr.available < num_frames) ?
1714                         s->eqcr.available : num_frames;
1715         s->eqcr.available -= num_enqueued;
1716         /* Fill in the EQCR ring */
1717         for (i = 0; i < num_enqueued; i++) {
1718                 p = qbman_cena_write_start_wo_shadow(&s->sys,
1719                                 QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
1720                 cl = qb_cl(&d[i]);
1721                 memcpy(&p[1], &cl[1], 28);
1722                 memcpy(&p[8], &fd[i], sizeof(*fd));
1723                 eqcr_pi++;
1724         }
1725
1726         /* Set the verb byte, have to substitute in the valid-bit */
1727         eqcr_pi = s->eqcr.pi;
1728         for (i = 0; i < num_enqueued; i++) {
1729                 p = qbman_cena_write_start_wo_shadow(&s->sys,
1730                                 QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
1731                 cl = qb_cl(&d[i]);
1732                 p[0] = cl[0] | s->eqcr.pi_vb;
1733                 eqcr_pi++;
1734                 if (!(eqcr_pi & half_mask))
1735                         s->eqcr.pi_vb ^= QB_VALID_BIT;
1736         }
1737
1738         s->eqcr.pi = eqcr_pi & full_mask;
1739
1740         dma_wmb();
1741         qbman_cinh_write(&s->sys, QBMAN_CINH_SWP_EQCR_PI,
1742                                 (QB_RT_BIT)|(s->eqcr.pi)|s->eqcr.pi_vb);
1743
1744         return num_enqueued;
1745 }
1746 int qbman_swp_enqueue_multiple_desc(struct qbman_swp *s,
1747                                            const struct qbman_eq_desc *d,
1748                                            const struct qbman_fd *fd,
1749                                            int num_frames)
1750 {
1751         if (!s->stash_off)
1752                 return qbman_swp_enqueue_multiple_desc_ptr(s, d, fd,
1753                                         num_frames);
1754         else
1755                 return qbman_swp_enqueue_multiple_desc_cinh_direct(s, d, fd,
1756                                         num_frames);
1757
1758 }
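
/*
 * Illustrative usage sketch, not part of the driver: per-frame descriptor
 * enqueue via qbman_swp_enqueue_multiple_desc().  Here d[] and fd[] are
 * parallel arrays, so a retry after partial acceptance must advance both
 * by the count already taken.  'hypothetical_send_desc_burst' is a
 * made-up name.
 */
static inline int hypothetical_send_desc_burst(struct qbman_swp *s,
                                               const struct qbman_eq_desc *d,
                                               const struct qbman_fd *fd,
                                               int num_frames)
{
        int sent = 0, ret;

        while (sent < num_frames) {
                ret = qbman_swp_enqueue_multiple_desc(s, &d[sent], &fd[sent],
                                                      num_frames - sent);
                if (ret <= 0)
                        break; /* 0: EQCR currently full; caller may retry */
                sent += ret;
        }
        return sent;
}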
1759
1760 /*************************/
1761 /* Static (push) dequeue */
1762 /*************************/
1763
1764 void qbman_swp_push_get(struct qbman_swp *s, uint8_t channel_idx, int *enabled)
1765 {
1766         uint16_t src = (s->sdq >> QB_SDQCR_SRC_SHIFT) & QB_SDQCR_SRC_MASK;
1767
1768         QBMAN_BUG_ON(channel_idx > 15);
1769         *enabled = src & (1 << channel_idx); /* nonzero iff enabled */
1770 }
1771
1772 void qbman_swp_push_set(struct qbman_swp *s, uint8_t channel_idx, int enable)
1773 {
1774         uint16_t dqsrc;
1775
1776         QBMAN_BUG_ON(channel_idx > 15);
1777         if (enable)
1778                 s->sdq |= 1 << channel_idx;
1779         else
1780                 s->sdq &= ~(1 << channel_idx);
1781
1782         /* Read back the complete src map.  If no channels are enabled,
1783          * the SDQCR must be 0 or else QMan will assert errors
1784          */
1785         dqsrc = (s->sdq >> QB_SDQCR_SRC_SHIFT) & QB_SDQCR_SRC_MASK;
1786         if (dqsrc != 0)
1787                 qbman_cinh_write(&s->sys, QBMAN_CINH_SWP_SDQCR, s->sdq);
1788         else
1789                 qbman_cinh_write(&s->sys, QBMAN_CINH_SWP_SDQCR, 0);
1790 }
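
/*
 * Illustrative usage sketch, not part of the driver: enable static (push)
 * dequeue for one channel index and read the source map back;
 * qbman_swp_push_get() reports whether the given index is present in the
 * map.  'hypothetical_channel_push_enabled' is a made-up name.
 */
static inline int hypothetical_channel_push_enabled(struct qbman_swp *s,
                                                    uint8_t channel_idx)
{
        int enabled;

        qbman_swp_push_set(s, channel_idx, 1);
        qbman_swp_push_get(s, channel_idx, &enabled);
        return enabled != 0;
}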
1791
1792 /***************************/
1793 /* Volatile (pull) dequeue */
1794 /***************************/
1795
1796 /* These should be const, eventually */
1797 #define QB_VDQCR_VERB_DCT_SHIFT    0
1798 #define QB_VDQCR_VERB_DT_SHIFT     2
1799 #define QB_VDQCR_VERB_RLS_SHIFT    4
1800 #define QB_VDQCR_VERB_WAE_SHIFT    5
1801 #define QB_VDQCR_VERB_RAD_SHIFT    6
1802
1803 enum qb_pull_dt_e {
1804         qb_pull_dt_channel,
1805         qb_pull_dt_workqueue,
1806         qb_pull_dt_framequeue
1807 };
1808
1809 void qbman_pull_desc_clear(struct qbman_pull_desc *d)
1810 {
1811         memset(d, 0, sizeof(*d));
1812 }
1813
1814 void qbman_pull_desc_set_storage(struct qbman_pull_desc *d,
1815                                  struct qbman_result *storage,
1816                                  dma_addr_t storage_phys,
1817                                  int stash)
1818 {
1819         d->pull.rsp_addr_virt = (size_t)storage;
1820
1821         if (!storage) {
1822                 d->pull.verb &= ~(1 << QB_VDQCR_VERB_RLS_SHIFT);
1823                 return;
1824         }
1825         d->pull.verb |= 1 << QB_VDQCR_VERB_RLS_SHIFT;
1826         if (stash)
1827                 d->pull.verb |= 1 << QB_VDQCR_VERB_WAE_SHIFT;
1828         else
1829                 d->pull.verb &= ~(1 << QB_VDQCR_VERB_WAE_SHIFT);
1830
1831         d->pull.rsp_addr = storage_phys;
1832 }
1833
1834 void qbman_pull_desc_set_numframes(struct qbman_pull_desc *d,
1835                                    uint8_t numframes)
1836 {
1837         d->pull.numf = numframes - 1;
1838 }
1839
1840 void qbman_pull_desc_set_token(struct qbman_pull_desc *d, uint8_t token)
1841 {
1842         d->pull.tok = token;
1843 }
1844
1845 void qbman_pull_desc_set_fq(struct qbman_pull_desc *d, uint32_t fqid)
1846 {
1847         d->pull.verb |= 1 << QB_VDQCR_VERB_DCT_SHIFT;
1848         d->pull.verb |= qb_pull_dt_framequeue << QB_VDQCR_VERB_DT_SHIFT;
1849         d->pull.dq_src = fqid;
1850 }
1851
1852 void qbman_pull_desc_set_wq(struct qbman_pull_desc *d, uint32_t wqid,
1853                             enum qbman_pull_type_e dct)
1854 {
1855         d->pull.verb |= dct << QB_VDQCR_VERB_DCT_SHIFT;
1856         d->pull.verb |= qb_pull_dt_workqueue << QB_VDQCR_VERB_DT_SHIFT;
1857         d->pull.dq_src = wqid;
1858 }
1859
1860 void qbman_pull_desc_set_channel(struct qbman_pull_desc *d, uint32_t chid,
1861                                  enum qbman_pull_type_e dct)
1862 {
1863         d->pull.verb |= dct << QB_VDQCR_VERB_DCT_SHIFT;
1864         d->pull.verb |= qb_pull_dt_channel << QB_VDQCR_VERB_DT_SHIFT;
1865         d->pull.dq_src = chid;
1866 }
1867
1868 void qbman_pull_desc_set_rad(struct qbman_pull_desc *d, int rad)
1869 {
1870         if (d->pull.verb & (1 << QB_VDQCR_VERB_RLS_SHIFT)) {
1871                 if (rad)
1872                         d->pull.verb |= 1 << QB_VDQCR_VERB_RAD_SHIFT;
1873                 else
1874                         d->pull.verb &= ~(1 << QB_VDQCR_VERB_RAD_SHIFT);
1875         } else {
1876                 pr_err("The RAD feature is not valid when RLS = 0\n");
1877         }
1878 }
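
/*
 * Illustrative usage sketch, not part of the driver: prepare a volatile
 * dequeue (pull) descriptor requesting 'nf' frames from frame queue
 * 'fqid' into caller-provided storage.  'storage_phys' is assumed to be
 * the IOVA/physical address of 'storage' (e.g. from a DMA-able memzone);
 * 'hypothetical_prepare_pull' is a made-up name.
 */
static inline void hypothetical_prepare_pull(struct qbman_pull_desc *d,
                                             struct qbman_result *storage,
                                             dma_addr_t storage_phys,
                                             uint32_t fqid, uint8_t nf)
{
        qbman_pull_desc_clear(d);
        /* stash = 0: don't write-allocate the response into cache */
        qbman_pull_desc_set_storage(d, storage, storage_phys, 0);
        qbman_pull_desc_set_numframes(d, nf); /* hardware encodes nf - 1 */
        qbman_pull_desc_set_fq(d, fqid);
}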
1879
1880 static int qbman_swp_pull_direct(struct qbman_swp *s,
1881                                  struct qbman_pull_desc *d)
1882 {
1883         uint32_t *p;
1884         uint32_t *cl = qb_cl(d);
1885
1886         if (!atomic_dec_and_test(&s->vdq.busy)) {
1887                 atomic_inc(&s->vdq.busy);
1888                 return -EBUSY;
1889         }
1890
1891         d->pull.tok = s->sys.idx + 1;
1892         s->vdq.storage = (void *)(size_t)d->pull.rsp_addr_virt;
1893         p = qbman_cena_write_start_wo_shadow(&s->sys, QBMAN_CENA_SWP_VDQCR);
1894         memcpy(&p[1], &cl[1], 12);
1895
1896         /* Set the verb byte, have to substitute in the valid-bit */
1897         lwsync();
1898         p[0] = cl[0] | s->vdq.valid_bit;
1899         s->vdq.valid_bit ^= QB_VALID_BIT;
1900         qbman_cena_write_complete_wo_shadow(&s->sys, QBMAN_CENA_SWP_VDQCR);
1901
1902         return 0;
1903 }
1904
1905 static int qbman_swp_pull_cinh_direct(struct qbman_swp *s,
1906                                  struct qbman_pull_desc *d)
1907 {
1908         uint32_t *p;
1909         uint32_t *cl = qb_cl(d);
1910
1911         if (!atomic_dec_and_test(&s->vdq.busy)) {
1912                 atomic_inc(&s->vdq.busy);
1913                 return -EBUSY;
1914         }
1915
1916         d->pull.tok = s->sys.idx + 1;
1917         s->vdq.storage = (void *)(size_t)d->pull.rsp_addr_virt;
1918         p = qbman_cinh_write_start_wo_shadow(&s->sys, QBMAN_CENA_SWP_VDQCR);
1919         memcpy_byte_by_byte(&p[1], &cl[1], 12);
1920
1921         /* Set the verb byte, have to substitute in the valid-bit */
1922         lwsync();
1923         p[0] = cl[0] | s->vdq.valid_bit;
1924         s->vdq.valid_bit ^= QB_VALID_BIT;
1925
1926         return 0;
1927 }
1928
1929 static int qbman_swp_pull_mem_back(struct qbman_swp *s,
1930                                    struct qbman_pull_desc *d)
1931 {
1932         uint32_t *p;
1933         uint32_t *cl = qb_cl(d);
1934
1935         if (!atomic_dec_and_test(&s->vdq.busy)) {
1936                 atomic_inc(&s->vdq.busy);
1937                 return -EBUSY;
1938         }
1939
1940         d->pull.tok = s->sys.idx + 1;
1941         s->vdq.storage = (void *)(size_t)d->pull.rsp_addr_virt;
1942         p = qbman_cena_write_start_wo_shadow(&s->sys, QBMAN_CENA_SWP_VDQCR_MEM);
1943         memcpy(&p[1], &cl[1], 12);
1944
1945         /* Set the verb byte, have to substitute in the valid-bit */
1946         p[0] = cl[0] | s->vdq.valid_bit;
1947         s->vdq.valid_bit ^= QB_VALID_BIT;
1948         dma_wmb();
1949         qbman_cinh_write(&s->sys, QBMAN_CINH_SWP_VDQCR_RT, QMAN_RT_MODE);
1950
1951         return 0;
1952 }
1953
1954 int qbman_swp_pull(struct qbman_swp *s, struct qbman_pull_desc *d)
1955 {
1956         if (!s->stash_off)
1957                 return qbman_swp_pull_ptr(s, d);
1958         else
1959                 return qbman_swp_pull_cinh_direct(s, d);
1960 }
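
/*
 * Illustrative usage sketch, not part of the driver: issue a pull command
 * and busy-wait on the caller's storage until the hardware writes the
 * first response.  qbman_check_new_result() is defined further below;
 * -EBUSY from qbman_swp_pull() means an earlier volatile dequeue is still
 * outstanding on this portal.  'hypothetical_pull_and_wait' is a made-up
 * name.
 */
static inline int hypothetical_pull_and_wait(struct qbman_swp *s,
                                             struct qbman_pull_desc *d,
                                             struct qbman_result *storage)
{
        int ret = qbman_swp_pull(s, d);

        if (ret)
                return ret; /* typically -EBUSY */
        while (!qbman_check_new_result(storage))
                ; /* spin; a real caller would bound or yield here */
        return 0;
}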
1961
1962 /****************/
1963 /* Polling DQRR */
1964 /****************/
1965
1966 #define QMAN_DQRR_PI_MASK              0xf
1967
1968 #define QBMAN_RESULT_DQ        0x60
1969 #define QBMAN_RESULT_FQRN      0x21
1970 #define QBMAN_RESULT_FQRNI     0x22
1971 #define QBMAN_RESULT_FQPN      0x24
1972 #define QBMAN_RESULT_FQDAN     0x25
1973 #define QBMAN_RESULT_CDAN      0x26
1974 #define QBMAN_RESULT_CSCN_MEM  0x27
1975 #define QBMAN_RESULT_CGCU      0x28
1976 #define QBMAN_RESULT_BPSCN     0x29
1977 #define QBMAN_RESULT_CSCN_WQ   0x2a
1978
1979 #include <rte_prefetch.h>
1980
1981 void qbman_swp_prefetch_dqrr_next(struct qbman_swp *s)
1982 {
1983         const struct qbman_result *p;
1984
1985         p = qbman_cena_read_wo_shadow(&s->sys,
1986                 QBMAN_CENA_SWP_DQRR(s->dqrr.next_idx));
1987         rte_prefetch0(p);
1988 }
1989
1990 /* Returns NULL if there are no unconsumed DQRR entries. Returns a DQRR entry
1991  * only once, so repeated calls can return a sequence of DQRR entries, without
1992  * requiring that they be consumed immediately or in any particular order.
1993  */
1994 const struct qbman_result *qbman_swp_dqrr_next(struct qbman_swp *s)
1995 {
1996         if (!s->stash_off)
1997                 return qbman_swp_dqrr_next_ptr(s);
1998         else
1999                 return qbman_swp_dqrr_next_cinh_direct(s);
2000 }
2001
2002 const struct qbman_result *qbman_swp_dqrr_next_direct(struct qbman_swp *s)
2003 {
2004         uint32_t verb;
2005         uint32_t response_verb;
2006         uint32_t flags;
2007         const struct qbman_result *p;
2008
2009         /* Before using valid-bit to detect if something is there, we have to
2010          * handle the case of the DQRR reset bug...
2011          */
2012         if (s->dqrr.reset_bug) {
2013                 /* We pick up new entries by cache-inhibited producer index,
2014                  * which means that a non-coherent mapping would require us to
2015                  * invalidate and read *only* once that PI has indicated that
2016                  * there's an entry here. The first trip around the DQRR ring
2017                  * will be much less efficient than all subsequent trips around
2018                  * it...
2019                  */
2020                 uint8_t pi = qbman_cinh_read(&s->sys, QBMAN_CINH_SWP_DQPI) &
2021                              QMAN_DQRR_PI_MASK;
2022
2023                 /* there are new entries if pi != next_idx */
2024                 if (pi == s->dqrr.next_idx)
2025                         return NULL;
2026
2027                 /* if next_idx is/was the last ring index, and 'pi' is
2028                  * different, we can disable the workaround as all the ring
2029                  * entries have now been DMA'd to, so valid-bit checking is
2030                  * repaired. Note: this logic needs to be based on next_idx
2031                  * (which increments one at a time), rather than on pi (which
2032                  * can burst and wrap-around between our snapshots of it).
2033                  */
2034                 QBMAN_BUG_ON((s->dqrr.dqrr_size - 1) < 0);
2035                 if (s->dqrr.next_idx == (s->dqrr.dqrr_size - 1u)) {
2036                         pr_debug("DEBUG: next_idx=%d, pi=%d, clear reset bug\n",
2037                                  s->dqrr.next_idx, pi);
2038                         s->dqrr.reset_bug = 0;
2039                 }
2040                 qbman_cena_invalidate_prefetch(&s->sys,
2041                                         QBMAN_CENA_SWP_DQRR(s->dqrr.next_idx));
2042         }
2043         p = qbman_cena_read_wo_shadow(&s->sys,
2044                         QBMAN_CENA_SWP_DQRR(s->dqrr.next_idx));
2045
2046         verb = p->dq.verb;
2047
2048         /* If the valid-bit isn't of the expected polarity, nothing there. Note,
2049          * in the DQRR reset bug workaround, we shouldn't need to skip this
2050          * check, because we've already determined that a new entry is available
2051          * and we've invalidated the cacheline before reading it, so the
2052          * valid-bit behaviour is repaired and should tell us what we already
2053          * knew from reading PI.
2054          */
2055         if ((verb & QB_VALID_BIT) != s->dqrr.valid_bit)
2056                 return NULL;
2057
2058         /* There's something there. Move "next_idx" on to the next ring
2059          * entry before returning what we found.
2060          */
2061         s->dqrr.next_idx++;
2062         if (s->dqrr.next_idx == s->dqrr.dqrr_size) {
2063                 s->dqrr.next_idx = 0;
2064                 s->dqrr.valid_bit ^= QB_VALID_BIT;
2065         }
2066         /* If this is the final response to a volatile dequeue command
2067          * indicate that the vdq is no longer busy
2068          */
2069         flags = p->dq.stat;
2070         response_verb = verb & QBMAN_RESPONSE_VERB_MASK;
2071         if ((response_verb == QBMAN_RESULT_DQ) &&
2072             (flags & QBMAN_DQ_STAT_VOLATILE) &&
2073             (flags & QBMAN_DQ_STAT_EXPIRED))
2074                 atomic_inc(&s->vdq.busy);
2075
2076         return p;
2077 }
2078
2079 const struct qbman_result *qbman_swp_dqrr_next_cinh_direct(struct qbman_swp *s)
2080 {
2081         uint32_t verb;
2082         uint32_t response_verb;
2083         uint32_t flags;
2084         const struct qbman_result *p;
2085
2086         /* Before using valid-bit to detect if something is there, we have to
2087          * handle the case of the DQRR reset bug...
2088          */
2089         if (s->dqrr.reset_bug) {
2090                 /* We pick up new entries by cache-inhibited producer index,
2091                  * which means that a non-coherent mapping would require us to
2092                  * invalidate and read *only* once that PI has indicated that
2093                  * there's an entry here. The first trip around the DQRR ring
2094                  * will be much less efficient than all subsequent trips around
2095                  * it...
2096                  */
2097                 uint8_t pi = qbman_cinh_read(&s->sys, QBMAN_CINH_SWP_DQPI) &
2098                              QMAN_DQRR_PI_MASK;
2099
2100                 /* there are new entries if pi != next_idx */
2101                 if (pi == s->dqrr.next_idx)
2102                         return NULL;
2103
2104                 /* if next_idx is/was the last ring index, and 'pi' is
2105                  * different, we can disable the workaround as all the ring
2106                  * entries have now been DMA'd to, so valid-bit checking is
2107                  * repaired. Note: this logic needs to be based on next_idx
2108                  * (which increments one at a time), rather than on pi (which
2109                  * can burst and wrap-around between our snapshots of it).
2110                  */
2111                 QBMAN_BUG_ON((s->dqrr.dqrr_size - 1) < 0);
2112                 if (s->dqrr.next_idx == (s->dqrr.dqrr_size - 1u)) {
2113                         pr_debug("DEBUG: next_idx=%d, pi=%d, clear reset bug\n",
2114                                  s->dqrr.next_idx, pi);
2115                         s->dqrr.reset_bug = 0;
2116                 }
2117         }
2118         p = qbman_cinh_read_wo_shadow(&s->sys,
2119                         QBMAN_CENA_SWP_DQRR(s->dqrr.next_idx));
2120
2121         verb = p->dq.verb;
2122
2123         /* If the valid-bit isn't of the expected polarity, nothing there. Note,
2124          * in the DQRR reset bug workaround, we shouldn't need to skip this
2125          * check, because we've already determined that a new entry is available
2126          * and we've invalidated the cacheline before reading it, so the
2127          * valid-bit behaviour is repaired and should tell us what we already
2128          * knew from reading PI.
2129          */
2130         if ((verb & QB_VALID_BIT) != s->dqrr.valid_bit)
2131                 return NULL;
2132
2133         /* There's something there. Move "next_idx" on to the next ring
2134          * entry before returning what we found.
2135          */
2136         s->dqrr.next_idx++;
2137         if (s->dqrr.next_idx == s->dqrr.dqrr_size) {
2138                 s->dqrr.next_idx = 0;
2139                 s->dqrr.valid_bit ^= QB_VALID_BIT;
2140         }
2141         /* If this is the final response to a volatile dequeue command
2142          * indicate that the vdq is no longer busy
2143          */
2144         flags = p->dq.stat;
2145         response_verb = verb & QBMAN_RESPONSE_VERB_MASK;
2146         if ((response_verb == QBMAN_RESULT_DQ) &&
2147             (flags & QBMAN_DQ_STAT_VOLATILE) &&
2148             (flags & QBMAN_DQ_STAT_EXPIRED))
2149                 atomic_inc(&s->vdq.busy);
2150
2151         return p;
2152 }
2153
2154 const struct qbman_result *qbman_swp_dqrr_next_mem_back(struct qbman_swp *s)
2155 {
2156         uint32_t verb;
2157         uint32_t response_verb;
2158         uint32_t flags;
2159         const struct qbman_result *p;
2160
2161         p = qbman_cena_read_wo_shadow(&s->sys,
2162                         QBMAN_CENA_SWP_DQRR_MEM(s->dqrr.next_idx));
2163
2164         verb = p->dq.verb;
2165
2166         /* If the valid-bit isn't of the expected polarity, nothing there. Note,
2167          * in the DQRR reset bug workaround, we shouldn't need to skip this
2168          * check, because we've already determined that a new entry is available
2169          * and we've invalidated the cacheline before reading it, so the
2170          * valid-bit behaviour is repaired and should tell us what we already
2171          * knew from reading PI.
2172          */
2173         if ((verb & QB_VALID_BIT) != s->dqrr.valid_bit)
2174                 return NULL;
2175
2176         /* There's something there. Move "next_idx" on to the next ring
2177          * entry before returning what we found.
2178          */
2179         s->dqrr.next_idx++;
2180         if (s->dqrr.next_idx == s->dqrr.dqrr_size) {
2181                 s->dqrr.next_idx = 0;
2182                 s->dqrr.valid_bit ^= QB_VALID_BIT;
2183         }
2184         /* If this is the final response to a volatile dequeue command
2185          * indicate that the vdq is no longer busy
2186          */
2187         flags = p->dq.stat;
2188         response_verb = verb & QBMAN_RESPONSE_VERB_MASK;
2189         if ((response_verb == QBMAN_RESULT_DQ)
2190                         && (flags & QBMAN_DQ_STAT_VOLATILE)
2191                         && (flags & QBMAN_DQ_STAT_EXPIRED))
2192                 atomic_inc(&s->vdq.busy);
2193         return p;
2194 }
2195
2196 /* Consume DQRR entries previously returned from qbman_swp_dqrr_next(). */
2197 void qbman_swp_dqrr_consume(struct qbman_swp *s,
2198                             const struct qbman_result *dq)
2199 {
2200         qbman_cinh_write(&s->sys,
2201                         QBMAN_CINH_SWP_DCAP, QBMAN_IDX_FROM_DQRR(dq));
2202 }
2203
2204 /* Consume DQRR entries previously returned from qbman_swp_dqrr_next(). */
2205 void qbman_swp_dqrr_idx_consume(struct qbman_swp *s,
2206                             uint8_t dqrr_index)
2207 {
2208         qbman_cinh_write(&s->sys, QBMAN_CINH_SWP_DCAP, dqrr_index);
2209 }
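
/*
 * Illustrative usage sketch, not part of the driver: a minimal DQRR poll
 * loop.  Each entry returned by qbman_swp_dqrr_next() must be consumed
 * exactly once.  qbman_result_is_DQ() and qbman_result_DQ_fd() are
 * defined further below; 'hypothetical_handle_fd' is a made-up callback.
 */
static inline void hypothetical_poll_dqrr(struct qbman_swp *s,
                        void (*hypothetical_handle_fd)(const struct qbman_fd *))
{
        const struct qbman_result *dq;

        while ((dq = qbman_swp_dqrr_next(s)) != NULL) {
                if (qbman_result_is_DQ(dq))
                        hypothetical_handle_fd(qbman_result_DQ_fd(dq));
                qbman_swp_dqrr_consume(s, dq);
        }
}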
2210
2211 /*********************************/
2212 /* Polling user-provided storage */
2213 /*********************************/
2214
2215 int qbman_result_has_new_result(struct qbman_swp *s,
2216                                 struct qbman_result *dq)
2217 {
2218         if (dq->dq.tok == 0)
2219                 return 0;
2220
2221         /*
2222          * Set the token to 0 so that we detect the change back to non-zero
2223          * the next time this entry is polled. Const is cast away here
2224          * as we want users to treat the dequeue responses as read-only.
2225          */
2226         ((struct qbman_result *)dq)->dq.tok = 0;
2227
2228         /*
2229          * VDQCR "no longer busy" hook - not quite the same as DQRR, because
2230          * the fact that VDQCR shows busy doesn't mean that we hold the result
2231          * that makes it available. E.g. we may be looking at our 10th dequeue
2232          * result, having released VDQCR after the 1st result, and it is now
2233          * busy due to some other command!
2234          */
2235         if (s->vdq.storage == dq) {
2236                 s->vdq.storage = NULL;
2237                 atomic_inc(&s->vdq.busy);
2238         }
2239
2240         return 1;
2241 }
2242
2243 int qbman_check_new_result(struct qbman_result *dq)
2244 {
2245         if (dq->dq.tok == 0)
2246                 return 0;
2247
2248         /*
2249          * Set the token to 0 so that we detect the change back to non-zero
2250          * the next time this entry is polled. Const is cast away here
2251          * as we want users to treat the dequeue responses as read-only.
2252          */
2253         ((struct qbman_result *)dq)->dq.tok = 0;
2254
2255         return 1;
2256 }
2257
2258 int qbman_check_command_complete(struct qbman_result *dq)
2259 {
2260         struct qbman_swp *s;
2261
2262         if (dq->dq.tok == 0)
2263                 return 0;
2264
2265         s = portal_idx_map[dq->dq.tok - 1];
2266         /*
2267          * VDQCR "no longer busy" hook - not quite the same as DQRR, because
2268          * the fact that VDQCR shows busy doesn't mean that we hold the result
2269          * that makes it available. E.g. we may be looking at our 10th dequeue
2270          * result, having released VDQCR after the 1st result, and it is now
2271          * busy due to some other command!
2272          */
2273         if (s->vdq.storage == dq) {
2274                 s->vdq.storage = NULL;
2275                 atomic_inc(&s->vdq.busy);
2276         }
2277
2278         return 1;
2279 }
2280
2281 /********************************/
2282 /* Categorising qbman results   */
2283 /********************************/
2284
2285 static inline int __qbman_result_is_x(const struct qbman_result *dq,
2286                                       uint8_t x)
2287 {
2288         uint8_t response_verb = dq->dq.verb & QBMAN_RESPONSE_VERB_MASK;
2289
2290         return (response_verb == x);
2291 }
2292
2293 int qbman_result_is_DQ(const struct qbman_result *dq)
2294 {
2295         return __qbman_result_is_x(dq, QBMAN_RESULT_DQ);
2296 }
2297
2298 int qbman_result_is_FQDAN(const struct qbman_result *dq)
2299 {
2300         return __qbman_result_is_x(dq, QBMAN_RESULT_FQDAN);
2301 }
2302
2303 int qbman_result_is_CDAN(const struct qbman_result *dq)
2304 {
2305         return __qbman_result_is_x(dq, QBMAN_RESULT_CDAN);
2306 }
2307
2308 int qbman_result_is_CSCN(const struct qbman_result *dq)
2309 {
2310         return __qbman_result_is_x(dq, QBMAN_RESULT_CSCN_MEM) ||
2311                 __qbman_result_is_x(dq, QBMAN_RESULT_CSCN_WQ);
2312 }
2313
2314 int qbman_result_is_BPSCN(const struct qbman_result *dq)
2315 {
2316         return __qbman_result_is_x(dq, QBMAN_RESULT_BPSCN);
2317 }
2318
2319 int qbman_result_is_CGCU(const struct qbman_result *dq)
2320 {
2321         return __qbman_result_is_x(dq, QBMAN_RESULT_CGCU);
2322 }
2323
2324 int qbman_result_is_FQRN(const struct qbman_result *dq)
2325 {
2326         return __qbman_result_is_x(dq, QBMAN_RESULT_FQRN);
2327 }
2328
2329 int qbman_result_is_FQRNI(const struct qbman_result *dq)
2330 {
2331         return __qbman_result_is_x(dq, QBMAN_RESULT_FQRNI);
2332 }
2333
2334 int qbman_result_is_FQPN(const struct qbman_result *dq)
2335 {
2336         return __qbman_result_is_x(dq, QBMAN_RESULT_FQPN);
2337 }
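
/*
 * Illustrative usage sketch, not part of the driver: classify a result
 * with the predicates above.  Only a few notification types are shown;
 * the rest follow the same pattern.  'hypothetical_result_name' is a
 * made-up name.
 */
static inline const char *hypothetical_result_name(const struct qbman_result *dq)
{
        if (qbman_result_is_DQ(dq))
                return "frame dequeue";
        if (qbman_result_is_FQDAN(dq))
                return "FQ data availability notification";
        if (qbman_result_is_CDAN(dq))
                return "channel data availability notification";
        if (qbman_result_is_CSCN(dq))
                return "congestion state change notification";
        return "other";
}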
2338
2339 /*********************************/
2340 /* Parsing frame dequeue results */
2341 /*********************************/
2342
2343 /* These APIs assume qbman_result_is_DQ() is TRUE */
2344
2345 uint8_t qbman_result_DQ_flags(const struct qbman_result *dq)
2346 {
2347         return dq->dq.stat;
2348 }
2349
2350 uint16_t qbman_result_DQ_seqnum(const struct qbman_result *dq)
2351 {
2352         return dq->dq.seqnum;
2353 }
2354
2355 uint16_t qbman_result_DQ_odpid(const struct qbman_result *dq)
2356 {
2357         return dq->dq.oprid;
2358 }
2359
2360 uint32_t qbman_result_DQ_fqid(const struct qbman_result *dq)
2361 {
2362         return dq->dq.fqid;
2363 }
2364
2365 uint32_t qbman_result_DQ_byte_count(const struct qbman_result *dq)
2366 {
2367         return dq->dq.fq_byte_cnt;
2368 }
2369
2370 uint32_t qbman_result_DQ_frame_count(const struct qbman_result *dq)
2371 {
2372         return dq->dq.fq_frm_cnt;
2373 }
2374
2375 uint64_t qbman_result_DQ_fqd_ctx(const struct qbman_result *dq)
2376 {
2377         return dq->dq.fqd_ctx;
2378 }
2379
2380 const struct qbman_fd *qbman_result_DQ_fd(const struct qbman_result *dq)
2381 {
2382         return (const struct qbman_fd *)&dq->dq.fd[0];
2383 }
2384
2385 /**************************************/
2386 /* Parsing state-change notifications */
2387 /**************************************/
2388 uint8_t qbman_result_SCN_state(const struct qbman_result *scn)
2389 {
2390         return scn->scn.state;
2391 }
2392
2393 uint32_t qbman_result_SCN_rid(const struct qbman_result *scn)
2394 {
2395         return scn->scn.rid_tok;
2396 }
2397
2398 uint64_t qbman_result_SCN_ctx(const struct qbman_result *scn)
2399 {
2400         return scn->scn.ctx;
2401 }
2402
2403 /*****************/
2404 /* Parsing BPSCN */
2405 /*****************/
2406 uint16_t qbman_result_bpscn_bpid(const struct qbman_result *scn)
2407 {
2408         return (uint16_t)qbman_result_SCN_rid(scn) & 0x3FFF;
2409 }
2410
2411 int qbman_result_bpscn_has_free_bufs(const struct qbman_result *scn)
2412 {
2413         return !(int)(qbman_result_SCN_state(scn) & 0x1);
2414 }
2415
2416 int qbman_result_bpscn_is_depleted(const struct qbman_result *scn)
2417 {
2418         return (int)(qbman_result_SCN_state(scn) & 0x2);
2419 }
2420
2421 int qbman_result_bpscn_is_surplus(const struct qbman_result *scn)
2422 {
2423         return (int)(qbman_result_SCN_state(scn) & 0x4);
2424 }
2425
2426 uint64_t qbman_result_bpscn_ctx(const struct qbman_result *scn)
2427 {
2428         return qbman_result_SCN_ctx(scn);
2429 }
2430
2431 /*****************/
2432 /* Parsing CGCU  */
2433 /*****************/
2434 uint16_t qbman_result_cgcu_cgid(const struct qbman_result *scn)
2435 {
2436         return (uint16_t)qbman_result_SCN_rid(scn) & 0xFFFF;
2437 }
2438
2439 uint64_t qbman_result_cgcu_icnt(const struct qbman_result *scn)
2440 {
2441         return qbman_result_SCN_ctx(scn);
2442 }
2443
2444 /********************/
2445 /* Parsing EQ RESP  */
2446 /********************/
2447 struct qbman_fd *qbman_result_eqresp_fd(struct qbman_result *eqresp)
2448 {
2449         return (struct qbman_fd *)&eqresp->eq_resp.fd[0];
2450 }
2451
2452 void qbman_result_eqresp_set_rspid(struct qbman_result *eqresp, uint8_t val)
2453 {
2454         eqresp->eq_resp.rspid = val;
2455 }
2456
2457 uint8_t qbman_result_eqresp_rspid(struct qbman_result *eqresp)
2458 {
2459         return eqresp->eq_resp.rspid;
2460 }
2461
2462 uint8_t qbman_result_eqresp_rc(struct qbman_result *eqresp)
2463 {
2464         if (eqresp->eq_resp.rc == 0xE)
2465                 return 0;
2466         else
2467                 return -1; /* stored in a uint8_t, so callers see 0xFF */
2468 }
2469
2470 /******************/
2471 /* Buffer release */
2472 /******************/
2473 #define QB_BR_RC_VALID_SHIFT  5
2474 #define QB_BR_RCDI_SHIFT      6
2475
2476 void qbman_release_desc_clear(struct qbman_release_desc *d)
2477 {
2478         memset(d, 0, sizeof(*d));
2479         d->br.verb = 1 << QB_BR_RC_VALID_SHIFT;
2480 }
2481
2482 void qbman_release_desc_set_bpid(struct qbman_release_desc *d, uint16_t bpid)
2483 {
2484         d->br.bpid = bpid;
2485 }
2486
2487 void qbman_release_desc_set_rcdi(struct qbman_release_desc *d, int enable)
2488 {
2489         if (enable)
2490                 d->br.verb |= 1 << QB_BR_RCDI_SHIFT;
2491         else
2492                 d->br.verb &= ~(1 << QB_BR_RCDI_SHIFT);
2493 }
2494
2495 #define RAR_IDX(rar)     ((rar) & 0x7)
2496 #define RAR_VB(rar)      ((rar) & 0x80)
2497 #define RAR_SUCCESS(rar) ((rar) & 0x100)
2498
2499 static int qbman_swp_release_direct(struct qbman_swp *s,
2500                                     const struct qbman_release_desc *d,
2501                                     const uint64_t *buffers,
2502                                     unsigned int num_buffers)
2503 {
2504         uint32_t *p;
2505         const uint32_t *cl = qb_cl(d);
2506         uint32_t rar = qbman_cinh_read(&s->sys, QBMAN_CINH_SWP_RAR);
2507
2508         pr_debug("RAR=%08x\n", rar);
2509         if (!RAR_SUCCESS(rar))
2510                 return -EBUSY;
2511
2512         QBMAN_BUG_ON(!num_buffers || (num_buffers > 7));
2513
2514         /* Start the release command */
2515         p = qbman_cena_write_start_wo_shadow(&s->sys,
2516                                      QBMAN_CENA_SWP_RCR(RAR_IDX(rar)));
2517
2518         /* Copy the caller's buffer pointers to the command */
2519         u64_to_le32_copy(&p[2], buffers, num_buffers);
2520
2521         /* Set the verb byte, have to substitute in the valid-bit and the
2522          * number of buffers.
2523          */
2524         lwsync();
2525         p[0] = cl[0] | RAR_VB(rar) | num_buffers;
2526         qbman_cena_write_complete_wo_shadow(&s->sys,
2527                                     QBMAN_CENA_SWP_RCR(RAR_IDX(rar)));
2528
2529         return 0;
2530 }
2531
2532 static int qbman_swp_release_cinh_direct(struct qbman_swp *s,
2533                                     const struct qbman_release_desc *d,
2534                                     const uint64_t *buffers,
2535                                     unsigned int num_buffers)
2536 {
2537         uint32_t *p;
2538         const uint32_t *cl = qb_cl(d);
2539         uint32_t rar = qbman_cinh_read(&s->sys, QBMAN_CINH_SWP_RAR);
2540
2541         pr_debug("RAR=%08x\n", rar);
2542         if (!RAR_SUCCESS(rar))
2543                 return -EBUSY;
2544
2545         QBMAN_BUG_ON(!num_buffers || (num_buffers > 7));
2546
2547         /* Start the release command */
2548         p = qbman_cinh_write_start_wo_shadow(&s->sys,
2549                                      QBMAN_CENA_SWP_RCR(RAR_IDX(rar)));
2550
2551         /* Copy the caller's buffer pointers to the command */
2552         memcpy_byte_by_byte(&p[2], buffers, num_buffers * sizeof(uint64_t));
2553
2554         /* Set the verb byte, have to substitute in the valid-bit and the
2555          * number of buffers.
2556          */
2557         lwsync();
2558         p[0] = cl[0] | RAR_VB(rar) | num_buffers;
2559
2560         return 0;
2561 }
2562
2563 static int qbman_swp_release_mem_back(struct qbman_swp *s,
2564                                       const struct qbman_release_desc *d,
2565                                       const uint64_t *buffers,
2566                                       unsigned int num_buffers)
2567 {
2568         uint32_t *p;
2569         const uint32_t *cl = qb_cl(d);
2570         uint32_t rar = qbman_cinh_read(&s->sys, QBMAN_CINH_SWP_RAR);
2571
2572         pr_debug("RAR=%08x\n", rar);
2573         if (!RAR_SUCCESS(rar))
2574                 return -EBUSY;
2575
2576         QBMAN_BUG_ON(!num_buffers || (num_buffers > 7));
2577
2578         /* Start the release command */
2579         p = qbman_cena_write_start_wo_shadow(&s->sys,
2580                 QBMAN_CENA_SWP_RCR_MEM(RAR_IDX(rar)));
2581
2582         /* Copy the caller's buffer pointers to the command */
2583         u64_to_le32_copy(&p[2], buffers, num_buffers);
2584
2585         /* Set the verb byte, have to substitute in the valid-bit and the
2586          * number of buffers.
2587          */
2588         p[0] = cl[0] | RAR_VB(rar) | num_buffers;
2589         lwsync();
2590         qbman_cinh_write(&s->sys, QBMAN_CINH_SWP_RCR_AM_RT +
2591                 RAR_IDX(rar) * 4, QMAN_RT_MODE);
2592
2593         return 0;
2594 }
2595
2596 int qbman_swp_release(struct qbman_swp *s,
2597                              const struct qbman_release_desc *d,
2598                              const uint64_t *buffers,
2599                              unsigned int num_buffers)
2600 {
2601         if (!s->stash_off)
2602                 return qbman_swp_release_ptr(s, d, buffers, num_buffers);
2603         else
2604                 return qbman_swp_release_cinh_direct(s, d, buffers,
2605                                                 num_buffers);
2606 }
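
/*
 * Illustrative usage sketch, not part of the driver: release up to seven
 * buffers to pool 'bpid', retrying while the release command ring is
 * busy.  'buffers' holds IOVA/physical addresses, as the hardware
 * expects.  'hypothetical_release_buffers' is a made-up name.
 */
static inline int hypothetical_release_buffers(struct qbman_swp *s,
                                               uint16_t bpid,
                                               const uint64_t *buffers,
                                               unsigned int num)
{
        struct qbman_release_desc d;
        int ret;

        if (!num || num > 7)
                return -EINVAL;
        qbman_release_desc_clear(&d);
        qbman_release_desc_set_bpid(&d, bpid);
        do {
                ret = qbman_swp_release(s, &d, buffers, num);
        } while (ret == -EBUSY);
        return ret;
}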
2607
2608 /*******************/
2609 /* Buffer acquires */
2610 /*******************/
2611 struct qbman_acquire_desc {
2612         uint8_t verb;
2613         uint8_t reserved;
2614         uint16_t bpid;
2615         uint8_t num;
2616         uint8_t reserved2[59];
2617 };
2618
2619 struct qbman_acquire_rslt {
2620         uint8_t verb;
2621         uint8_t rslt;
2622         uint16_t reserved;
2623         uint8_t num;
2624         uint8_t reserved2[3];
2625         uint64_t buf[7];
2626 };
2627
2628 static int qbman_swp_acquire_direct(struct qbman_swp *s, uint16_t bpid,
2629                                 uint64_t *buffers, unsigned int num_buffers)
2630 {
2631         struct qbman_acquire_desc *p;
2632         struct qbman_acquire_rslt *r;
2633
2634         if (!num_buffers || (num_buffers > 7))
2635                 return -EINVAL;
2636
2637         /* Start the management command */
2638         p = qbman_swp_mc_start(s);
2639
2640         if (!p)
2641                 return -EBUSY;
2642
2643         /* Encode the caller-provided attributes */
2644         p->bpid = bpid;
2645         p->num = num_buffers;
2646
2647         /* Complete the management command */
2648         r = qbman_swp_mc_complete(s, p, QBMAN_MC_ACQUIRE);
2649         if (!r) {
2650                 pr_err("qbman: acquire from BPID %d failed, no response\n",
2651                        bpid);
2652                 return -EIO;
2653         }
2654
2655         /* Decode the outcome */
2656         QBMAN_BUG_ON((r->verb & QBMAN_RESPONSE_VERB_MASK) != QBMAN_MC_ACQUIRE);
2657
2658         /* Determine success or failure */
2659         if (r->rslt != QBMAN_MC_RSLT_OK) {
2660                 pr_err("Acquire buffers from BPID 0x%x failed, code=0x%02x\n",
2661                        bpid, r->rslt);
2662                 return -EIO;
2663         }
2664
2665         QBMAN_BUG_ON(r->num > num_buffers);
2666
2667         /* Copy the acquired buffers to the caller's array */
2668         u64_from_le32_copy(buffers, &r->buf[0], r->num);
2669
2670         return (int)r->num;
2671 }
2672
2673 static int qbman_swp_acquire_cinh_direct(struct qbman_swp *s, uint16_t bpid,
2674                         uint64_t *buffers, unsigned int num_buffers)
2675 {
2676         struct qbman_acquire_desc *p;
2677         struct qbman_acquire_rslt *r;
2678
2679         if (!num_buffers || (num_buffers > 7))
2680                 return -EINVAL;
2681
2682         /* Start the management command */
2683         p = qbman_swp_mc_start(s);
2684
2685         if (!p)
2686                 return -EBUSY;
2687
2688         /* Encode the caller-provided attributes */
2689         p->bpid = bpid;
2690         p->num = num_buffers;
2691
2692         /* Complete the management command */
2693         r = qbman_swp_mc_complete_cinh(s, p, QBMAN_MC_ACQUIRE);
2694         if (!r) {
2695                 pr_err("qbman: acquire from BPID %d failed, no response\n",
2696                        bpid);
2697                 return -EIO;
2698         }
2699
2700         /* Decode the outcome */
2701         QBMAN_BUG_ON((r->verb & QBMAN_RESPONSE_VERB_MASK) != QBMAN_MC_ACQUIRE);
2702
2703         /* Determine success or failure */
2704         if (r->rslt != QBMAN_MC_RSLT_OK) {
2705                 pr_err("Acquire buffers from BPID 0x%x failed, code=0x%02x\n",
2706                        bpid, r->rslt);
2707                 return -EIO;
2708         }
2709
2710         QBMAN_BUG_ON(r->num > num_buffers);
2711
2712         /* Copy the acquired buffers to the caller's array */
2713         u64_from_le32_copy(buffers, &r->buf[0], r->num);
2714
2715         return (int)r->num;
2716 }
2717
2718 int qbman_swp_acquire(struct qbman_swp *s, uint16_t bpid, uint64_t *buffers,
2719                       unsigned int num_buffers)
2720 {
2721         if (!s->stash_off)
2722                 return qbman_swp_acquire_direct(s, bpid, buffers, num_buffers);
2723         else
2724                 return qbman_swp_acquire_cinh_direct(s, bpid, buffers,
2725                                         num_buffers);
2726 }
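
/*
 * Illustrative usage sketch, not part of the driver: fill a larger array
 * in chunks of the seven-buffer-per-command hardware limit.  A short
 * grant is taken to mean the pool is running dry.
 * 'hypothetical_acquire_bulk' is a made-up name.
 */
static inline int hypothetical_acquire_bulk(struct qbman_swp *s,
                                            uint16_t bpid,
                                            uint64_t *buffers,
                                            unsigned int num)
{
        unsigned int got = 0;
        int ret;

        while (got < num) {
                unsigned int chunk = num - got;

                if (chunk > 7)
                        chunk = 7;
                ret = qbman_swp_acquire(s, bpid, &buffers[got], chunk);
                if (ret < 0)
                        return got ? (int)got : ret;
                got += ret;
                if ((unsigned int)ret < chunk)
                        break; /* pool depleted for now */
        }
        return (int)got;
}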
2727
2728 /*****************/
2729 /* FQ management */
2730 /*****************/
2731 struct qbman_alt_fq_state_desc {
2732         uint8_t verb;
2733         uint8_t reserved[3];
2734         uint32_t fqid;
2735         uint8_t reserved2[56];
2736 };
2737
2738 struct qbman_alt_fq_state_rslt {
2739         uint8_t verb;
2740         uint8_t rslt;
2741         uint8_t reserved[62];
2742 };
2743
2744 #define ALT_FQ_FQID_MASK 0x00FFFFFF
2745
2746 static int qbman_swp_alt_fq_state(struct qbman_swp *s, uint32_t fqid,
2747                                   uint8_t alt_fq_verb)
2748 {
2749         struct qbman_alt_fq_state_desc *p;
2750         struct qbman_alt_fq_state_rslt *r;
2751
2752         /* Start the management command */
2753         p = qbman_swp_mc_start(s);
2754         if (!p)
2755                 return -EBUSY;
2756
2757         p->fqid = fqid & ALT_FQ_FQID_MASK;
2758
2759         /* Complete the management command */
2760         r = qbman_swp_mc_complete(s, p, alt_fq_verb);
2761         if (!r) {
2762                 pr_err("qbman: mgmt cmd failed, no response (verb=0x%x)\n",
2763                        alt_fq_verb);
2764                 return -EIO;
2765         }
2766
2767         /* Decode the outcome */
2768         QBMAN_BUG_ON((r->verb & QBMAN_RESPONSE_VERB_MASK) != alt_fq_verb);
2769
2770         /* Determine success or failure */
2771         if (r->rslt != QBMAN_MC_RSLT_OK) {
2772                 pr_err("ALT FQID %d failed: verb = 0x%08x, code = 0x%02x\n",
2773                        fqid, alt_fq_verb, r->rslt);
2774                 return -EIO;
2775         }
2776
2777         return 0;
2778 }
2779
2780 int qbman_swp_fq_schedule(struct qbman_swp *s, uint32_t fqid)
2781 {
2782         return qbman_swp_alt_fq_state(s, fqid, QBMAN_FQ_SCHEDULE);
2783 }
2784
2785 int qbman_swp_fq_force(struct qbman_swp *s, uint32_t fqid)
2786 {
2787         return qbman_swp_alt_fq_state(s, fqid, QBMAN_FQ_FORCE);
2788 }
2789
2790 int qbman_swp_fq_xon(struct qbman_swp *s, uint32_t fqid)
2791 {
2792         return qbman_swp_alt_fq_state(s, fqid, QBMAN_FQ_XON);
2793 }
2794
2795 int qbman_swp_fq_xoff(struct qbman_swp *s, uint32_t fqid)
2796 {
2797         return qbman_swp_alt_fq_state(s, fqid, QBMAN_FQ_XOFF);
2798 }
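
/*
 * Illustrative usage sketch, not part of the driver: pause a frame queue
 * with XOFF, run some caller-supplied maintenance, then resume it with
 * XON.  'hypothetical_quiesce_fq' and 'work' are made-up names.
 */
static inline int hypothetical_quiesce_fq(struct qbman_swp *s, uint32_t fqid,
                                          void (*work)(void))
{
        int ret = qbman_swp_fq_xoff(s, fqid);

        if (ret)
                return ret;
        work();
        return qbman_swp_fq_xon(s, fqid);
}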
2799
2800 /**********************/
2801 /* Channel management */
2802 /**********************/
2803
2804 struct qbman_cdan_ctrl_desc {
2805         uint8_t verb;
2806         uint8_t reserved;
2807         uint16_t ch;
2808         uint8_t we;
2809         uint8_t ctrl;
2810         uint16_t reserved2;
2811         uint64_t cdan_ctx;
2812         uint8_t reserved3[48];
2813
2814 };
2815
2816 struct qbman_cdan_ctrl_rslt {
2817         uint8_t verb;
2818         uint8_t rslt;
2819         uint16_t ch;
2820         uint8_t reserved[60];
2821 };
2822
2823 /* Hide "ICD" for now as we don't use it, don't set it, and don't test it, so it
2824  * would be irresponsible to expose it.
2825  */
2826 #define CODE_CDAN_WE_EN    0x1
2827 #define CODE_CDAN_WE_CTX   0x4
2828
2829 static int qbman_swp_CDAN_set(struct qbman_swp *s, uint16_t channelid,
2830                               uint8_t we_mask, uint8_t cdan_en,
2831                               uint64_t ctx)
2832 {
2833         struct qbman_cdan_ctrl_desc *p;
2834         struct qbman_cdan_ctrl_rslt *r;
2835
2836         /* Start the management command */
2837         p = qbman_swp_mc_start(s);
2838         if (!p)
2839                 return -EBUSY;
2840
2841         /* Encode the caller-provided attributes */
2842         p->ch = channelid;
2843         p->we = we_mask;
2844         if (cdan_en)
2845                 p->ctrl = 1;
2846         else
2847                 p->ctrl = 0;
2848         p->cdan_ctx = ctx;
2849
2850         /* Complete the management command */
2851         r = qbman_swp_mc_complete(s, p, QBMAN_WQCHAN_CONFIGURE);
2852         if (!r) {
2853                 pr_err("qbman: wqchan config failed, no response\n");
2854                 return -EIO;
2855         }
2856
2857         /* Decode the outcome */
2858         QBMAN_BUG_ON((r->verb & QBMAN_RESPONSE_VERB_MASK)
2859                      != QBMAN_WQCHAN_CONFIGURE);
2860
2861         /* Determine success or failure */
2862         if (r->rslt != QBMAN_MC_RSLT_OK) {
2863                 pr_err("CDAN cQID %d failed: code = 0x%02x\n",
2864                        channelid, r->rslt);
2865                 return -EIO;
2866         }
2867
2868         return 0;
2869 }
2870
2871 int qbman_swp_CDAN_set_context(struct qbman_swp *s, uint16_t channelid,
2872                                uint64_t ctx)
2873 {
2874         return qbman_swp_CDAN_set(s, channelid,
2875                                   CODE_CDAN_WE_CTX,
2876                                   0, ctx);
2877 }
2878
2879 int qbman_swp_CDAN_enable(struct qbman_swp *s, uint16_t channelid)
2880 {
2881         return qbman_swp_CDAN_set(s, channelid,
2882                                   CODE_CDAN_WE_EN,
2883                                   1, 0);
2884 }
2885
2886 int qbman_swp_CDAN_disable(struct qbman_swp *s, uint16_t channelid)
2887 {
2888         return qbman_swp_CDAN_set(s, channelid,
2889                                   CODE_CDAN_WE_EN,
2890                                   0, 0);
2891 }
2892
2893 int qbman_swp_CDAN_set_context_enable(struct qbman_swp *s, uint16_t channelid,
2894                                       uint64_t ctx)
2895 {
2896         return qbman_swp_CDAN_set(s, channelid,
2897                                   CODE_CDAN_WE_EN | CODE_CDAN_WE_CTX,
2898                                   1, ctx);
2899 }
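
/*
 * Illustrative usage sketch, not part of the driver: arm a channel for
 * data availability notifications in one command.  The 64-bit context
 * comes back in the CDAN result (see qbman_result_SCN_ctx() above), so a
 * pointer to per-channel state is a common choice.
 * 'hypothetical_arm_channel' is a made-up name.
 */
static inline int hypothetical_arm_channel(struct qbman_swp *s,
                                           uint16_t channelid,
                                           void *per_channel_state)
{
        return qbman_swp_CDAN_set_context_enable(s, channelid,
                        (uint64_t)(size_t)per_channel_state);
}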
2900
2901 uint8_t qbman_get_dqrr_idx(const struct qbman_result *dqrr)
2902 {
2903         return QBMAN_IDX_FROM_DQRR(dqrr);
2904 }
2905
2906 struct qbman_result *qbman_get_dqrr_from_idx(struct qbman_swp *s, uint8_t idx)
2907 {
2908         struct qbman_result *dq;
2909
2910         dq = qbman_cena_read(&s->sys, QBMAN_CENA_SWP_DQRR(idx));
2911         return dq;
2912 }