bus/fslmc: rename cinh read functions used for ls1088
[dpdk.git] / drivers / bus / fslmc / qbman / qbman_portal.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  *
3  * Copyright (C) 2014-2016 Freescale Semiconductor, Inc.
4  * Copyright 2018-2020 NXP
5  *
6  */
7
8 #include "qbman_sys.h"
9 #include "qbman_portal.h"
10
11 /* QBMan portal management command codes */
12 #define QBMAN_MC_ACQUIRE       0x30
13 #define QBMAN_WQCHAN_CONFIGURE 0x46
14
15 /* Reverse mapping of QBMAN_CENA_SWP_DQRR() */
16 #define QBMAN_IDX_FROM_DQRR(p) (((unsigned long)p & 0x1ff) >> 6)
17
18 /* QBMan FQ management command codes */
19 #define QBMAN_FQ_SCHEDULE       0x48
20 #define QBMAN_FQ_FORCE          0x49
21 #define QBMAN_FQ_XON            0x4d
22 #define QBMAN_FQ_XOFF           0x4e
23
24 /*******************************/
25 /* Pre-defined attribute codes */
26 /*******************************/
27
28 #define QBMAN_RESPONSE_VERB_MASK   0x7f
29
30 /*************************/
31 /* SDQCR attribute codes */
32 /*************************/
33 #define QB_SDQCR_FC_SHIFT   29
34 #define QB_SDQCR_FC_MASK    0x1
35 #define QB_SDQCR_DCT_SHIFT  24
36 #define QB_SDQCR_DCT_MASK   0x3
37 #define QB_SDQCR_TOK_SHIFT  16
38 #define QB_SDQCR_TOK_MASK   0xff
39 #define QB_SDQCR_SRC_SHIFT  0
40 #define QB_SDQCR_SRC_MASK   0xffff
41
42 /* opaque token for static dequeues */
43 #define QMAN_SDQCR_TOKEN    0xbb
44
45 enum qbman_sdqcr_dct {
46         qbman_sdqcr_dct_null = 0,
47         qbman_sdqcr_dct_prio_ics,
48         qbman_sdqcr_dct_active_ics,
49         qbman_sdqcr_dct_active
50 };
51
52 enum qbman_sdqcr_fc {
53         qbman_sdqcr_fc_one = 0,
54         qbman_sdqcr_fc_up_to_3 = 1
55 };
56
57 /* We need to keep track of which SWP triggered a pull command
58  * so keep an array of portal IDs and use the token field to
59  * be able to find the proper portal
60  */
61 #define MAX_QBMAN_PORTALS  64
62 static struct qbman_swp *portal_idx_map[MAX_QBMAN_PORTALS];
63
64 uint32_t qman_version;
65
66 /* Internal Function declaration */
67 static int
68 qbman_swp_enqueue_array_mode_direct(struct qbman_swp *s,
69                 const struct qbman_eq_desc *d,
70                 const struct qbman_fd *fd);
71 static int
72 qbman_swp_enqueue_array_mode_mem_back(struct qbman_swp *s,
73                 const struct qbman_eq_desc *d,
74                 const struct qbman_fd *fd);
75
76 static int
77 qbman_swp_enqueue_ring_mode_direct(struct qbman_swp *s,
78                 const struct qbman_eq_desc *d,
79                 const struct qbman_fd *fd);
80 static int
81 qbman_swp_enqueue_ring_mode_cinh_read_direct(struct qbman_swp *s,
82                 const struct qbman_eq_desc *d,
83                 const struct qbman_fd *fd);
84 static int
85 qbman_swp_enqueue_ring_mode_cinh_direct(struct qbman_swp *s,
86                 const struct qbman_eq_desc *d,
87                 const struct qbman_fd *fd);
88 static int
89 qbman_swp_enqueue_ring_mode_mem_back(struct qbman_swp *s,
90                 const struct qbman_eq_desc *d,
91                 const struct qbman_fd *fd);
92
93 static int
94 qbman_swp_enqueue_multiple_direct(struct qbman_swp *s,
95                 const struct qbman_eq_desc *d,
96                 const struct qbman_fd *fd,
97                 uint32_t *flags,
98                 int num_frames);
99 static int
100 qbman_swp_enqueue_multiple_cinh_read_direct(struct qbman_swp *s,
101                 const struct qbman_eq_desc *d,
102                 const struct qbman_fd *fd,
103                 uint32_t *flags,
104                 int num_frames);
105 static int
106 qbman_swp_enqueue_multiple_cinh_direct(struct qbman_swp *s,
107                 const struct qbman_eq_desc *d,
108                 const struct qbman_fd *fd,
109                 uint32_t *flags,
110                 int num_frames);
111 static int
112 qbman_swp_enqueue_multiple_mem_back(struct qbman_swp *s,
113                 const struct qbman_eq_desc *d,
114                 const struct qbman_fd *fd,
115                 uint32_t *flags,
116                 int num_frames);
117
118 static int
119 qbman_swp_enqueue_multiple_fd_direct(struct qbman_swp *s,
120                 const struct qbman_eq_desc *d,
121                 struct qbman_fd **fd,
122                 uint32_t *flags,
123                 int num_frames);
124 static int
125 qbman_swp_enqueue_multiple_fd_cinh_read_direct(struct qbman_swp *s,
126                 const struct qbman_eq_desc *d,
127                 struct qbman_fd **fd,
128                 uint32_t *flags,
129                 int num_frames);
130 static int
131 qbman_swp_enqueue_multiple_fd_cinh_direct(struct qbman_swp *s,
132                 const struct qbman_eq_desc *d,
133                 struct qbman_fd **fd,
134                 uint32_t *flags,
135                 int num_frames);
136 static int
137 qbman_swp_enqueue_multiple_fd_mem_back(struct qbman_swp *s,
138                 const struct qbman_eq_desc *d,
139                 struct qbman_fd **fd,
140                 uint32_t *flags,
141                 int num_frames);
142
143 static int
144 qbman_swp_enqueue_multiple_desc_direct(struct qbman_swp *s,
145                 const struct qbman_eq_desc *d,
146                 const struct qbman_fd *fd,
147                 int num_frames);
148 static int
149 qbman_swp_enqueue_multiple_desc_cinh_read_direct(struct qbman_swp *s,
150                 const struct qbman_eq_desc *d,
151                 const struct qbman_fd *fd,
152                 int num_frames);
153 static int
154 qbman_swp_enqueue_multiple_desc_cinh_direct(struct qbman_swp *s,
155                 const struct qbman_eq_desc *d,
156                 const struct qbman_fd *fd,
157                 int num_frames);
158 static int
159 qbman_swp_enqueue_multiple_desc_mem_back(struct qbman_swp *s,
160                 const struct qbman_eq_desc *d,
161                 const struct qbman_fd *fd,
162                 int num_frames);
163
164 static int
165 qbman_swp_pull_direct(struct qbman_swp *s, struct qbman_pull_desc *d);
166 static int
167 qbman_swp_pull_cinh_direct(struct qbman_swp *s, struct qbman_pull_desc *d);
168 static int
169 qbman_swp_pull_mem_back(struct qbman_swp *s, struct qbman_pull_desc *d);
170
171 const struct qbman_result *qbman_swp_dqrr_next_direct(struct qbman_swp *s);
172 const struct qbman_result *qbman_swp_dqrr_next_cinh_direct(struct qbman_swp *s);
173 const struct qbman_result *qbman_swp_dqrr_next_mem_back(struct qbman_swp *s);
174
175 static int
176 qbman_swp_release_direct(struct qbman_swp *s,
177                 const struct qbman_release_desc *d,
178                 const uint64_t *buffers, unsigned int num_buffers);
179 static int
180 qbman_swp_release_cinh_direct(struct qbman_swp *s,
181                 const struct qbman_release_desc *d,
182                 const uint64_t *buffers, unsigned int num_buffers);
183 static int
184 qbman_swp_release_mem_back(struct qbman_swp *s,
185                 const struct qbman_release_desc *d,
186                 const uint64_t *buffers, unsigned int num_buffers);
187
188 /* Function pointers */
189 static int (*qbman_swp_enqueue_array_mode_ptr)(struct qbman_swp *s,
190                 const struct qbman_eq_desc *d,
191                 const struct qbman_fd *fd)
192         = qbman_swp_enqueue_array_mode_direct;
193
194 static int (*qbman_swp_enqueue_ring_mode_ptr)(struct qbman_swp *s,
195                 const struct qbman_eq_desc *d,
196                 const struct qbman_fd *fd)
197         = qbman_swp_enqueue_ring_mode_direct;
198
199 static int (*qbman_swp_enqueue_multiple_ptr)(struct qbman_swp *s,
200                 const struct qbman_eq_desc *d,
201                 const struct qbman_fd *fd,
202                 uint32_t *flags,
203                 int num_frames)
204         = qbman_swp_enqueue_multiple_direct;
205
206 static int (*qbman_swp_enqueue_multiple_fd_ptr)(struct qbman_swp *s,
207                 const struct qbman_eq_desc *d,
208                 struct qbman_fd **fd,
209                 uint32_t *flags,
210                 int num_frames)
211         = qbman_swp_enqueue_multiple_fd_direct;
212
213 static int (*qbman_swp_enqueue_multiple_desc_ptr)(struct qbman_swp *s,
214                 const struct qbman_eq_desc *d,
215                 const struct qbman_fd *fd,
216                 int num_frames)
217         = qbman_swp_enqueue_multiple_desc_direct;
218
219 static int (*qbman_swp_pull_ptr)(struct qbman_swp *s,
220                 struct qbman_pull_desc *d)
221         = qbman_swp_pull_direct;
222
223 const struct qbman_result *(*qbman_swp_dqrr_next_ptr)(struct qbman_swp *s)
224                 = qbman_swp_dqrr_next_direct;
225
226 static int (*qbman_swp_release_ptr)(struct qbman_swp *s,
227                         const struct qbman_release_desc *d,
228                         const uint64_t *buffers, unsigned int num_buffers)
229                         = qbman_swp_release_direct;
230
231 /*********************************/
232 /* Portal constructor/destructor */
233 /*********************************/
234
235 /* Software portals should always be in the power-on state when we initialise,
236  * due to the CCSR-based portal reset functionality that MC has.
237  *
238  * Erk! Turns out that QMan versions prior to 4.1 do not correctly reset DQRR
239  * valid-bits, so we need to support a workaround where we don't trust
240  * valid-bits when detecting new entries until any stale ring entries have been
241  * overwritten at least once. The idea is that we read PI for the first few
242  * entries, then switch to valid-bit after that. The trick is to clear the
243  * bug-work-around boolean once the PI wraps around the ring for the first time.
244  *
245  * Note: this still carries a slight additional cost once the decrementer hits
246  * zero.
247  */
/**
 * qbman_swp_init() - Create a software portal object from a descriptor.
 * @d: portal descriptor; copied into the new object.
 *
 * Allocates the portal state, pre-computes the SDQCR value, sizes the DQRR
 * and EQCR rings by QMan revision, maps the portal via qbman_swp_sys_init(),
 * and selects the enqueue/pull/dequeue/release implementation variants for
 * this QMan revision / access mode / SoC.
 *
 * Return: the new portal object, or NULL on allocation or mapping failure,
 * or if the portal is not in its clean power-on state.
 */
struct qbman_swp *qbman_swp_init(const struct qbman_swp_desc *d)
{
	int ret;
	uint32_t eqcr_pi;
	uint32_t mask_size;
	struct qbman_swp *p = malloc(sizeof(*p));

	if (!p)
		return NULL;

	memset(p, 0, sizeof(struct qbman_swp));

	p->desc = *d;
#ifdef QBMAN_CHECKING
	p->mc.check = swp_mc_can_start;
#endif
	p->mc.valid_bit = QB_VALID_BIT;
	/* Pre-compute the SDQCR value used once static dequeues are enabled:
	 * prio-ICS dequeue command type, up to 3 frames per command, and the
	 * fixed token that marks static-dequeue entries.
	 */
	p->sdq |= qbman_sdqcr_dct_prio_ics << QB_SDQCR_DCT_SHIFT;
	p->sdq |= qbman_sdqcr_fc_up_to_3 << QB_SDQCR_FC_SHIFT;
	p->sdq |= QMAN_SDQCR_TOKEN << QB_SDQCR_TOK_SHIFT;
	/* Memory-backed management-response ring tracks its own valid bit */
	if ((d->qman_version & QMAN_REV_MASK) >= QMAN_REV_5000
			&& (d->cena_access_mode == qman_cena_fastest_access))
		p->mr.valid_bit = QB_VALID_BIT;

	atomic_set(&p->vdq.busy, 1);
	p->vdq.valid_bit = QB_VALID_BIT;
	p->dqrr.valid_bit = QB_VALID_BIT;
	qman_version = p->desc.qman_version;
	/* QMan < 4.1 has a 4-entry DQRR and the valid-bit reset bug
	 * described in the comment above this function.
	 */
	if ((qman_version & QMAN_REV_MASK) < QMAN_REV_4100) {
		p->dqrr.dqrr_size = 4;
		p->dqrr.reset_bug = 1;
	} else {
		p->dqrr.dqrr_size = 8;
		p->dqrr.reset_bug = 0;
	}

	ret = qbman_swp_sys_init(&p->sys, d, p->dqrr.dqrr_size);
	if (ret) {
		free(p);
		pr_err("qbman_swp_sys_init() failed %d\n", ret);
		return NULL;
	}

	/* Verify that the DQRRPI is 0 - if it is not the portal isn't
	 * in default state which is an error
	 */
	if (qbman_cinh_read(&p->sys, QBMAN_CINH_SWP_DQPI) & 0xF) {
		pr_err("qbman DQRR PI is not zero, portal is not clean\n");
		free(p);
		return NULL;
	}

	/* SDQCR needs to be initialized to 0 when no channels are
	 * being dequeued from or else the QMan HW will indicate an
	 * error.  The values that were calculated above will be
	 * applied when dequeues from a specific channel are enabled.
	 */
	qbman_cinh_write(&p->sys, QBMAN_CINH_SWP_SDQCR, 0);

	/* Select implementation variants: memory-backed ("mem_back")
	 * accessors for QMan >= 5000 with fastest CENA access; otherwise the
	 * "direct" defaults installed statically remain in effect.
	 */
	p->eqcr.pi_ring_size = 8;
	if ((qman_version & QMAN_REV_MASK) >= QMAN_REV_5000
			&& (d->cena_access_mode == qman_cena_fastest_access)) {
		p->eqcr.pi_ring_size = 32;
		qbman_swp_enqueue_array_mode_ptr =
			qbman_swp_enqueue_array_mode_mem_back;
		qbman_swp_enqueue_ring_mode_ptr =
			qbman_swp_enqueue_ring_mode_mem_back;
		qbman_swp_enqueue_multiple_ptr =
			qbman_swp_enqueue_multiple_mem_back;
		qbman_swp_enqueue_multiple_fd_ptr =
			qbman_swp_enqueue_multiple_fd_mem_back;
		qbman_swp_enqueue_multiple_desc_ptr =
			qbman_swp_enqueue_multiple_desc_mem_back;
		qbman_swp_pull_ptr = qbman_swp_pull_mem_back;
		qbman_swp_dqrr_next_ptr = qbman_swp_dqrr_next_mem_back;
		qbman_swp_release_ptr = qbman_swp_release_mem_back;
	}

	/* LS1080A SoC family needs the cinh-read enqueue variants */
	if (dpaa2_svr_family == SVR_LS1080A) {
		qbman_swp_enqueue_ring_mode_ptr =
			qbman_swp_enqueue_ring_mode_cinh_read_direct;
		qbman_swp_enqueue_multiple_ptr =
			qbman_swp_enqueue_multiple_cinh_read_direct;
		qbman_swp_enqueue_multiple_fd_ptr =
			qbman_swp_enqueue_multiple_fd_cinh_read_direct;
		qbman_swp_enqueue_multiple_desc_ptr =
			qbman_swp_enqueue_multiple_desc_cinh_read_direct;
	}

	/* Build a mask wide enough for the ring index plus the wrap bit */
	for (mask_size = p->eqcr.pi_ring_size; mask_size > 0; mask_size >>= 1)
		p->eqcr.pi_ci_mask = (p->eqcr.pi_ci_mask<<1) + 1;
	eqcr_pi = qbman_cinh_read(&p->sys, QBMAN_CINH_SWP_EQCR_PI);
	p->eqcr.pi = eqcr_pi & p->eqcr.pi_ci_mask;
	p->eqcr.pi_vb = eqcr_pi & QB_VALID_BIT;
	/* NOTE(review): the fast-access path reads EQCR_PI (not EQCR_CI)
	 * for the consumer index - presumably intentional for the
	 * memory-backed portal layout; confirm against the QBMan block guide.
	 */
	if ((p->desc.qman_version & QMAN_REV_MASK) >= QMAN_REV_5000
			&& (d->cena_access_mode == qman_cena_fastest_access))
		p->eqcr.ci = qbman_cinh_read(&p->sys, QBMAN_CINH_SWP_EQCR_PI)
					     & p->eqcr.pi_ci_mask;
	else
		p->eqcr.ci = qbman_cinh_read(&p->sys, QBMAN_CINH_SWP_EQCR_CI)
					     & p->eqcr.pi_ci_mask;
	p->eqcr.available = p->eqcr.pi_ring_size -
				qm_cyc_diff(p->eqcr.pi_ring_size,
				p->eqcr.ci & (p->eqcr.pi_ci_mask<<1),
				p->eqcr.pi & (p->eqcr.pi_ci_mask<<1));

	/* Register the portal so pull responses can be routed by token */
	portal_idx_map[p->desc.idx] = p;
	return p;
}
357
358 int qbman_swp_update(struct qbman_swp *p, int stash_off)
359 {
360         const struct qbman_swp_desc *d = &p->desc;
361         struct qbman_swp_sys *s = &p->sys;
362         int ret;
363
364         /* Nothing needs to be done for QBMAN rev > 5000 with fast access */
365         if ((qman_version & QMAN_REV_MASK) >= QMAN_REV_5000
366                         && (d->cena_access_mode == qman_cena_fastest_access))
367                 return 0;
368
369         ret = qbman_swp_sys_update(s, d, p->dqrr.dqrr_size, stash_off);
370         if (ret) {
371                 pr_err("qbman_swp_sys_init() failed %d\n", ret);
372                 return ret;
373         }
374
375         p->stash_off = stash_off;
376
377         return 0;
378 }
379
/* Tear down a software portal: unmap the portal registers, drop it from
 * the token->portal map, and free the object.
 */
void qbman_swp_finish(struct qbman_swp *p)
{
#ifdef QBMAN_CHECKING
	QBMAN_BUG_ON(p->mc.check != swp_mc_can_start);
#endif
	qbman_swp_sys_finish(&p->sys);
	portal_idx_map[p->desc.idx] = NULL;
	free(p);
}
389
/* Return the descriptor this portal was initialised with */
const struct qbman_swp_desc *qbman_swp_get_desc(struct qbman_swp *p)
{
	return &p->desc;
}
394
395 /**************/
396 /* Interrupts */
397 /**************/
398
/* Read the interrupt status disable ("vanish") register (ISDR) */
uint32_t qbman_swp_interrupt_get_vanish(struct qbman_swp *p)
{
	return qbman_cinh_read(&p->sys, QBMAN_CINH_SWP_ISDR);
}
403
/* Write the interrupt status disable ("vanish") register (ISDR) */
void qbman_swp_interrupt_set_vanish(struct qbman_swp *p, uint32_t mask)
{
	qbman_cinh_write(&p->sys, QBMAN_CINH_SWP_ISDR, mask);
}
408
/* Read the interrupt status register (ISR) */
uint32_t qbman_swp_interrupt_read_status(struct qbman_swp *p)
{
	return qbman_cinh_read(&p->sys, QBMAN_CINH_SWP_ISR);
}
413
/* Clear interrupt status bits by writing the mask to ISR (write-1-to-clear
 * semantics assumed from HW usage - confirm against the block guide).
 */
void qbman_swp_interrupt_clear_status(struct qbman_swp *p, uint32_t mask)
{
	qbman_cinh_write(&p->sys, QBMAN_CINH_SWP_ISR, mask);
}
418
/* Read the DQRR interrupt threshold register (DQRR_ITR) */
uint32_t qbman_swp_dqrr_thrshld_read_status(struct qbman_swp *p)
{
	return qbman_cinh_read(&p->sys, QBMAN_CINH_SWP_DQRR_ITR);
}
423
/* Write the DQRR interrupt threshold register (DQRR_ITR) */
void qbman_swp_dqrr_thrshld_write(struct qbman_swp *p, uint32_t mask)
{
	qbman_cinh_write(&p->sys, QBMAN_CINH_SWP_DQRR_ITR, mask);
}
428
/* Read the interrupt timeout period register (ITPR) */
uint32_t qbman_swp_intr_timeout_read_status(struct qbman_swp *p)
{
	return qbman_cinh_read(&p->sys, QBMAN_CINH_SWP_ITPR);
}
433
/* Write the interrupt timeout period register (ITPR) */
void qbman_swp_intr_timeout_write(struct qbman_swp *p, uint32_t mask)
{
	qbman_cinh_write(&p->sys, QBMAN_CINH_SWP_ITPR, mask);
}
438
/* Read the interrupt enable register (IER) */
uint32_t qbman_swp_interrupt_get_trigger(struct qbman_swp *p)
{
	return qbman_cinh_read(&p->sys, QBMAN_CINH_SWP_IER);
}
443
/* Write the interrupt enable register (IER) */
void qbman_swp_interrupt_set_trigger(struct qbman_swp *p, uint32_t mask)
{
	qbman_cinh_write(&p->sys, QBMAN_CINH_SWP_IER, mask);
}
448
/* Read the interrupt inhibit register (IIR) */
int qbman_swp_interrupt_get_inhibit(struct qbman_swp *p)
{
	return qbman_cinh_read(&p->sys, QBMAN_CINH_SWP_IIR);
}
453
/* Inhibit (all-ones) or un-inhibit (zero) portal interrupts via IIR */
void qbman_swp_interrupt_set_inhibit(struct qbman_swp *p, int inhibit)
{
	qbman_cinh_write(&p->sys, QBMAN_CINH_SWP_IIR,
			 inhibit ? 0xffffffff : 0);
}
459
460 /***********************/
461 /* Management commands */
462 /***********************/
463
464 /*
465  * Internal code common to all types of management commands.
466  */
467
/* Begin a management command: returns a pointer to the command buffer for
 * the caller to fill in before qbman_swp_mc_submit().  Uses the
 * memory-backed command register (CR_MEM) on QMan >= 5000 with fastest
 * CENA access, the regular CR otherwise.  Returns NULL if the write
 * cannot be started.
 */
void *qbman_swp_mc_start(struct qbman_swp *p)
{
	void *ret;
#ifdef QBMAN_CHECKING
	QBMAN_BUG_ON(p->mc.check != swp_mc_can_start);
#endif
	if ((p->desc.qman_version & QMAN_REV_MASK) >= QMAN_REV_5000
		    && (p->desc.cena_access_mode == qman_cena_fastest_access))
		ret = qbman_cena_write_start(&p->sys, QBMAN_CENA_SWP_CR_MEM);
	else
		ret = qbman_cena_write_start(&p->sys, QBMAN_CENA_SWP_CR);
#ifdef QBMAN_CHECKING
	if (!ret)
		p->mc.check = swp_mc_can_submit;
#endif
	return ret;
}
485
/* Submit a management command previously started with qbman_swp_mc_start().
 * The verb byte is OR'd with the appropriate valid bit and the write is
 * ordered with a DMA write barrier: on the memory-backed path the command
 * body lands first and the CR_RT doorbell write publishes it; on the
 * direct path the barrier precedes the verb write so the verb is seen last.
 */
void qbman_swp_mc_submit(struct qbman_swp *p, void *cmd, uint8_t cmd_verb)
{
	uint8_t *v = cmd;
#ifdef QBMAN_CHECKING
	QBMAN_BUG_ON(!(p->mc.check != swp_mc_can_submit));
#endif
	/* TBD: "|=" is going to hurt performance. Need to move as many fields
	 * out of word zero, and for those that remain, the "OR" needs to occur
	 * at the caller side. This debug check helps to catch cases where the
	 * caller wants to OR but has forgotten to do so.
	 */
	QBMAN_BUG_ON((*v & cmd_verb) != *v);
	if ((p->desc.qman_version & QMAN_REV_MASK) >= QMAN_REV_5000
		    && (p->desc.cena_access_mode == qman_cena_fastest_access)) {
		*v = cmd_verb | p->mr.valid_bit;
		qbman_cena_write_complete(&p->sys, QBMAN_CENA_SWP_CR_MEM, cmd);
		dma_wmb();
		qbman_cinh_write(&p->sys, QBMAN_CINH_SWP_CR_RT, QMAN_RT_MODE);
	} else {
		dma_wmb();
		*v = cmd_verb | p->mc.valid_bit;
		qbman_cena_write_complete(&p->sys, QBMAN_CENA_SWP_CR, cmd);
		clean(cmd);
	}
#ifdef QBMAN_CHECKING
	p->mc.check = swp_mc_can_poll;
#endif
}
514
/* Submit a management command through the cache-inhibited interface.
 * Same contract as qbman_swp_mc_submit(), but the command body is written
 * via qbman_cinh_write_complete() rather than the CENA path.
 */
void qbman_swp_mc_submit_cinh(struct qbman_swp *p, void *cmd, uint8_t cmd_verb)
{
	uint8_t *v = cmd;
#ifdef QBMAN_CHECKING
	QBMAN_BUG_ON(!(p->mc.check != swp_mc_can_submit));
#endif
	/* TBD: "|=" is going to hurt performance. Need to move as many fields
	 * out of word zero, and for those that remain, the "OR" needs to occur
	 * at the caller side. This debug check helps to catch cases where the
	 * caller wants to OR but has forgotten to do so.
	 */
	QBMAN_BUG_ON((*v & cmd_verb) != *v);
	/* Barrier before the verb write so the command body is visible first */
	dma_wmb();
	*v = cmd_verb | p->mc.valid_bit;
	qbman_cinh_write_complete(&p->sys, QBMAN_CENA_SWP_CR, cmd);
	clean(cmd);
#ifdef QBMAN_CHECKING
	p->mc.check = swp_mc_can_poll;
#endif
}
535
/* Poll for the result of a submitted management command.
 * Returns NULL while the command is still in flight (memory-backed path:
 * valid bit not yet toggled, or verb still zero; direct path: verb still
 * zero), otherwise returns a pointer to the response and flips the
 * expected valid bit ready for the next command.
 */
void *qbman_swp_mc_result(struct qbman_swp *p)
{
	uint32_t *ret, verb;
#ifdef QBMAN_CHECKING
	QBMAN_BUG_ON(p->mc.check != swp_mc_can_poll);
#endif
	if ((p->desc.qman_version & QMAN_REV_MASK) >= QMAN_REV_5000
		&& (p->desc.cena_access_mode == qman_cena_fastest_access)) {
		ret = qbman_cena_read(&p->sys, QBMAN_CENA_SWP_RR_MEM);
		/* Command completed if the valid bit is toggled */
		if (p->mr.valid_bit != (ret[0] & QB_VALID_BIT))
			return NULL;
		/* Remove the valid-bit -
		 * command completed iff the rest is non-zero
		 */
		verb = ret[0] & ~QB_VALID_BIT;
		if (!verb)
			return NULL;
		p->mr.valid_bit ^= QB_VALID_BIT;
	} else {
		qbman_cena_invalidate_prefetch(&p->sys,
			QBMAN_CENA_SWP_RR(p->mc.valid_bit));
		ret = qbman_cena_read(&p->sys,
				      QBMAN_CENA_SWP_RR(p->mc.valid_bit));
		/* Remove the valid-bit -
		 * command completed iff the rest is non-zero
		 */
		verb = ret[0] & ~QB_VALID_BIT;
		if (!verb)
			return NULL;
		p->mc.valid_bit ^= QB_VALID_BIT;
	}
#ifdef QBMAN_CHECKING
	p->mc.check = swp_mc_can_start;
#endif
	return ret;
}
573
/* Poll for a management command result via the cache-inhibited interface.
 * Same contract as the direct path of qbman_swp_mc_result(): NULL while
 * the verb is still zero, otherwise the response pointer, with the
 * expected valid bit flipped for the next command.
 */
void *qbman_swp_mc_result_cinh(struct qbman_swp *p)
{
	uint32_t *ret, verb;
#ifdef QBMAN_CHECKING
	QBMAN_BUG_ON(p->mc.check != swp_mc_can_poll);
#endif
	ret = qbman_cinh_read_shadow(&p->sys,
			      QBMAN_CENA_SWP_RR(p->mc.valid_bit));
	/* Remove the valid-bit -
	 * command completed iff the rest is non-zero
	 */
	verb = ret[0] & ~QB_VALID_BIT;
	if (!verb)
		return NULL;
	p->mc.valid_bit ^= QB_VALID_BIT;
#ifdef QBMAN_CHECKING
	p->mc.check = swp_mc_can_start;
#endif
	return ret;
}
594
595 /***********/
596 /* Enqueue */
597 /***********/
598
599 #define QB_ENQUEUE_CMD_OPTIONS_SHIFT    0
600 enum qb_enqueue_commands {
601         enqueue_empty = 0,
602         enqueue_response_always = 1,
603         enqueue_rejects_to_fq = 2
604 };
605
606 #define QB_ENQUEUE_CMD_EC_OPTION_MASK        0x3
607 #define QB_ENQUEUE_CMD_ORP_ENABLE_SHIFT      2
608 #define QB_ENQUEUE_CMD_IRQ_ON_DISPATCH_SHIFT 3
609 #define QB_ENQUEUE_CMD_TARGET_TYPE_SHIFT     4
610 #define QB_ENQUEUE_CMD_DCA_PK_SHIFT          6
611 #define QB_ENQUEUE_CMD_DCA_EN_SHIFT          7
612 #define QB_ENQUEUE_CMD_NLIS_SHIFT            14
613 #define QB_ENQUEUE_CMD_IS_NESN_SHIFT         15
614
/* Reset an enqueue descriptor to its all-zero default state */
void qbman_eq_desc_clear(struct qbman_eq_desc *d)
{
	memset(d, 0, sizeof(*d));
}
619
620 void qbman_eq_desc_set_no_orp(struct qbman_eq_desc *d, int respond_success)
621 {
622         d->eq.verb &= ~(1 << QB_ENQUEUE_CMD_ORP_ENABLE_SHIFT);
623         if (respond_success)
624                 d->eq.verb |= enqueue_response_always;
625         else
626                 d->eq.verb |= enqueue_rejects_to_fq;
627 }
628
629 void qbman_eq_desc_set_orp(struct qbman_eq_desc *d, int respond_success,
630                            uint16_t opr_id, uint16_t seqnum, int incomplete)
631 {
632         d->eq.verb |= 1 << QB_ENQUEUE_CMD_ORP_ENABLE_SHIFT;
633         if (respond_success)
634                 d->eq.verb |= enqueue_response_always;
635         else
636                 d->eq.verb |= enqueue_rejects_to_fq;
637
638         d->eq.orpid = opr_id;
639         d->eq.seqnum = seqnum;
640         if (incomplete)
641                 d->eq.seqnum |= 1 << QB_ENQUEUE_CMD_NLIS_SHIFT;
642         else
643                 d->eq.seqnum &= ~(1 << QB_ENQUEUE_CMD_NLIS_SHIFT);
644 }
645
646 void qbman_eq_desc_set_orp_hole(struct qbman_eq_desc *d, uint16_t opr_id,
647                                 uint16_t seqnum)
648 {
649         d->eq.verb |= 1 << QB_ENQUEUE_CMD_ORP_ENABLE_SHIFT;
650         d->eq.verb &= ~QB_ENQUEUE_CMD_EC_OPTION_MASK;
651         d->eq.orpid = opr_id;
652         d->eq.seqnum = seqnum;
653         d->eq.seqnum &= ~(1 << QB_ENQUEUE_CMD_NLIS_SHIFT);
654         d->eq.seqnum &= ~(1 << QB_ENQUEUE_CMD_IS_NESN_SHIFT);
655 }
656
657 void qbman_eq_desc_set_orp_nesn(struct qbman_eq_desc *d, uint16_t opr_id,
658                                 uint16_t seqnum)
659 {
660         d->eq.verb |= 1 << QB_ENQUEUE_CMD_ORP_ENABLE_SHIFT;
661         d->eq.verb &= ~QB_ENQUEUE_CMD_EC_OPTION_MASK;
662         d->eq.orpid = opr_id;
663         d->eq.seqnum = seqnum;
664         d->eq.seqnum &= ~(1 << QB_ENQUEUE_CMD_NLIS_SHIFT);
665         d->eq.seqnum |= 1 << QB_ENQUEUE_CMD_IS_NESN_SHIFT;
666 }
667
/* Set the DMA address where the enqueue response is written, and whether
 * the write should be stashed (wae).
 */
void qbman_eq_desc_set_response(struct qbman_eq_desc *d,
				dma_addr_t storage_phys,
				int stash)
{
	d->eq.rsp_addr = storage_phys;
	d->eq.wae = stash;
}
675
/* Set the token returned in the enqueue response (rspid) */
void qbman_eq_desc_set_token(struct qbman_eq_desc *d, uint8_t token)
{
	d->eq.rspid = token;
}
680
/* Target a frame queue directly: clear the target-type bit and store
 * the FQID as the target id.
 */
void qbman_eq_desc_set_fq(struct qbman_eq_desc *d, uint32_t fqid)
{
	d->eq.verb &= ~(1 << QB_ENQUEUE_CMD_TARGET_TYPE_SHIFT);
	d->eq.tgtid = fqid;
}
686
/* Target a queuing destination: set the target-type bit and store the
 * QD id, bin and priority.
 */
void qbman_eq_desc_set_qd(struct qbman_eq_desc *d, uint32_t qdid,
			  uint16_t qd_bin, uint8_t qd_prio)
{
	d->eq.verb |= 1 << QB_ENQUEUE_CMD_TARGET_TYPE_SHIFT;
	d->eq.tgtid = qdid;
	d->eq.qdbin = qd_bin;
	d->eq.qpri = qd_prio;
}
695
696 void qbman_eq_desc_set_eqdi(struct qbman_eq_desc *d, int enable)
697 {
698         if (enable)
699                 d->eq.verb |= 1 << QB_ENQUEUE_CMD_IRQ_ON_DISPATCH_SHIFT;
700         else
701                 d->eq.verb &= ~(1 << QB_ENQUEUE_CMD_IRQ_ON_DISPATCH_SHIFT);
702 }
703
704 void qbman_eq_desc_set_dca(struct qbman_eq_desc *d, int enable,
705                            uint8_t dqrr_idx, int park)
706 {
707         if (enable) {
708                 d->eq.dca = dqrr_idx;
709                 if (park)
710                         d->eq.dca |= 1 << QB_ENQUEUE_CMD_DCA_PK_SHIFT;
711                 else
712                         d->eq.dca &= ~(1 << QB_ENQUEUE_CMD_DCA_PK_SHIFT);
713                 d->eq.dca |= 1 << QB_ENQUEUE_CMD_DCA_EN_SHIFT;
714         } else {
715                 d->eq.dca &= ~(1 << QB_ENQUEUE_CMD_DCA_EN_SHIFT);
716         }
717 }
718
719 #define EQAR_IDX(eqar)     ((eqar) & 0x1f)
720 #define EQAR_VB(eqar)      ((eqar) & 0x80)
721 #define EQAR_SUCCESS(eqar) ((eqar) & 0x100)
722
723 static inline void qbman_write_eqcr_am_rt_register(struct qbman_swp *p,
724                                                    uint8_t idx)
725 {
726         if (idx < 16)
727                 qbman_cinh_write(&p->sys, QBMAN_CINH_SWP_EQCR_AM_RT + idx * 4,
728                                      QMAN_RT_MODE);
729         else
730                 qbman_cinh_write(&p->sys, QBMAN_CINH_SWP_EQCR_AM_RT2 +
731                                      (idx - 16) * 4,
732                                      QMAN_RT_MODE);
733 }
734
/* Copy n bytes one at a time through a volatile destination pointer,
 * preventing the compiler from widening or reordering the stores.
 */
static void memcpy_byte_by_byte(void *to, const void *from, size_t n)
{
	volatile uint8_t *dst = to;
	const uint8_t *src = from;
	const uint8_t *end = src + n;

	while (src != end)
		*dst++ = *src++;
}
744
745
/* Enqueue one frame in array mode via direct (CENA) portal access.
 * Claims a slot through the EQAR allocation register, copies the command
 * and frame descriptor into it, then publishes by writing the verb byte
 * (with the slot's valid bit) after a DMA write barrier.
 * Returns 0 on success, -EBUSY if no slot is available.
 */
static int qbman_swp_enqueue_array_mode_direct(struct qbman_swp *s,
					       const struct qbman_eq_desc *d,
					       const struct qbman_fd *fd)
{
	uint32_t *p;
	const uint32_t *cl = qb_cl(d);
	uint32_t eqar = qbman_cinh_read(&s->sys, QBMAN_CINH_SWP_EQAR);

	pr_debug("EQAR=%08x\n", eqar);
	if (!EQAR_SUCCESS(eqar))
		return -EBUSY;
	p = qbman_cena_write_start_wo_shadow(&s->sys,
			QBMAN_CENA_SWP_EQCR(EQAR_IDX(eqar)));
	/* Word 0 (the verb) is written last; copy words 1-7 then the FD */
	memcpy(&p[1], &cl[1], 28);
	memcpy(&p[8], fd, sizeof(*fd));

	/* Set the verb byte, have to substitute in the valid-bit */
	dma_wmb();
	p[0] = cl[0] | EQAR_VB(eqar);
	qbman_cena_write_complete_wo_shadow(&s->sys,
				QBMAN_CENA_SWP_EQCR(EQAR_IDX(eqar)));
	return 0;
}
/* Enqueue one frame in array mode on a memory-backed portal.
 * Same flow as the direct variant, except the verb is written before the
 * barrier and the enqueue is kicked off by ringing the per-slot EQCR
 * ready-to-send doorbell. Returns 0 on success, -EBUSY if no slot.
 */
static int qbman_swp_enqueue_array_mode_mem_back(struct qbman_swp *s,
						 const struct qbman_eq_desc *d,
						 const struct qbman_fd *fd)
{
	uint32_t *p;
	const uint32_t *cl = qb_cl(d);
	uint32_t eqar = qbman_cinh_read(&s->sys, QBMAN_CINH_SWP_EQAR);

	pr_debug("EQAR=%08x\n", eqar);
	if (!EQAR_SUCCESS(eqar))
		return -EBUSY;
	p = qbman_cena_write_start_wo_shadow(&s->sys,
			QBMAN_CENA_SWP_EQCR(EQAR_IDX(eqar)));
	memcpy(&p[1], &cl[1], 28);
	memcpy(&p[8], fd, sizeof(*fd));

	/* Set the verb byte, have to substitute in the valid-bit */
	p[0] = cl[0] | EQAR_VB(eqar);
	/* Ensure the command is fully in memory before ringing the doorbell */
	dma_wmb();
	qbman_write_eqcr_am_rt_register(s, EQAR_IDX(eqar));
	return 0;
}
791
/* Enqueue one frame in array mode via the implementation variant selected
 * at qbman_swp_init() time (direct or mem_back).
 */
static inline int qbman_swp_enqueue_array_mode(struct qbman_swp *s,
					       const struct qbman_eq_desc *d,
					       const struct qbman_fd *fd)
{
	return qbman_swp_enqueue_array_mode_ptr(s, d, fd);
}
798
/* Enqueue one frame in ring mode via direct (CENA) portal access.
 * If the local credit count is exhausted, refresh the cached consumer
 * index from EQCR_CI; return -EBUSY if the ring is still full.  The verb
 * byte (with the producer valid bit) is written only after an lwsync so
 * hardware sees a complete entry.
 */
static int qbman_swp_enqueue_ring_mode_direct(struct qbman_swp *s,
					      const struct qbman_eq_desc *d,
					      const struct qbman_fd *fd)
{
	uint32_t *p;
	const uint32_t *cl = qb_cl(d);
	uint32_t eqcr_ci, full_mask, half_mask;

	/* half_mask indexes ring entries; full_mask additionally covers the
	 * wrap bit carried in pi/ci.
	 */
	half_mask = (s->eqcr.pi_ci_mask>>1);
	full_mask = s->eqcr.pi_ci_mask;
	if (!s->eqcr.available) {
		eqcr_ci = s->eqcr.ci;
		s->eqcr.ci = qbman_cena_read_reg(&s->sys,
				QBMAN_CENA_SWP_EQCR_CI) & full_mask;
		s->eqcr.available = qm_cyc_diff(s->eqcr.pi_ring_size,
				eqcr_ci, s->eqcr.ci);
		if (!s->eqcr.available)
			return -EBUSY;
	}

	p = qbman_cena_write_start_wo_shadow(&s->sys,
			QBMAN_CENA_SWP_EQCR(s->eqcr.pi & half_mask));
	/* Word 0 (the verb) is published last, after the barrier below */
	memcpy(&p[1], &cl[1], 28);
	memcpy(&p[8], fd, sizeof(*fd));
	lwsync();

	/* Set the verb byte, have to substitute in the valid-bit */
	p[0] = cl[0] | s->eqcr.pi_vb;
	qbman_cena_write_complete_wo_shadow(&s->sys,
			QBMAN_CENA_SWP_EQCR(s->eqcr.pi & half_mask));
	s->eqcr.pi++;
	s->eqcr.pi &= full_mask;
	s->eqcr.available--;
	/* Toggle the valid bit each time the producer index wraps the ring */
	if (!(s->eqcr.pi & half_mask))
		s->eqcr.pi_vb ^= QB_VALID_BIT;

	return 0;
}
837
/* Ring-mode single-frame enqueue variant that reads the EQCR consumer
 * index through the cache-inhibited (CINH) mapping and writes the ring
 * entry via qbman_cinh_write_start_wo_shadow (per the surrounding code
 * this variant serves platforms such as LS1088 — confirm with the portal
 * selection logic elsewhere in this file).
 *
 * Returns 0 on success, -EBUSY when no EQCR entry is free.
 */
static int qbman_swp_enqueue_ring_mode_cinh_read_direct(
		struct qbman_swp *s,
		const struct qbman_eq_desc *d,
		const struct qbman_fd *fd)
{
	uint32_t *p;
	const uint32_t *cl = qb_cl(d);
	uint32_t eqcr_ci, full_mask, half_mask;

	half_mask = (s->eqcr.pi_ci_mask>>1);
	full_mask = s->eqcr.pi_ci_mask;
	if (!s->eqcr.available) {
		/* Refresh free-entry count from the CINH consumer index */
		eqcr_ci = s->eqcr.ci;
		s->eqcr.ci = qbman_cinh_read(&s->sys,
				QBMAN_CINH_SWP_EQCR_CI) & full_mask;
		s->eqcr.available = qm_cyc_diff(s->eqcr.pi_ring_size,
				eqcr_ci, s->eqcr.ci);
		if (!s->eqcr.available)
			return -EBUSY;
	}

	p = qbman_cinh_write_start_wo_shadow(&s->sys,
			QBMAN_CENA_SWP_EQCR(s->eqcr.pi & half_mask));
	/* Payload first (words 1..7 + FD at word 8); verb written last */
	memcpy(&p[1], &cl[1], 28);
	memcpy(&p[8], fd, sizeof(*fd));
	lwsync();	/* order payload stores before the verb store */

	/* Set the verb byte, have to substitute in the valid-bit */
	p[0] = cl[0] | s->eqcr.pi_vb;
	s->eqcr.pi++;
	s->eqcr.pi &= full_mask;
	s->eqcr.available--;
	/* Valid bit toggles on each ring wrap */
	if (!(s->eqcr.pi & half_mask))
		s->eqcr.pi_vb ^= QB_VALID_BIT;

	return 0;
}
875
/* Ring-mode single-frame enqueue performed entirely through the
 * cache-inhibited (CINH) mapping. Unlike the cinh_read variant, the
 * payload is copied with memcpy_byte_by_byte (presumably to keep the
 * stores to uncached I/O memory narrow and ordered — confirm against
 * qbman_sys.h).
 *
 * Returns 0 on success, -EBUSY when no EQCR entry is free.
 */
static int qbman_swp_enqueue_ring_mode_cinh_direct(
		struct qbman_swp *s,
		const struct qbman_eq_desc *d,
		const struct qbman_fd *fd)
{
	uint32_t *p;
	const uint32_t *cl = qb_cl(d);
	uint32_t eqcr_ci, full_mask, half_mask;

	half_mask = (s->eqcr.pi_ci_mask>>1);
	full_mask = s->eqcr.pi_ci_mask;
	if (!s->eqcr.available) {
		/* Refresh free-entry count from the CINH consumer index */
		eqcr_ci = s->eqcr.ci;
		s->eqcr.ci = qbman_cinh_read(&s->sys,
				QBMAN_CINH_SWP_EQCR_CI) & full_mask;
		s->eqcr.available = qm_cyc_diff(s->eqcr.pi_ring_size,
				eqcr_ci, s->eqcr.ci);
		if (!s->eqcr.available)
			return -EBUSY;
	}

	p = qbman_cinh_write_start_wo_shadow(&s->sys,
			QBMAN_CENA_SWP_EQCR(s->eqcr.pi & half_mask));
	/* Payload first (words 1..7 + FD at word 8); verb written last */
	memcpy_byte_by_byte(&p[1], &cl[1], 28);
	memcpy_byte_by_byte(&p[8], fd, sizeof(*fd));
	lwsync();	/* order payload stores before the verb store */

	/* Set the verb byte, have to substitute in the valid-bit */
	p[0] = cl[0] | s->eqcr.pi_vb;
	s->eqcr.pi++;
	s->eqcr.pi &= full_mask;
	s->eqcr.available--;
	/* Valid bit toggles on each ring wrap */
	if (!(s->eqcr.pi & half_mask))
		s->eqcr.pi_vb ^= QB_VALID_BIT;

	return 0;
}
913
/* Ring-mode single-frame enqueue for memory-backed portals: the consumer
 * index is read from the memory-backed mirror (EQCR_CI_MEMBACK) and the
 * new producer index is pushed to hardware via a CINH register write with
 * QB_RT_BIT set, instead of relying on cacheline flush completion.
 *
 * Returns 0 on success, -EBUSY when no EQCR entry is free.
 */
static int qbman_swp_enqueue_ring_mode_mem_back(struct qbman_swp *s,
						const struct qbman_eq_desc *d,
						const struct qbman_fd *fd)
{
	uint32_t *p;
	const uint32_t *cl = qb_cl(d);
	uint32_t eqcr_ci, full_mask, half_mask;

	half_mask = (s->eqcr.pi_ci_mask>>1);
	full_mask = s->eqcr.pi_ci_mask;
	if (!s->eqcr.available) {
		/* Refresh free-entry count from the mem-backed CI mirror */
		eqcr_ci = s->eqcr.ci;
		s->eqcr.ci = qbman_cena_read_reg(&s->sys,
				QBMAN_CENA_SWP_EQCR_CI_MEMBACK) & full_mask;
		s->eqcr.available = qm_cyc_diff(s->eqcr.pi_ring_size,
				eqcr_ci, s->eqcr.ci);
		if (!s->eqcr.available)
			return -EBUSY;
	}

	p = qbman_cena_write_start_wo_shadow(&s->sys,
			QBMAN_CENA_SWP_EQCR(s->eqcr.pi & half_mask));
	/* Words 1..7 of the command, then the FD at word 8 */
	memcpy(&p[1], &cl[1], 28);
	memcpy(&p[8], fd, sizeof(*fd));

	/* Set the verb byte, have to substitute in the valid-bit */
	p[0] = cl[0] | s->eqcr.pi_vb;
	s->eqcr.pi++;
	s->eqcr.pi &= full_mask;
	s->eqcr.available--;
	/* Valid bit toggles on each ring wrap */
	if (!(s->eqcr.pi & half_mask))
		s->eqcr.pi_vb ^= QB_VALID_BIT;
	dma_wmb();	/* complete the entry before ringing the PI register */
	qbman_cinh_write(&s->sys, QBMAN_CINH_SWP_EQCR_PI,
				(QB_RT_BIT)|(s->eqcr.pi)|s->eqcr.pi_vb);
	return 0;
}
951
952 static int qbman_swp_enqueue_ring_mode(struct qbman_swp *s,
953                                        const struct qbman_eq_desc *d,
954                                        const struct qbman_fd *fd)
955 {
956         if (!s->stash_off)
957                 return qbman_swp_enqueue_ring_mode_ptr(s, d, fd);
958         else
959                 return qbman_swp_enqueue_ring_mode_cinh_direct(s, d, fd);
960 }
961
962 int qbman_swp_enqueue(struct qbman_swp *s, const struct qbman_eq_desc *d,
963                       const struct qbman_fd *fd)
964 {
965         if (s->sys.eqcr_mode == qman_eqcr_vb_array)
966                 return qbman_swp_enqueue_array_mode(s, d, fd);
967         else    /* Use ring mode by default */
968                 return qbman_swp_enqueue_ring_mode(s, d, fd);
969 }
970
/* Burst enqueue of up to num_frames FDs sharing one enqueue descriptor,
 * via the cache-enabled (CENA) mapping.
 *
 * @flags: optional per-frame flags; when QBMAN_ENQUEUE_FLAG_DCA is set the
 *         entry's DCA field is patched with the flag's index bits.
 *
 * Returns the number of frames actually enqueued (0 when the ring is full).
 */
static int qbman_swp_enqueue_multiple_direct(struct qbman_swp *s,
					     const struct qbman_eq_desc *d,
					     const struct qbman_fd *fd,
					     uint32_t *flags,
					     int num_frames)
{
	uint32_t *p = NULL;
	const uint32_t *cl = qb_cl(d);
	uint32_t eqcr_ci, eqcr_pi, half_mask, full_mask;
	int i, num_enqueued = 0;
	uint64_t addr_cena;

	half_mask = (s->eqcr.pi_ci_mask>>1);
	full_mask = s->eqcr.pi_ci_mask;
	if (!s->eqcr.available) {
		/* Refresh free-entry count from the hardware CI */
		eqcr_ci = s->eqcr.ci;
		s->eqcr.ci = qbman_cena_read_reg(&s->sys,
				QBMAN_CENA_SWP_EQCR_CI) & full_mask;
		s->eqcr.available = qm_cyc_diff(s->eqcr.pi_ring_size,
				eqcr_ci, s->eqcr.ci);
		if (!s->eqcr.available)
			return 0;
	}

	eqcr_pi = s->eqcr.pi;
	num_enqueued = (s->eqcr.available < num_frames) ?
			s->eqcr.available : num_frames;
	s->eqcr.available -= num_enqueued;
	/* Fill in the EQCR ring */
	for (i = 0; i < num_enqueued; i++) {
		p = qbman_cena_write_start_wo_shadow(&s->sys,
				QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
		/* Payload only here; verb (word 0) set in the second pass */
		memcpy(&p[1], &cl[1], 28);
		memcpy(&p[8], &fd[i], sizeof(*fd));
		eqcr_pi++;
	}

	lwsync();	/* payload stores must precede all verb stores */

	/* Set the verb byte, have to substitute in the valid-bit */
	eqcr_pi = s->eqcr.pi;
	for (i = 0; i < num_enqueued; i++) {
		p = qbman_cena_write_start_wo_shadow(&s->sys,
				QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
		p[0] = cl[0] | s->eqcr.pi_vb;
		if (flags && (flags[i] & QBMAN_ENQUEUE_FLAG_DCA)) {
			/* NOTE(review): this local 'd' shadows the function
			 * parameter 'd'; it aliases the ring entry.
			 */
			struct qbman_eq_desc *d = (struct qbman_eq_desc *)p;

			d->eq.dca = (1 << QB_ENQUEUE_CMD_DCA_EN_SHIFT) |
				((flags[i]) & QBMAN_EQCR_DCA_IDXMASK);
		}
		eqcr_pi++;
		/* Valid bit toggles on each ring wrap */
		if (!(eqcr_pi & half_mask))
			s->eqcr.pi_vb ^= QB_VALID_BIT;
	}

	/* Flush all the cacheline without load/store in between */
	eqcr_pi = s->eqcr.pi;
	addr_cena = (size_t)s->sys.addr_cena;
	for (i = 0; i < num_enqueued; i++) {
		dcbf((uintptr_t)(addr_cena +
			QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask)));
		eqcr_pi++;
	}
	s->eqcr.pi = eqcr_pi & full_mask;

	return num_enqueued;
}
1039
/* Burst enqueue variant that reads the EQCR consumer index via the
 * cache-inhibited (CINH) mapping but stages the entries through the
 * cache-enabled area and flushes them with dcbf (used on platforms such
 * as LS1088 per the surrounding code — confirm in the portal init logic).
 *
 * Returns the number of frames actually enqueued (0 when the ring is full).
 */
static int qbman_swp_enqueue_multiple_cinh_read_direct(
		struct qbman_swp *s,
		const struct qbman_eq_desc *d,
		const struct qbman_fd *fd,
		uint32_t *flags,
		int num_frames)
{
	uint32_t *p = NULL;
	const uint32_t *cl = qb_cl(d);
	uint32_t eqcr_ci, eqcr_pi, half_mask, full_mask;
	int i, num_enqueued = 0;
	uint64_t addr_cena;

	half_mask = (s->eqcr.pi_ci_mask>>1);
	full_mask = s->eqcr.pi_ci_mask;
	if (!s->eqcr.available) {
		/* Refresh free-entry count from the CINH consumer index */
		eqcr_ci = s->eqcr.ci;
		s->eqcr.ci = qbman_cinh_read(&s->sys,
				QBMAN_CINH_SWP_EQCR_CI) & full_mask;
		s->eqcr.available = qm_cyc_diff(s->eqcr.pi_ring_size,
				eqcr_ci, s->eqcr.ci);
		if (!s->eqcr.available)
			return 0;
	}

	eqcr_pi = s->eqcr.pi;
	num_enqueued = (s->eqcr.available < num_frames) ?
			s->eqcr.available : num_frames;
	s->eqcr.available -= num_enqueued;
	/* Fill in the EQCR ring */
	for (i = 0; i < num_enqueued; i++) {
		p = qbman_cena_write_start_wo_shadow(&s->sys,
				QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
		/* Payload only; verb (word 0) is written in the second pass */
		memcpy(&p[1], &cl[1], 28);
		memcpy(&p[8], &fd[i], sizeof(*fd));
		eqcr_pi++;
	}

	lwsync();	/* payload stores must precede all verb stores */

	/* Set the verb byte, have to substitute in the valid-bit */
	eqcr_pi = s->eqcr.pi;
	for (i = 0; i < num_enqueued; i++) {
		p = qbman_cena_write_start_wo_shadow(&s->sys,
				QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
		p[0] = cl[0] | s->eqcr.pi_vb;
		if (flags && (flags[i] & QBMAN_ENQUEUE_FLAG_DCA)) {
			/* NOTE(review): local 'd' shadows the parameter 'd';
			 * it aliases the ring entry being patched.
			 */
			struct qbman_eq_desc *d = (struct qbman_eq_desc *)p;

			d->eq.dca = (1 << QB_ENQUEUE_CMD_DCA_EN_SHIFT) |
				((flags[i]) & QBMAN_EQCR_DCA_IDXMASK);
		}
		eqcr_pi++;
		/* Valid bit toggles on each ring wrap */
		if (!(eqcr_pi & half_mask))
			s->eqcr.pi_vb ^= QB_VALID_BIT;
	}

	/* Flush all the cacheline without load/store in between */
	eqcr_pi = s->eqcr.pi;
	addr_cena = (size_t)s->sys.addr_cena;
	for (i = 0; i < num_enqueued; i++) {
		dcbf(addr_cena +
			QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
		eqcr_pi++;
	}
	s->eqcr.pi = eqcr_pi & full_mask;

	return num_enqueued;
}
1109
/* Burst enqueue performed entirely through the cache-inhibited (CINH)
 * mapping; payload is copied with memcpy_byte_by_byte (presumably to keep
 * stores to uncached I/O memory narrow and ordered — confirm against
 * qbman_sys.h). No dcbf flush pass is needed on this path.
 *
 * Returns the number of frames actually enqueued (0 when the ring is full).
 */
static int qbman_swp_enqueue_multiple_cinh_direct(
		struct qbman_swp *s,
		const struct qbman_eq_desc *d,
		const struct qbman_fd *fd,
		uint32_t *flags,
		int num_frames)
{
	uint32_t *p = NULL;
	const uint32_t *cl = qb_cl(d);
	uint32_t eqcr_ci, eqcr_pi, half_mask, full_mask;
	int i, num_enqueued = 0;

	half_mask = (s->eqcr.pi_ci_mask>>1);
	full_mask = s->eqcr.pi_ci_mask;
	if (!s->eqcr.available) {
		/* Refresh free-entry count from the CINH consumer index */
		eqcr_ci = s->eqcr.ci;
		s->eqcr.ci = qbman_cinh_read(&s->sys,
				QBMAN_CINH_SWP_EQCR_CI) & full_mask;
		s->eqcr.available = qm_cyc_diff(s->eqcr.pi_ring_size,
				eqcr_ci, s->eqcr.ci);
		if (!s->eqcr.available)
			return 0;
	}

	eqcr_pi = s->eqcr.pi;
	num_enqueued = (s->eqcr.available < num_frames) ?
			s->eqcr.available : num_frames;
	s->eqcr.available -= num_enqueued;
	/* Fill in the EQCR ring */
	for (i = 0; i < num_enqueued; i++) {
		p = qbman_cinh_write_start_wo_shadow(&s->sys,
				QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
		/* Payload only; verb (word 0) is written in the second pass */
		memcpy_byte_by_byte(&p[1], &cl[1], 28);
		memcpy_byte_by_byte(&p[8], &fd[i], sizeof(*fd));
		eqcr_pi++;
	}

	lwsync();	/* payload stores must precede all verb stores */

	/* Set the verb byte, have to substitute in the valid-bit */
	eqcr_pi = s->eqcr.pi;
	for (i = 0; i < num_enqueued; i++) {
		p = qbman_cinh_write_start_wo_shadow(&s->sys,
				QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
		p[0] = cl[0] | s->eqcr.pi_vb;
		if (flags && (flags[i] & QBMAN_ENQUEUE_FLAG_DCA)) {
			/* NOTE(review): local 'd' shadows the parameter 'd';
			 * it aliases the ring entry being patched.
			 */
			struct qbman_eq_desc *d = (struct qbman_eq_desc *)p;

			d->eq.dca = (1 << QB_ENQUEUE_CMD_DCA_EN_SHIFT) |
				((flags[i]) & QBMAN_EQCR_DCA_IDXMASK);
		}
		eqcr_pi++;
		/* Valid bit toggles on each ring wrap */
		if (!(eqcr_pi & half_mask))
			s->eqcr.pi_vb ^= QB_VALID_BIT;
	}

	s->eqcr.pi = eqcr_pi & full_mask;

	return num_enqueued;
}
1170
/* Burst enqueue for memory-backed portals: CI is read from the mem-backed
 * mirror, entries are written in a single pass (verb included), and the
 * new producer index is published via a CINH register write with QB_RT_BIT.
 *
 * Returns the number of frames actually enqueued (0 when the ring is full).
 */
static int qbman_swp_enqueue_multiple_mem_back(struct qbman_swp *s,
					       const struct qbman_eq_desc *d,
					       const struct qbman_fd *fd,
					       uint32_t *flags,
					       int num_frames)
{
	uint32_t *p = NULL;
	const uint32_t *cl = qb_cl(d);
	uint32_t eqcr_ci, eqcr_pi, half_mask, full_mask;
	int i, num_enqueued = 0;

	half_mask = (s->eqcr.pi_ci_mask>>1);
	full_mask = s->eqcr.pi_ci_mask;
	if (!s->eqcr.available) {
		/* Refresh free-entry count from the mem-backed CI mirror */
		eqcr_ci = s->eqcr.ci;
		s->eqcr.ci = qbman_cena_read_reg(&s->sys,
				QBMAN_CENA_SWP_EQCR_CI_MEMBACK) & full_mask;
		s->eqcr.available = qm_cyc_diff(s->eqcr.pi_ring_size,
					eqcr_ci, s->eqcr.ci);
		if (!s->eqcr.available)
			return 0;
	}

	eqcr_pi = s->eqcr.pi;
	num_enqueued = (s->eqcr.available < num_frames) ?
			s->eqcr.available : num_frames;
	s->eqcr.available -= num_enqueued;
	/* Fill in the EQCR ring */
	for (i = 0; i < num_enqueued; i++) {
		p = qbman_cena_write_start_wo_shadow(&s->sys,
				QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
		memcpy(&p[1], &cl[1], 28);
		memcpy(&p[8], &fd[i], sizeof(*fd));
		if (flags && (flags[i] & QBMAN_ENQUEUE_FLAG_DCA)) {
			/* NOTE(review): local 'd' shadows the parameter 'd';
			 * it aliases the ring entry being patched.
			 */
			struct qbman_eq_desc *d = (struct qbman_eq_desc *)p;

			d->eq.dca = (1 << QB_ENQUEUE_CMD_DCA_EN_SHIFT) |
				((flags[i]) & QBMAN_EQCR_DCA_IDXMASK);
		}
		eqcr_pi++;
		p[0] = cl[0] | s->eqcr.pi_vb;

		/* Valid bit toggles on each ring wrap */
		if (!(eqcr_pi & half_mask))
			s->eqcr.pi_vb ^= QB_VALID_BIT;
	}
	s->eqcr.pi = eqcr_pi & full_mask;

	dma_wmb();	/* complete all entries before ringing the PI register */
	qbman_cinh_write(&s->sys, QBMAN_CINH_SWP_EQCR_PI,
				(QB_RT_BIT)|(s->eqcr.pi)|s->eqcr.pi_vb);
	return num_enqueued;
}
1223
1224 int qbman_swp_enqueue_multiple(struct qbman_swp *s,
1225                                       const struct qbman_eq_desc *d,
1226                                       const struct qbman_fd *fd,
1227                                       uint32_t *flags,
1228                                       int num_frames)
1229 {
1230         if (!s->stash_off)
1231                 return qbman_swp_enqueue_multiple_ptr(s, d, fd, flags,
1232                                                 num_frames);
1233         else
1234                 return qbman_swp_enqueue_multiple_cinh_direct(s, d, fd, flags,
1235                                                 num_frames);
1236 }
1237
/* Burst enqueue where the FDs arrive as an array of pointers
 * (struct qbman_fd **) rather than a contiguous array; otherwise the
 * same two-pass CENA flow as qbman_swp_enqueue_multiple_direct.
 *
 * Returns the number of frames actually enqueued (0 when the ring is full).
 */
static int qbman_swp_enqueue_multiple_fd_direct(struct qbman_swp *s,
						const struct qbman_eq_desc *d,
						struct qbman_fd **fd,
						uint32_t *flags,
						int num_frames)
{
	uint32_t *p = NULL;
	const uint32_t *cl = qb_cl(d);
	uint32_t eqcr_ci, eqcr_pi, half_mask, full_mask;
	int i, num_enqueued = 0;
	uint64_t addr_cena;

	half_mask = (s->eqcr.pi_ci_mask>>1);
	full_mask = s->eqcr.pi_ci_mask;
	if (!s->eqcr.available) {
		/* Refresh free-entry count from the hardware CI */
		eqcr_ci = s->eqcr.ci;
		s->eqcr.ci = qbman_cena_read_reg(&s->sys,
				QBMAN_CENA_SWP_EQCR_CI) & full_mask;
		s->eqcr.available = qm_cyc_diff(s->eqcr.pi_ring_size,
				eqcr_ci, s->eqcr.ci);
		if (!s->eqcr.available)
			return 0;
	}

	eqcr_pi = s->eqcr.pi;
	num_enqueued = (s->eqcr.available < num_frames) ?
			s->eqcr.available : num_frames;
	s->eqcr.available -= num_enqueued;
	/* Fill in the EQCR ring */
	for (i = 0; i < num_enqueued; i++) {
		p = qbman_cena_write_start_wo_shadow(&s->sys,
				QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
		/* Payload only; verb (word 0) is written in the second pass */
		memcpy(&p[1], &cl[1], 28);
		memcpy(&p[8], fd[i], sizeof(struct qbman_fd));
		eqcr_pi++;
	}

	lwsync();	/* payload stores must precede all verb stores */

	/* Set the verb byte, have to substitute in the valid-bit */
	eqcr_pi = s->eqcr.pi;
	for (i = 0; i < num_enqueued; i++) {
		p = qbman_cena_write_start_wo_shadow(&s->sys,
				QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
		p[0] = cl[0] | s->eqcr.pi_vb;
		if (flags && (flags[i] & QBMAN_ENQUEUE_FLAG_DCA)) {
			/* NOTE(review): local 'd' shadows the parameter 'd';
			 * it aliases the ring entry being patched.
			 */
			struct qbman_eq_desc *d = (struct qbman_eq_desc *)p;

			d->eq.dca = (1 << QB_ENQUEUE_CMD_DCA_EN_SHIFT) |
				((flags[i]) & QBMAN_EQCR_DCA_IDXMASK);
		}
		eqcr_pi++;
		/* Valid bit toggles on each ring wrap */
		if (!(eqcr_pi & half_mask))
			s->eqcr.pi_vb ^= QB_VALID_BIT;
	}

	/* Flush all the cacheline without load/store in between */
	eqcr_pi = s->eqcr.pi;
	addr_cena = (size_t)s->sys.addr_cena;
	for (i = 0; i < num_enqueued; i++) {
		dcbf(addr_cena +
			QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
		eqcr_pi++;
	}
	s->eqcr.pi = eqcr_pi & full_mask;

	return num_enqueued;
}
1306
/* FD-pointer-array burst enqueue variant that reads the consumer index
 * via the cache-inhibited (CINH) mapping while staging entries through
 * the cache-enabled area with a dcbf flush pass (LS1088-style portals
 * per the surrounding code — confirm in the portal init logic).
 *
 * Returns the number of frames actually enqueued (0 when the ring is full).
 */
static int qbman_swp_enqueue_multiple_fd_cinh_read_direct(
		struct qbman_swp *s,
		const struct qbman_eq_desc *d,
		struct qbman_fd **fd,
		uint32_t *flags,
		int num_frames)
{
	uint32_t *p = NULL;
	const uint32_t *cl = qb_cl(d);
	uint32_t eqcr_ci, eqcr_pi, half_mask, full_mask;
	int i, num_enqueued = 0;
	uint64_t addr_cena;

	half_mask = (s->eqcr.pi_ci_mask>>1);
	full_mask = s->eqcr.pi_ci_mask;
	if (!s->eqcr.available) {
		/* Refresh free-entry count from the CINH consumer index */
		eqcr_ci = s->eqcr.ci;
		s->eqcr.ci = qbman_cinh_read(&s->sys,
				QBMAN_CINH_SWP_EQCR_CI) & full_mask;
		s->eqcr.available = qm_cyc_diff(s->eqcr.pi_ring_size,
				eqcr_ci, s->eqcr.ci);
		if (!s->eqcr.available)
			return 0;
	}

	eqcr_pi = s->eqcr.pi;
	num_enqueued = (s->eqcr.available < num_frames) ?
			s->eqcr.available : num_frames;
	s->eqcr.available -= num_enqueued;
	/* Fill in the EQCR ring */
	for (i = 0; i < num_enqueued; i++) {
		p = qbman_cena_write_start_wo_shadow(&s->sys,
				QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
		/* Payload only; verb (word 0) is written in the second pass */
		memcpy(&p[1], &cl[1], 28);
		memcpy(&p[8], fd[i], sizeof(struct qbman_fd));
		eqcr_pi++;
	}

	lwsync();	/* payload stores must precede all verb stores */

	/* Set the verb byte, have to substitute in the valid-bit */
	eqcr_pi = s->eqcr.pi;
	for (i = 0; i < num_enqueued; i++) {
		p = qbman_cena_write_start_wo_shadow(&s->sys,
				QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
		p[0] = cl[0] | s->eqcr.pi_vb;
		if (flags && (flags[i] & QBMAN_ENQUEUE_FLAG_DCA)) {
			/* NOTE(review): local 'd' shadows the parameter 'd';
			 * it aliases the ring entry being patched.
			 */
			struct qbman_eq_desc *d = (struct qbman_eq_desc *)p;

			d->eq.dca = (1 << QB_ENQUEUE_CMD_DCA_EN_SHIFT) |
				((flags[i]) & QBMAN_EQCR_DCA_IDXMASK);
		}
		eqcr_pi++;
		/* Valid bit toggles on each ring wrap */
		if (!(eqcr_pi & half_mask))
			s->eqcr.pi_vb ^= QB_VALID_BIT;
	}

	/* Flush all the cacheline without load/store in between */
	eqcr_pi = s->eqcr.pi;
	addr_cena = (size_t)s->sys.addr_cena;
	for (i = 0; i < num_enqueued; i++) {
		dcbf(addr_cena +
			QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
		eqcr_pi++;
	}
	s->eqcr.pi = eqcr_pi & full_mask;

	return num_enqueued;
}
1376
/* FD-pointer-array burst enqueue performed entirely through the
 * cache-inhibited (CINH) mapping, using memcpy_byte_by_byte for the
 * payload (presumably to keep stores to uncached I/O memory narrow and
 * ordered — confirm against qbman_sys.h). No dcbf flush pass is needed.
 *
 * Returns the number of frames actually enqueued (0 when the ring is full).
 */
static int qbman_swp_enqueue_multiple_fd_cinh_direct(
		struct qbman_swp *s,
		const struct qbman_eq_desc *d,
		struct qbman_fd **fd,
		uint32_t *flags,
		int num_frames)
{
	uint32_t *p = NULL;
	const uint32_t *cl = qb_cl(d);
	uint32_t eqcr_ci, eqcr_pi, half_mask, full_mask;
	int i, num_enqueued = 0;

	half_mask = (s->eqcr.pi_ci_mask>>1);
	full_mask = s->eqcr.pi_ci_mask;
	if (!s->eqcr.available) {
		/* Refresh free-entry count from the CINH consumer index */
		eqcr_ci = s->eqcr.ci;
		s->eqcr.ci = qbman_cinh_read(&s->sys,
				QBMAN_CINH_SWP_EQCR_CI) & full_mask;
		s->eqcr.available = qm_cyc_diff(s->eqcr.pi_ring_size,
				eqcr_ci, s->eqcr.ci);
		if (!s->eqcr.available)
			return 0;
	}

	eqcr_pi = s->eqcr.pi;
	num_enqueued = (s->eqcr.available < num_frames) ?
			s->eqcr.available : num_frames;
	s->eqcr.available -= num_enqueued;
	/* Fill in the EQCR ring */
	for (i = 0; i < num_enqueued; i++) {
		p = qbman_cinh_write_start_wo_shadow(&s->sys,
				QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
		/* Payload only; verb (word 0) is written in the second pass */
		memcpy_byte_by_byte(&p[1], &cl[1], 28);
		memcpy_byte_by_byte(&p[8], fd[i], sizeof(struct qbman_fd));
		eqcr_pi++;
	}

	lwsync();	/* payload stores must precede all verb stores */

	/* Set the verb byte, have to substitute in the valid-bit */
	eqcr_pi = s->eqcr.pi;
	for (i = 0; i < num_enqueued; i++) {
		p = qbman_cinh_write_start_wo_shadow(&s->sys,
				QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
		p[0] = cl[0] | s->eqcr.pi_vb;
		if (flags && (flags[i] & QBMAN_ENQUEUE_FLAG_DCA)) {
			/* NOTE(review): local 'd' shadows the parameter 'd';
			 * it aliases the ring entry being patched.
			 */
			struct qbman_eq_desc *d = (struct qbman_eq_desc *)p;

			d->eq.dca = (1 << QB_ENQUEUE_CMD_DCA_EN_SHIFT) |
				((flags[i]) & QBMAN_EQCR_DCA_IDXMASK);
		}
		eqcr_pi++;
		/* Valid bit toggles on each ring wrap */
		if (!(eqcr_pi & half_mask))
			s->eqcr.pi_vb ^= QB_VALID_BIT;
	}

	s->eqcr.pi = eqcr_pi & full_mask;

	return num_enqueued;
}
1437
/* FD-pointer-array burst enqueue for memory-backed portals: CI comes from
 * the mem-backed mirror and the producer index is published via a CINH
 * register write with QB_RT_BIT after a dma_wmb(). Note this path has no
 * lwsync between the fill and verb passes (matches the mem-back single
 * enqueue; presumably the final dma_wmb + PI write provide the ordering
 * the hardware needs — confirm against the portal spec).
 *
 * Returns the number of frames actually enqueued (0 when the ring is full).
 */
static int qbman_swp_enqueue_multiple_fd_mem_back(struct qbman_swp *s,
						  const struct qbman_eq_desc *d,
						  struct qbman_fd **fd,
						  uint32_t *flags,
						  int num_frames)
{
	uint32_t *p = NULL;
	const uint32_t *cl = qb_cl(d);
	uint32_t eqcr_ci, eqcr_pi, half_mask, full_mask;
	int i, num_enqueued = 0;

	half_mask = (s->eqcr.pi_ci_mask>>1);
	full_mask = s->eqcr.pi_ci_mask;
	if (!s->eqcr.available) {
		/* Refresh free-entry count from the mem-backed CI mirror */
		eqcr_ci = s->eqcr.ci;
		s->eqcr.ci = qbman_cena_read_reg(&s->sys,
				QBMAN_CENA_SWP_EQCR_CI_MEMBACK) & full_mask;
		s->eqcr.available = qm_cyc_diff(s->eqcr.pi_ring_size,
					eqcr_ci, s->eqcr.ci);
		if (!s->eqcr.available)
			return 0;
	}

	eqcr_pi = s->eqcr.pi;
	num_enqueued = (s->eqcr.available < num_frames) ?
			s->eqcr.available : num_frames;
	s->eqcr.available -= num_enqueued;
	/* Fill in the EQCR ring */
	for (i = 0; i < num_enqueued; i++) {
		p = qbman_cena_write_start_wo_shadow(&s->sys,
				QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
		/* Payload only; verb (word 0) is written in the second pass */
		memcpy(&p[1], &cl[1], 28);
		memcpy(&p[8], fd[i], sizeof(struct qbman_fd));
		eqcr_pi++;
	}

	/* Set the verb byte, have to substitute in the valid-bit */
	eqcr_pi = s->eqcr.pi;
	for (i = 0; i < num_enqueued; i++) {
		p = qbman_cena_write_start_wo_shadow(&s->sys,
				QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
		p[0] = cl[0] | s->eqcr.pi_vb;
		if (flags && (flags[i] & QBMAN_ENQUEUE_FLAG_DCA)) {
			/* NOTE(review): local 'd' shadows the parameter 'd';
			 * it aliases the ring entry being patched.
			 */
			struct qbman_eq_desc *d = (struct qbman_eq_desc *)p;

			d->eq.dca = (1 << QB_ENQUEUE_CMD_DCA_EN_SHIFT) |
				((flags[i]) & QBMAN_EQCR_DCA_IDXMASK);
		}
		eqcr_pi++;
		/* Valid bit toggles on each ring wrap */
		if (!(eqcr_pi & half_mask))
			s->eqcr.pi_vb ^= QB_VALID_BIT;
	}
	s->eqcr.pi = eqcr_pi & full_mask;

	dma_wmb();	/* complete all entries before ringing the PI register */
	qbman_cinh_write(&s->sys, QBMAN_CINH_SWP_EQCR_PI,
				(QB_RT_BIT)|(s->eqcr.pi)|s->eqcr.pi_vb);
	return num_enqueued;
}
1497
1498 int qbman_swp_enqueue_multiple_fd(struct qbman_swp *s,
1499                                          const struct qbman_eq_desc *d,
1500                                          struct qbman_fd **fd,
1501                                          uint32_t *flags,
1502                                          int num_frames)
1503 {
1504         if (!s->stash_off)
1505                 return qbman_swp_enqueue_multiple_fd_ptr(s, d, fd, flags,
1506                                         num_frames);
1507         else
1508                 return qbman_swp_enqueue_multiple_fd_cinh_direct(s, d, fd,
1509                                         flags, num_frames);
1510 }
1511
/* Burst enqueue where each frame carries its OWN enqueue descriptor
 * (d is an array, indexed alongside fd), via the cache-enabled mapping.
 * No per-frame flags/DCA handling on this path.
 *
 * Returns the number of frames actually enqueued (0 when the ring is full).
 */
static int qbman_swp_enqueue_multiple_desc_direct(struct qbman_swp *s,
					const struct qbman_eq_desc *d,
					const struct qbman_fd *fd,
					int num_frames)
{
	uint32_t *p;
	const uint32_t *cl;
	uint32_t eqcr_ci, eqcr_pi, half_mask, full_mask;
	int i, num_enqueued = 0;
	uint64_t addr_cena;

	half_mask = (s->eqcr.pi_ci_mask>>1);
	full_mask = s->eqcr.pi_ci_mask;
	if (!s->eqcr.available) {
		/* Refresh free-entry count from the hardware CI */
		eqcr_ci = s->eqcr.ci;
		s->eqcr.ci = qbman_cena_read_reg(&s->sys,
				QBMAN_CENA_SWP_EQCR_CI) & full_mask;
		s->eqcr.available = qm_cyc_diff(s->eqcr.pi_ring_size,
					eqcr_ci, s->eqcr.ci);
		if (!s->eqcr.available)
			return 0;
	}

	eqcr_pi = s->eqcr.pi;
	num_enqueued = (s->eqcr.available < num_frames) ?
			s->eqcr.available : num_frames;
	s->eqcr.available -= num_enqueued;
	/* Fill in the EQCR ring */
	for (i = 0; i < num_enqueued; i++) {
		p = qbman_cena_write_start_wo_shadow(&s->sys,
				QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
		/* Per-frame descriptor: payload only, verb in second pass */
		cl = qb_cl(&d[i]);
		memcpy(&p[1], &cl[1], 28);
		memcpy(&p[8], &fd[i], sizeof(*fd));
		eqcr_pi++;
	}

	lwsync();	/* payload stores must precede all verb stores */

	/* Set the verb byte, have to substitute in the valid-bit */
	eqcr_pi = s->eqcr.pi;
	for (i = 0; i < num_enqueued; i++) {
		p = qbman_cena_write_start_wo_shadow(&s->sys,
				QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
		cl = qb_cl(&d[i]);
		p[0] = cl[0] | s->eqcr.pi_vb;
		eqcr_pi++;
		/* Valid bit toggles on each ring wrap */
		if (!(eqcr_pi & half_mask))
			s->eqcr.pi_vb ^= QB_VALID_BIT;
	}

	/* Flush all the cacheline without load/store in between */
	eqcr_pi = s->eqcr.pi;
	addr_cena = (size_t)s->sys.addr_cena;
	for (i = 0; i < num_enqueued; i++) {
		dcbf((uintptr_t)(addr_cena +
			QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask)));
		eqcr_pi++;
	}
	s->eqcr.pi = eqcr_pi & full_mask;

	return num_enqueued;
}
1575
/*
 * Same as qbman_swp_enqueue_multiple_desc_direct(), except that the EQCR
 * consumer index is read through the cache-inhibited (CINH) register map.
 * Ring entries are still written through the cache-enabled area and flushed
 * with dcbf. Returns the number of frames enqueued (0 when EQCR is full).
 */
static int qbman_swp_enqueue_multiple_desc_cinh_read_direct(
                struct qbman_swp *s,
                const struct qbman_eq_desc *d,
                const struct qbman_fd *fd,
                int num_frames)
{
        uint32_t *p;
        const uint32_t *cl;
        uint32_t eqcr_ci, eqcr_pi, half_mask, full_mask;
        int i, num_enqueued = 0;
        uint64_t addr_cena;

        half_mask = (s->eqcr.pi_ci_mask>>1);
        full_mask = s->eqcr.pi_ci_mask;
        if (!s->eqcr.available) {
                /* Consumer index is fetched via the CINH map here, unlike
                 * the plain "direct" variant.
                 */
                eqcr_ci = s->eqcr.ci;
                s->eqcr.ci = qbman_cinh_read(&s->sys,
                                QBMAN_CINH_SWP_EQCR_CI) & full_mask;
                s->eqcr.available = qm_cyc_diff(s->eqcr.pi_ring_size,
                                        eqcr_ci, s->eqcr.ci);
                if (!s->eqcr.available)
                        return 0;
        }

        eqcr_pi = s->eqcr.pi;
        num_enqueued = (s->eqcr.available < num_frames) ?
                        s->eqcr.available : num_frames;
        s->eqcr.available -= num_enqueued;
        /* Fill in the EQCR ring */
        for (i = 0; i < num_enqueued; i++) {
                p = qbman_cena_write_start_wo_shadow(&s->sys,
                                QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
                cl = qb_cl(&d[i]);
                /* Verb (word 0) is written after the barrier below */
                memcpy(&p[1], &cl[1], 28);
                memcpy(&p[8], &fd[i], sizeof(*fd));
                eqcr_pi++;
        }

        /* Order payload stores before the verb stores */
        lwsync();

        /* Set the verb byte, have to substitute in the valid-bit */
        eqcr_pi = s->eqcr.pi;
        for (i = 0; i < num_enqueued; i++) {
                p = qbman_cena_write_start_wo_shadow(&s->sys,
                                QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
                cl = qb_cl(&d[i]);
                p[0] = cl[0] | s->eqcr.pi_vb;
                eqcr_pi++;
                /* Valid-bit polarity flips on every ring wrap */
                if (!(eqcr_pi & half_mask))
                        s->eqcr.pi_vb ^= QB_VALID_BIT;
        }

        /* Flush all the cacheline without load/store in between */
        eqcr_pi = s->eqcr.pi;
        addr_cena = (size_t)s->sys.addr_cena;
        for (i = 0; i < num_enqueued; i++) {
                dcbf(addr_cena +
                        QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
                eqcr_pi++;
        }
        s->eqcr.pi = eqcr_pi & full_mask;

        return num_enqueued;
}
1640
/*
 * Fully cache-inhibited variant of the multi-descriptor enqueue, used when
 * the portal is operated with stashing disabled on the fast path
 * (s->stash_off set). All ring accesses go through the CINH mapping and are
 * copied byte-by-byte; no cacheline flush is required afterwards.
 * Returns the number of frames enqueued (0 when EQCR is full).
 */
static int qbman_swp_enqueue_multiple_desc_cinh_direct(
                struct qbman_swp *s,
                const struct qbman_eq_desc *d,
                const struct qbman_fd *fd,
                int num_frames)
{
        uint32_t *p;
        const uint32_t *cl;
        uint32_t eqcr_ci, eqcr_pi, half_mask, full_mask;
        int i, num_enqueued = 0;

        half_mask = (s->eqcr.pi_ci_mask>>1);
        full_mask = s->eqcr.pi_ci_mask;
        if (!s->eqcr.available) {
                /* Refresh the cached consumer index from hardware */
                eqcr_ci = s->eqcr.ci;
                s->eqcr.ci = qbman_cinh_read(&s->sys,
                                QBMAN_CINH_SWP_EQCR_CI) & full_mask;
                s->eqcr.available = qm_cyc_diff(s->eqcr.pi_ring_size,
                                        eqcr_ci, s->eqcr.ci);
                if (!s->eqcr.available)
                        return 0;
        }

        eqcr_pi = s->eqcr.pi;
        num_enqueued = (s->eqcr.available < num_frames) ?
                        s->eqcr.available : num_frames;
        s->eqcr.available -= num_enqueued;
        /* Fill in the EQCR ring */
        for (i = 0; i < num_enqueued; i++) {
                /* NOTE: the QBMAN_CENA_SWP_EQCR() offset macro is shared
                 * with the cache-enabled map for the same ring entry.
                 */
                p = qbman_cinh_write_start_wo_shadow(&s->sys,
                                QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
                cl = qb_cl(&d[i]);
                memcpy_byte_by_byte(&p[1], &cl[1], 28);
                memcpy_byte_by_byte(&p[8], &fd[i], sizeof(*fd));
                eqcr_pi++;
        }

        /* Order payload stores before the verb stores below */
        lwsync();

        /* Set the verb byte, have to substitute in the valid-bit */
        eqcr_pi = s->eqcr.pi;
        for (i = 0; i < num_enqueued; i++) {
                p = qbman_cinh_write_start_wo_shadow(&s->sys,
                                QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
                cl = qb_cl(&d[i]);
                p[0] = cl[0] | s->eqcr.pi_vb;
                eqcr_pi++;
                /* Valid-bit polarity flips on every ring wrap */
                if (!(eqcr_pi & half_mask))
                        s->eqcr.pi_vb ^= QB_VALID_BIT;
        }

        s->eqcr.pi = eqcr_pi & full_mask;

        return num_enqueued;
}
1696
/*
 * Multi-descriptor enqueue for memory-backed portals. Entries are built in
 * the memory-backed ring and the enqueue is triggered by ringing the
 * EQCR_PI doorbell register after a dma_wmb(), so no intermediate lwsync or
 * per-line cache flush is needed. Returns the number of frames enqueued.
 */
static int qbman_swp_enqueue_multiple_desc_mem_back(struct qbman_swp *s,
                                        const struct qbman_eq_desc *d,
                                        const struct qbman_fd *fd,
                                        int num_frames)
{
        uint32_t *p;
        const uint32_t *cl;
        uint32_t eqcr_ci, eqcr_pi, half_mask, full_mask;
        int i, num_enqueued = 0;

        half_mask = (s->eqcr.pi_ci_mask>>1);
        full_mask = s->eqcr.pi_ci_mask;
        if (!s->eqcr.available) {
                /* Refresh the cached consumer index from hardware */
                eqcr_ci = s->eqcr.ci;
                s->eqcr.ci = qbman_cena_read_reg(&s->sys,
                                QBMAN_CENA_SWP_EQCR_CI_MEMBACK) & full_mask;
                s->eqcr.available = qm_cyc_diff(s->eqcr.pi_ring_size,
                                        eqcr_ci, s->eqcr.ci);
                if (!s->eqcr.available)
                        return 0;
        }

        eqcr_pi = s->eqcr.pi;
        num_enqueued = (s->eqcr.available < num_frames) ?
                        s->eqcr.available : num_frames;
        s->eqcr.available -= num_enqueued;
        /* Fill in the EQCR ring */
        for (i = 0; i < num_enqueued; i++) {
                p = qbman_cena_write_start_wo_shadow(&s->sys,
                                QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
                cl = qb_cl(&d[i]);
                memcpy(&p[1], &cl[1], 28);
                memcpy(&p[8], &fd[i], sizeof(*fd));
                eqcr_pi++;
        }

        /* Set the verb byte, have to substitute in the valid-bit */
        eqcr_pi = s->eqcr.pi;
        for (i = 0; i < num_enqueued; i++) {
                p = qbman_cena_write_start_wo_shadow(&s->sys,
                                QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
                cl = qb_cl(&d[i]);
                p[0] = cl[0] | s->eqcr.pi_vb;
                eqcr_pi++;
                /* Valid-bit polarity flips on every ring wrap */
                if (!(eqcr_pi & half_mask))
                        s->eqcr.pi_vb ^= QB_VALID_BIT;
        }

        s->eqcr.pi = eqcr_pi & full_mask;

        /* Ensure ring contents are visible before ringing the doorbell */
        dma_wmb();
        qbman_cinh_write(&s->sys, QBMAN_CINH_SWP_EQCR_PI,
                                (QB_RT_BIT)|(s->eqcr.pi)|s->eqcr.pi_vb);

        return num_enqueued;
}
1753 int qbman_swp_enqueue_multiple_desc(struct qbman_swp *s,
1754                                            const struct qbman_eq_desc *d,
1755                                            const struct qbman_fd *fd,
1756                                            int num_frames)
1757 {
1758         if (!s->stash_off)
1759                 return qbman_swp_enqueue_multiple_desc_ptr(s, d, fd,
1760                                         num_frames);
1761         else
1762                 return qbman_swp_enqueue_multiple_desc_cinh_direct(s, d, fd,
1763                                         num_frames);
1764
1765 }
1766
1767 /*************************/
1768 /* Static (push) dequeue */
1769 /*************************/
1770
1771 void qbman_swp_push_get(struct qbman_swp *s, uint8_t channel_idx, int *enabled)
1772 {
1773         uint16_t src = (s->sdq >> QB_SDQCR_SRC_SHIFT) & QB_SDQCR_SRC_MASK;
1774
1775         QBMAN_BUG_ON(channel_idx > 15);
1776         *enabled = src | (1 << channel_idx);
1777 }
1778
1779 void qbman_swp_push_set(struct qbman_swp *s, uint8_t channel_idx, int enable)
1780 {
1781         uint16_t dqsrc;
1782
1783         QBMAN_BUG_ON(channel_idx > 15);
1784         if (enable)
1785                 s->sdq |= 1 << channel_idx;
1786         else
1787                 s->sdq &= ~(1 << channel_idx);
1788
1789         /* Read make the complete src map.  If no channels are enabled
1790          * the SDQCR must be 0 or else QMan will assert errors
1791          */
1792         dqsrc = (s->sdq >> QB_SDQCR_SRC_SHIFT) & QB_SDQCR_SRC_MASK;
1793         if (dqsrc != 0)
1794                 qbman_cinh_write(&s->sys, QBMAN_CINH_SWP_SDQCR, s->sdq);
1795         else
1796                 qbman_cinh_write(&s->sys, QBMAN_CINH_SWP_SDQCR, 0);
1797 }
1798
1799 /***************************/
1800 /* Volatile (pull) dequeue */
1801 /***************************/
1802
1803 /* These should be const, eventually */
/* Bit positions within the VDQCR (volatile dequeue command) verb byte */
#define QB_VDQCR_VERB_DCT_SHIFT    0
#define QB_VDQCR_VERB_DT_SHIFT     2
#define QB_VDQCR_VERB_RLS_SHIFT    4
#define QB_VDQCR_VERB_WAE_SHIFT    5
#define QB_VDQCR_VERB_RAD_SHIFT    6

/* Dequeue source type, encoded in the VDQCR DT field */
enum qb_pull_dt_e {
        qb_pull_dt_channel,
        qb_pull_dt_workqueue,
        qb_pull_dt_framequeue
};
1815
/* Reset a pull (volatile dequeue) descriptor to all-zero defaults.
 * memset (rather than struct assignment) also zeroes any padding bytes,
 * which matters for a descriptor consumed by hardware.
 */
void qbman_pull_desc_clear(struct qbman_pull_desc *d)
{
        memset(d, 0, sizeof(*d));
}
1820
1821 void qbman_pull_desc_set_storage(struct qbman_pull_desc *d,
1822                                  struct qbman_result *storage,
1823                                  dma_addr_t storage_phys,
1824                                  int stash)
1825 {
1826         d->pull.rsp_addr_virt = (size_t)storage;
1827
1828         if (!storage) {
1829                 d->pull.verb &= ~(1 << QB_VDQCR_VERB_RLS_SHIFT);
1830                 return;
1831         }
1832         d->pull.verb |= 1 << QB_VDQCR_VERB_RLS_SHIFT;
1833         if (stash)
1834                 d->pull.verb |= 1 << QB_VDQCR_VERB_WAE_SHIFT;
1835         else
1836                 d->pull.verb &= ~(1 << QB_VDQCR_VERB_WAE_SHIFT);
1837
1838         d->pull.rsp_addr = storage_phys;
1839 }
1840
/* Set how many frames the pull command may dequeue. Hardware encodes the
 * count as N-1, so numframes must be at least 1.
 * NOTE(review): upper bound is presumably the storage/DQRR depth - confirm.
 */
void qbman_pull_desc_set_numframes(struct qbman_pull_desc *d,
                                   uint8_t numframes)
{
        d->pull.numf = numframes - 1;
}
1846
/* Set the token byte; hardware echoes it back in the dequeue responses so
 * software can match results to the issuing command.
 */
void qbman_pull_desc_set_token(struct qbman_pull_desc *d, uint8_t token)
{
        d->pull.tok = token;
}
1851
/* Target the pull command at a specific frame queue (FQID). */
void qbman_pull_desc_set_fq(struct qbman_pull_desc *d, uint32_t fqid)
{
        d->pull.verb |= 1 << QB_VDQCR_VERB_DCT_SHIFT;
        d->pull.verb |= qb_pull_dt_framequeue << QB_VDQCR_VERB_DT_SHIFT;
        d->pull.dq_src = fqid;
}
1858
/* Target the pull command at a work queue, with the given dequeue
 * command type (dct) controlling priority/active semantics.
 */
void qbman_pull_desc_set_wq(struct qbman_pull_desc *d, uint32_t wqid,
                            enum qbman_pull_type_e dct)
{
        d->pull.verb |= dct << QB_VDQCR_VERB_DCT_SHIFT;
        d->pull.verb |= qb_pull_dt_workqueue << QB_VDQCR_VERB_DT_SHIFT;
        d->pull.dq_src = wqid;
}
1866
/* Target the pull command at a channel, with the given dequeue command
 * type (dct) controlling priority/active semantics.
 */
void qbman_pull_desc_set_channel(struct qbman_pull_desc *d, uint32_t chid,
                                 enum qbman_pull_type_e dct)
{
        d->pull.verb |= dct << QB_VDQCR_VERB_DCT_SHIFT;
        d->pull.verb |= qb_pull_dt_channel << QB_VDQCR_VERB_DT_SHIFT;
        d->pull.dq_src = chid;
}
1874
1875 void qbman_pull_desc_set_rad(struct qbman_pull_desc *d, int rad)
1876 {
1877         if (d->pull.verb & (1 << QB_VDQCR_VERB_RLS_SHIFT)) {
1878                 if (rad)
1879                         d->pull.verb |= 1 << QB_VDQCR_VERB_RAD_SHIFT;
1880                 else
1881                         d->pull.verb &= ~(1 << QB_VDQCR_VERB_RAD_SHIFT);
1882         } else {
1883                 printf("The RAD feature is not valid when RLS = 0\n");
1884         }
1885 }
1886
/*
 * Issue a volatile dequeue (pull) command through the cache-enabled map.
 * Returns 0 on success, -EBUSY when a previous pull on this portal is
 * still outstanding (only one may be in flight at a time).
 */
static int qbman_swp_pull_direct(struct qbman_swp *s,
                                 struct qbman_pull_desc *d)
{
        uint32_t *p;
        uint32_t *cl = qb_cl(d);

        /* Atomically claim the single in-flight pull slot */
        if (!atomic_dec_and_test(&s->vdq.busy)) {
                atomic_inc(&s->vdq.busy);
                return -EBUSY;
        }

        /* Stamp the token with (portal index + 1) so the completed result
         * can be matched back to this portal; 0 means "no result yet".
         */
        d->pull.tok = s->sys.idx + 1;
        s->vdq.storage = (void *)(size_t)d->pull.rsp_addr_virt;
        p = qbman_cena_write_start_wo_shadow(&s->sys, QBMAN_CENA_SWP_VDQCR);
        memcpy(&p[1], &cl[1], 12);

        /* Set the verb byte, have to substitute in the valid-bit */
        lwsync();
        p[0] = cl[0] | s->vdq.valid_bit;
        s->vdq.valid_bit ^= QB_VALID_BIT;
        qbman_cena_write_complete_wo_shadow(&s->sys, QBMAN_CENA_SWP_VDQCR);

        return 0;
}
1911
/*
 * Cache-inhibited variant of the pull command, used when s->stash_off is
 * set. Writes go through the CINH mapping byte-by-byte and need no
 * write-complete flush. Returns 0 on success, -EBUSY when a previous pull
 * on this portal is still outstanding.
 */
static int qbman_swp_pull_cinh_direct(struct qbman_swp *s,
                                 struct qbman_pull_desc *d)
{
        uint32_t *p;
        uint32_t *cl = qb_cl(d);

        /* Atomically claim the single in-flight pull slot */
        if (!atomic_dec_and_test(&s->vdq.busy)) {
                atomic_inc(&s->vdq.busy);
                return -EBUSY;
        }

        /* Token = portal index + 1, used later to find this portal */
        d->pull.tok = s->sys.idx + 1;
        s->vdq.storage = (void *)(size_t)d->pull.rsp_addr_virt;
        p = qbman_cinh_write_start_wo_shadow(&s->sys, QBMAN_CENA_SWP_VDQCR);
        memcpy_byte_by_byte(&p[1], &cl[1], 12);

        /* Set the verb byte, have to substitute in the valid-bit */
        lwsync();
        p[0] = cl[0] | s->vdq.valid_bit;
        s->vdq.valid_bit ^= QB_VALID_BIT;

        return 0;
}
1935
/*
 * Pull command for memory-backed portals: the command is built in the
 * memory-backed VDQCR area and triggered by writing the VDQCR_RT doorbell
 * register after a dma_wmb(). Returns 0 on success, -EBUSY when a previous
 * pull on this portal is still outstanding.
 */
static int qbman_swp_pull_mem_back(struct qbman_swp *s,
                                   struct qbman_pull_desc *d)
{
        uint32_t *p;
        uint32_t *cl = qb_cl(d);

        /* Atomically claim the single in-flight pull slot */
        if (!atomic_dec_and_test(&s->vdq.busy)) {
                atomic_inc(&s->vdq.busy);
                return -EBUSY;
        }

        /* Token = portal index + 1, used later to find this portal */
        d->pull.tok = s->sys.idx + 1;
        s->vdq.storage = (void *)(size_t)d->pull.rsp_addr_virt;
        p = qbman_cena_write_start_wo_shadow(&s->sys, QBMAN_CENA_SWP_VDQCR_MEM);
        memcpy(&p[1], &cl[1], 12);

        /* Set the verb byte, have to substitute in the valid-bit */
        p[0] = cl[0] | s->vdq.valid_bit;
        s->vdq.valid_bit ^= QB_VALID_BIT;
        /* Make the command visible before ringing the doorbell */
        dma_wmb();
        qbman_cinh_write(&s->sys, QBMAN_CINH_SWP_VDQCR_RT, QMAN_RT_MODE);

        return 0;
}
1960
1961 int qbman_swp_pull(struct qbman_swp *s, struct qbman_pull_desc *d)
1962 {
1963         if (!s->stash_off)
1964                 return qbman_swp_pull_ptr(s, d);
1965         else
1966                 return qbman_swp_pull_cinh_direct(s, d);
1967 }
1968
1969 /****************/
1970 /* Polling DQRR */
1971 /****************/
1972
/* Low bits of the DQPI register hold the DQRR producer index */
#define QMAN_DQRR_PI_MASK              0xf

/* Response verb codes carried in DQRR entries and notifications */
#define QBMAN_RESULT_DQ        0x60
#define QBMAN_RESULT_FQRN      0x21
#define QBMAN_RESULT_FQRNI     0x22
#define QBMAN_RESULT_FQPN      0x24
#define QBMAN_RESULT_FQDAN     0x25
#define QBMAN_RESULT_CDAN      0x26
#define QBMAN_RESULT_CSCN_MEM  0x27
#define QBMAN_RESULT_CGCU      0x28
#define QBMAN_RESULT_BPSCN     0x29
#define QBMAN_RESULT_CSCN_WQ   0x2a
1985
1986 #include <rte_prefetch.h>
1987
/* Prefetch the next DQRR entry's cacheline to hide read latency on the
 * subsequent qbman_swp_dqrr_next() call.
 */
void qbman_swp_prefetch_dqrr_next(struct qbman_swp *s)
{
        const struct qbman_result *p;

        p = qbman_cena_read_wo_shadow(&s->sys,
                QBMAN_CENA_SWP_DQRR(s->dqrr.next_idx));
        rte_prefetch0(p);
}
1996
1997 /* NULL return if there are no unconsumed DQRR entries. Returns a DQRR entry
1998  * only once, so repeated calls can return a sequence of DQRR entries, without
1999  * requiring they be consumed immediately or in any particular order.
2000  */
2001 const struct qbman_result *qbman_swp_dqrr_next(struct qbman_swp *s)
2002 {
2003         if (!s->stash_off)
2004                 return qbman_swp_dqrr_next_ptr(s);
2005         else
2006                 return qbman_swp_dqrr_next_cinh_direct(s);
2007 }
2008
/*
 * Poll DQRR through the cache-enabled mapping. Returns the next unconsumed
 * entry, or NULL when nothing new is present. Each entry is returned
 * exactly once. Also releases the in-flight pull slot when the entry is
 * the final response to a volatile dequeue.
 */
const struct qbman_result *qbman_swp_dqrr_next_direct(struct qbman_swp *s)
{
        uint32_t verb;
        uint32_t response_verb;
        uint32_t flags;
        const struct qbman_result *p;

        /* Before using valid-bit to detect if something is there, we have to
         * handle the case of the DQRR reset bug...
         */
        if (s->dqrr.reset_bug) {
                /* We pick up new entries by cache-inhibited producer index,
                 * which means that a non-coherent mapping would require us to
                 * invalidate and read *only* once that PI has indicated that
                 * there's an entry here. The first trip around the DQRR ring
                 * will be much less efficient than all subsequent trips around
                 * it...
                 */
                uint8_t pi = qbman_cinh_read(&s->sys, QBMAN_CINH_SWP_DQPI) &
                             QMAN_DQRR_PI_MASK;

                /* there are new entries if pi != next_idx */
                if (pi == s->dqrr.next_idx)
                        return NULL;

                /* if next_idx is/was the last ring index, and 'pi' is
                 * different, we can disable the workaround as all the ring
                 * entries have now been DMA'd to so valid-bit checking is
                 * repaired. Note: this logic needs to be based on next_idx
                 * (which increments one at a time), rather than on pi (which
                 * can burst and wrap-around between our snapshots of it).
                 */
                QBMAN_BUG_ON((s->dqrr.dqrr_size - 1) < 0);
                if (s->dqrr.next_idx == (s->dqrr.dqrr_size - 1u)) {
                        pr_debug("DEBUG: next_idx=%d, pi=%d, clear reset bug\n",
                                 s->dqrr.next_idx, pi);
                        s->dqrr.reset_bug = 0;
                }
                qbman_cena_invalidate_prefetch(&s->sys,
                                        QBMAN_CENA_SWP_DQRR(s->dqrr.next_idx));
        }
        p = qbman_cena_read_wo_shadow(&s->sys,
                        QBMAN_CENA_SWP_DQRR(s->dqrr.next_idx));

        verb = p->dq.verb;

        /* If the valid-bit isn't of the expected polarity, nothing there. Note,
         * in the DQRR reset bug workaround, we shouldn't need to skip these
         * check, because we've already determined that a new entry is available
         * and we've invalidated the cacheline before reading it, so the
         * valid-bit behaviour is repaired and should tell us what we already
         * knew from reading PI.
         */
        if ((verb & QB_VALID_BIT) != s->dqrr.valid_bit)
                return NULL;

        /* There's something there. Move "next_idx" attention to the next ring
         * entry (and prefetch it) before returning what we found.
         */
        s->dqrr.next_idx++;
        if (s->dqrr.next_idx == s->dqrr.dqrr_size) {
                s->dqrr.next_idx = 0;
                s->dqrr.valid_bit ^= QB_VALID_BIT;
        }
        /* If this is the final response to a volatile dequeue command
         * indicate that the vdq is no longer busy
         */
        flags = p->dq.stat;
        response_verb = verb & QBMAN_RESPONSE_VERB_MASK;
        if ((response_verb == QBMAN_RESULT_DQ) &&
            (flags & QBMAN_DQ_STAT_VOLATILE) &&
            (flags & QBMAN_DQ_STAT_EXPIRED))
                atomic_inc(&s->vdq.busy);

        return p;
}
2085
/*
 * Poll DQRR through the cache-inhibited mapping (used when stash_off is
 * set, e.g. on LS1088-class portals). Returns the next unconsumed entry,
 * or NULL when nothing new is present.
 */
const struct qbman_result *qbman_swp_dqrr_next_cinh_direct(struct qbman_swp *s)
{
        uint32_t verb;
        uint32_t response_verb;
        uint32_t flags;
        const struct qbman_result *p;

        /* Before using valid-bit to detect if something is there, we have to
         * handle the case of the DQRR reset bug...
         */
        if (s->dqrr.reset_bug) {
                /* We pick up new entries by cache-inhibited producer index,
                 * which means that a non-coherent mapping would require us to
                 * invalidate and read *only* once that PI has indicated that
                 * there's an entry here. The first trip around the DQRR ring
                 * will be much less efficient than all subsequent trips around
                 * it...
                 */
                uint8_t pi = qbman_cinh_read(&s->sys, QBMAN_CINH_SWP_DQPI) &
                             QMAN_DQRR_PI_MASK;

                /* there are new entries if pi != next_idx */
                if (pi == s->dqrr.next_idx)
                        return NULL;

                /* if next_idx is/was the last ring index, and 'pi' is
                 * different, we can disable the workaround as all the ring
                 * entries have now been DMA'd to so valid-bit checking is
                 * repaired. Note: this logic needs to be based on next_idx
                 * (which increments one at a time), rather than on pi (which
                 * can burst and wrap-around between our snapshots of it).
                 */
                QBMAN_BUG_ON((s->dqrr.dqrr_size - 1) < 0);
                if (s->dqrr.next_idx == (s->dqrr.dqrr_size - 1u)) {
                        pr_debug("DEBUG: next_idx=%d, pi=%d, clear reset bug\n",
                                 s->dqrr.next_idx, pi);
                        s->dqrr.reset_bug = 0;
                }
        }
        p = qbman_cinh_read_wo_shadow(&s->sys,
                        QBMAN_CENA_SWP_DQRR(s->dqrr.next_idx));

        verb = p->dq.verb;

        /* If the valid-bit isn't of the expected polarity, nothing there.
         * The read above went through the cache-inhibited mapping, so no
         * cacheline invalidation is needed (unlike the cache-enabled
         * variant) and the valid-bit reflects what hardware has actually
         * produced.
         */
        if ((verb & QB_VALID_BIT) != s->dqrr.valid_bit)
                return NULL;

        /* There's something there. Move "next_idx" attention to the next ring
         * entry before returning what we found.
         */
        s->dqrr.next_idx++;
        if (s->dqrr.next_idx == s->dqrr.dqrr_size) {
                s->dqrr.next_idx = 0;
                s->dqrr.valid_bit ^= QB_VALID_BIT;
        }
        /* If this is the final response to a volatile dequeue command
         * indicate that the vdq is no longer busy
         */
        flags = p->dq.stat;
        response_verb = verb & QBMAN_RESPONSE_VERB_MASK;
        if ((response_verb == QBMAN_RESULT_DQ) &&
            (flags & QBMAN_DQ_STAT_VOLATILE) &&
            (flags & QBMAN_DQ_STAT_EXPIRED))
                atomic_inc(&s->vdq.busy);

        return p;
}
2160
/*
 * Poll DQRR on a memory-backed portal. No DQRR reset-bug handling or
 * cacheline invalidation is required on this path. Returns the next
 * unconsumed entry, or NULL when nothing new is present.
 */
const struct qbman_result *qbman_swp_dqrr_next_mem_back(struct qbman_swp *s)
{
        uint32_t verb;
        uint32_t response_verb;
        uint32_t flags;
        const struct qbman_result *p;

        p = qbman_cena_read_wo_shadow(&s->sys,
                        QBMAN_CENA_SWP_DQRR_MEM(s->dqrr.next_idx));

        verb = p->dq.verb;

        /* If the valid-bit isn't of the expected polarity, nothing there */
        if ((verb & QB_VALID_BIT) != s->dqrr.valid_bit)
                return NULL;

        /* There's something there. Move "next_idx" attention to the next ring
         * entry before returning what we found.
         */
        s->dqrr.next_idx++;
        if (s->dqrr.next_idx == s->dqrr.dqrr_size) {
                s->dqrr.next_idx = 0;
                s->dqrr.valid_bit ^= QB_VALID_BIT;
        }
        /* If this is the final response to a volatile dequeue command
         * indicate that the vdq is no longer busy
         */
        flags = p->dq.stat;
        response_verb = verb & QBMAN_RESPONSE_VERB_MASK;
        if ((response_verb == QBMAN_RESULT_DQ)
                        && (flags & QBMAN_DQ_STAT_VOLATILE)
                        && (flags & QBMAN_DQ_STAT_EXPIRED))
                atomic_inc(&s->vdq.busy);
        return p;
}
2202
/* Consume DQRR entries previously returned from qbman_swp_dqrr_next().
 * The entry's ring index (recovered from its address) is written to the
 * DCAP register so hardware can reuse the slot.
 */
void qbman_swp_dqrr_consume(struct qbman_swp *s,
                            const struct qbman_result *dq)
{
        qbman_cinh_write(&s->sys,
                        QBMAN_CINH_SWP_DCAP, QBMAN_IDX_FROM_DQRR(dq));
}
2210
/* Consume a DQRR entry by explicit ring index rather than by pointer;
 * otherwise identical to qbman_swp_dqrr_consume().
 */
void qbman_swp_dqrr_idx_consume(struct qbman_swp *s,
                            uint8_t dqrr_index)
{
        qbman_cinh_write(&s->sys, QBMAN_CINH_SWP_DCAP, dqrr_index);
}
2217
2218 /*********************************/
2219 /* Polling user-provided storage */
2220 /*********************************/
2221
2222 int qbman_result_has_new_result(struct qbman_swp *s,
2223                                 struct qbman_result *dq)
2224 {
2225         if (dq->dq.tok == 0)
2226                 return 0;
2227
2228         /*
2229          * Set token to be 0 so we will detect change back to 1
2230          * next time the looping is traversed. Const is cast away here
2231          * as we want users to treat the dequeue responses as read only.
2232          */
2233         ((struct qbman_result *)dq)->dq.tok = 0;
2234
2235         /*
2236          * VDQCR "no longer busy" hook - not quite the same as DQRR, because
2237          * the fact "VDQCR" shows busy doesn't mean that we hold the result
2238          * that makes it available. Eg. we may be looking at our 10th dequeue
2239          * result, having released VDQCR after the 1st result and it is now
2240          * busy due to some other command!
2241          */
2242         if (s->vdq.storage == dq) {
2243                 s->vdq.storage = NULL;
2244                 atomic_inc(&s->vdq.busy);
2245         }
2246
2247         return 1;
2248 }
2249
2250 int qbman_check_new_result(struct qbman_result *dq)
2251 {
2252         if (dq->dq.tok == 0)
2253                 return 0;
2254
2255         /*
2256          * Set token to be 0 so we will detect change back to 1
2257          * next time the looping is traversed. Const is cast away here
2258          * as we want users to treat the dequeue responses as read only.
2259          */
2260         ((struct qbman_result *)dq)->dq.tok = 0;
2261
2262         return 1;
2263 }
2264
2265 int qbman_check_command_complete(struct qbman_result *dq)
2266 {
2267         struct qbman_swp *s;
2268
2269         if (dq->dq.tok == 0)
2270                 return 0;
2271
2272         s = portal_idx_map[dq->dq.tok - 1];
2273         /*
2274          * VDQCR "no longer busy" hook - not quite the same as DQRR, because
2275          * the fact "VDQCR" shows busy doesn't mean that we hold the result
2276          * that makes it available. Eg. we may be looking at our 10th dequeue
2277          * result, having released VDQCR after the 1st result and it is now
2278          * busy due to some other command!
2279          */
2280         if (s->vdq.storage == dq) {
2281                 s->vdq.storage = NULL;
2282                 atomic_inc(&s->vdq.busy);
2283         }
2284
2285         return 1;
2286 }
2287
2288 /********************************/
2289 /* Categorising qbman results   */
2290 /********************************/
2291
2292 static inline int __qbman_result_is_x(const struct qbman_result *dq,
2293                                       uint8_t x)
2294 {
2295         uint8_t response_verb = dq->dq.verb & QBMAN_RESPONSE_VERB_MASK;
2296
2297         return (response_verb == x);
2298 }
2299
/* True if the result is a frame dequeue response. */
int qbman_result_is_DQ(const struct qbman_result *dq)
{
        return __qbman_result_is_x(dq, QBMAN_RESULT_DQ);
}
2304
/* True if the result is a FQ data-availability notification. */
int qbman_result_is_FQDAN(const struct qbman_result *dq)
{
        return __qbman_result_is_x(dq, QBMAN_RESULT_FQDAN);
}
2309
/* True if the result is a channel data-availability notification. */
int qbman_result_is_CDAN(const struct qbman_result *dq)
{
        return __qbman_result_is_x(dq, QBMAN_RESULT_CDAN);
}
2314
/* True if the result is a congestion state-change notification
 * (either the memory-written or WQ-delivered form).
 */
int qbman_result_is_CSCN(const struct qbman_result *dq)
{
        return __qbman_result_is_x(dq, QBMAN_RESULT_CSCN_MEM) ||
                __qbman_result_is_x(dq, QBMAN_RESULT_CSCN_WQ);
}
2320
/* True if the result is a buffer-pool state-change notification. */
int qbman_result_is_BPSCN(const struct qbman_result *dq)
{
        return __qbman_result_is_x(dq, QBMAN_RESULT_BPSCN);
}
2325
/* True if the result is a congestion-group count update. */
int qbman_result_is_CGCU(const struct qbman_result *dq)
{
        return __qbman_result_is_x(dq, QBMAN_RESULT_CGCU);
}
2330
/* True if the result is a FQ retirement notification. */
int qbman_result_is_FQRN(const struct qbman_result *dq)
{
        return __qbman_result_is_x(dq, QBMAN_RESULT_FQRN);
}
2335
/* True if the result is a FQ retirement-immediate notification. */
int qbman_result_is_FQRNI(const struct qbman_result *dq)
{
        return __qbman_result_is_x(dq, QBMAN_RESULT_FQRNI);
}
2340
/* True if the result is a FQ park notification. */
int qbman_result_is_FQPN(const struct qbman_result *dq)
{
        return __qbman_result_is_x(dq, QBMAN_RESULT_FQPN);
}
2345
2346 /*********************************/
2347 /* Parsing frame dequeue results */
2348 /*********************************/
2349
2350 /* These APIs assume qbman_result_is_DQ() is TRUE */
2351
/* Return the status/flags byte (dq.stat) of a dequeue entry */
uint8_t qbman_result_DQ_flags(const struct qbman_result *dq)
{
	return dq->dq.stat;
}

/* Return the sequence number (dq.seqnum) of a dequeue entry */
uint16_t qbman_result_DQ_seqnum(const struct qbman_result *dq)
{
	return dq->dq.seqnum;
}

/* Return the order-restoration point ID (dq.oprid) of a dequeue entry */
uint16_t qbman_result_DQ_odpid(const struct qbman_result *dq)
{
	return dq->dq.oprid;
}

/* Return the frame queue ID the frame was dequeued from */
uint32_t qbman_result_DQ_fqid(const struct qbman_result *dq)
{
	return dq->dq.fqid;
}

/* Return the FQ byte count reported in the dequeue entry */
uint32_t qbman_result_DQ_byte_count(const struct qbman_result *dq)
{
	return dq->dq.fq_byte_cnt;
}

/* Return the FQ frame count reported in the dequeue entry */
uint32_t qbman_result_DQ_frame_count(const struct qbman_result *dq)
{
	return dq->dq.fq_frm_cnt;
}

/* Return the 64-bit FQ context (dq.fqd_ctx) of the dequeue entry */
uint64_t qbman_result_DQ_fqd_ctx(const struct qbman_result *dq)
{
	return dq->dq.fqd_ctx;
}

/* Return a pointer to the frame descriptor embedded in the entry */
const struct qbman_fd *qbman_result_DQ_fd(const struct qbman_result *dq)
{
	return (const struct qbman_fd *)&dq->dq.fd[0];
}
2391
2392 /**************************************/
2393 /* Parsing state-change notifications */
2394 /**************************************/
/* Return the state byte of a state-change notification */
uint8_t qbman_result_SCN_state(const struct qbman_result *scn)
{
	return scn->scn.state;
}

/* Return the combined resource-ID/token word of the notification */
uint32_t qbman_result_SCN_rid(const struct qbman_result *scn)
{
	return scn->scn.rid_tok;
}

/* Return the 64-bit context associated with the notification */
uint64_t qbman_result_SCN_ctx(const struct qbman_result *scn)
{
	return scn->scn.ctx;
}
2409
2410 /*****************/
2411 /* Parsing BPSCN */
2412 /*****************/
2413 uint16_t qbman_result_bpscn_bpid(const struct qbman_result *scn)
2414 {
2415         return (uint16_t)qbman_result_SCN_rid(scn) & 0x3FFF;
2416 }
2417
2418 int qbman_result_bpscn_has_free_bufs(const struct qbman_result *scn)
2419 {
2420         return !(int)(qbman_result_SCN_state(scn) & 0x1);
2421 }
2422
2423 int qbman_result_bpscn_is_depleted(const struct qbman_result *scn)
2424 {
2425         return (int)(qbman_result_SCN_state(scn) & 0x2);
2426 }
2427
2428 int qbman_result_bpscn_is_surplus(const struct qbman_result *scn)
2429 {
2430         return (int)(qbman_result_SCN_state(scn) & 0x4);
2431 }
2432
2433 uint64_t qbman_result_bpscn_ctx(const struct qbman_result *scn)
2434 {
2435         return qbman_result_SCN_ctx(scn);
2436 }
2437
2438 /*****************/
2439 /* Parsing CGCU  */
2440 /*****************/
2441 uint16_t qbman_result_cgcu_cgid(const struct qbman_result *scn)
2442 {
2443         return (uint16_t)qbman_result_SCN_rid(scn) & 0xFFFF;
2444 }
2445
2446 uint64_t qbman_result_cgcu_icnt(const struct qbman_result *scn)
2447 {
2448         return qbman_result_SCN_ctx(scn);
2449 }
2450
2451 /********************/
2452 /* Parsing EQ RESP  */
2453 /********************/
2454 struct qbman_fd *qbman_result_eqresp_fd(struct qbman_result *eqresp)
2455 {
2456         return (struct qbman_fd *)&eqresp->eq_resp.fd[0];
2457 }
2458
2459 void qbman_result_eqresp_set_rspid(struct qbman_result *eqresp, uint8_t val)
2460 {
2461         eqresp->eq_resp.rspid = val;
2462 }
2463
2464 uint8_t qbman_result_eqresp_rspid(struct qbman_result *eqresp)
2465 {
2466         return eqresp->eq_resp.rspid;
2467 }
2468
2469 uint8_t qbman_result_eqresp_rc(struct qbman_result *eqresp)
2470 {
2471         if (eqresp->eq_resp.rc == 0xE)
2472                 return 0;
2473         else
2474                 return -1;
2475 }
2476
2477 /******************/
2478 /* Buffer release */
2479 /******************/
2480 #define QB_BR_RC_VALID_SHIFT  5
2481 #define QB_BR_RCDI_SHIFT      6
2482
2483 void qbman_release_desc_clear(struct qbman_release_desc *d)
2484 {
2485         memset(d, 0, sizeof(*d));
2486         d->br.verb = 1 << QB_BR_RC_VALID_SHIFT;
2487 }
2488
2489 void qbman_release_desc_set_bpid(struct qbman_release_desc *d, uint16_t bpid)
2490 {
2491         d->br.bpid = bpid;
2492 }
2493
2494 void qbman_release_desc_set_rcdi(struct qbman_release_desc *d, int enable)
2495 {
2496         if (enable)
2497                 d->br.verb |= 1 << QB_BR_RCDI_SHIFT;
2498         else
2499                 d->br.verb &= ~(1 << QB_BR_RCDI_SHIFT);
2500 }
2501
#define RAR_IDX(rar)     ((rar) & 0x7)
#define RAR_VB(rar)      ((rar) & 0x80)
#define RAR_SUCCESS(rar) ((rar) & 0x100)

/* Release buffers to a pool through the CENA-mapped RCR ring.
 * Reads the release-array register (RAR) for a ring slot, copies the
 * buffer pointers into the command, and publishes it by writing the
 * verb byte last.
 * Returns 0 on success, -EBUSY when no RCR slot is available.
 */
static int qbman_swp_release_direct(struct qbman_swp *s,
				    const struct qbman_release_desc *d,
				    const uint64_t *buffers,
				    unsigned int num_buffers)
{
	uint32_t *p;
	const uint32_t *cl = qb_cl(d);
	uint32_t rar = qbman_cinh_read(&s->sys, QBMAN_CINH_SWP_RAR);

	pr_debug("RAR=%08x\n", rar);
	if (!RAR_SUCCESS(rar))
		return -EBUSY;

	/* The command carries at most 7 buffer pointers */
	QBMAN_BUG_ON(!num_buffers || (num_buffers > 7));

	/* Start the release command */
	p = qbman_cena_write_start_wo_shadow(&s->sys,
				     QBMAN_CENA_SWP_RCR(RAR_IDX(rar)));

	/* Copy the caller's buffer pointers to the command */
	u64_to_le32_copy(&p[2], buffers, num_buffers);

	/* Set the verb byte, have to substitute in the valid-bit and the
	 * number of buffers.  The lwsync() barrier makes the buffer
	 * pointers visible before the verb write marks the command valid.
	 */
	lwsync();
	p[0] = cl[0] | RAR_VB(rar) | num_buffers;
	qbman_cena_write_complete_wo_shadow(&s->sys,
				    QBMAN_CENA_SWP_RCR(RAR_IDX(rar)));

	return 0;
}
2538
/* Cache-inhibited variant of the buffer release: the command is written
 * through the CINH mapping (byte-by-byte copy, no CENA write-complete
 * step).  Used when the portal is accessed without stashing.
 * Returns 0 on success, -EBUSY when no RCR slot is available.
 */
static int qbman_swp_release_cinh_direct(struct qbman_swp *s,
				    const struct qbman_release_desc *d,
				    const uint64_t *buffers,
				    unsigned int num_buffers)
{
	uint32_t *p;
	const uint32_t *cl = qb_cl(d);
	uint32_t rar = qbman_cinh_read(&s->sys, QBMAN_CINH_SWP_RAR);

	pr_debug("RAR=%08x\n", rar);
	if (!RAR_SUCCESS(rar))
		return -EBUSY;

	/* The command carries at most 7 buffer pointers */
	QBMAN_BUG_ON(!num_buffers || (num_buffers > 7));

	/* Start the release command */
	p = qbman_cinh_write_start_wo_shadow(&s->sys,
				     QBMAN_CENA_SWP_RCR(RAR_IDX(rar)));

	/* Copy the caller's buffer pointers to the command */
	memcpy_byte_by_byte(&p[2], buffers, num_buffers * sizeof(uint64_t));

	/* Set the verb byte, have to substitute in the valid-bit and the
	 * number of buffers.  lwsync() orders the pointer writes before
	 * the verb write that makes the command valid.
	 */
	lwsync();
	p[0] = cl[0] | RAR_VB(rar) | num_buffers;

	return 0;
}
2569
/* Memory-backed variant of the buffer release (newer QBMan revisions):
 * the command is built in the memory-backed RCR slot and then kicked
 * off via a cache-inhibited write of QMAN_RT_MODE to the slot's
 * ring-trigger register.
 * Returns 0 on success, -EBUSY when no RCR slot is available.
 */
static int qbman_swp_release_mem_back(struct qbman_swp *s,
				      const struct qbman_release_desc *d,
				      const uint64_t *buffers,
				      unsigned int num_buffers)
{
	uint32_t *p;
	const uint32_t *cl = qb_cl(d);
	uint32_t rar = qbman_cinh_read(&s->sys, QBMAN_CINH_SWP_RAR);

	pr_debug("RAR=%08x\n", rar);
	if (!RAR_SUCCESS(rar))
		return -EBUSY;

	/* The command carries at most 7 buffer pointers */
	QBMAN_BUG_ON(!num_buffers || (num_buffers > 7));

	/* Start the release command */
	p = qbman_cena_write_start_wo_shadow(&s->sys,
		QBMAN_CENA_SWP_RCR_MEM(RAR_IDX(rar)));

	/* Copy the caller's buffer pointers to the command */
	u64_to_le32_copy(&p[2], buffers, num_buffers);

	/* Set the verb byte, have to substitute in the valid-bit and the
	 * number of buffers.  lwsync() here orders the command memory
	 * writes before the ring-trigger write below submits it.
	 */
	p[0] = cl[0] | RAR_VB(rar) | num_buffers;
	lwsync();
	qbman_cinh_write(&s->sys, QBMAN_CINH_SWP_RCR_AM_RT +
		RAR_IDX(rar) * 4, QMAN_RT_MODE);

	return 0;
}
2602
2603 int qbman_swp_release(struct qbman_swp *s,
2604                              const struct qbman_release_desc *d,
2605                              const uint64_t *buffers,
2606                              unsigned int num_buffers)
2607 {
2608         if (!s->stash_off)
2609                 return qbman_swp_release_ptr(s, d, buffers, num_buffers);
2610         else
2611                 return qbman_swp_release_cinh_direct(s, d, buffers,
2612                                                 num_buffers);
2613 }
2614
2615 /*******************/
2616 /* Buffer acquires */
2617 /*******************/
/* Management-command descriptor for acquiring buffers from a pool */
struct qbman_acquire_desc {
	uint8_t verb;           /* command verb */
	uint8_t reserved;
	uint16_t bpid;          /* buffer pool to acquire from */
	uint8_t num;            /* number of buffers requested (1..7) */
	uint8_t reserved2[59];
};

/* Response layout for the acquire management command */
struct qbman_acquire_rslt {
	uint8_t verb;           /* echoed command verb */
	uint8_t rslt;           /* result code (QBMAN_MC_RSLT_OK on success) */
	uint16_t reserved;
	uint8_t num;            /* number of buffers actually acquired */
	uint8_t reserved2[3];
	uint64_t buf[7];        /* acquired buffer addresses */
};
2634
2635 static int qbman_swp_acquire_direct(struct qbman_swp *s, uint16_t bpid,
2636                                 uint64_t *buffers, unsigned int num_buffers)
2637 {
2638         struct qbman_acquire_desc *p;
2639         struct qbman_acquire_rslt *r;
2640
2641         if (!num_buffers || (num_buffers > 7))
2642                 return -EINVAL;
2643
2644         /* Start the management command */
2645         p = qbman_swp_mc_start(s);
2646
2647         if (!p)
2648                 return -EBUSY;
2649
2650         /* Encode the caller-provided attributes */
2651         p->bpid = bpid;
2652         p->num = num_buffers;
2653
2654         /* Complete the management command */
2655         r = qbman_swp_mc_complete(s, p, QBMAN_MC_ACQUIRE);
2656         if (!r) {
2657                 pr_err("qbman: acquire from BPID %d failed, no response\n",
2658                        bpid);
2659                 return -EIO;
2660         }
2661
2662         /* Decode the outcome */
2663         QBMAN_BUG_ON((r->verb & QBMAN_RESPONSE_VERB_MASK) != QBMAN_MC_ACQUIRE);
2664
2665         /* Determine success or failure */
2666         if (r->rslt != QBMAN_MC_RSLT_OK) {
2667                 pr_err("Acquire buffers from BPID 0x%x failed, code=0x%02x\n",
2668                        bpid, r->rslt);
2669                 return -EIO;
2670         }
2671
2672         QBMAN_BUG_ON(r->num > num_buffers);
2673
2674         /* Copy the acquired buffers to the caller's array */
2675         u64_from_le32_copy(buffers, &r->buf[0], r->num);
2676
2677         return (int)r->num;
2678 }
2679
/* Cache-inhibited variant of the buffer acquire: identical flow to
 * qbman_swp_acquire_direct() but completes the management command via
 * the CINH path (qbman_swp_mc_complete_cinh).
 * Returns the number of buffers acquired, -EINVAL for a bad count,
 * -EBUSY if the MC interface is busy, or -EIO on a missing/failed
 * response.
 */
static int qbman_swp_acquire_cinh_direct(struct qbman_swp *s, uint16_t bpid,
			uint64_t *buffers, unsigned int num_buffers)
{
	struct qbman_acquire_desc *p;
	struct qbman_acquire_rslt *r;

	if (!num_buffers || (num_buffers > 7))
		return -EINVAL;

	/* Start the management command */
	p = qbman_swp_mc_start(s);

	if (!p)
		return -EBUSY;

	/* Encode the caller-provided attributes */
	p->bpid = bpid;
	p->num = num_buffers;

	/* Complete the management command */
	r = qbman_swp_mc_complete_cinh(s, p, QBMAN_MC_ACQUIRE);
	if (!r) {
		pr_err("qbman: acquire from BPID %d failed, no response\n",
		       bpid);
		return -EIO;
	}

	/* Decode the outcome */
	QBMAN_BUG_ON((r->verb & QBMAN_RESPONSE_VERB_MASK) != QBMAN_MC_ACQUIRE);

	/* Determine success or failure */
	if (r->rslt != QBMAN_MC_RSLT_OK) {
		pr_err("Acquire buffers from BPID 0x%x failed, code=0x%02x\n",
		       bpid, r->rslt);
		return -EIO;
	}

	/* Hardware must never hand back more buffers than requested */
	QBMAN_BUG_ON(r->num > num_buffers);

	/* Copy the acquired buffers to the caller's array */
	u64_from_le32_copy(buffers, &r->buf[0], r->num);

	return (int)r->num;
}
2724
2725 int qbman_swp_acquire(struct qbman_swp *s, uint16_t bpid, uint64_t *buffers,
2726                       unsigned int num_buffers)
2727 {
2728         if (!s->stash_off)
2729                 return qbman_swp_acquire_direct(s, bpid, buffers, num_buffers);
2730         else
2731                 return qbman_swp_acquire_cinh_direct(s, bpid, buffers,
2732                                         num_buffers);
2733 }
2734
2735 /*****************/
2736 /* FQ management */
2737 /*****************/
/* Management-command descriptor for altering frame-queue state
 * (schedule/force/XON/XOFF).
 */
struct qbman_alt_fq_state_desc {
	uint8_t verb;           /* command verb */
	uint8_t reserved[3];
	uint32_t fqid;          /* target frame queue ID (24 bits used) */
	uint8_t reserved2[56];
};

/* Response layout for the alt-FQ-state management command */
struct qbman_alt_fq_state_rslt {
	uint8_t verb;           /* echoed command verb */
	uint8_t rslt;           /* result code (QBMAN_MC_RSLT_OK on success) */
	uint8_t reserved[62];
};

/* FQIDs are 24-bit; mask off anything above before encoding */
#define ALT_FQ_FQID_MASK 0x00FFFFFF
2752
2753 static int qbman_swp_alt_fq_state(struct qbman_swp *s, uint32_t fqid,
2754                                   uint8_t alt_fq_verb)
2755 {
2756         struct qbman_alt_fq_state_desc *p;
2757         struct qbman_alt_fq_state_rslt *r;
2758
2759         /* Start the management command */
2760         p = qbman_swp_mc_start(s);
2761         if (!p)
2762                 return -EBUSY;
2763
2764         p->fqid = fqid & ALT_FQ_FQID_MASK;
2765
2766         /* Complete the management command */
2767         r = qbman_swp_mc_complete(s, p, alt_fq_verb);
2768         if (!r) {
2769                 pr_err("qbman: mgmt cmd failed, no response (verb=0x%x)\n",
2770                        alt_fq_verb);
2771                 return -EIO;
2772         }
2773
2774         /* Decode the outcome */
2775         QBMAN_BUG_ON((r->verb & QBMAN_RESPONSE_VERB_MASK) != alt_fq_verb);
2776
2777         /* Determine success or failure */
2778         if (r->rslt != QBMAN_MC_RSLT_OK) {
2779                 pr_err("ALT FQID %d failed: verb = 0x%08x, code = 0x%02x\n",
2780                        fqid, alt_fq_verb, r->rslt);
2781                 return -EIO;
2782         }
2783
2784         return 0;
2785 }
2786
/* Move the FQ to the schedule state */
int qbman_swp_fq_schedule(struct qbman_swp *s, uint32_t fqid)
{
	return qbman_swp_alt_fq_state(s, fqid, QBMAN_FQ_SCHEDULE);
}

/* Force the FQ state transition */
int qbman_swp_fq_force(struct qbman_swp *s, uint32_t fqid)
{
	return qbman_swp_alt_fq_state(s, fqid, QBMAN_FQ_FORCE);
}

/* Take the FQ out of the XOFF (flow-off) state */
int qbman_swp_fq_xon(struct qbman_swp *s, uint32_t fqid)
{
	return qbman_swp_alt_fq_state(s, fqid, QBMAN_FQ_XON);
}

/* Put the FQ into the XOFF (flow-off) state */
int qbman_swp_fq_xoff(struct qbman_swp *s, uint32_t fqid)
{
	return qbman_swp_alt_fq_state(s, fqid, QBMAN_FQ_XOFF);
}
2806
2807 /**********************/
2808 /* Channel management */
2809 /**********************/
2810
/* Management-command descriptor for configuring channel data
 * availability notifications (CDAN) on a WQ channel.
 */
struct qbman_cdan_ctrl_desc {
	uint8_t verb;           /* command verb */
	uint8_t reserved;
	uint16_t ch;            /* target channel ID */
	uint8_t we;             /* write-enable mask (CODE_CDAN_WE_*) */
	uint8_t ctrl;           /* CDAN enable flag (0/1) */
	uint16_t reserved2;
	uint64_t cdan_ctx;      /* context delivered with the CDAN */
	uint8_t reserved3[48];

};

/* Response layout for the WQ-channel configure command */
struct qbman_cdan_ctrl_rslt {
	uint8_t verb;           /* echoed command verb */
	uint8_t rslt;           /* result code (QBMAN_MC_RSLT_OK on success) */
	uint16_t ch;            /* echoed channel ID */
	uint8_t reserved[60];
};

/* Hide "ICD" for now as we don't use it, don't set it, and don't test it, so it
 * would be irresponsible to expose it.
 */
#define CODE_CDAN_WE_EN    0x1
#define CODE_CDAN_WE_CTX   0x4
2835
2836 static int qbman_swp_CDAN_set(struct qbman_swp *s, uint16_t channelid,
2837                               uint8_t we_mask, uint8_t cdan_en,
2838                               uint64_t ctx)
2839 {
2840         struct qbman_cdan_ctrl_desc *p;
2841         struct qbman_cdan_ctrl_rslt *r;
2842
2843         /* Start the management command */
2844         p = qbman_swp_mc_start(s);
2845         if (!p)
2846                 return -EBUSY;
2847
2848         /* Encode the caller-provided attributes */
2849         p->ch = channelid;
2850         p->we = we_mask;
2851         if (cdan_en)
2852                 p->ctrl = 1;
2853         else
2854                 p->ctrl = 0;
2855         p->cdan_ctx = ctx;
2856
2857         /* Complete the management command */
2858         r = qbman_swp_mc_complete(s, p, QBMAN_WQCHAN_CONFIGURE);
2859         if (!r) {
2860                 pr_err("qbman: wqchan config failed, no response\n");
2861                 return -EIO;
2862         }
2863
2864         /* Decode the outcome */
2865         QBMAN_BUG_ON((r->verb & QBMAN_RESPONSE_VERB_MASK)
2866                      != QBMAN_WQCHAN_CONFIGURE);
2867
2868         /* Determine success or failure */
2869         if (r->rslt != QBMAN_MC_RSLT_OK) {
2870                 pr_err("CDAN cQID %d failed: code = 0x%02x\n",
2871                        channelid, r->rslt);
2872                 return -EIO;
2873         }
2874
2875         return 0;
2876 }
2877
/* Update only the CDAN context for a channel (enable state untouched) */
int qbman_swp_CDAN_set_context(struct qbman_swp *s, uint16_t channelid,
			       uint64_t ctx)
{
	return qbman_swp_CDAN_set(s, channelid,
				  CODE_CDAN_WE_CTX,
				  0, ctx);
}

/* Enable CDAN on a channel (context untouched) */
int qbman_swp_CDAN_enable(struct qbman_swp *s, uint16_t channelid)
{
	return qbman_swp_CDAN_set(s, channelid,
				  CODE_CDAN_WE_EN,
				  1, 0);
}

/* Disable CDAN on a channel (context untouched) */
int qbman_swp_CDAN_disable(struct qbman_swp *s, uint16_t channelid)
{
	return qbman_swp_CDAN_set(s, channelid,
				  CODE_CDAN_WE_EN,
				  0, 0);
}

/* Set the CDAN context and enable notifications in one command */
int qbman_swp_CDAN_set_context_enable(struct qbman_swp *s, uint16_t channelid,
				      uint64_t ctx)
{
	return qbman_swp_CDAN_set(s, channelid,
				  CODE_CDAN_WE_EN | CODE_CDAN_WE_CTX,
				  1, ctx);
}
2907
/* Recover the DQRR ring index from a DQRR entry pointer (entries are
 * 64 B apart; see QBMAN_IDX_FROM_DQRR at the top of this file).
 */
uint8_t qbman_get_dqrr_idx(const struct qbman_result *dqrr)
{
	return QBMAN_IDX_FROM_DQRR(dqrr);
}
2912
2913 struct qbman_result *qbman_get_dqrr_from_idx(struct qbman_swp *s, uint8_t idx)
2914 {
2915         struct qbman_result *dq;
2916
2917         dq = qbman_cena_read(&s->sys, QBMAN_CENA_SWP_DQRR(idx));
2918         return dq;
2919 }