/* drivers/bus/fslmc/qbman/qbman_portal.c */
/* SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (C) 2014-2016 Freescale Semiconductor, Inc.
 * Copyright 2018-2019 NXP
 *
 */

#include "qbman_sys.h"
#include "qbman_portal.h"

/* QBMan portal management command codes */
#define QBMAN_MC_ACQUIRE       0x30
#define QBMAN_WQCHAN_CONFIGURE 0x46

/* Reverse mapping of QBMAN_CENA_SWP_DQRR() */
#define QBMAN_IDX_FROM_DQRR(p) (((unsigned long)p & 0x1ff) >> 6)
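/* (Each DQRR entry is 64 bytes within a 512-byte ring window, so bits 8:6
 * of an entry's address recover its ring index.)
 */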

/* QBMan FQ management command codes */
#define QBMAN_FQ_SCHEDULE       0x48
#define QBMAN_FQ_FORCE          0x49
#define QBMAN_FQ_XON            0x4d
#define QBMAN_FQ_XOFF           0x4e

/*******************************/
/* Pre-defined attribute codes */
/*******************************/

#define QBMAN_RESPONSE_VERB_MASK   0x7f

/*************************/
/* SDQCR attribute codes */
/*************************/
#define QB_SDQCR_FC_SHIFT   29
#define QB_SDQCR_FC_MASK    0x1
#define QB_SDQCR_DCT_SHIFT  24
#define QB_SDQCR_DCT_MASK   0x3
#define QB_SDQCR_TOK_SHIFT  16
#define QB_SDQCR_TOK_MASK   0xff
#define QB_SDQCR_SRC_SHIFT  0
#define QB_SDQCR_SRC_MASK   0xffff

/* opaque token for static dequeues */
#define QMAN_SDQCR_TOKEN    0xbb

enum qbman_sdqcr_dct {
        qbman_sdqcr_dct_null = 0,
        qbman_sdqcr_dct_prio_ics,
        qbman_sdqcr_dct_active_ics,
        qbman_sdqcr_dct_active
};

enum qbman_sdqcr_fc {
        qbman_sdqcr_fc_one = 0,
        qbman_sdqcr_fc_up_to_3 = 1
};

/* We need to keep track of which SWP triggered a pull command, so we
 * keep an array of portal pointers indexed by portal ID and use the
 * token field to find the proper portal.
 */
#define MAX_QBMAN_PORTALS  64
static struct qbman_swp *portal_idx_map[MAX_QBMAN_PORTALS];

uint32_t qman_version;

/* Internal function declarations */
static int
qbman_swp_enqueue_array_mode_direct(struct qbman_swp *s,
                const struct qbman_eq_desc *d,
                const struct qbman_fd *fd);
static int
qbman_swp_enqueue_array_mode_mem_back(struct qbman_swp *s,
                const struct qbman_eq_desc *d,
                const struct qbman_fd *fd);

static int
qbman_swp_enqueue_ring_mode_direct(struct qbman_swp *s,
                const struct qbman_eq_desc *d,
                const struct qbman_fd *fd);
static int
qbman_swp_enqueue_ring_mode_cinh_direct(struct qbman_swp *s,
                const struct qbman_eq_desc *d,
                const struct qbman_fd *fd);
static int
qbman_swp_enqueue_ring_mode_mem_back(struct qbman_swp *s,
                const struct qbman_eq_desc *d,
                const struct qbman_fd *fd);

static int
qbman_swp_enqueue_multiple_direct(struct qbman_swp *s,
                const struct qbman_eq_desc *d,
                const struct qbman_fd *fd,
                uint32_t *flags,
                int num_frames);
static int
qbman_swp_enqueue_multiple_cinh_direct(struct qbman_swp *s,
                const struct qbman_eq_desc *d,
                const struct qbman_fd *fd,
                uint32_t *flags,
                int num_frames);
static int
qbman_swp_enqueue_multiple_mem_back(struct qbman_swp *s,
                const struct qbman_eq_desc *d,
                const struct qbman_fd *fd,
                uint32_t *flags,
                int num_frames);

static int
qbman_swp_enqueue_multiple_fd_direct(struct qbman_swp *s,
                const struct qbman_eq_desc *d,
                struct qbman_fd **fd,
                uint32_t *flags,
                int num_frames);
static int
qbman_swp_enqueue_multiple_fd_cinh_direct(struct qbman_swp *s,
                const struct qbman_eq_desc *d,
                struct qbman_fd **fd,
                uint32_t *flags,
                int num_frames);
static int
qbman_swp_enqueue_multiple_fd_mem_back(struct qbman_swp *s,
                const struct qbman_eq_desc *d,
                struct qbman_fd **fd,
                uint32_t *flags,
                int num_frames);

static int
qbman_swp_enqueue_multiple_desc_direct(struct qbman_swp *s,
                const struct qbman_eq_desc *d,
                const struct qbman_fd *fd,
                int num_frames);
static int
qbman_swp_enqueue_multiple_desc_cinh_direct(struct qbman_swp *s,
                const struct qbman_eq_desc *d,
                const struct qbman_fd *fd,
                int num_frames);
static int
qbman_swp_enqueue_multiple_desc_mem_back(struct qbman_swp *s,
                const struct qbman_eq_desc *d,
                const struct qbman_fd *fd,
                int num_frames);

static int
qbman_swp_pull_direct(struct qbman_swp *s, struct qbman_pull_desc *d);
static int
qbman_swp_pull_mem_back(struct qbman_swp *s, struct qbman_pull_desc *d);

const struct qbman_result *qbman_swp_dqrr_next_direct(struct qbman_swp *s);
const struct qbman_result *qbman_swp_dqrr_next_mem_back(struct qbman_swp *s);

static int
qbman_swp_release_direct(struct qbman_swp *s,
                const struct qbman_release_desc *d,
                const uint64_t *buffers, unsigned int num_buffers);
static int
qbman_swp_release_mem_back(struct qbman_swp *s,
                const struct qbman_release_desc *d,
                const uint64_t *buffers, unsigned int num_buffers);

/* Function pointers */
static int (*qbman_swp_enqueue_array_mode_ptr)(struct qbman_swp *s,
                const struct qbman_eq_desc *d,
                const struct qbman_fd *fd)
        = qbman_swp_enqueue_array_mode_direct;

static int (*qbman_swp_enqueue_ring_mode_ptr)(struct qbman_swp *s,
                const struct qbman_eq_desc *d,
                const struct qbman_fd *fd)
        = qbman_swp_enqueue_ring_mode_direct;

static int (*qbman_swp_enqueue_multiple_ptr)(struct qbman_swp *s,
                const struct qbman_eq_desc *d,
                const struct qbman_fd *fd,
                uint32_t *flags,
                int num_frames)
        = qbman_swp_enqueue_multiple_direct;

static int (*qbman_swp_enqueue_multiple_fd_ptr)(struct qbman_swp *s,
                const struct qbman_eq_desc *d,
                struct qbman_fd **fd,
                uint32_t *flags,
                int num_frames)
        = qbman_swp_enqueue_multiple_fd_direct;

static int (*qbman_swp_enqueue_multiple_desc_ptr)(struct qbman_swp *s,
                const struct qbman_eq_desc *d,
                const struct qbman_fd *fd,
                int num_frames)
        = qbman_swp_enqueue_multiple_desc_direct;

static int (*qbman_swp_pull_ptr)(struct qbman_swp *s,
                struct qbman_pull_desc *d)
        = qbman_swp_pull_direct;

const struct qbman_result *(*qbman_swp_dqrr_next_ptr)(struct qbman_swp *s)
                = qbman_swp_dqrr_next_direct;

static int (*qbman_swp_release_ptr)(struct qbman_swp *s,
                        const struct qbman_release_desc *d,
                        const uint64_t *buffers, unsigned int num_buffers)
                        = qbman_swp_release_direct;

/*********************************/
/* Portal constructor/destructor */
/*********************************/

/* Software portals should always be in the power-on state when we initialise,
 * due to the CCSR-based portal reset functionality that MC has.
 *
 * Erk! Turns out that QMan versions prior to 4.1 do not correctly reset DQRR
 * valid-bits, so we need to support a workaround where we don't trust
 * valid-bits when detecting new entries until any stale ring entries have been
 * overwritten at least once. The idea is that we read PI for the first few
 * entries, then switch to valid-bit after that. The trick is to clear the
 * bug-work-around boolean once the PI wraps around the ring for the first time.
 *
 * Note: this still carries a slight additional cost once the decrementer hits
 * zero.
 */
struct qbman_swp *qbman_swp_init(const struct qbman_swp_desc *d)
{
        int ret;
        uint32_t eqcr_pi;
        uint32_t mask_size;
        struct qbman_swp *p = malloc(sizeof(*p));

        if (!p)
                return NULL;

        memset(p, 0, sizeof(struct qbman_swp));

        p->desc = *d;
#ifdef QBMAN_CHECKING
        p->mc.check = swp_mc_can_start;
#endif
        p->mc.valid_bit = QB_VALID_BIT;
        p->sdq |= qbman_sdqcr_dct_prio_ics << QB_SDQCR_DCT_SHIFT;
        p->sdq |= qbman_sdqcr_fc_up_to_3 << QB_SDQCR_FC_SHIFT;
        p->sdq |= QMAN_SDQCR_TOKEN << QB_SDQCR_TOK_SHIFT;
        if ((d->qman_version & QMAN_REV_MASK) >= QMAN_REV_5000
                        && (d->cena_access_mode == qman_cena_fastest_access))
                p->mr.valid_bit = QB_VALID_BIT;

        atomic_set(&p->vdq.busy, 1);
        p->vdq.valid_bit = QB_VALID_BIT;
        p->dqrr.valid_bit = QB_VALID_BIT;
        qman_version = p->desc.qman_version;
        if ((qman_version & QMAN_REV_MASK) < QMAN_REV_4100) {
                p->dqrr.dqrr_size = 4;
                p->dqrr.reset_bug = 1;
        } else {
                p->dqrr.dqrr_size = 8;
                p->dqrr.reset_bug = 0;
        }

        ret = qbman_swp_sys_init(&p->sys, d, p->dqrr.dqrr_size);
        if (ret) {
                free(p);
                pr_err("qbman_swp_sys_init() failed %d\n", ret);
                return NULL;
        }

        /* Verify that the DQRR PI is 0 - if it is not, the portal is not in
         * its default state, which is an error
         */
        if (qbman_cinh_read(&p->sys, QBMAN_CINH_SWP_DQPI) & 0xF) {
                pr_err("qbman DQRR PI is not zero, portal is not clean\n");
                free(p);
                return NULL;
        }

        /* SDQCR needs to be initialized to 0 when no channels are
         * being dequeued from or else the QMan HW will indicate an
         * error.  The values that were calculated above will be
         * applied when dequeues from a specific channel are enabled.
         */
        qbman_cinh_write(&p->sys, QBMAN_CINH_SWP_SDQCR, 0);

        p->eqcr.pi_ring_size = 8;
        if ((qman_version & QMAN_REV_MASK) >= QMAN_REV_5000
                        && (d->cena_access_mode == qman_cena_fastest_access)) {
                p->eqcr.pi_ring_size = 32;
                qbman_swp_enqueue_array_mode_ptr =
                                qbman_swp_enqueue_array_mode_mem_back;
                qbman_swp_enqueue_ring_mode_ptr =
                                qbman_swp_enqueue_ring_mode_mem_back;
                qbman_swp_enqueue_multiple_ptr =
                                qbman_swp_enqueue_multiple_mem_back;
                qbman_swp_enqueue_multiple_fd_ptr =
                                qbman_swp_enqueue_multiple_fd_mem_back;
                qbman_swp_enqueue_multiple_desc_ptr =
                                qbman_swp_enqueue_multiple_desc_mem_back;
                qbman_swp_pull_ptr = qbman_swp_pull_mem_back;
                qbman_swp_dqrr_next_ptr = qbman_swp_dqrr_next_mem_back;
                qbman_swp_release_ptr = qbman_swp_release_mem_back;
        }

        if (dpaa2_svr_family == SVR_LS1080A) {
                qbman_swp_enqueue_ring_mode_ptr =
                                qbman_swp_enqueue_ring_mode_cinh_direct;
                qbman_swp_enqueue_multiple_ptr =
                                qbman_swp_enqueue_multiple_cinh_direct;
                qbman_swp_enqueue_multiple_fd_ptr =
                                qbman_swp_enqueue_multiple_fd_cinh_direct;
                qbman_swp_enqueue_multiple_desc_ptr =
                                qbman_swp_enqueue_multiple_desc_cinh_direct;
        }

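        /* Build pi_ci_mask as a run of ones one bit wider than is needed
         * to index the ring (e.g. ring size 8 -> 0xf, 32 -> 0x3f). The low
         * half of the mask indexes EQCR entries; the extra top bit lets
         * pi/ci carry a wrap indication for qm_cyc_diff() below.
         */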
        for (mask_size = p->eqcr.pi_ring_size; mask_size > 0; mask_size >>= 1)
                p->eqcr.pi_ci_mask = (p->eqcr.pi_ci_mask<<1) + 1;
        eqcr_pi = qbman_cinh_read(&p->sys, QBMAN_CINH_SWP_EQCR_PI);
        p->eqcr.pi = eqcr_pi & p->eqcr.pi_ci_mask;
        p->eqcr.pi_vb = eqcr_pi & QB_VALID_BIT;
        if ((p->desc.qman_version & QMAN_REV_MASK) >= QMAN_REV_5000
                        && (d->cena_access_mode == qman_cena_fastest_access))
                p->eqcr.ci = qbman_cinh_read(&p->sys, QBMAN_CINH_SWP_EQCR_PI)
                                             & p->eqcr.pi_ci_mask;
        else
                p->eqcr.ci = qbman_cinh_read(&p->sys, QBMAN_CINH_SWP_EQCR_CI)
                                             & p->eqcr.pi_ci_mask;
        p->eqcr.available = p->eqcr.pi_ring_size -
                                qm_cyc_diff(p->eqcr.pi_ring_size,
                                p->eqcr.ci & (p->eqcr.pi_ci_mask<<1),
                                p->eqcr.pi & (p->eqcr.pi_ci_mask<<1));

        portal_idx_map[p->desc.idx] = p;
        return p;
}

void qbman_swp_finish(struct qbman_swp *p)
{
#ifdef QBMAN_CHECKING
        QBMAN_BUG_ON(p->mc.check != swp_mc_can_start);
#endif
        qbman_swp_sys_finish(&p->sys);
        portal_idx_map[p->desc.idx] = NULL;
        free(p);
}
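
/*
 * Illustrative sketch, compiled out: the portal lifecycle as a caller of
 * this file might drive it. How the qbman_swp_desc is populated is up to
 * the DPIO layer and is only assumed here.
 */
#if 0
static void example_portal_lifecycle(const struct qbman_swp_desc *d)
{
        struct qbman_swp *swp = qbman_swp_init(d);

        if (!swp)
                return; /* portal not clean, or out of memory */

        /* ... enqueue/dequeue/release through the qbman_swp_* APIs ... */

        qbman_swp_finish(swp); /* unmaps the portal and frees the object */
}
#endif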

const struct qbman_swp_desc *qbman_swp_get_desc(struct qbman_swp *p)
{
        return &p->desc;
}

/**************/
/* Interrupts */
/**************/

uint32_t qbman_swp_interrupt_get_vanish(struct qbman_swp *p)
{
        return qbman_cinh_read(&p->sys, QBMAN_CINH_SWP_ISDR);
}

void qbman_swp_interrupt_set_vanish(struct qbman_swp *p, uint32_t mask)
{
        qbman_cinh_write(&p->sys, QBMAN_CINH_SWP_ISDR, mask);
}

uint32_t qbman_swp_interrupt_read_status(struct qbman_swp *p)
{
        return qbman_cinh_read(&p->sys, QBMAN_CINH_SWP_ISR);
}

void qbman_swp_interrupt_clear_status(struct qbman_swp *p, uint32_t mask)
{
        qbman_cinh_write(&p->sys, QBMAN_CINH_SWP_ISR, mask);
}

uint32_t qbman_swp_dqrr_thrshld_read_status(struct qbman_swp *p)
{
        return qbman_cinh_read(&p->sys, QBMAN_CINH_SWP_DQRR_ITR);
}

void qbman_swp_dqrr_thrshld_write(struct qbman_swp *p, uint32_t mask)
{
        qbman_cinh_write(&p->sys, QBMAN_CINH_SWP_DQRR_ITR, mask);
}

uint32_t qbman_swp_intr_timeout_read_status(struct qbman_swp *p)
{
        return qbman_cinh_read(&p->sys, QBMAN_CINH_SWP_ITPR);
}

void qbman_swp_intr_timeout_write(struct qbman_swp *p, uint32_t mask)
{
        qbman_cinh_write(&p->sys, QBMAN_CINH_SWP_ITPR, mask);
}

uint32_t qbman_swp_interrupt_get_trigger(struct qbman_swp *p)
{
        return qbman_cinh_read(&p->sys, QBMAN_CINH_SWP_IER);
}

void qbman_swp_interrupt_set_trigger(struct qbman_swp *p, uint32_t mask)
{
        qbman_cinh_write(&p->sys, QBMAN_CINH_SWP_IER, mask);
}

int qbman_swp_interrupt_get_inhibit(struct qbman_swp *p)
{
        return qbman_cinh_read(&p->sys, QBMAN_CINH_SWP_IIR);
}

void qbman_swp_interrupt_set_inhibit(struct qbman_swp *p, int inhibit)
{
        qbman_cinh_write(&p->sys, QBMAN_CINH_SWP_IIR,
                         inhibit ? 0xffffffff : 0);
}

/***********************/
/* Management commands */
/***********************/

/*
 * Internal code common to all types of management commands.
 */

void *qbman_swp_mc_start(struct qbman_swp *p)
{
        void *ret;
#ifdef QBMAN_CHECKING
        QBMAN_BUG_ON(p->mc.check != swp_mc_can_start);
#endif
        if ((p->desc.qman_version & QMAN_REV_MASK) >= QMAN_REV_5000
                    && (p->desc.cena_access_mode == qman_cena_fastest_access))
                ret = qbman_cena_write_start(&p->sys, QBMAN_CENA_SWP_CR_MEM);
        else
                ret = qbman_cena_write_start(&p->sys, QBMAN_CENA_SWP_CR);
#ifdef QBMAN_CHECKING
        if (!ret)
                p->mc.check = swp_mc_can_submit;
#endif
        return ret;
}

void qbman_swp_mc_submit(struct qbman_swp *p, void *cmd, uint8_t cmd_verb)
{
        uint8_t *v = cmd;
#ifdef QBMAN_CHECKING
        QBMAN_BUG_ON(p->mc.check != swp_mc_can_submit);
#endif
        /* TBD: "|=" is going to hurt performance. Need to move as many fields
         * out of word zero, and for those that remain, the "OR" needs to occur
         * at the caller side. This debug check helps to catch cases where the
         * caller wants to OR but has forgotten to do so.
         */
        QBMAN_BUG_ON((*v & cmd_verb) != *v);
        if ((p->desc.qman_version & QMAN_REV_MASK) >= QMAN_REV_5000
                    && (p->desc.cena_access_mode == qman_cena_fastest_access)) {
                *v = cmd_verb | p->mr.valid_bit;
                qbman_cena_write_complete(&p->sys, QBMAN_CENA_SWP_CR_MEM, cmd);
                dma_wmb();
                qbman_cinh_write(&p->sys, QBMAN_CINH_SWP_CR_RT, QMAN_RT_MODE);
        } else {
                dma_wmb();
                *v = cmd_verb | p->mc.valid_bit;
                qbman_cena_write_complete(&p->sys, QBMAN_CENA_SWP_CR, cmd);
                clean(cmd);
        }
#ifdef QBMAN_CHECKING
        p->mc.check = swp_mc_can_poll;
#endif
}

void *qbman_swp_mc_result(struct qbman_swp *p)
{
        uint32_t *ret, verb;
#ifdef QBMAN_CHECKING
        QBMAN_BUG_ON(p->mc.check != swp_mc_can_poll);
#endif
        if ((p->desc.qman_version & QMAN_REV_MASK) >= QMAN_REV_5000
                && (p->desc.cena_access_mode == qman_cena_fastest_access)) {
                ret = qbman_cena_read(&p->sys, QBMAN_CENA_SWP_RR_MEM);
                /* Command completed if the valid bit is toggled */
                if (p->mr.valid_bit != (ret[0] & QB_VALID_BIT))
                        return NULL;
                /* Remove the valid-bit -
                 * command completed iff the rest is non-zero
                 */
                verb = ret[0] & ~QB_VALID_BIT;
                if (!verb)
                        return NULL;
                p->mr.valid_bit ^= QB_VALID_BIT;
        } else {
                qbman_cena_invalidate_prefetch(&p->sys,
                        QBMAN_CENA_SWP_RR(p->mc.valid_bit));
                ret = qbman_cena_read(&p->sys,
                                      QBMAN_CENA_SWP_RR(p->mc.valid_bit));
                /* Remove the valid-bit -
                 * command completed iff the rest is non-zero
                 */
                verb = ret[0] & ~QB_VALID_BIT;
                if (!verb)
                        return NULL;
                p->mc.valid_bit ^= QB_VALID_BIT;
        }
#ifdef QBMAN_CHECKING
        p->mc.check = swp_mc_can_start;
#endif
        return ret;
}
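
/*
 * Illustrative sketch, compiled out: the three-step management command
 * protocol implemented above - start to obtain the command buffer, fill
 * it in, submit with a verb code, then poll for the result. The use of
 * QBMAN_MC_ACQUIRE and the empty command body are placeholders only.
 */
#if 0
static void *example_mc_command(struct qbman_swp *p)
{
        void *cmd = qbman_swp_mc_start(p);
        void *rsp;

        if (!cmd)
                return NULL;
        /* ... fill in command fields, leaving the verb to the submit ... */
        qbman_swp_mc_submit(p, cmd, QBMAN_MC_ACQUIRE);
        do {
                rsp = qbman_swp_mc_result(p);
        } while (!rsp); /* NULL until the valid bit toggles */
        return rsp;
}
#endif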

/***********/
/* Enqueue */
/***********/

#define QB_ENQUEUE_CMD_OPTIONS_SHIFT    0
enum qb_enqueue_commands {
        enqueue_empty = 0,
        enqueue_response_always = 1,
        enqueue_rejects_to_fq = 2
};

#define QB_ENQUEUE_CMD_EC_OPTION_MASK        0x3
#define QB_ENQUEUE_CMD_ORP_ENABLE_SHIFT      2
#define QB_ENQUEUE_CMD_IRQ_ON_DISPATCH_SHIFT 3
#define QB_ENQUEUE_CMD_TARGET_TYPE_SHIFT     4
#define QB_ENQUEUE_CMD_DCA_PK_SHIFT          6
#define QB_ENQUEUE_CMD_DCA_EN_SHIFT          7
#define QB_ENQUEUE_CMD_NLIS_SHIFT            14
#define QB_ENQUEUE_CMD_IS_NESN_SHIFT         15

void qbman_eq_desc_clear(struct qbman_eq_desc *d)
{
        memset(d, 0, sizeof(*d));
}

void qbman_eq_desc_set_no_orp(struct qbman_eq_desc *d, int respond_success)
{
        d->eq.verb &= ~(1 << QB_ENQUEUE_CMD_ORP_ENABLE_SHIFT);
        if (respond_success)
                d->eq.verb |= enqueue_response_always;
        else
                d->eq.verb |= enqueue_rejects_to_fq;
}

void qbman_eq_desc_set_orp(struct qbman_eq_desc *d, int respond_success,
                           uint16_t opr_id, uint16_t seqnum, int incomplete)
{
        d->eq.verb |= 1 << QB_ENQUEUE_CMD_ORP_ENABLE_SHIFT;
        if (respond_success)
                d->eq.verb |= enqueue_response_always;
        else
                d->eq.verb |= enqueue_rejects_to_fq;

        d->eq.orpid = opr_id;
        d->eq.seqnum = seqnum;
        if (incomplete)
                d->eq.seqnum |= 1 << QB_ENQUEUE_CMD_NLIS_SHIFT;
        else
                d->eq.seqnum &= ~(1 << QB_ENQUEUE_CMD_NLIS_SHIFT);
}

void qbman_eq_desc_set_orp_hole(struct qbman_eq_desc *d, uint16_t opr_id,
                                uint16_t seqnum)
{
        d->eq.verb |= 1 << QB_ENQUEUE_CMD_ORP_ENABLE_SHIFT;
        d->eq.verb &= ~QB_ENQUEUE_CMD_EC_OPTION_MASK;
        d->eq.orpid = opr_id;
        d->eq.seqnum = seqnum;
        d->eq.seqnum &= ~(1 << QB_ENQUEUE_CMD_NLIS_SHIFT);
        d->eq.seqnum &= ~(1 << QB_ENQUEUE_CMD_IS_NESN_SHIFT);
}

void qbman_eq_desc_set_orp_nesn(struct qbman_eq_desc *d, uint16_t opr_id,
                                uint16_t seqnum)
{
        d->eq.verb |= 1 << QB_ENQUEUE_CMD_ORP_ENABLE_SHIFT;
        d->eq.verb &= ~QB_ENQUEUE_CMD_EC_OPTION_MASK;
        d->eq.orpid = opr_id;
        d->eq.seqnum = seqnum;
        d->eq.seqnum &= ~(1 << QB_ENQUEUE_CMD_NLIS_SHIFT);
        d->eq.seqnum |= 1 << QB_ENQUEUE_CMD_IS_NESN_SHIFT;
}

void qbman_eq_desc_set_response(struct qbman_eq_desc *d,
                                dma_addr_t storage_phys,
                                int stash)
{
        d->eq.rsp_addr = storage_phys;
        d->eq.wae = stash;
}

void qbman_eq_desc_set_token(struct qbman_eq_desc *d, uint8_t token)
{
        d->eq.rspid = token;
}

void qbman_eq_desc_set_fq(struct qbman_eq_desc *d, uint32_t fqid)
{
        d->eq.verb &= ~(1 << QB_ENQUEUE_CMD_TARGET_TYPE_SHIFT);
        d->eq.tgtid = fqid;
}

void qbman_eq_desc_set_qd(struct qbman_eq_desc *d, uint32_t qdid,
                          uint16_t qd_bin, uint8_t qd_prio)
{
        d->eq.verb |= 1 << QB_ENQUEUE_CMD_TARGET_TYPE_SHIFT;
        d->eq.tgtid = qdid;
        d->eq.qdbin = qd_bin;
        d->eq.qpri = qd_prio;
}

void qbman_eq_desc_set_eqdi(struct qbman_eq_desc *d, int enable)
{
        if (enable)
                d->eq.verb |= 1 << QB_ENQUEUE_CMD_IRQ_ON_DISPATCH_SHIFT;
        else
                d->eq.verb &= ~(1 << QB_ENQUEUE_CMD_IRQ_ON_DISPATCH_SHIFT);
}

void qbman_eq_desc_set_dca(struct qbman_eq_desc *d, int enable,
                           uint8_t dqrr_idx, int park)
{
        if (enable) {
                d->eq.dca = dqrr_idx;
                if (park)
                        d->eq.dca |= 1 << QB_ENQUEUE_CMD_DCA_PK_SHIFT;
                else
                        d->eq.dca &= ~(1 << QB_ENQUEUE_CMD_DCA_PK_SHIFT);
                d->eq.dca |= 1 << QB_ENQUEUE_CMD_DCA_EN_SHIFT;
        } else {
                d->eq.dca &= ~(1 << QB_ENQUEUE_CMD_DCA_EN_SHIFT);
        }
}

#define EQAR_IDX(eqar)     ((eqar) & 0x1f)
#define EQAR_VB(eqar)      ((eqar) & 0x80)
#define EQAR_SUCCESS(eqar) ((eqar) & 0x100)

static inline void qbman_write_eqcr_am_rt_register(struct qbman_swp *p,
                                                   uint8_t idx)
{
        if (idx < 16)
                qbman_cinh_write(&p->sys, QBMAN_CINH_SWP_EQCR_AM_RT + idx * 4,
                                     QMAN_RT_MODE);
        else
                qbman_cinh_write(&p->sys, QBMAN_CINH_SWP_EQCR_AM_RT2 +
                                     (idx - 16) * 4,
                                     QMAN_RT_MODE);
}


static int qbman_swp_enqueue_array_mode_direct(struct qbman_swp *s,
                                               const struct qbman_eq_desc *d,
                                               const struct qbman_fd *fd)
{
        uint32_t *p;
        const uint32_t *cl = qb_cl(d);
        uint32_t eqar = qbman_cinh_read(&s->sys, QBMAN_CINH_SWP_EQAR);

        pr_debug("EQAR=%08x\n", eqar);
        if (!EQAR_SUCCESS(eqar))
                return -EBUSY;
        p = qbman_cena_write_start_wo_shadow(&s->sys,
                        QBMAN_CENA_SWP_EQCR(EQAR_IDX(eqar)));
        memcpy(&p[1], &cl[1], 28);
        memcpy(&p[8], fd, sizeof(*fd));

        /* Set the verb byte, have to substitute in the valid-bit */
        dma_wmb();
        p[0] = cl[0] | EQAR_VB(eqar);
        qbman_cena_write_complete_wo_shadow(&s->sys,
                                QBMAN_CENA_SWP_EQCR(EQAR_IDX(eqar)));
        return 0;
}

static int qbman_swp_enqueue_array_mode_mem_back(struct qbman_swp *s,
                                                 const struct qbman_eq_desc *d,
                                                 const struct qbman_fd *fd)
{
        uint32_t *p;
        const uint32_t *cl = qb_cl(d);
        uint32_t eqar = qbman_cinh_read(&s->sys, QBMAN_CINH_SWP_EQAR);

        pr_debug("EQAR=%08x\n", eqar);
        if (!EQAR_SUCCESS(eqar))
                return -EBUSY;
        p = qbman_cena_write_start_wo_shadow(&s->sys,
                        QBMAN_CENA_SWP_EQCR(EQAR_IDX(eqar)));
        memcpy(&p[1], &cl[1], 28);
        memcpy(&p[8], fd, sizeof(*fd));

        /* Set the verb byte, have to substitute in the valid-bit */
        p[0] = cl[0] | EQAR_VB(eqar);
        dma_wmb();
        qbman_write_eqcr_am_rt_register(s, EQAR_IDX(eqar));
        return 0;
}

static inline int qbman_swp_enqueue_array_mode(struct qbman_swp *s,
                                               const struct qbman_eq_desc *d,
                                               const struct qbman_fd *fd)
{
        return qbman_swp_enqueue_array_mode_ptr(s, d, fd);
}

static int qbman_swp_enqueue_ring_mode_direct(struct qbman_swp *s,
                                              const struct qbman_eq_desc *d,
                                              const struct qbman_fd *fd)
{
        uint32_t *p;
        const uint32_t *cl = qb_cl(d);
        uint32_t eqcr_ci, full_mask, half_mask;

        half_mask = (s->eqcr.pi_ci_mask>>1);
        full_mask = s->eqcr.pi_ci_mask;
        if (!s->eqcr.available) {
                eqcr_ci = s->eqcr.ci;
                s->eqcr.ci = qbman_cena_read_reg(&s->sys,
                                QBMAN_CENA_SWP_EQCR_CI) & full_mask;
                s->eqcr.available = qm_cyc_diff(s->eqcr.pi_ring_size,
                                eqcr_ci, s->eqcr.ci);
                if (!s->eqcr.available)
                        return -EBUSY;
        }

        p = qbman_cena_write_start_wo_shadow(&s->sys,
                        QBMAN_CENA_SWP_EQCR(s->eqcr.pi & half_mask));
        memcpy(&p[1], &cl[1], 28);
        memcpy(&p[8], fd, sizeof(*fd));
        lwsync();

        /* Set the verb byte, have to substitute in the valid-bit */
        p[0] = cl[0] | s->eqcr.pi_vb;
        qbman_cena_write_complete_wo_shadow(&s->sys,
                        QBMAN_CENA_SWP_EQCR(s->eqcr.pi & half_mask));
        s->eqcr.pi++;
        s->eqcr.pi &= full_mask;
        s->eqcr.available--;
        if (!(s->eqcr.pi & half_mask))
                s->eqcr.pi_vb ^= QB_VALID_BIT;

        return 0;
}

static int qbman_swp_enqueue_ring_mode_cinh_direct(
                struct qbman_swp *s,
                const struct qbman_eq_desc *d,
                const struct qbman_fd *fd)
{
        uint32_t *p;
        const uint32_t *cl = qb_cl(d);
        uint32_t eqcr_ci, full_mask, half_mask;

        half_mask = (s->eqcr.pi_ci_mask>>1);
        full_mask = s->eqcr.pi_ci_mask;
        if (!s->eqcr.available) {
                eqcr_ci = s->eqcr.ci;
                s->eqcr.ci = qbman_cinh_read(&s->sys,
                                QBMAN_CINH_SWP_EQCR_CI) & full_mask;
                s->eqcr.available = qm_cyc_diff(s->eqcr.pi_ring_size,
                                eqcr_ci, s->eqcr.ci);
                if (!s->eqcr.available)
                        return -EBUSY;
        }

        p = qbman_cena_write_start_wo_shadow(&s->sys,
                        QBMAN_CENA_SWP_EQCR(s->eqcr.pi & half_mask));
        memcpy(&p[1], &cl[1], 28);
        memcpy(&p[8], fd, sizeof(*fd));
        lwsync();

        /* Set the verb byte, have to substitute in the valid-bit */
        p[0] = cl[0] | s->eqcr.pi_vb;
        qbman_cena_write_complete_wo_shadow(&s->sys,
                        QBMAN_CENA_SWP_EQCR(s->eqcr.pi & half_mask));
        s->eqcr.pi++;
        s->eqcr.pi &= full_mask;
        s->eqcr.available--;
        if (!(s->eqcr.pi & half_mask))
                s->eqcr.pi_vb ^= QB_VALID_BIT;

        return 0;
}

static int qbman_swp_enqueue_ring_mode_mem_back(struct qbman_swp *s,
                                                const struct qbman_eq_desc *d,
                                                const struct qbman_fd *fd)
{
        uint32_t *p;
        const uint32_t *cl = qb_cl(d);
        uint32_t eqcr_ci, full_mask, half_mask;

        half_mask = (s->eqcr.pi_ci_mask>>1);
        full_mask = s->eqcr.pi_ci_mask;
        if (!s->eqcr.available) {
                eqcr_ci = s->eqcr.ci;
                s->eqcr.ci = qbman_cena_read_reg(&s->sys,
                                QBMAN_CENA_SWP_EQCR_CI_MEMBACK) & full_mask;
                s->eqcr.available = qm_cyc_diff(s->eqcr.pi_ring_size,
                                eqcr_ci, s->eqcr.ci);
                if (!s->eqcr.available)
                        return -EBUSY;
        }

        p = qbman_cena_write_start_wo_shadow(&s->sys,
                        QBMAN_CENA_SWP_EQCR(s->eqcr.pi & half_mask));
        memcpy(&p[1], &cl[1], 28);
        memcpy(&p[8], fd, sizeof(*fd));

        /* Set the verb byte, have to substitute in the valid-bit */
        p[0] = cl[0] | s->eqcr.pi_vb;
        s->eqcr.pi++;
        s->eqcr.pi &= full_mask;
        s->eqcr.available--;
        if (!(s->eqcr.pi & half_mask))
                s->eqcr.pi_vb ^= QB_VALID_BIT;
        dma_wmb();
        qbman_cinh_write(&s->sys, QBMAN_CINH_SWP_EQCR_PI,
                                (QB_RT_BIT)|(s->eqcr.pi)|s->eqcr.pi_vb);
        return 0;
}

static int qbman_swp_enqueue_ring_mode(struct qbman_swp *s,
                                       const struct qbman_eq_desc *d,
                                       const struct qbman_fd *fd)
{
        return qbman_swp_enqueue_ring_mode_ptr(s, d, fd);
}

int qbman_swp_enqueue(struct qbman_swp *s, const struct qbman_eq_desc *d,
                      const struct qbman_fd *fd)
{
        if (s->sys.eqcr_mode == qman_eqcr_vb_array)
                return qbman_swp_enqueue_array_mode(s, d, fd);
        else    /* Use ring mode by default */
                return qbman_swp_enqueue_ring_mode(s, d, fd);
}
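
/*
 * Illustrative sketch, compiled out: a minimal single-frame enqueue to a
 * frame queue using the descriptor helpers above. fqid and fd are assumed
 * to be supplied by the caller.
 */
#if 0
static int example_enqueue_one(struct qbman_swp *s, uint32_t fqid,
                               const struct qbman_fd *fd)
{
        struct qbman_eq_desc d;

        qbman_eq_desc_clear(&d);
        qbman_eq_desc_set_no_orp(&d, 0); /* no enqueue-response needed */
        qbman_eq_desc_set_fq(&d, fqid);
        return qbman_swp_enqueue(s, &d, fd); /* 0, or -EBUSY if EQCR full */
}
#endif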

static int qbman_swp_enqueue_multiple_direct(struct qbman_swp *s,
                                             const struct qbman_eq_desc *d,
                                             const struct qbman_fd *fd,
                                             uint32_t *flags,
                                             int num_frames)
{
        uint32_t *p = NULL;
        const uint32_t *cl = qb_cl(d);
        uint32_t eqcr_ci, eqcr_pi, half_mask, full_mask;
        int i, num_enqueued = 0;
        uint64_t addr_cena;

        half_mask = (s->eqcr.pi_ci_mask>>1);
        full_mask = s->eqcr.pi_ci_mask;
        if (!s->eqcr.available) {
                eqcr_ci = s->eqcr.ci;
                s->eqcr.ci = qbman_cena_read_reg(&s->sys,
                                QBMAN_CENA_SWP_EQCR_CI) & full_mask;
                s->eqcr.available = qm_cyc_diff(s->eqcr.pi_ring_size,
                                eqcr_ci, s->eqcr.ci);
                if (!s->eqcr.available)
                        return 0;
        }

        eqcr_pi = s->eqcr.pi;
        num_enqueued = (s->eqcr.available < num_frames) ?
                        s->eqcr.available : num_frames;
        s->eqcr.available -= num_enqueued;
        /* Fill in the EQCR ring */
        for (i = 0; i < num_enqueued; i++) {
                p = qbman_cena_write_start_wo_shadow(&s->sys,
                                QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
                memcpy(&p[1], &cl[1], 28);
                memcpy(&p[8], &fd[i], sizeof(*fd));
                eqcr_pi++;
        }

        lwsync();

        /* Set the verb byte, have to substitute in the valid-bit */
        eqcr_pi = s->eqcr.pi;
        for (i = 0; i < num_enqueued; i++) {
                p = qbman_cena_write_start_wo_shadow(&s->sys,
                                QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
                p[0] = cl[0] | s->eqcr.pi_vb;
                if (flags && (flags[i] & QBMAN_ENQUEUE_FLAG_DCA)) {
                        struct qbman_eq_desc *d = (struct qbman_eq_desc *)p;

                        d->eq.dca = (1 << QB_ENQUEUE_CMD_DCA_EN_SHIFT) |
                                ((flags[i]) & QBMAN_EQCR_DCA_IDXMASK);
                }
                eqcr_pi++;
                if (!(eqcr_pi & half_mask))
                        s->eqcr.pi_vb ^= QB_VALID_BIT;
        }

        /* Flush all the cachelines without load/store in between */
        eqcr_pi = s->eqcr.pi;
        addr_cena = (size_t)s->sys.addr_cena;
        for (i = 0; i < num_enqueued; i++) {
                dcbf((uintptr_t)(addr_cena +
                        QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask)));
                eqcr_pi++;
        }
        s->eqcr.pi = eqcr_pi & full_mask;

        return num_enqueued;
}

static int qbman_swp_enqueue_multiple_cinh_direct(
                struct qbman_swp *s,
                const struct qbman_eq_desc *d,
                const struct qbman_fd *fd,
                uint32_t *flags,
                int num_frames)
{
        uint32_t *p = NULL;
        const uint32_t *cl = qb_cl(d);
        uint32_t eqcr_ci, eqcr_pi, half_mask, full_mask;
        int i, num_enqueued = 0;
        uint64_t addr_cena;

        half_mask = (s->eqcr.pi_ci_mask>>1);
        full_mask = s->eqcr.pi_ci_mask;
        if (!s->eqcr.available) {
                eqcr_ci = s->eqcr.ci;
                s->eqcr.ci = qbman_cinh_read(&s->sys,
                                QBMAN_CINH_SWP_EQCR_CI) & full_mask;
                s->eqcr.available = qm_cyc_diff(s->eqcr.pi_ring_size,
                                eqcr_ci, s->eqcr.ci);
                if (!s->eqcr.available)
                        return 0;
        }

        eqcr_pi = s->eqcr.pi;
        num_enqueued = (s->eqcr.available < num_frames) ?
                        s->eqcr.available : num_frames;
        s->eqcr.available -= num_enqueued;
        /* Fill in the EQCR ring */
        for (i = 0; i < num_enqueued; i++) {
                p = qbman_cena_write_start_wo_shadow(&s->sys,
                                QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
                memcpy(&p[1], &cl[1], 28);
                memcpy(&p[8], &fd[i], sizeof(*fd));
                eqcr_pi++;
        }

        lwsync();

        /* Set the verb byte, have to substitute in the valid-bit */
        eqcr_pi = s->eqcr.pi;
        for (i = 0; i < num_enqueued; i++) {
                p = qbman_cena_write_start_wo_shadow(&s->sys,
                                QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
                p[0] = cl[0] | s->eqcr.pi_vb;
                if (flags && (flags[i] & QBMAN_ENQUEUE_FLAG_DCA)) {
                        struct qbman_eq_desc *d = (struct qbman_eq_desc *)p;

                        d->eq.dca = (1 << QB_ENQUEUE_CMD_DCA_EN_SHIFT) |
                                ((flags[i]) & QBMAN_EQCR_DCA_IDXMASK);
                }
                eqcr_pi++;
                if (!(eqcr_pi & half_mask))
                        s->eqcr.pi_vb ^= QB_VALID_BIT;
        }

        /* Flush all the cachelines without load/store in between */
        eqcr_pi = s->eqcr.pi;
        addr_cena = (size_t)s->sys.addr_cena;
        for (i = 0; i < num_enqueued; i++) {
                dcbf(addr_cena +
                        QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
                eqcr_pi++;
        }
        s->eqcr.pi = eqcr_pi & full_mask;

        return num_enqueued;
}

static int qbman_swp_enqueue_multiple_mem_back(struct qbman_swp *s,
                                               const struct qbman_eq_desc *d,
                                               const struct qbman_fd *fd,
                                               uint32_t *flags,
                                               int num_frames)
{
        uint32_t *p = NULL;
        const uint32_t *cl = qb_cl(d);
        uint32_t eqcr_ci, eqcr_pi, half_mask, full_mask;
        int i, num_enqueued = 0;

        half_mask = (s->eqcr.pi_ci_mask>>1);
        full_mask = s->eqcr.pi_ci_mask;
        if (!s->eqcr.available) {
                eqcr_ci = s->eqcr.ci;
                s->eqcr.ci = qbman_cena_read_reg(&s->sys,
                                QBMAN_CENA_SWP_EQCR_CI_MEMBACK) & full_mask;
                s->eqcr.available = qm_cyc_diff(s->eqcr.pi_ring_size,
                                        eqcr_ci, s->eqcr.ci);
                if (!s->eqcr.available)
                        return 0;
        }

        eqcr_pi = s->eqcr.pi;
        num_enqueued = (s->eqcr.available < num_frames) ?
                        s->eqcr.available : num_frames;
        s->eqcr.available -= num_enqueued;
        /* Fill in the EQCR ring */
        for (i = 0; i < num_enqueued; i++) {
                p = qbman_cena_write_start_wo_shadow(&s->sys,
                                QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
                memcpy(&p[1], &cl[1], 28);
                memcpy(&p[8], &fd[i], sizeof(*fd));
                if (flags && (flags[i] & QBMAN_ENQUEUE_FLAG_DCA)) {
                        struct qbman_eq_desc *d = (struct qbman_eq_desc *)p;

                        d->eq.dca = (1 << QB_ENQUEUE_CMD_DCA_EN_SHIFT) |
                                ((flags[i]) & QBMAN_EQCR_DCA_IDXMASK);
                }
                eqcr_pi++;
                p[0] = cl[0] | s->eqcr.pi_vb;

                if (!(eqcr_pi & half_mask))
                        s->eqcr.pi_vb ^= QB_VALID_BIT;
        }
        s->eqcr.pi = eqcr_pi & full_mask;

        dma_wmb();
        qbman_cinh_write(&s->sys, QBMAN_CINH_SWP_EQCR_PI,
                                (QB_RT_BIT)|(s->eqcr.pi)|s->eqcr.pi_vb);
        return num_enqueued;
}

inline int qbman_swp_enqueue_multiple(struct qbman_swp *s,
                                      const struct qbman_eq_desc *d,
                                      const struct qbman_fd *fd,
                                      uint32_t *flags,
                                      int num_frames)
{
        return qbman_swp_enqueue_multiple_ptr(s, d, fd, flags, num_frames);
}
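
/*
 * Illustrative sketch, compiled out: qbman_swp_enqueue_multiple() enqueues
 * at most as many frames as the EQCR has room for and returns that count,
 * so a caller that must send the whole burst retries with the remainder.
 */
#if 0
static void example_enqueue_burst(struct qbman_swp *s,
                                  const struct qbman_eq_desc *d,
                                  const struct qbman_fd *fds, int n)
{
        int sent = 0;

        while (sent < n)
                sent += qbman_swp_enqueue_multiple(s, d, &fds[sent],
                                                   NULL, n - sent);
}
#endif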

static int qbman_swp_enqueue_multiple_fd_direct(struct qbman_swp *s,
                                                const struct qbman_eq_desc *d,
                                                struct qbman_fd **fd,
                                                uint32_t *flags,
                                                int num_frames)
{
        uint32_t *p = NULL;
        const uint32_t *cl = qb_cl(d);
        uint32_t eqcr_ci, eqcr_pi, half_mask, full_mask;
        int i, num_enqueued = 0;
        uint64_t addr_cena;

        half_mask = (s->eqcr.pi_ci_mask>>1);
        full_mask = s->eqcr.pi_ci_mask;
        if (!s->eqcr.available) {
                eqcr_ci = s->eqcr.ci;
                s->eqcr.ci = qbman_cena_read_reg(&s->sys,
                                QBMAN_CENA_SWP_EQCR_CI) & full_mask;
                s->eqcr.available = qm_cyc_diff(s->eqcr.pi_ring_size,
                                eqcr_ci, s->eqcr.ci);
                if (!s->eqcr.available)
                        return 0;
        }

        eqcr_pi = s->eqcr.pi;
        num_enqueued = (s->eqcr.available < num_frames) ?
                        s->eqcr.available : num_frames;
        s->eqcr.available -= num_enqueued;
        /* Fill in the EQCR ring */
        for (i = 0; i < num_enqueued; i++) {
                p = qbman_cena_write_start_wo_shadow(&s->sys,
                                QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
                memcpy(&p[1], &cl[1], 28);
                memcpy(&p[8], fd[i], sizeof(struct qbman_fd));
                eqcr_pi++;
        }

        lwsync();

        /* Set the verb byte, have to substitute in the valid-bit */
        eqcr_pi = s->eqcr.pi;
        for (i = 0; i < num_enqueued; i++) {
                p = qbman_cena_write_start_wo_shadow(&s->sys,
                                QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
                p[0] = cl[0] | s->eqcr.pi_vb;
                if (flags && (flags[i] & QBMAN_ENQUEUE_FLAG_DCA)) {
                        struct qbman_eq_desc *d = (struct qbman_eq_desc *)p;

                        d->eq.dca = (1 << QB_ENQUEUE_CMD_DCA_EN_SHIFT) |
                                ((flags[i]) & QBMAN_EQCR_DCA_IDXMASK);
                }
                eqcr_pi++;
                if (!(eqcr_pi & half_mask))
                        s->eqcr.pi_vb ^= QB_VALID_BIT;
        }

        /* Flush all the cachelines without load/store in between */
        eqcr_pi = s->eqcr.pi;
        addr_cena = (size_t)s->sys.addr_cena;
        for (i = 0; i < num_enqueued; i++) {
                dcbf(addr_cena +
                        QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
                eqcr_pi++;
        }
        s->eqcr.pi = eqcr_pi & full_mask;

        return num_enqueued;
}

static int qbman_swp_enqueue_multiple_fd_cinh_direct(
                struct qbman_swp *s,
                const struct qbman_eq_desc *d,
                struct qbman_fd **fd,
                uint32_t *flags,
                int num_frames)
{
        uint32_t *p = NULL;
        const uint32_t *cl = qb_cl(d);
        uint32_t eqcr_ci, eqcr_pi, half_mask, full_mask;
        int i, num_enqueued = 0;
        uint64_t addr_cena;

        half_mask = (s->eqcr.pi_ci_mask>>1);
        full_mask = s->eqcr.pi_ci_mask;
        if (!s->eqcr.available) {
                eqcr_ci = s->eqcr.ci;
                s->eqcr.ci = qbman_cinh_read(&s->sys,
                                QBMAN_CINH_SWP_EQCR_CI) & full_mask;
                s->eqcr.available = qm_cyc_diff(s->eqcr.pi_ring_size,
                                eqcr_ci, s->eqcr.ci);
                if (!s->eqcr.available)
                        return 0;
        }

        eqcr_pi = s->eqcr.pi;
        num_enqueued = (s->eqcr.available < num_frames) ?
                        s->eqcr.available : num_frames;
        s->eqcr.available -= num_enqueued;
        /* Fill in the EQCR ring */
        for (i = 0; i < num_enqueued; i++) {
                p = qbman_cena_write_start_wo_shadow(&s->sys,
                                QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
                memcpy(&p[1], &cl[1], 28);
                memcpy(&p[8], fd[i], sizeof(struct qbman_fd));
                eqcr_pi++;
        }

        lwsync();

        /* Set the verb byte, have to substitute in the valid-bit */
        eqcr_pi = s->eqcr.pi;
        for (i = 0; i < num_enqueued; i++) {
                p = qbman_cena_write_start_wo_shadow(&s->sys,
                                QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
                p[0] = cl[0] | s->eqcr.pi_vb;
                if (flags && (flags[i] & QBMAN_ENQUEUE_FLAG_DCA)) {
                        struct qbman_eq_desc *d = (struct qbman_eq_desc *)p;

                        d->eq.dca = (1 << QB_ENQUEUE_CMD_DCA_EN_SHIFT) |
                                ((flags[i]) & QBMAN_EQCR_DCA_IDXMASK);
                }
                eqcr_pi++;
                if (!(eqcr_pi & half_mask))
                        s->eqcr.pi_vb ^= QB_VALID_BIT;
        }

        /* Flush all the cachelines without load/store in between */
        eqcr_pi = s->eqcr.pi;
        addr_cena = (size_t)s->sys.addr_cena;
        for (i = 0; i < num_enqueued; i++) {
                dcbf(addr_cena +
                        QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
                eqcr_pi++;
        }
        s->eqcr.pi = eqcr_pi & full_mask;

        return num_enqueued;
}

static int qbman_swp_enqueue_multiple_fd_mem_back(struct qbman_swp *s,
                                                  const struct qbman_eq_desc *d,
                                                  struct qbman_fd **fd,
                                                  uint32_t *flags,
                                                  int num_frames)
{
        uint32_t *p = NULL;
        const uint32_t *cl = qb_cl(d);
        uint32_t eqcr_ci, eqcr_pi, half_mask, full_mask;
        int i, num_enqueued = 0;

        half_mask = (s->eqcr.pi_ci_mask>>1);
        full_mask = s->eqcr.pi_ci_mask;
        if (!s->eqcr.available) {
                eqcr_ci = s->eqcr.ci;
                s->eqcr.ci = qbman_cena_read_reg(&s->sys,
                                QBMAN_CENA_SWP_EQCR_CI_MEMBACK) & full_mask;
                s->eqcr.available = qm_cyc_diff(s->eqcr.pi_ring_size,
                                        eqcr_ci, s->eqcr.ci);
                if (!s->eqcr.available)
                        return 0;
        }

        eqcr_pi = s->eqcr.pi;
        num_enqueued = (s->eqcr.available < num_frames) ?
                        s->eqcr.available : num_frames;
        s->eqcr.available -= num_enqueued;
        /* Fill in the EQCR ring */
        for (i = 0; i < num_enqueued; i++) {
                p = qbman_cena_write_start_wo_shadow(&s->sys,
                                QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
                memcpy(&p[1], &cl[1], 28);
                memcpy(&p[8], fd[i], sizeof(struct qbman_fd));
                eqcr_pi++;
        }

        /* Set the verb byte, have to substitute in the valid-bit */
        eqcr_pi = s->eqcr.pi;
        for (i = 0; i < num_enqueued; i++) {
                p = qbman_cena_write_start_wo_shadow(&s->sys,
                                QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
                p[0] = cl[0] | s->eqcr.pi_vb;
                if (flags && (flags[i] & QBMAN_ENQUEUE_FLAG_DCA)) {
                        struct qbman_eq_desc *d = (struct qbman_eq_desc *)p;

                        d->eq.dca = (1 << QB_ENQUEUE_CMD_DCA_EN_SHIFT) |
                                ((flags[i]) & QBMAN_EQCR_DCA_IDXMASK);
                }
                eqcr_pi++;
                if (!(eqcr_pi & half_mask))
                        s->eqcr.pi_vb ^= QB_VALID_BIT;
        }
        s->eqcr.pi = eqcr_pi & full_mask;

        dma_wmb();
        qbman_cinh_write(&s->sys, QBMAN_CINH_SWP_EQCR_PI,
                                (QB_RT_BIT)|(s->eqcr.pi)|s->eqcr.pi_vb);
        return num_enqueued;
}

inline int qbman_swp_enqueue_multiple_fd(struct qbman_swp *s,
                                         const struct qbman_eq_desc *d,
                                         struct qbman_fd **fd,
                                         uint32_t *flags,
                                         int num_frames)
{
        return qbman_swp_enqueue_multiple_fd_ptr(s, d, fd, flags, num_frames);
}

static int qbman_swp_enqueue_multiple_desc_direct(struct qbman_swp *s,
                                        const struct qbman_eq_desc *d,
                                        const struct qbman_fd *fd,
                                        int num_frames)
{
        uint32_t *p;
        const uint32_t *cl;
        uint32_t eqcr_ci, eqcr_pi, half_mask, full_mask;
        int i, num_enqueued = 0;
        uint64_t addr_cena;

        half_mask = (s->eqcr.pi_ci_mask>>1);
        full_mask = s->eqcr.pi_ci_mask;
        if (!s->eqcr.available) {
                eqcr_ci = s->eqcr.ci;
                s->eqcr.ci = qbman_cena_read_reg(&s->sys,
                                QBMAN_CENA_SWP_EQCR_CI) & full_mask;
                s->eqcr.available = qm_cyc_diff(s->eqcr.pi_ring_size,
                                        eqcr_ci, s->eqcr.ci);
                if (!s->eqcr.available)
                        return 0;
        }

        eqcr_pi = s->eqcr.pi;
        num_enqueued = (s->eqcr.available < num_frames) ?
                        s->eqcr.available : num_frames;
        s->eqcr.available -= num_enqueued;
        /* Fill in the EQCR ring */
        for (i = 0; i < num_enqueued; i++) {
                p = qbman_cena_write_start_wo_shadow(&s->sys,
                                QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
                cl = qb_cl(&d[i]);
                memcpy(&p[1], &cl[1], 28);
                memcpy(&p[8], &fd[i], sizeof(*fd));
                eqcr_pi++;
        }

        lwsync();

        /* Set the verb byte, have to substitute in the valid-bit */
        eqcr_pi = s->eqcr.pi;
        for (i = 0; i < num_enqueued; i++) {
                p = qbman_cena_write_start_wo_shadow(&s->sys,
                                QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
                cl = qb_cl(&d[i]);
                p[0] = cl[0] | s->eqcr.pi_vb;
                eqcr_pi++;
                if (!(eqcr_pi & half_mask))
                        s->eqcr.pi_vb ^= QB_VALID_BIT;
        }

        /* Flush all the cachelines without load/store in between */
        eqcr_pi = s->eqcr.pi;
        addr_cena = (size_t)s->sys.addr_cena;
        for (i = 0; i < num_enqueued; i++) {
                dcbf((uintptr_t)(addr_cena +
                        QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask)));
                eqcr_pi++;
        }
        s->eqcr.pi = eqcr_pi & full_mask;

        return num_enqueued;
}

static int qbman_swp_enqueue_multiple_desc_cinh_direct(
                struct qbman_swp *s,
                const struct qbman_eq_desc *d,
                const struct qbman_fd *fd,
                int num_frames)
{
        uint32_t *p;
        const uint32_t *cl;
        uint32_t eqcr_ci, eqcr_pi, half_mask, full_mask;
        int i, num_enqueued = 0;
        uint64_t addr_cena;

        half_mask = (s->eqcr.pi_ci_mask>>1);
        full_mask = s->eqcr.pi_ci_mask;
        if (!s->eqcr.available) {
                eqcr_ci = s->eqcr.ci;
                s->eqcr.ci = qbman_cinh_read(&s->sys,
                                QBMAN_CINH_SWP_EQCR_CI) & full_mask;
                s->eqcr.available = qm_cyc_diff(s->eqcr.pi_ring_size,
                                        eqcr_ci, s->eqcr.ci);
                if (!s->eqcr.available)
                        return 0;
        }

        eqcr_pi = s->eqcr.pi;
        num_enqueued = (s->eqcr.available < num_frames) ?
                        s->eqcr.available : num_frames;
        s->eqcr.available -= num_enqueued;
        /* Fill in the EQCR ring */
        for (i = 0; i < num_enqueued; i++) {
                p = qbman_cena_write_start_wo_shadow(&s->sys,
                                QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
                cl = qb_cl(&d[i]);
                memcpy(&p[1], &cl[1], 28);
                memcpy(&p[8], &fd[i], sizeof(*fd));
                eqcr_pi++;
        }

        lwsync();

        /* Set the verb byte, have to substitute in the valid-bit */
        eqcr_pi = s->eqcr.pi;
        for (i = 0; i < num_enqueued; i++) {
                p = qbman_cena_write_start_wo_shadow(&s->sys,
                                QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
                cl = qb_cl(&d[i]);
                p[0] = cl[0] | s->eqcr.pi_vb;
                eqcr_pi++;
                if (!(eqcr_pi & half_mask))
                        s->eqcr.pi_vb ^= QB_VALID_BIT;
        }

        /* Flush all the cachelines without load/store in between */
1356         eqcr_pi = s->eqcr.pi;
1357         addr_cena = (size_t)s->sys.addr_cena;
1358         for (i = 0; i < num_enqueued; i++) {
1359                 dcbf(addr_cena +
1360                         QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
1361                 eqcr_pi++;
1362         }
1363         s->eqcr.pi = eqcr_pi & full_mask;
1364
1365         return num_enqueued;
1366 }
1367
1368 static int qbman_swp_enqueue_multiple_desc_mem_back(struct qbman_swp *s,
1369                                         const struct qbman_eq_desc *d,
1370                                         const struct qbman_fd *fd,
1371                                         int num_frames)
1372 {
1373         uint32_t *p;
1374         const uint32_t *cl;
1375         uint32_t eqcr_ci, eqcr_pi, half_mask, full_mask;
1376         int i, num_enqueued = 0;
1377
1378         half_mask = (s->eqcr.pi_ci_mask >> 1);
1379         full_mask = s->eqcr.pi_ci_mask;
1380         if (!s->eqcr.available) {
1381                 eqcr_ci = s->eqcr.ci;
1382                 s->eqcr.ci = qbman_cena_read_reg(&s->sys,
1383                                 QBMAN_CENA_SWP_EQCR_CI_MEMBACK) & full_mask;
1384                 s->eqcr.available = qm_cyc_diff(s->eqcr.pi_ring_size,
1385                                         eqcr_ci, s->eqcr.ci);
1386                 if (!s->eqcr.available)
1387                         return 0;
1388         }
1389
1390         eqcr_pi = s->eqcr.pi;
1391         num_enqueued = (s->eqcr.available < num_frames) ?
1392                         s->eqcr.available : num_frames;
1393         s->eqcr.available -= num_enqueued;
1394         /* Fill in the EQCR ring */
1395         for (i = 0; i < num_enqueued; i++) {
1396                 p = qbman_cena_write_start_wo_shadow(&s->sys,
1397                                 QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
1398                 cl = qb_cl(&d[i]);
1399                 memcpy(&p[1], &cl[1], 28);
1400                 memcpy(&p[8], &fd[i], sizeof(*fd));
1401                 eqcr_pi++;
1402         }
1403
1404         /* Set the verb byte, have to substitute in the valid-bit */
1405         eqcr_pi = s->eqcr.pi;
1406         for (i = 0; i < num_enqueued; i++) {
1407                 p = qbman_cena_write_start_wo_shadow(&s->sys,
1408                                 QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
1409                 cl = qb_cl(&d[i]);
1410                 p[0] = cl[0] | s->eqcr.pi_vb;
1411                 eqcr_pi++;
1412                 if (!(eqcr_pi & half_mask))
1413                         s->eqcr.pi_vb ^= QB_VALID_BIT;
1414         }
1415
1416         s->eqcr.pi = eqcr_pi & full_mask;
1417
1418         dma_wmb();
1419         qbman_cinh_write(&s->sys, QBMAN_CINH_SWP_EQCR_PI,
1420                                 (QB_RT_BIT)|(s->eqcr.pi)|s->eqcr.pi_vb);
1421
1422         return num_enqueued;
1423 }

1424 inline int qbman_swp_enqueue_multiple_desc(struct qbman_swp *s,
1425                                            const struct qbman_eq_desc *d,
1426                                            const struct qbman_fd *fd,
1427                                            int num_frames)
1428 {
1429         return qbman_swp_enqueue_multiple_desc_ptr(s, d, fd, num_frames);
1430 }
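
/*
 * Usage sketch (hypothetical caller, not part of this file): enqueue a
 * burst of frames, one descriptor per frame, busy-waiting while the EQCR
 * is full.  A return of 0 from the multi-descriptor enqueue simply means
 * no ring entries were available on that attempt.
 */
static inline void example_enqueue_burst(struct qbman_swp *s,
                                         const struct qbman_eq_desc *d,
                                         const struct qbman_fd *fd,
                                         int num_frames)
{
        int sent = 0;

        while (sent < num_frames)
                /* Returns how many of the remaining frames were accepted */
                sent += qbman_swp_enqueue_multiple_desc(s, &d[sent],
                                                        &fd[sent],
                                                        num_frames - sent);
}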
1431
1432 /*************************/
1433 /* Static (push) dequeue */
1434 /*************************/
1435
1436 void qbman_swp_push_get(struct qbman_swp *s, uint8_t channel_idx, int *enabled)
1437 {
1438         uint16_t src = (s->sdq >> QB_SDQCR_SRC_SHIFT) & QB_SDQCR_SRC_MASK;
1439
1440         QBMAN_BUG_ON(channel_idx > 15);
1441         *enabled = !!(src & (1 << channel_idx));
1442 }
1443
1444 void qbman_swp_push_set(struct qbman_swp *s, uint8_t channel_idx, int enable)
1445 {
1446         uint16_t dqsrc;
1447
1448         QBMAN_BUG_ON(channel_idx > 15);
1449         if (enable)
1450                 s->sdq |= 1 << channel_idx;
1451         else
1452                 s->sdq &= ~(1 << channel_idx);
1453
1454         /* Read back the complete src map.  If no channels are enabled,
1455          * the SDQCR must be 0 or else QMan will assert errors
1456          */
1457         dqsrc = (s->sdq >> QB_SDQCR_SRC_SHIFT) & QB_SDQCR_SRC_MASK;
1458         if (dqsrc != 0)
1459                 qbman_cinh_write(&s->sys, QBMAN_CINH_SWP_SDQCR, s->sdq);
1460         else
1461                 qbman_cinh_write(&s->sys, QBMAN_CINH_SWP_SDQCR, 0);
1462 }
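
/*
 * Usage sketch (hypothetical): enable static (push) dequeue from channel
 * index 0, confirm it via qbman_swp_push_get(), then disable it again.
 * Per the QBMAN_BUG_ON checks above, channel_idx must be 0..15.
 */
static inline void example_toggle_push_dequeue(struct qbman_swp *s)
{
        int enabled;

        qbman_swp_push_set(s, 0, 1);        /* start dequeuing channel 0 */
        qbman_swp_push_get(s, 0, &enabled); /* enabled is now non-zero */
        qbman_swp_push_set(s, 0, 0);        /* stop dequeuing channel 0 */
}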
1463
1464 /***************************/
1465 /* Volatile (pull) dequeue */
1466 /***************************/
1467
1468 /* These should be const, eventually */
1469 #define QB_VDQCR_VERB_DCT_SHIFT    0
1470 #define QB_VDQCR_VERB_DT_SHIFT     2
1471 #define QB_VDQCR_VERB_RLS_SHIFT    4
1472 #define QB_VDQCR_VERB_WAE_SHIFT    5
1473 #define QB_VDQCR_VERB_RAD_SHIFT    6
1474
1475 enum qb_pull_dt_e {
1476         qb_pull_dt_channel,
1477         qb_pull_dt_workqueue,
1478         qb_pull_dt_framequeue
1479 };
1480
1481 void qbman_pull_desc_clear(struct qbman_pull_desc *d)
1482 {
1483         memset(d, 0, sizeof(*d));
1484 }
1485
1486 void qbman_pull_desc_set_storage(struct qbman_pull_desc *d,
1487                                  struct qbman_result *storage,
1488                                  dma_addr_t storage_phys,
1489                                  int stash)
1490 {
1491         d->pull.rsp_addr_virt = (size_t)storage;
1492
1493         if (!storage) {
1494                 d->pull.verb &= ~(1 << QB_VDQCR_VERB_RLS_SHIFT);
1495                 return;
1496         }
1497         d->pull.verb |= 1 << QB_VDQCR_VERB_RLS_SHIFT;
1498         if (stash)
1499                 d->pull.verb |= 1 << QB_VDQCR_VERB_WAE_SHIFT;
1500         else
1501                 d->pull.verb &= ~(1 << QB_VDQCR_VERB_WAE_SHIFT);
1502
1503         d->pull.rsp_addr = storage_phys;
1504 }
1505
1506 void qbman_pull_desc_set_numframes(struct qbman_pull_desc *d,
1507                                    uint8_t numframes)
1508 {
1509         d->pull.numf = numframes - 1;
1510 }
1511
1512 void qbman_pull_desc_set_token(struct qbman_pull_desc *d, uint8_t token)
1513 {
1514         d->pull.tok = token;
1515 }
1516
1517 void qbman_pull_desc_set_fq(struct qbman_pull_desc *d, uint32_t fqid)
1518 {
1519         d->pull.verb |= 1 << QB_VDQCR_VERB_DCT_SHIFT;
1520         d->pull.verb |= qb_pull_dt_framequeue << QB_VDQCR_VERB_DT_SHIFT;
1521         d->pull.dq_src = fqid;
1522 }
1523
1524 void qbman_pull_desc_set_wq(struct qbman_pull_desc *d, uint32_t wqid,
1525                             enum qbman_pull_type_e dct)
1526 {
1527         d->pull.verb |= dct << QB_VDQCR_VERB_DCT_SHIFT;
1528         d->pull.verb |= qb_pull_dt_workqueue << QB_VDQCR_VERB_DT_SHIFT;
1529         d->pull.dq_src = wqid;
1530 }
1531
1532 void qbman_pull_desc_set_channel(struct qbman_pull_desc *d, uint32_t chid,
1533                                  enum qbman_pull_type_e dct)
1534 {
1535         d->pull.verb |= dct << QB_VDQCR_VERB_DCT_SHIFT;
1536         d->pull.verb |= qb_pull_dt_channel << QB_VDQCR_VERB_DT_SHIFT;
1537         d->pull.dq_src = chid;
1538 }
1539
1540 void qbman_pull_desc_set_rad(struct qbman_pull_desc *d, int rad)
1541 {
1542         if (d->pull.verb & (1 << QB_VDQCR_VERB_RLS_SHIFT)) {
1543                 if (rad)
1544                         d->pull.verb |= 1 << QB_VDQCR_VERB_RAD_SHIFT;
1545                 else
1546                         d->pull.verb &= ~(1 << QB_VDQCR_VERB_RAD_SHIFT);
1547         } else {
1548                 pr_err("The RAD feature is not valid when RLS = 0\n");
1549         }
1550 }
1551
1552 static int qbman_swp_pull_direct(struct qbman_swp *s,
1553                                  struct qbman_pull_desc *d)
1554 {
1555         uint32_t *p;
1556         uint32_t *cl = qb_cl(d);
1557
1558         if (!atomic_dec_and_test(&s->vdq.busy)) {
1559                 atomic_inc(&s->vdq.busy);
1560                 return -EBUSY;
1561         }
1562
1563         d->pull.tok = s->sys.idx + 1;
1564         s->vdq.storage = (void *)(size_t)d->pull.rsp_addr_virt;
1565         p = qbman_cena_write_start_wo_shadow(&s->sys, QBMAN_CENA_SWP_VDQCR);
1566         memcpy(&p[1], &cl[1], 12);
1567
1568         /* Set the verb byte, have to substitute in the valid-bit */
1569         lwsync();
1570         p[0] = cl[0] | s->vdq.valid_bit;
1571         s->vdq.valid_bit ^= QB_VALID_BIT;
1572         qbman_cena_write_complete_wo_shadow(&s->sys, QBMAN_CENA_SWP_VDQCR);
1573
1574         return 0;
1575 }
1576
1577 static int qbman_swp_pull_mem_back(struct qbman_swp *s,
1578                                    struct qbman_pull_desc *d)
1579 {
1580         uint32_t *p;
1581         uint32_t *cl = qb_cl(d);
1582
1583         if (!atomic_dec_and_test(&s->vdq.busy)) {
1584                 atomic_inc(&s->vdq.busy);
1585                 return -EBUSY;
1586         }
1587
1588         d->pull.tok = s->sys.idx + 1;
1589         s->vdq.storage = (void *)(size_t)d->pull.rsp_addr_virt;
1590         p = qbman_cena_write_start_wo_shadow(&s->sys, QBMAN_CENA_SWP_VDQCR_MEM);
1591         memcpy(&p[1], &cl[1], 12);
1592
1593         /* Set the verb byte, have to substitute in the valid-bit */
1594         p[0] = cl[0] | s->vdq.valid_bit;
1595         s->vdq.valid_bit ^= QB_VALID_BIT;
1596         dma_wmb();
1597         qbman_cinh_write(&s->sys, QBMAN_CINH_SWP_VDQCR_RT, QMAN_RT_MODE);
1598
1599         return 0;
1600 }
1601
1602 inline int qbman_swp_pull(struct qbman_swp *s, struct qbman_pull_desc *d)
1603 {
1604         return qbman_swp_pull_ptr(s, d);
1605 }
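
/*
 * Usage sketch (hypothetical): issue a volatile dequeue of up to 4 frames
 * from frame queue 'fqid' into caller-provided, DMA-able storage, then
 * poll for the response.  The storage token must start at 0 (e.g. cleared
 * by the caller or by a previous qbman_check_new_result() call) so that
 * the hardware-written token can be detected.
 */
static inline int example_pull_fq(struct qbman_swp *s, uint32_t fqid,
                                  struct qbman_result *storage,
                                  dma_addr_t storage_phys)
{
        struct qbman_pull_desc pd;
        int ret;

        qbman_pull_desc_clear(&pd);
        qbman_pull_desc_set_storage(&pd, storage, storage_phys, 1);
        qbman_pull_desc_set_numframes(&pd, 4);
        qbman_pull_desc_set_fq(&pd, fqid);

        ret = qbman_swp_pull(s, &pd);  /* -EBUSY if a pull is in flight */
        if (ret)
                return ret;

        while (!qbman_check_new_result(storage))
                ;                      /* spin until the token flips */
        return 0;
}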
1606
1607 /****************/
1608 /* Polling DQRR */
1609 /****************/
1610
1611 #define QMAN_DQRR_PI_MASK              0xf
1612
1613 #define QBMAN_RESULT_DQ        0x60
1614 #define QBMAN_RESULT_FQRN      0x21
1615 #define QBMAN_RESULT_FQRNI     0x22
1616 #define QBMAN_RESULT_FQPN      0x24
1617 #define QBMAN_RESULT_FQDAN     0x25
1618 #define QBMAN_RESULT_CDAN      0x26
1619 #define QBMAN_RESULT_CSCN_MEM  0x27
1620 #define QBMAN_RESULT_CGCU      0x28
1621 #define QBMAN_RESULT_BPSCN     0x29
1622 #define QBMAN_RESULT_CSCN_WQ   0x2a
1623
1624 #include <rte_prefetch.h>
1625
1626 void qbman_swp_prefetch_dqrr_next(struct qbman_swp *s)
1627 {
1628         const struct qbman_result *p;
1629
1630         p = qbman_cena_read_wo_shadow(&s->sys,
1631                 QBMAN_CENA_SWP_DQRR(s->dqrr.next_idx));
1632         rte_prefetch0(p);
1633 }
1634
1635 /* Returns NULL if there are no unconsumed DQRR entries. Returns a DQRR entry
1636  * only once, so repeated calls can return a sequence of DQRR entries, without
1637  * requiring that they be consumed immediately or in any particular order.
1638  */
1639 inline const struct qbman_result *qbman_swp_dqrr_next(struct qbman_swp *s)
1640 {
1641         return qbman_swp_dqrr_next_ptr(s);
1642 }
1643
1644 const struct qbman_result *qbman_swp_dqrr_next_direct(struct qbman_swp *s)
1645 {
1646         uint32_t verb;
1647         uint32_t response_verb;
1648         uint32_t flags;
1649         const struct qbman_result *p;
1650
1651         /* Before using valid-bit to detect if something is there, we have to
1652          * handle the case of the DQRR reset bug...
1653          */
1654         if (s->dqrr.reset_bug) {
1655                 /* We pick up new entries by cache-inhibited producer index,
1656                  * which means that a non-coherent mapping would require us to
1657                  * invalidate and read *only* once that PI has indicated that
1658                  * there's an entry here. The first trip around the DQRR ring
1659                  * will be much less efficient than all subsequent trips around
1660                  * it...
1661                  */
1662                 uint8_t pi = qbman_cinh_read(&s->sys, QBMAN_CINH_SWP_DQPI) &
1663                              QMAN_DQRR_PI_MASK;
1664
1665                 /* there are new entries if pi != next_idx */
1666                 if (pi == s->dqrr.next_idx)
1667                         return NULL;
1668
1669                 /* if next_idx is/was the last ring index, and 'pi' is
1670                  * different, we can disable the workaround as all the ring
1671                  * entries have now been DMA'd to so valid-bit checking is
1672                  * repaired. Note: this logic needs to be based on next_idx
1673                  * (which increments one at a time), rather than on pi (which
1674                  * can burst and wrap-around between our snapshots of it).
1675                  */
1676                 QBMAN_BUG_ON((s->dqrr.dqrr_size - 1) < 0);
1677                 if (s->dqrr.next_idx == (s->dqrr.dqrr_size - 1u)) {
1678                         pr_debug("DEBUG: next_idx=%d, pi=%d, clear reset bug\n",
1679                                  s->dqrr.next_idx, pi);
1680                         s->dqrr.reset_bug = 0;
1681                 }
1682                 qbman_cena_invalidate_prefetch(&s->sys,
1683                                         QBMAN_CENA_SWP_DQRR(s->dqrr.next_idx));
1684         }
1685         p = qbman_cena_read_wo_shadow(&s->sys,
1686                         QBMAN_CENA_SWP_DQRR(s->dqrr.next_idx));
1687
1688         verb = p->dq.verb;
1689
1690         /* If the valid-bit isn't of the expected polarity, there is nothing
1691          * there. Note, in the DQRR reset bug workaround, we shouldn't need to
1692          * skip this check, because we've already determined that a new entry
1693          * is available and we've invalidated the cacheline before reading it,
1694          * so the valid-bit behaviour is repaired and should tell us what we
1695          * already knew from reading PI.
1696          */
1697         if ((verb & QB_VALID_BIT) != s->dqrr.valid_bit)
1698                 return NULL;
1699
1700         /* There's something there. Advance "next_idx" to the next ring
1701          * entry before returning what we found.
1702          */
1703         s->dqrr.next_idx++;
1704         if (s->dqrr.next_idx == s->dqrr.dqrr_size) {
1705                 s->dqrr.next_idx = 0;
1706                 s->dqrr.valid_bit ^= QB_VALID_BIT;
1707         }
1708         /* If this is the final response to a volatile dequeue command
1709          * indicate that the vdq is no longer busy
1710          */
1711         flags = p->dq.stat;
1712         response_verb = verb & QBMAN_RESPONSE_VERB_MASK;
1713         if ((response_verb == QBMAN_RESULT_DQ) &&
1714             (flags & QBMAN_DQ_STAT_VOLATILE) &&
1715             (flags & QBMAN_DQ_STAT_EXPIRED))
1716                 atomic_inc(&s->vdq.busy);
1717
1718         return p;
1719 }
1720
1721 const struct qbman_result *qbman_swp_dqrr_next_mem_back(struct qbman_swp *s)
1722 {
1723         uint32_t verb;
1724         uint32_t response_verb;
1725         uint32_t flags;
1726         const struct qbman_result *p;
1727
1728         p = qbman_cena_read_wo_shadow(&s->sys,
1729                         QBMAN_CENA_SWP_DQRR_MEM(s->dqrr.next_idx));
1730
1731         verb = p->dq.verb;
1732
1733         /* If the valid-bit isn't of the expected polarity, there is nothing
1734          * there. (Memory-backed portals don't need the DQRR reset bug
1735          * workaround used in the "direct" variant above.)
1736          */
1740         if ((verb & QB_VALID_BIT) != s->dqrr.valid_bit)
1741                 return NULL;
1742
1743         /* There's something there. Advance "next_idx" to the next ring
1744          * entry before returning what we found.
1745          */
1746         s->dqrr.next_idx++;
1747         if (s->dqrr.next_idx == s->dqrr.dqrr_size) {
1748                 s->dqrr.next_idx = 0;
1749                 s->dqrr.valid_bit ^= QB_VALID_BIT;
1750         }
1751         /* If this is the final response to a volatile dequeue command
1752          * indicate that the vdq is no longer busy
1753          */
1754         flags = p->dq.stat;
1755         response_verb = verb & QBMAN_RESPONSE_VERB_MASK;
1756         if ((response_verb == QBMAN_RESULT_DQ)
1757                         && (flags & QBMAN_DQ_STAT_VOLATILE)
1758                         && (flags & QBMAN_DQ_STAT_EXPIRED))
1759                 atomic_inc(&s->vdq.busy);
1760         return p;
1761 }
1762
1763 /* Consume DQRR entries previously returned from qbman_swp_dqrr_next(). */
1764 void qbman_swp_dqrr_consume(struct qbman_swp *s,
1765                             const struct qbman_result *dq)
1766 {
1767         qbman_cinh_write(&s->sys,
1768                         QBMAN_CINH_SWP_DCAP, QBMAN_IDX_FROM_DQRR(dq));
1769 }
1770
1771 /* Consume DQRR entries previously returned from qbman_swp_dqrr_next(). */
1772 void qbman_swp_dqrr_idx_consume(struct qbman_swp *s,
1773                             uint8_t dqrr_index)
1774 {
1775         qbman_cinh_write(&s->sys, QBMAN_CINH_SWP_DCAP, dqrr_index);
1776 }
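
/*
 * Usage sketch (hypothetical): drain the DQRR ring, handing frame-dequeue
 * entries to a caller-supplied handler and consuming every entry exactly
 * once.
 */
static inline void example_poll_dqrr(struct qbman_swp *s,
                                     void (*handle_fd)(const struct qbman_fd *))
{
        const struct qbman_result *dq;

        while ((dq = qbman_swp_dqrr_next(s)) != NULL) {
                if (qbman_result_is_DQ(dq))
                        handle_fd(qbman_result_DQ_fd(dq));
                qbman_swp_dqrr_consume(s, dq);
        }
}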
1777
1778 /*********************************/
1779 /* Polling user-provided storage */
1780 /*********************************/
1781
1782 int qbman_result_has_new_result(struct qbman_swp *s,
1783                                 struct qbman_result *dq)
1784 {
1785         if (dq->dq.tok == 0)
1786                 return 0;
1787
1788         /*
1789          * Set the token to 0 so that we will detect the change back to a
1790          * non-zero value the next time this storage location is polled.
1791          * Callers should otherwise treat the dequeue responses as read-only.
1792          */
1793         dq->dq.tok = 0;
1794
1795         /*
1796          * VDQCR "no longer busy" hook - not quite the same as DQRR, because
1797          * the fact "VDQCR" shows busy doesn't mean that we hold the result
1798          * that makes it available. E.g. we may be looking at our 10th dequeue
1799          * result, having released VDQCR after the 1st result and it is now
1800          * busy due to some other command!
1801          */
1802         if (s->vdq.storage == dq) {
1803                 s->vdq.storage = NULL;
1804                 atomic_inc(&s->vdq.busy);
1805         }
1806
1807         return 1;
1808 }
1809
1810 int qbman_check_new_result(struct qbman_result *dq)
1811 {
1812         if (dq->dq.tok == 0)
1813                 return 0;
1814
1815         /*
1816          * Set the token to 0 so that we will detect the change back to a
1817          * non-zero value the next time this storage location is polled.
1818          * Callers should otherwise treat the dequeue responses as read-only.
1819          */
1820         dq->dq.tok = 0;
1821
1822         return 1;
1823 }
1824
1825 int qbman_check_command_complete(struct qbman_result *dq)
1826 {
1827         struct qbman_swp *s;
1828
1829         if (dq->dq.tok == 0)
1830                 return 0;
1831
1832         s = portal_idx_map[dq->dq.tok - 1];
1833         /*
1834          * VDQCR "no longer busy" hook - not quite the same as DQRR, because
1835          * the fact "VDQCR" shows busy doesn't mean that we hold the result
1836          * that makes it available. E.g. we may be looking at our 10th dequeue
1837          * result, having released VDQCR after the 1st result and it is now
1838          * busy due to some other command!
1839          */
1840         if (s->vdq.storage == dq) {
1841                 s->vdq.storage = NULL;
1842                 atomic_inc(&s->vdq.busy);
1843         }
1844
1845         return 1;
1846 }
1847
1848 /********************************/
1849 /* Categorising qbman results   */
1850 /********************************/
1851
1852 static inline int __qbman_result_is_x(const struct qbman_result *dq,
1853                                       uint8_t x)
1854 {
1855         uint8_t response_verb = dq->dq.verb & QBMAN_RESPONSE_VERB_MASK;
1856
1857         return (response_verb == x);
1858 }
1859
1860 int qbman_result_is_DQ(const struct qbman_result *dq)
1861 {
1862         return __qbman_result_is_x(dq, QBMAN_RESULT_DQ);
1863 }
1864
1865 int qbman_result_is_FQDAN(const struct qbman_result *dq)
1866 {
1867         return __qbman_result_is_x(dq, QBMAN_RESULT_FQDAN);
1868 }
1869
1870 int qbman_result_is_CDAN(const struct qbman_result *dq)
1871 {
1872         return __qbman_result_is_x(dq, QBMAN_RESULT_CDAN);
1873 }
1874
1875 int qbman_result_is_CSCN(const struct qbman_result *dq)
1876 {
1877         return __qbman_result_is_x(dq, QBMAN_RESULT_CSCN_MEM) ||
1878                 __qbman_result_is_x(dq, QBMAN_RESULT_CSCN_WQ);
1879 }
1880
1881 int qbman_result_is_BPSCN(const struct qbman_result *dq)
1882 {
1883         return __qbman_result_is_x(dq, QBMAN_RESULT_BPSCN);
1884 }
1885
1886 int qbman_result_is_CGCU(const struct qbman_result *dq)
1887 {
1888         return __qbman_result_is_x(dq, QBMAN_RESULT_CGCU);
1889 }
1890
1891 int qbman_result_is_FQRN(const struct qbman_result *dq)
1892 {
1893         return __qbman_result_is_x(dq, QBMAN_RESULT_FQRN);
1894 }
1895
1896 int qbman_result_is_FQRNI(const struct qbman_result *dq)
1897 {
1898         return __qbman_result_is_x(dq, QBMAN_RESULT_FQRNI);
1899 }
1900
1901 int qbman_result_is_FQPN(const struct qbman_result *dq)
1902 {
1903         return __qbman_result_is_x(dq, QBMAN_RESULT_FQPN);
1904 }
1905
1906 /*********************************/
1907 /* Parsing frame dequeue results */
1908 /*********************************/
1909
1910 /* These APIs assume qbman_result_is_DQ() is TRUE */
1911
1912 uint8_t qbman_result_DQ_flags(const struct qbman_result *dq)
1913 {
1914         return dq->dq.stat;
1915 }
1916
1917 uint16_t qbman_result_DQ_seqnum(const struct qbman_result *dq)
1918 {
1919         return dq->dq.seqnum;
1920 }
1921
1922 uint16_t qbman_result_DQ_odpid(const struct qbman_result *dq)
1923 {
1924         return dq->dq.oprid;
1925 }
1926
1927 uint32_t qbman_result_DQ_fqid(const struct qbman_result *dq)
1928 {
1929         return dq->dq.fqid;
1930 }
1931
1932 uint32_t qbman_result_DQ_byte_count(const struct qbman_result *dq)
1933 {
1934         return dq->dq.fq_byte_cnt;
1935 }
1936
1937 uint32_t qbman_result_DQ_frame_count(const struct qbman_result *dq)
1938 {
1939         return dq->dq.fq_frm_cnt;
1940 }
1941
1942 uint64_t qbman_result_DQ_fqd_ctx(const struct qbman_result *dq)
1943 {
1944         return dq->dq.fqd_ctx;
1945 }
1946
1947 const struct qbman_fd *qbman_result_DQ_fd(const struct qbman_result *dq)
1948 {
1949         return (const struct qbman_fd *)&dq->dq.fd[0];
1950 }
1951
1952 /**************************************/
1953 /* Parsing state-change notifications */
1954 /**************************************/
1955 uint8_t qbman_result_SCN_state(const struct qbman_result *scn)
1956 {
1957         return scn->scn.state;
1958 }
1959
1960 uint32_t qbman_result_SCN_rid(const struct qbman_result *scn)
1961 {
1962         return scn->scn.rid_tok;
1963 }
1964
1965 uint64_t qbman_result_SCN_ctx(const struct qbman_result *scn)
1966 {
1967         return scn->scn.ctx;
1968 }
1969
1970 /*****************/
1971 /* Parsing BPSCN */
1972 /*****************/
1973 uint16_t qbman_result_bpscn_bpid(const struct qbman_result *scn)
1974 {
1975         return (uint16_t)qbman_result_SCN_rid(scn) & 0x3FFF;
1976 }
1977
1978 int qbman_result_bpscn_has_free_bufs(const struct qbman_result *scn)
1979 {
1980         return !(int)(qbman_result_SCN_state(scn) & 0x1);
1981 }
1982
1983 int qbman_result_bpscn_is_depleted(const struct qbman_result *scn)
1984 {
1985         return (int)(qbman_result_SCN_state(scn) & 0x2);
1986 }
1987
1988 int qbman_result_bpscn_is_surplus(const struct qbman_result *scn)
1989 {
1990         return (int)(qbman_result_SCN_state(scn) & 0x4);
1991 }
1992
1993 uint64_t qbman_result_bpscn_ctx(const struct qbman_result *scn)
1994 {
1995         return qbman_result_SCN_ctx(scn);
1996 }
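
/*
 * Usage sketch (hypothetical): decode a buffer-pool state-change
 * notification into a debug trace line using the accessors above.
 */
static inline void example_trace_bpscn(const struct qbman_result *scn)
{
        pr_debug("BPSCN: bpid=%d depleted=%d surplus=%d free_bufs=%d\n",
                 qbman_result_bpscn_bpid(scn),
                 qbman_result_bpscn_is_depleted(scn),
                 qbman_result_bpscn_is_surplus(scn),
                 qbman_result_bpscn_has_free_bufs(scn));
}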
1997
1998 /*****************/
1999 /* Parsing CGCU  */
2000 /*****************/
2001 uint16_t qbman_result_cgcu_cgid(const struct qbman_result *scn)
2002 {
2003         return (uint16_t)qbman_result_SCN_rid(scn) & 0xFFFF;
2004 }
2005
2006 uint64_t qbman_result_cgcu_icnt(const struct qbman_result *scn)
2007 {
2008         return qbman_result_SCN_ctx(scn);
2009 }
2010
2011 /********************/
2012 /* Parsing EQ RESP  */
2013 /********************/
2014 struct qbman_fd *qbman_result_eqresp_fd(struct qbman_result *eqresp)
2015 {
2016         return (struct qbman_fd *)&eqresp->eq_resp.fd[0];
2017 }
2018
2019 void qbman_result_eqresp_set_rspid(struct qbman_result *eqresp, uint8_t val)
2020 {
2021         eqresp->eq_resp.rspid = val;
2022 }
2023
2024 uint8_t qbman_result_eqresp_rspid(struct qbman_result *eqresp)
2025 {
2026         return eqresp->eq_resp.rspid;
2027 }
2028
2029 uint8_t qbman_result_eqresp_rc(struct qbman_result *eqresp)
2030 {
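        /* rc == 0xE indicates success; anything else maps to -1, which
         * becomes 0xFF in the uint8_t return type (still non-zero).
         */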
2031         if (eqresp->eq_resp.rc == 0xE)
2032                 return 0;
2033         else
2034                 return -1;
2035 }
2036
2037 /******************/
2038 /* Buffer release */
2039 /******************/
2040 #define QB_BR_RC_VALID_SHIFT  5
2041 #define QB_BR_RCDI_SHIFT      6
2042
2043 void qbman_release_desc_clear(struct qbman_release_desc *d)
2044 {
2045         memset(d, 0, sizeof(*d));
2046         d->br.verb = 1 << QB_BR_RC_VALID_SHIFT;
2047 }
2048
2049 void qbman_release_desc_set_bpid(struct qbman_release_desc *d, uint16_t bpid)
2050 {
2051         d->br.bpid = bpid;
2052 }
2053
2054 void qbman_release_desc_set_rcdi(struct qbman_release_desc *d, int enable)
2055 {
2056         if (enable)
2057                 d->br.verb |= 1 << QB_BR_RCDI_SHIFT;
2058         else
2059                 d->br.verb &= ~(1 << QB_BR_RCDI_SHIFT);
2060 }
2061
2062 #define RAR_IDX(rar)     ((rar) & 0x7)
2063 #define RAR_VB(rar)      ((rar) & 0x80)
2064 #define RAR_SUCCESS(rar) ((rar) & 0x100)
2065
2066 static int qbman_swp_release_direct(struct qbman_swp *s,
2067                                     const struct qbman_release_desc *d,
2068                                     const uint64_t *buffers,
2069                                     unsigned int num_buffers)
2070 {
2071         uint32_t *p;
2072         const uint32_t *cl = qb_cl(d);
2073         uint32_t rar = qbman_cinh_read(&s->sys, QBMAN_CINH_SWP_RAR);
2074
2075         pr_debug("RAR=%08x\n", rar);
2076         if (!RAR_SUCCESS(rar))
2077                 return -EBUSY;
2078
2079         QBMAN_BUG_ON(!num_buffers || (num_buffers > 7));
2080
2081         /* Start the release command */
2082         p = qbman_cena_write_start_wo_shadow(&s->sys,
2083                                      QBMAN_CENA_SWP_RCR(RAR_IDX(rar)));
2084
2085         /* Copy the caller's buffer pointers to the command */
2086         u64_to_le32_copy(&p[2], buffers, num_buffers);
2087
2088         /* Set the verb byte, have to substitute in the valid-bit and the
2089          * number of buffers.
2090          */
2091         lwsync();
2092         p[0] = cl[0] | RAR_VB(rar) | num_buffers;
2093         qbman_cena_write_complete_wo_shadow(&s->sys,
2094                                     QBMAN_CENA_SWP_RCR(RAR_IDX(rar)));
2095
2096         return 0;
2097 }
2098
2099 static int qbman_swp_release_mem_back(struct qbman_swp *s,
2100                                       const struct qbman_release_desc *d,
2101                                       const uint64_t *buffers,
2102                                       unsigned int num_buffers)
2103 {
2104         uint32_t *p;
2105         const uint32_t *cl = qb_cl(d);
2106         uint32_t rar = qbman_cinh_read(&s->sys, QBMAN_CINH_SWP_RAR);
2107
2108         pr_debug("RAR=%08x\n", rar);
2109         if (!RAR_SUCCESS(rar))
2110                 return -EBUSY;
2111
2112         QBMAN_BUG_ON(!num_buffers || (num_buffers > 7));
2113
2114         /* Start the release command */
2115         p = qbman_cena_write_start_wo_shadow(&s->sys,
2116                 QBMAN_CENA_SWP_RCR_MEM(RAR_IDX(rar)));
2117
2118         /* Copy the caller's buffer pointers to the command */
2119         u64_to_le32_copy(&p[2], buffers, num_buffers);
2120
2121         /* Set the verb byte, have to substitute in the valid-bit and the
2122          * number of buffers.
2123          */
2124         p[0] = cl[0] | RAR_VB(rar) | num_buffers;
2125         lwsync();
2126         qbman_cinh_write(&s->sys, QBMAN_CINH_SWP_RCR_AM_RT +
2127                 RAR_IDX(rar) * 4, QMAN_RT_MODE);
2128
2129         return 0;
2130 }
2131
2132 inline int qbman_swp_release(struct qbman_swp *s,
2133                              const struct qbman_release_desc *d,
2134                              const uint64_t *buffers,
2135                              unsigned int num_buffers)
2136 {
2137         return qbman_swp_release_ptr(s, d, buffers, num_buffers);
2138 }
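
/*
 * Usage sketch (hypothetical): return up to 7 buffers to buffer pool
 * 'bpid', retrying while the release command ring is busy.  Per the
 * QBMAN_BUG_ON checks above, 1 <= num <= 7.
 */
static inline void example_release_buffers(struct qbman_swp *s, uint16_t bpid,
                                           const uint64_t *buffers,
                                           unsigned int num)
{
        struct qbman_release_desc rd;

        qbman_release_desc_clear(&rd);
        qbman_release_desc_set_bpid(&rd, bpid);

        while (qbman_swp_release(s, &rd, buffers, num) == -EBUSY)
                ;   /* RAR not yet available, try again */
}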
2139
2140 /*******************/
2141 /* Buffer acquires */
2142 /*******************/
2143 struct qbman_acquire_desc {
2144         uint8_t verb;
2145         uint8_t reserved;
2146         uint16_t bpid;
2147         uint8_t num;
2148         uint8_t reserved2[59];
2149 };
2150
2151 struct qbman_acquire_rslt {
2152         uint8_t verb;
2153         uint8_t rslt;
2154         uint16_t reserved;
2155         uint8_t num;
2156         uint8_t reserved2[3];
2157         uint64_t buf[7];
2158 };
2159
2160 int qbman_swp_acquire(struct qbman_swp *s, uint16_t bpid, uint64_t *buffers,
2161                       unsigned int num_buffers)
2162 {
2163         struct qbman_acquire_desc *p;
2164         struct qbman_acquire_rslt *r;
2165
2166         if (!num_buffers || (num_buffers > 7))
2167                 return -EINVAL;
2168
2169         /* Start the management command */
2170         p = qbman_swp_mc_start(s);
2171
2172         if (!p)
2173                 return -EBUSY;
2174
2175         /* Encode the caller-provided attributes */
2176         p->bpid = bpid;
2177         p->num = num_buffers;
2178
2179         /* Complete the management command */
2180         r = qbman_swp_mc_complete(s, p, QBMAN_MC_ACQUIRE);
2181         if (!r) {
2182                 pr_err("qbman: acquire from BPID %d failed, no response\n",
2183                        bpid);
2184                 return -EIO;
2185         }
2186
2187         /* Decode the outcome */
2188         QBMAN_BUG_ON((r->verb & QBMAN_RESPONSE_VERB_MASK) != QBMAN_MC_ACQUIRE);
2189
2190         /* Determine success or failure */
2191         if (r->rslt != QBMAN_MC_RSLT_OK) {
2192                 pr_err("Acquire buffers from BPID 0x%x failed, code=0x%02x\n",
2193                        bpid, r->rslt);
2194                 return -EIO;
2195         }
2196
2197         QBMAN_BUG_ON(r->num > num_buffers);
2198
2199         /* Copy the acquired buffers to the caller's array */
2200         u64_from_le32_copy(buffers, &r->buf[0], r->num);
2201
2202         return (int)r->num;
2203 }
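
/*
 * Usage sketch (hypothetical): drain a buffer pool by acquiring up to 7
 * buffers at a time.  qbman_swp_acquire() may legitimately return fewer
 * buffers than requested, so callers must use the return value rather
 * than the request size.
 */
static inline unsigned int example_drain_pool(struct qbman_swp *s,
                                              uint16_t bpid)
{
        uint64_t bufs[7];
        unsigned int total = 0;
        int got;

        /* Stops when the pool runs dry (0) or on error (< 0) */
        while ((got = qbman_swp_acquire(s, bpid, bufs, 7)) > 0)
                total += got;
        return total;
}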
2204
2205 /*****************/
2206 /* FQ management */
2207 /*****************/
2208 struct qbman_alt_fq_state_desc {
2209         uint8_t verb;
2210         uint8_t reserved[3];
2211         uint32_t fqid;
2212         uint8_t reserved2[56];
2213 };
2214
2215 struct qbman_alt_fq_state_rslt {
2216         uint8_t verb;
2217         uint8_t rslt;
2218         uint8_t reserved[62];
2219 };
2220
2221 #define ALT_FQ_FQID_MASK 0x00FFFFFF
2222
2223 static int qbman_swp_alt_fq_state(struct qbman_swp *s, uint32_t fqid,
2224                                   uint8_t alt_fq_verb)
2225 {
2226         struct qbman_alt_fq_state_desc *p;
2227         struct qbman_alt_fq_state_rslt *r;
2228
2229         /* Start the management command */
2230         p = qbman_swp_mc_start(s);
2231         if (!p)
2232                 return -EBUSY;
2233
2234         p->fqid = fqid & ALT_FQ_FQID_MASK;
2235
2236         /* Complete the management command */
2237         r = qbman_swp_mc_complete(s, p, alt_fq_verb);
2238         if (!r) {
2239                 pr_err("qbman: mgmt cmd failed, no response (verb=0x%x)\n",
2240                        alt_fq_verb);
2241                 return -EIO;
2242         }
2243
2244         /* Decode the outcome */
2245         QBMAN_BUG_ON((r->verb & QBMAN_RESPONSE_VERB_MASK) != alt_fq_verb);
2246
2247         /* Determine success or failure */
2248         if (r->rslt != QBMAN_MC_RSLT_OK) {
2249                 pr_err("ALT FQID %d failed: verb = 0x%08x, code = 0x%02x\n",
2250                        fqid, alt_fq_verb, r->rslt);
2251                 return -EIO;
2252         }
2253
2254         return 0;
2255 }
2256
2257 int qbman_swp_fq_schedule(struct qbman_swp *s, uint32_t fqid)
2258 {
2259         return qbman_swp_alt_fq_state(s, fqid, QBMAN_FQ_SCHEDULE);
2260 }
2261
2262 int qbman_swp_fq_force(struct qbman_swp *s, uint32_t fqid)
2263 {
2264         return qbman_swp_alt_fq_state(s, fqid, QBMAN_FQ_FORCE);
2265 }
2266
2267 int qbman_swp_fq_xon(struct qbman_swp *s, uint32_t fqid)
2268 {
2269         return qbman_swp_alt_fq_state(s, fqid, QBMAN_FQ_XON);
2270 }
2271
2272 int qbman_swp_fq_xoff(struct qbman_swp *s, uint32_t fqid)
2273 {
2274         return qbman_swp_alt_fq_state(s, fqid, QBMAN_FQ_XOFF);
2275 }
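
/*
 * Usage sketch (hypothetical): pause scheduling of a frame queue and then
 * resume it.  Both calls go through the same ALT_FQ management command,
 * differing only in the verb.
 */
static inline int example_pause_resume_fq(struct qbman_swp *s, uint32_t fqid)
{
        int ret = qbman_swp_fq_xoff(s, fqid);  /* stop scheduling */

        if (ret)
                return ret;
        return qbman_swp_fq_xon(s, fqid);      /* resume scheduling */
}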
2276
2277 /**********************/
2278 /* Channel management */
2279 /**********************/
2280
2281 struct qbman_cdan_ctrl_desc {
2282         uint8_t verb;
2283         uint8_t reserved;
2284         uint16_t ch;
2285         uint8_t we;
2286         uint8_t ctrl;
2287         uint16_t reserved2;
2288         uint64_t cdan_ctx;
2289         uint8_t reserved3[48];
2291 };
2292
2293 struct qbman_cdan_ctrl_rslt {
2294         uint8_t verb;
2295         uint8_t rslt;
2296         uint16_t ch;
2297         uint8_t reserved[60];
2298 };
2299
2300 /* Hide "ICD" for now as we don't use it, don't set it, and don't test it, so it
2301  * would be irresponsible to expose it.
2302  */
2303 #define CODE_CDAN_WE_EN    0x1
2304 #define CODE_CDAN_WE_CTX   0x4
2305
2306 static int qbman_swp_CDAN_set(struct qbman_swp *s, uint16_t channelid,
2307                               uint8_t we_mask, uint8_t cdan_en,
2308                               uint64_t ctx)
2309 {
2310         struct qbman_cdan_ctrl_desc *p;
2311         struct qbman_cdan_ctrl_rslt *r;
2312
2313         /* Start the management command */
2314         p = qbman_swp_mc_start(s);
2315         if (!p)
2316                 return -EBUSY;
2317
2318         /* Encode the caller-provided attributes */
2319         p->ch = channelid;
2320         p->we = we_mask;
2321         if (cdan_en)
2322                 p->ctrl = 1;
2323         else
2324                 p->ctrl = 0;
2325         p->cdan_ctx = ctx;
2326
2327         /* Complete the management command */
2328         r = qbman_swp_mc_complete(s, p, QBMAN_WQCHAN_CONFIGURE);
2329         if (!r) {
2330                 pr_err("qbman: wqchan config failed, no response\n");
2331                 return -EIO;
2332         }
2333
2334         /* Decode the outcome */
2335         QBMAN_BUG_ON((r->verb & QBMAN_RESPONSE_VERB_MASK)
2336                      != QBMAN_WQCHAN_CONFIGURE);
2337
2338         /* Determine success or failure */
2339         if (r->rslt != QBMAN_MC_RSLT_OK) {
2340                 pr_err("CDAN cQID %d failed: code = 0x%02x\n",
2341                        channelid, r->rslt);
2342                 return -EIO;
2343         }
2344
2345         return 0;
2346 }
2347
2348 int qbman_swp_CDAN_set_context(struct qbman_swp *s, uint16_t channelid,
2349                                uint64_t ctx)
2350 {
2351         return qbman_swp_CDAN_set(s, channelid,
2352                                   CODE_CDAN_WE_CTX,
2353                                   0, ctx);
2354 }
2355
2356 int qbman_swp_CDAN_enable(struct qbman_swp *s, uint16_t channelid)
2357 {
2358         return qbman_swp_CDAN_set(s, channelid,
2359                                   CODE_CDAN_WE_EN,
2360                                   1, 0);
2361 }
2362
2363 int qbman_swp_CDAN_disable(struct qbman_swp *s, uint16_t channelid)
2364 {
2365         return qbman_swp_CDAN_set(s, channelid,
2366                                   CODE_CDAN_WE_EN,
2367                                   0, 0);
2368 }
2369
2370 int qbman_swp_CDAN_set_context_enable(struct qbman_swp *s, uint16_t channelid,
2371                                       uint64_t ctx)
2372 {
2373         return qbman_swp_CDAN_set(s, channelid,
2374                                   CODE_CDAN_WE_EN | CODE_CDAN_WE_CTX,
2375                                   1, ctx);
2376 }
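
/*
 * Usage sketch (hypothetical): after arming notifications with
 * qbman_swp_CDAN_set_context_enable(), identify a CDAN belonging to this
 * channel by the context it carries, using the SCN accessors above.
 */
static inline int example_is_my_cdan(const struct qbman_result *dq,
                                     uint64_t expected_ctx)
{
        /* The 64-bit context programmed above comes back in the SCN */
        return qbman_result_is_CDAN(dq) &&
               qbman_result_SCN_ctx(dq) == expected_ctx;
}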
2377
2378 uint8_t qbman_get_dqrr_idx(const struct qbman_result *dqrr)
2379 {
2380         return QBMAN_IDX_FROM_DQRR(dqrr);
2381 }
2382
2383 struct qbman_result *qbman_get_dqrr_from_idx(struct qbman_swp *s, uint8_t idx)
2384 {
2385         struct qbman_result *dq;
2386
2387         dq = qbman_cena_read(&s->sys, QBMAN_CENA_SWP_DQRR(idx));
2388         return dq;
2389 }