drivers: fix typo in NXP comments
dpdk.git: drivers/bus/fslmc/qbman/qbman_portal.c
/* SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (C) 2014-2016 Freescale Semiconductor, Inc.
 * Copyright 2018 NXP
 *
 */

#include "qbman_sys.h"
#include "qbman_portal.h"

/* QBMan portal management command codes */
#define QBMAN_MC_ACQUIRE       0x30
#define QBMAN_WQCHAN_CONFIGURE 0x46

/* Reverse mapping of QBMAN_CENA_SWP_DQRR() */
#define QBMAN_IDX_FROM_DQRR(p) (((unsigned long)p & 0x1ff) >> 6)
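
/* Worked example (informational, not used by the code): DQRR entries sit
 * 64 bytes apart in the cache-enabled area, so bits 8:6 of an entry's
 * address encode its ring index. An entry at offset 0x0c0 therefore maps
 * to index (0x0c0 & 0x1ff) >> 6 = 3.
 */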

/* QBMan FQ management command codes */
#define QBMAN_FQ_SCHEDULE       0x48
#define QBMAN_FQ_FORCE          0x49
#define QBMAN_FQ_XON            0x4d
#define QBMAN_FQ_XOFF           0x4e

/*******************************/
/* Pre-defined attribute codes */
/*******************************/

#define QBMAN_RESPONSE_VERB_MASK   0x7f

/*************************/
/* SDQCR attribute codes */
/*************************/
#define QB_SDQCR_FC_SHIFT   29
#define QB_SDQCR_FC_MASK    0x1
#define QB_SDQCR_DCT_SHIFT  24
#define QB_SDQCR_DCT_MASK   0x3
#define QB_SDQCR_TOK_SHIFT  16
#define QB_SDQCR_TOK_MASK   0xff
#define QB_SDQCR_SRC_SHIFT  0
#define QB_SDQCR_SRC_MASK   0xffff

/* opaque token for static dequeues */
#define QMAN_SDQCR_TOKEN    0xbb

enum qbman_sdqcr_dct {
	qbman_sdqcr_dct_null = 0,
	qbman_sdqcr_dct_prio_ics,
	qbman_sdqcr_dct_active_ics,
	qbman_sdqcr_dct_active
};

enum qbman_sdqcr_fc {
	qbman_sdqcr_fc_one = 0,
	qbman_sdqcr_fc_up_to_3 = 1
};
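
/* Illustrative sketch (informational, mirrors the defaults programmed in
 * qbman_swp_init() below): the SDQCR word is composed from these fields as
 *
 *	sdq = (qbman_sdqcr_dct_prio_ics << QB_SDQCR_DCT_SHIFT)
 *	    | (qbman_sdqcr_fc_up_to_3   << QB_SDQCR_FC_SHIFT)
 *	    | (QMAN_SDQCR_TOKEN         << QB_SDQCR_TOK_SHIFT);
 *
 * i.e. dequeue command type in bits 25:24, flow control in bit 29, the
 * opaque token in bits 23:16 and the channel source map in bits 15:0.
 */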

/* We need to keep track of which SWP triggered a pull command,
 * so keep an array of portal IDs and use the token field to
 * find the proper portal.
 */
#define MAX_QBMAN_PORTALS  64
static struct qbman_swp *portal_idx_map[MAX_QBMAN_PORTALS];
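
/* Note (informational): qbman_swp_pull_*() below stamp each pull command's
 * token with (s->sys.idx + 1), so a non-zero token in a dequeue response
 * can be mapped back to its originating portal through portal_idx_map[].
 */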

/* Internal function declarations */
static int
qbman_swp_enqueue_array_mode_direct(struct qbman_swp *s,
		const struct qbman_eq_desc *d,
		const struct qbman_fd *fd);
static int
qbman_swp_enqueue_array_mode_mem_back(struct qbman_swp *s,
		const struct qbman_eq_desc *d,
		const struct qbman_fd *fd);

static int
qbman_swp_enqueue_ring_mode_direct(struct qbman_swp *s,
		const struct qbman_eq_desc *d,
		const struct qbman_fd *fd);
static int
qbman_swp_enqueue_ring_mode_mem_back(struct qbman_swp *s,
		const struct qbman_eq_desc *d,
		const struct qbman_fd *fd);

static int
qbman_swp_enqueue_multiple_direct(struct qbman_swp *s,
		const struct qbman_eq_desc *d,
		const struct qbman_fd *fd,
		uint32_t *flags,
		int num_frames);
static int
qbman_swp_enqueue_multiple_mem_back(struct qbman_swp *s,
		const struct qbman_eq_desc *d,
		const struct qbman_fd *fd,
		uint32_t *flags,
		int num_frames);

static int
qbman_swp_enqueue_multiple_fd_direct(struct qbman_swp *s,
		const struct qbman_eq_desc *d,
		struct qbman_fd **fd,
		uint32_t *flags,
		int num_frames);

static int
qbman_swp_enqueue_multiple_fd_mem_back(struct qbman_swp *s,
		const struct qbman_eq_desc *d,
		struct qbman_fd **fd,
		uint32_t *flags,
		int num_frames);

static int
qbman_swp_enqueue_multiple_desc_direct(struct qbman_swp *s,
		const struct qbman_eq_desc *d,
		const struct qbman_fd *fd,
		int num_frames);
static int
qbman_swp_enqueue_multiple_desc_mem_back(struct qbman_swp *s,
		const struct qbman_eq_desc *d,
		const struct qbman_fd *fd,
		int num_frames);

static int
qbman_swp_pull_direct(struct qbman_swp *s, struct qbman_pull_desc *d);
static int
qbman_swp_pull_mem_back(struct qbman_swp *s, struct qbman_pull_desc *d);

const struct qbman_result *qbman_swp_dqrr_next_direct(struct qbman_swp *s);
const struct qbman_result *qbman_swp_dqrr_next_mem_back(struct qbman_swp *s);

static int
qbman_swp_release_direct(struct qbman_swp *s,
		const struct qbman_release_desc *d,
		const uint64_t *buffers, unsigned int num_buffers);
static int
qbman_swp_release_mem_back(struct qbman_swp *s,
		const struct qbman_release_desc *d,
		const uint64_t *buffers, unsigned int num_buffers);

/* Function pointers */
static int (*qbman_swp_enqueue_array_mode_ptr)(struct qbman_swp *s,
		const struct qbman_eq_desc *d,
		const struct qbman_fd *fd)
	= qbman_swp_enqueue_array_mode_direct;

static int (*qbman_swp_enqueue_ring_mode_ptr)(struct qbman_swp *s,
		const struct qbman_eq_desc *d,
		const struct qbman_fd *fd)
	= qbman_swp_enqueue_ring_mode_direct;

static int (*qbman_swp_enqueue_multiple_ptr)(struct qbman_swp *s,
		const struct qbman_eq_desc *d,
		const struct qbman_fd *fd,
		uint32_t *flags,
		int num_frames)
	= qbman_swp_enqueue_multiple_direct;

static int (*qbman_swp_enqueue_multiple_fd_ptr)(struct qbman_swp *s,
		const struct qbman_eq_desc *d,
		struct qbman_fd **fd,
		uint32_t *flags,
		int num_frames)
	= qbman_swp_enqueue_multiple_fd_direct;

static int (*qbman_swp_enqueue_multiple_desc_ptr)(struct qbman_swp *s,
		const struct qbman_eq_desc *d,
		const struct qbman_fd *fd,
		int num_frames)
	= qbman_swp_enqueue_multiple_desc_direct;

static int (*qbman_swp_pull_ptr)(struct qbman_swp *s,
		struct qbman_pull_desc *d)
	= qbman_swp_pull_direct;

const struct qbman_result *(*qbman_swp_dqrr_next_ptr)(struct qbman_swp *s)
		= qbman_swp_dqrr_next_direct;

static int (*qbman_swp_release_ptr)(struct qbman_swp *s,
			const struct qbman_release_desc *d,
			const uint64_t *buffers, unsigned int num_buffers)
			= qbman_swp_release_direct;

/*********************************/
/* Portal constructor/destructor */
/*********************************/

/* Software portals should always be in the power-on state when we initialise,
 * due to the CCSR-based portal reset functionality that MC has.
 *
 * Erk! Turns out that QMan versions prior to 4.1 do not correctly reset DQRR
 * valid-bits, so we need to support a workaround where we don't trust
 * valid-bits when detecting new entries until any stale ring entries have been
 * overwritten at least once. The idea is that we read PI for the first few
 * entries, then switch to valid-bit after that. The trick is to clear the
 * bug-workaround boolean once the PI wraps around the ring for the first time.
 *
 * Note: this still carries a slight additional cost once the decrementer hits
 * zero.
 */
struct qbman_swp *qbman_swp_init(const struct qbman_swp_desc *d)
{
	int ret;
	uint32_t eqcr_pi;
	uint32_t mask_size;
	struct qbman_swp *p = malloc(sizeof(*p));

	if (!p)
		return NULL;

	memset(p, 0, sizeof(struct qbman_swp));

	p->desc = *d;
#ifdef QBMAN_CHECKING
	p->mc.check = swp_mc_can_start;
#endif
	p->mc.valid_bit = QB_VALID_BIT;
	p->sdq |= qbman_sdqcr_dct_prio_ics << QB_SDQCR_DCT_SHIFT;
	p->sdq |= qbman_sdqcr_fc_up_to_3 << QB_SDQCR_FC_SHIFT;
	p->sdq |= QMAN_SDQCR_TOKEN << QB_SDQCR_TOK_SHIFT;
	if ((d->qman_version & QMAN_REV_MASK) >= QMAN_REV_5000
			&& (d->cena_access_mode == qman_cena_fastest_access))
		p->mr.valid_bit = QB_VALID_BIT;

	atomic_set(&p->vdq.busy, 1);
	p->vdq.valid_bit = QB_VALID_BIT;
	p->dqrr.valid_bit = QB_VALID_BIT;
	qman_version = p->desc.qman_version;
	if ((qman_version & QMAN_REV_MASK) < QMAN_REV_4100) {
		p->dqrr.dqrr_size = 4;
		p->dqrr.reset_bug = 1;
	} else {
		p->dqrr.dqrr_size = 8;
		p->dqrr.reset_bug = 0;
	}

	ret = qbman_swp_sys_init(&p->sys, d, p->dqrr.dqrr_size);
	if (ret) {
		free(p);
		pr_err("qbman_swp_sys_init() failed %d\n", ret);
		return NULL;
	}

	/* Verify that the DQRR PI is 0 - if it is not, the portal isn't
	 * in the default state, which is an error
	 */
	if (qbman_cinh_read(&p->sys, QBMAN_CINH_SWP_DQPI) & 0xF) {
		pr_err("qbman DQRR PI is not zero, portal is not clean\n");
		free(p);
		return NULL;
	}

	/* SDQCR needs to be initialized to 0 when no channels are
	 * being dequeued from, or else the QMan HW will indicate an
	 * error.  The values that were calculated above will be
	 * applied when dequeues from a specific channel are enabled.
	 */
	qbman_cinh_write(&p->sys, QBMAN_CINH_SWP_SDQCR, 0);

	p->eqcr.pi_ring_size = 8;
	if ((qman_version & QMAN_REV_MASK) >= QMAN_REV_5000
			&& (d->cena_access_mode == qman_cena_fastest_access)) {
		p->eqcr.pi_ring_size = 32;
		qbman_swp_enqueue_array_mode_ptr =
				qbman_swp_enqueue_array_mode_mem_back;
		qbman_swp_enqueue_ring_mode_ptr =
				qbman_swp_enqueue_ring_mode_mem_back;
		qbman_swp_enqueue_multiple_ptr =
				qbman_swp_enqueue_multiple_mem_back;
		qbman_swp_enqueue_multiple_fd_ptr =
				qbman_swp_enqueue_multiple_fd_mem_back;
		qbman_swp_enqueue_multiple_desc_ptr =
				qbman_swp_enqueue_multiple_desc_mem_back;
		qbman_swp_pull_ptr = qbman_swp_pull_mem_back;
		qbman_swp_dqrr_next_ptr = qbman_swp_dqrr_next_mem_back;
		qbman_swp_release_ptr = qbman_swp_release_mem_back;
	}

	for (mask_size = p->eqcr.pi_ring_size; mask_size > 0; mask_size >>= 1)
		p->eqcr.pi_ci_mask = (p->eqcr.pi_ci_mask<<1) + 1;
	eqcr_pi = qbman_cinh_read(&p->sys, QBMAN_CINH_SWP_EQCR_PI);
	p->eqcr.pi = eqcr_pi & p->eqcr.pi_ci_mask;
	p->eqcr.pi_vb = eqcr_pi & QB_VALID_BIT;
	if ((p->desc.qman_version & QMAN_REV_MASK) >= QMAN_REV_5000
			&& (d->cena_access_mode == qman_cena_fastest_access))
		p->eqcr.ci = qbman_cinh_read(&p->sys, QBMAN_CINH_SWP_EQCR_PI)
					     & p->eqcr.pi_ci_mask;
	else
		p->eqcr.ci = qbman_cinh_read(&p->sys, QBMAN_CINH_SWP_EQCR_CI)
					     & p->eqcr.pi_ci_mask;
	p->eqcr.available = p->eqcr.pi_ring_size -
				qm_cyc_diff(p->eqcr.pi_ring_size,
				p->eqcr.ci & (p->eqcr.pi_ci_mask<<1),
				p->eqcr.pi & (p->eqcr.pi_ci_mask<<1));
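
	/* Worked example (informational): with the default pi_ring_size of 8
	 * the loop above iterates four times (mask_size = 8, 4, 2, 1), so
	 * pi_ci_mask ends up as 0xf: the low three bits index the ring and
	 * the extra top bit distinguishes a full ring from an empty one.
	 * For the 32-entry ring the mask is 0x3f.
	 */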

	portal_idx_map[p->desc.idx] = p;
	return p;
}

void qbman_swp_finish(struct qbman_swp *p)
{
#ifdef QBMAN_CHECKING
	QBMAN_BUG_ON(p->mc.check != swp_mc_can_start);
#endif
	qbman_swp_sys_finish(&p->sys);
	portal_idx_map[p->desc.idx] = NULL;
	free(p);
}

const struct qbman_swp_desc *qbman_swp_get_desc(struct qbman_swp *p)
{
	return &p->desc;
}

/**************/
/* Interrupts */
/**************/

uint32_t qbman_swp_interrupt_get_vanish(struct qbman_swp *p)
{
	return qbman_cinh_read(&p->sys, QBMAN_CINH_SWP_ISDR);
}

void qbman_swp_interrupt_set_vanish(struct qbman_swp *p, uint32_t mask)
{
	qbman_cinh_write(&p->sys, QBMAN_CINH_SWP_ISDR, mask);
}

uint32_t qbman_swp_interrupt_read_status(struct qbman_swp *p)
{
	return qbman_cinh_read(&p->sys, QBMAN_CINH_SWP_ISR);
}

void qbman_swp_interrupt_clear_status(struct qbman_swp *p, uint32_t mask)
{
	qbman_cinh_write(&p->sys, QBMAN_CINH_SWP_ISR, mask);
}

uint32_t qbman_swp_dqrr_thrshld_read_status(struct qbman_swp *p)
{
	return qbman_cinh_read(&p->sys, QBMAN_CINH_SWP_DQRR_ITR);
}

void qbman_swp_dqrr_thrshld_write(struct qbman_swp *p, uint32_t mask)
{
	qbman_cinh_write(&p->sys, QBMAN_CINH_SWP_DQRR_ITR, mask);
}

uint32_t qbman_swp_intr_timeout_read_status(struct qbman_swp *p)
{
	return qbman_cinh_read(&p->sys, QBMAN_CINH_SWP_ITPR);
}

void qbman_swp_intr_timeout_write(struct qbman_swp *p, uint32_t mask)
{
	qbman_cinh_write(&p->sys, QBMAN_CINH_SWP_ITPR, mask);
}

uint32_t qbman_swp_interrupt_get_trigger(struct qbman_swp *p)
{
	return qbman_cinh_read(&p->sys, QBMAN_CINH_SWP_IER);
}

void qbman_swp_interrupt_set_trigger(struct qbman_swp *p, uint32_t mask)
{
	qbman_cinh_write(&p->sys, QBMAN_CINH_SWP_IER, mask);
}

int qbman_swp_interrupt_get_inhibit(struct qbman_swp *p)
{
	return qbman_cinh_read(&p->sys, QBMAN_CINH_SWP_IIR);
}

void qbman_swp_interrupt_set_inhibit(struct qbman_swp *p, int inhibit)
{
	qbman_cinh_write(&p->sys, QBMAN_CINH_SWP_IIR,
			 inhibit ? 0xffffffff : 0);
}

/***********************/
/* Management commands */
/***********************/

/*
 * Internal code common to all types of management commands.
 */

void *qbman_swp_mc_start(struct qbman_swp *p)
{
	void *ret;
#ifdef QBMAN_CHECKING
	QBMAN_BUG_ON(p->mc.check != swp_mc_can_start);
#endif
	if ((p->desc.qman_version & QMAN_REV_MASK) >= QMAN_REV_5000
		    && (p->desc.cena_access_mode == qman_cena_fastest_access))
		ret = qbman_cena_write_start(&p->sys, QBMAN_CENA_SWP_CR_MEM);
	else
		ret = qbman_cena_write_start(&p->sys, QBMAN_CENA_SWP_CR);
#ifdef QBMAN_CHECKING
	if (!ret)
		p->mc.check = swp_mc_can_submit;
#endif
	return ret;
}

void qbman_swp_mc_submit(struct qbman_swp *p, void *cmd, uint8_t cmd_verb)
{
	uint8_t *v = cmd;
#ifdef QBMAN_CHECKING
	QBMAN_BUG_ON(p->mc.check != swp_mc_can_submit);
#endif
	/* TBD: "|=" is going to hurt performance. Need to move as many fields
	 * out of word zero, and for those that remain, the "OR" needs to occur
	 * at the caller side. This debug check helps to catch cases where the
	 * caller wants to OR but has forgotten to do so.
	 */
	QBMAN_BUG_ON((*v & cmd_verb) != *v);
	if ((p->desc.qman_version & QMAN_REV_MASK) >= QMAN_REV_5000
		    && (p->desc.cena_access_mode == qman_cena_fastest_access)) {
		*v = cmd_verb | p->mr.valid_bit;
		qbman_cena_write_complete(&p->sys, QBMAN_CENA_SWP_CR_MEM, cmd);
		dma_wmb();
		qbman_cinh_write(&p->sys, QBMAN_CINH_SWP_CR_RT, QMAN_RT_MODE);
	} else {
		dma_wmb();
		*v = cmd_verb | p->mc.valid_bit;
		qbman_cena_write_complete(&p->sys, QBMAN_CENA_SWP_CR, cmd);
		clean(cmd);
	}
#ifdef QBMAN_CHECKING
	p->mc.check = swp_mc_can_poll;
#endif
}

void *qbman_swp_mc_result(struct qbman_swp *p)
{
	uint32_t *ret, verb;
#ifdef QBMAN_CHECKING
	QBMAN_BUG_ON(p->mc.check != swp_mc_can_poll);
#endif
	if ((p->desc.qman_version & QMAN_REV_MASK) >= QMAN_REV_5000
		&& (p->desc.cena_access_mode == qman_cena_fastest_access)) {
		ret = qbman_cena_read(&p->sys, QBMAN_CENA_SWP_RR_MEM);
		/* Command completed if the valid bit is toggled */
		if (p->mr.valid_bit != (ret[0] & QB_VALID_BIT))
			return NULL;
		/* Remove the valid-bit -
		 * command completed iff the rest is non-zero
		 */
		verb = ret[0] & ~QB_VALID_BIT;
		if (!verb)
			return NULL;
		p->mr.valid_bit ^= QB_VALID_BIT;
	} else {
		qbman_cena_invalidate_prefetch(&p->sys,
			QBMAN_CENA_SWP_RR(p->mc.valid_bit));
		ret = qbman_cena_read(&p->sys,
				      QBMAN_CENA_SWP_RR(p->mc.valid_bit));
		/* Remove the valid-bit -
		 * command completed iff the rest is non-zero
		 */
		verb = ret[0] & ~QB_VALID_BIT;
		if (!verb)
			return NULL;
		p->mc.valid_bit ^= QB_VALID_BIT;
	}
#ifdef QBMAN_CHECKING
	p->mc.check = swp_mc_can_start;
#endif
	return ret;
}
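
/* Usage sketch (informational; 'resp' is a caller-declared pointer and
 * 'verb' is the command code, e.g. QBMAN_MC_ACQUIRE): a management command
 * round-trips through the three helpers above as
 *
 *	uint8_t *cmd = qbman_swp_mc_start(p);
 *
 *	if (cmd) {
 *		// ... fill in command-specific fields ...
 *		qbman_swp_mc_submit(p, cmd, verb);
 *		do {
 *			resp = qbman_swp_mc_result(p);
 *		} while (!resp);	// NULL until the response lands
 *	}
 */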

/***********/
/* Enqueue */
/***********/

#define QB_ENQUEUE_CMD_OPTIONS_SHIFT    0
enum qb_enqueue_commands {
	enqueue_empty = 0,
	enqueue_response_always = 1,
	enqueue_rejects_to_fq = 2
};

#define QB_ENQUEUE_CMD_EC_OPTION_MASK        0x3
#define QB_ENQUEUE_CMD_ORP_ENABLE_SHIFT      2
#define QB_ENQUEUE_CMD_IRQ_ON_DISPATCH_SHIFT 3
#define QB_ENQUEUE_CMD_TARGET_TYPE_SHIFT     4
#define QB_ENQUEUE_CMD_DCA_PK_SHIFT          6
#define QB_ENQUEUE_CMD_DCA_EN_SHIFT          7
#define QB_ENQUEUE_CMD_NLIS_SHIFT            14
#define QB_ENQUEUE_CMD_IS_NESN_SHIFT         15

void qbman_eq_desc_clear(struct qbman_eq_desc *d)
{
	memset(d, 0, sizeof(*d));
}

void qbman_eq_desc_set_no_orp(struct qbman_eq_desc *d, int respond_success)
{
	d->eq.verb &= ~(1 << QB_ENQUEUE_CMD_ORP_ENABLE_SHIFT);
	if (respond_success)
		d->eq.verb |= enqueue_response_always;
	else
		d->eq.verb |= enqueue_rejects_to_fq;
}

void qbman_eq_desc_set_orp(struct qbman_eq_desc *d, int respond_success,
			   uint16_t opr_id, uint16_t seqnum, int incomplete)
{
	d->eq.verb |= 1 << QB_ENQUEUE_CMD_ORP_ENABLE_SHIFT;
	if (respond_success)
		d->eq.verb |= enqueue_response_always;
	else
		d->eq.verb |= enqueue_rejects_to_fq;

	d->eq.orpid = opr_id;
	d->eq.seqnum = seqnum;
	if (incomplete)
		d->eq.seqnum |= 1 << QB_ENQUEUE_CMD_NLIS_SHIFT;
	else
		d->eq.seqnum &= ~(1 << QB_ENQUEUE_CMD_NLIS_SHIFT);
}

void qbman_eq_desc_set_orp_hole(struct qbman_eq_desc *d, uint16_t opr_id,
				uint16_t seqnum)
{
	d->eq.verb |= 1 << QB_ENQUEUE_CMD_ORP_ENABLE_SHIFT;
	d->eq.verb &= ~QB_ENQUEUE_CMD_EC_OPTION_MASK;
	d->eq.orpid = opr_id;
	d->eq.seqnum = seqnum;
	d->eq.seqnum &= ~(1 << QB_ENQUEUE_CMD_NLIS_SHIFT);
	d->eq.seqnum &= ~(1 << QB_ENQUEUE_CMD_IS_NESN_SHIFT);
}

void qbman_eq_desc_set_orp_nesn(struct qbman_eq_desc *d, uint16_t opr_id,
				uint16_t seqnum)
{
	d->eq.verb |= 1 << QB_ENQUEUE_CMD_ORP_ENABLE_SHIFT;
	d->eq.verb &= ~QB_ENQUEUE_CMD_EC_OPTION_MASK;
	d->eq.orpid = opr_id;
	d->eq.seqnum = seqnum;
	d->eq.seqnum &= ~(1 << QB_ENQUEUE_CMD_NLIS_SHIFT);
	d->eq.seqnum |= 1 << QB_ENQUEUE_CMD_IS_NESN_SHIFT;
}

void qbman_eq_desc_set_response(struct qbman_eq_desc *d,
				dma_addr_t storage_phys,
				int stash)
{
	d->eq.rsp_addr = storage_phys;
	d->eq.wae = stash;
}

void qbman_eq_desc_set_token(struct qbman_eq_desc *d, uint8_t token)
{
	d->eq.rspid = token;
}

void qbman_eq_desc_set_fq(struct qbman_eq_desc *d, uint32_t fqid)
{
	d->eq.verb &= ~(1 << QB_ENQUEUE_CMD_TARGET_TYPE_SHIFT);
	d->eq.tgtid = fqid;
}

void qbman_eq_desc_set_qd(struct qbman_eq_desc *d, uint32_t qdid,
			  uint16_t qd_bin, uint8_t qd_prio)
{
	d->eq.verb |= 1 << QB_ENQUEUE_CMD_TARGET_TYPE_SHIFT;
	d->eq.tgtid = qdid;
	d->eq.qdbin = qd_bin;
	d->eq.qpri = qd_prio;
}

void qbman_eq_desc_set_eqdi(struct qbman_eq_desc *d, int enable)
{
	if (enable)
		d->eq.verb |= 1 << QB_ENQUEUE_CMD_IRQ_ON_DISPATCH_SHIFT;
	else
		d->eq.verb &= ~(1 << QB_ENQUEUE_CMD_IRQ_ON_DISPATCH_SHIFT);
}

void qbman_eq_desc_set_dca(struct qbman_eq_desc *d, int enable,
			   uint8_t dqrr_idx, int park)
{
	if (enable) {
		d->eq.dca = dqrr_idx;
		if (park)
			d->eq.dca |= 1 << QB_ENQUEUE_CMD_DCA_PK_SHIFT;
		else
			d->eq.dca &= ~(1 << QB_ENQUEUE_CMD_DCA_PK_SHIFT);
		d->eq.dca |= 1 << QB_ENQUEUE_CMD_DCA_EN_SHIFT;
	} else {
		d->eq.dca &= ~(1 << QB_ENQUEUE_CMD_DCA_EN_SHIFT);
	}
}
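
/* Usage sketch (informational; 'fqid' is a caller-provided frame queue id):
 * a minimal enqueue descriptor is built with the setters above as
 *
 *	struct qbman_eq_desc eqd;
 *
 *	qbman_eq_desc_clear(&eqd);
 *	qbman_eq_desc_set_no_orp(&eqd, 0);	// no order restoration
 *	qbman_eq_desc_set_fq(&eqd, fqid);	// target a frame queue
 *
 * and is then handed to one of the qbman_swp_enqueue*() calls below.
 */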

#define EQAR_IDX(eqar)     ((eqar) & 0x1f)
#define EQAR_VB(eqar)      ((eqar) & 0x80)
#define EQAR_SUCCESS(eqar) ((eqar) & 0x100)
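
/* Field decode (informational): per the masks above, EQAR returns the
 * allocated EQCR entry index in bits 4:0, the valid-bit to stamp into the
 * command in bit 7, and a success flag in bit 8; !EQAR_SUCCESS() means no
 * entry was available.
 */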

static inline void qbman_write_eqcr_am_rt_register(struct qbman_swp *p,
						   uint8_t idx)
{
	if (idx < 16)
		qbman_cinh_write(&p->sys, QBMAN_CINH_SWP_EQCR_AM_RT + idx * 4,
				     QMAN_RT_MODE);
	else
		qbman_cinh_write(&p->sys, QBMAN_CINH_SWP_EQCR_AM_RT2 +
				     (idx - 16) * 4,
				     QMAN_RT_MODE);
}


static int qbman_swp_enqueue_array_mode_direct(struct qbman_swp *s,
					       const struct qbman_eq_desc *d,
					       const struct qbman_fd *fd)
{
	uint32_t *p;
	const uint32_t *cl = qb_cl(d);
	uint32_t eqar = qbman_cinh_read(&s->sys, QBMAN_CINH_SWP_EQAR);

	pr_debug("EQAR=%08x\n", eqar);
	if (!EQAR_SUCCESS(eqar))
		return -EBUSY;
	p = qbman_cena_write_start_wo_shadow(&s->sys,
			QBMAN_CENA_SWP_EQCR(EQAR_IDX(eqar)));
	memcpy(&p[1], &cl[1], 28);
	memcpy(&p[8], fd, sizeof(*fd));

	/* Set the verb byte, have to substitute in the valid-bit */
	dma_wmb();
	p[0] = cl[0] | EQAR_VB(eqar);
	qbman_cena_write_complete_wo_shadow(&s->sys,
				QBMAN_CENA_SWP_EQCR(EQAR_IDX(eqar)));
	return 0;
}
static int qbman_swp_enqueue_array_mode_mem_back(struct qbman_swp *s,
						 const struct qbman_eq_desc *d,
						 const struct qbman_fd *fd)
{
	uint32_t *p;
	const uint32_t *cl = qb_cl(d);
	uint32_t eqar = qbman_cinh_read(&s->sys, QBMAN_CINH_SWP_EQAR);

	pr_debug("EQAR=%08x\n", eqar);
	if (!EQAR_SUCCESS(eqar))
		return -EBUSY;
	p = qbman_cena_write_start_wo_shadow(&s->sys,
			QBMAN_CENA_SWP_EQCR(EQAR_IDX(eqar)));
	memcpy(&p[1], &cl[1], 28);
	memcpy(&p[8], fd, sizeof(*fd));

	/* Set the verb byte, have to substitute in the valid-bit */
	p[0] = cl[0] | EQAR_VB(eqar);
	dma_wmb();
	qbman_write_eqcr_am_rt_register(s, EQAR_IDX(eqar));
	return 0;
}

static inline int qbman_swp_enqueue_array_mode(struct qbman_swp *s,
					       const struct qbman_eq_desc *d,
					       const struct qbman_fd *fd)
{
	return qbman_swp_enqueue_array_mode_ptr(s, d, fd);
}

static int qbman_swp_enqueue_ring_mode_direct(struct qbman_swp *s,
					      const struct qbman_eq_desc *d,
					      const struct qbman_fd *fd)
{
	uint32_t *p;
	const uint32_t *cl = qb_cl(d);
	uint32_t eqcr_ci, full_mask, half_mask;

	half_mask = (s->eqcr.pi_ci_mask>>1);
	full_mask = s->eqcr.pi_ci_mask;
	if (!s->eqcr.available) {
		eqcr_ci = s->eqcr.ci;
		s->eqcr.ci = qbman_cena_read_reg(&s->sys,
				QBMAN_CENA_SWP_EQCR_CI) & full_mask;
		s->eqcr.available = qm_cyc_diff(s->eqcr.pi_ring_size,
				eqcr_ci, s->eqcr.ci);
		if (!s->eqcr.available)
			return -EBUSY;
	}

	p = qbman_cena_write_start_wo_shadow(&s->sys,
			QBMAN_CENA_SWP_EQCR(s->eqcr.pi & half_mask));
	memcpy(&p[1], &cl[1], 28);
	memcpy(&p[8], fd, sizeof(*fd));
	lwsync();

	/* Set the verb byte, have to substitute in the valid-bit */
	p[0] = cl[0] | s->eqcr.pi_vb;
	qbman_cena_write_complete_wo_shadow(&s->sys,
			QBMAN_CENA_SWP_EQCR(s->eqcr.pi & half_mask));
	s->eqcr.pi++;
	s->eqcr.pi &= full_mask;
	s->eqcr.available--;
	if (!(s->eqcr.pi & half_mask))
		s->eqcr.pi_vb ^= QB_VALID_BIT;

	return 0;
}

static int qbman_swp_enqueue_ring_mode_mem_back(struct qbman_swp *s,
						const struct qbman_eq_desc *d,
						const struct qbman_fd *fd)
{
	uint32_t *p;
	const uint32_t *cl = qb_cl(d);
	uint32_t eqcr_ci, full_mask, half_mask;

	half_mask = (s->eqcr.pi_ci_mask>>1);
	full_mask = s->eqcr.pi_ci_mask;
	if (!s->eqcr.available) {
		eqcr_ci = s->eqcr.ci;
		s->eqcr.ci = qbman_cena_read_reg(&s->sys,
				QBMAN_CENA_SWP_EQCR_CI_MEMBACK) & full_mask;
		s->eqcr.available = qm_cyc_diff(s->eqcr.pi_ring_size,
				eqcr_ci, s->eqcr.ci);
		if (!s->eqcr.available)
			return -EBUSY;
	}

	p = qbman_cena_write_start_wo_shadow(&s->sys,
			QBMAN_CENA_SWP_EQCR(s->eqcr.pi & half_mask));
	memcpy(&p[1], &cl[1], 28);
	memcpy(&p[8], fd, sizeof(*fd));

	/* Set the verb byte, have to substitute in the valid-bit */
	p[0] = cl[0] | s->eqcr.pi_vb;
	s->eqcr.pi++;
	s->eqcr.pi &= full_mask;
	s->eqcr.available--;
	if (!(s->eqcr.pi & half_mask))
		s->eqcr.pi_vb ^= QB_VALID_BIT;
	dma_wmb();
	qbman_cinh_write(&s->sys, QBMAN_CINH_SWP_EQCR_PI,
				(QB_RT_BIT)|(s->eqcr.pi)|s->eqcr.pi_vb);
	return 0;
}

static int qbman_swp_enqueue_ring_mode(struct qbman_swp *s,
				       const struct qbman_eq_desc *d,
				       const struct qbman_fd *fd)
{
	return qbman_swp_enqueue_ring_mode_ptr(s, d, fd);
}

int qbman_swp_enqueue(struct qbman_swp *s, const struct qbman_eq_desc *d,
		      const struct qbman_fd *fd)
{
	if (s->sys.eqcr_mode == qman_eqcr_vb_array)
		return qbman_swp_enqueue_array_mode(s, d, fd);
	else	/* Use ring mode by default */
		return qbman_swp_enqueue_ring_mode(s, d, fd);
}
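
/* Usage sketch (informational; 'swp', 'eqd' and 'fd' are caller state, not
 * defined here): a single enqueue simply retries while the EQCR is full,
 *
 *	while (qbman_swp_enqueue(swp, &eqd, &fd) == -EBUSY)
 *		;	// -EBUSY means no EQCR entry was available
 */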

static int qbman_swp_enqueue_multiple_direct(struct qbman_swp *s,
					     const struct qbman_eq_desc *d,
					     const struct qbman_fd *fd,
					     uint32_t *flags,
					     int num_frames)
{
	uint32_t *p = NULL;
	const uint32_t *cl = qb_cl(d);
	uint32_t eqcr_ci, eqcr_pi, half_mask, full_mask;
	int i, num_enqueued = 0;
	uint64_t addr_cena;

	half_mask = (s->eqcr.pi_ci_mask>>1);
	full_mask = s->eqcr.pi_ci_mask;
	if (!s->eqcr.available) {
		eqcr_ci = s->eqcr.ci;
		s->eqcr.ci = qbman_cena_read_reg(&s->sys,
				QBMAN_CENA_SWP_EQCR_CI) & full_mask;
		s->eqcr.available = qm_cyc_diff(s->eqcr.pi_ring_size,
				eqcr_ci, s->eqcr.ci);
		if (!s->eqcr.available)
			return 0;
	}

	eqcr_pi = s->eqcr.pi;
	num_enqueued = (s->eqcr.available < num_frames) ?
			s->eqcr.available : num_frames;
	s->eqcr.available -= num_enqueued;
	/* Fill in the EQCR ring */
	for (i = 0; i < num_enqueued; i++) {
		p = qbman_cena_write_start_wo_shadow(&s->sys,
				QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
		memcpy(&p[1], &cl[1], 28);
		memcpy(&p[8], &fd[i], sizeof(*fd));
		eqcr_pi++;
	}

	lwsync();

	/* Set the verb byte, have to substitute in the valid-bit */
	eqcr_pi = s->eqcr.pi;
	for (i = 0; i < num_enqueued; i++) {
		p = qbman_cena_write_start_wo_shadow(&s->sys,
				QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
		p[0] = cl[0] | s->eqcr.pi_vb;
		if (flags && (flags[i] & QBMAN_ENQUEUE_FLAG_DCA)) {
			struct qbman_eq_desc *d = (struct qbman_eq_desc *)p;

			d->eq.dca = (1 << QB_ENQUEUE_CMD_DCA_EN_SHIFT) |
				((flags[i]) & QBMAN_EQCR_DCA_IDXMASK);
		}
		eqcr_pi++;
		if (!(eqcr_pi & half_mask))
			s->eqcr.pi_vb ^= QB_VALID_BIT;
	}

	/* Flush all the cachelines without load/store in between */
	eqcr_pi = s->eqcr.pi;
	addr_cena = (size_t)s->sys.addr_cena;
	for (i = 0; i < num_enqueued; i++) {
		dcbf((uintptr_t)(addr_cena +
			QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask)));
		eqcr_pi++;
	}
	s->eqcr.pi = eqcr_pi & full_mask;

	return num_enqueued;
}

static int qbman_swp_enqueue_multiple_mem_back(struct qbman_swp *s,
					       const struct qbman_eq_desc *d,
					       const struct qbman_fd *fd,
					       uint32_t *flags,
					       int num_frames)
{
	uint32_t *p = NULL;
	const uint32_t *cl = qb_cl(d);
	uint32_t eqcr_ci, eqcr_pi, half_mask, full_mask;
	int i, num_enqueued = 0;

	half_mask = (s->eqcr.pi_ci_mask>>1);
	full_mask = s->eqcr.pi_ci_mask;
	if (!s->eqcr.available) {
		eqcr_ci = s->eqcr.ci;
		s->eqcr.ci = qbman_cena_read_reg(&s->sys,
				QBMAN_CENA_SWP_EQCR_CI_MEMBACK) & full_mask;
		s->eqcr.available = qm_cyc_diff(s->eqcr.pi_ring_size,
					eqcr_ci, s->eqcr.ci);
		if (!s->eqcr.available)
			return 0;
	}

	eqcr_pi = s->eqcr.pi;
	num_enqueued = (s->eqcr.available < num_frames) ?
			s->eqcr.available : num_frames;
	s->eqcr.available -= num_enqueued;
	/* Fill in the EQCR ring */
	for (i = 0; i < num_enqueued; i++) {
		p = qbman_cena_write_start_wo_shadow(&s->sys,
				QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
		memcpy(&p[1], &cl[1], 28);
		memcpy(&p[8], &fd[i], sizeof(*fd));
		if (flags && (flags[i] & QBMAN_ENQUEUE_FLAG_DCA)) {
			struct qbman_eq_desc *d = (struct qbman_eq_desc *)p;

			d->eq.dca = (1 << QB_ENQUEUE_CMD_DCA_EN_SHIFT) |
				((flags[i]) & QBMAN_EQCR_DCA_IDXMASK);
		}
		eqcr_pi++;
		p[0] = cl[0] | s->eqcr.pi_vb;

		if (!(eqcr_pi & half_mask))
			s->eqcr.pi_vb ^= QB_VALID_BIT;
	}
	s->eqcr.pi = eqcr_pi & full_mask;

	dma_wmb();
	qbman_cinh_write(&s->sys, QBMAN_CINH_SWP_EQCR_PI,
				(QB_RT_BIT)|(s->eqcr.pi)|s->eqcr.pi_vb);
	return num_enqueued;
}

inline int qbman_swp_enqueue_multiple(struct qbman_swp *s,
				      const struct qbman_eq_desc *d,
				      const struct qbman_fd *fd,
				      uint32_t *flags,
				      int num_frames)
{
	return qbman_swp_enqueue_multiple_ptr(s, d, fd, flags, num_frames);
}
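
/* Note (informational; 'swp', 'eqd', 'fds' and 'n' are caller state): the
 * multi-frame variants return the number of frames actually enqueued, which
 * can be less than num_frames when the EQCR runs out of entries, so callers
 * are expected to retry with the remainder, e.g.
 *
 *	int done = 0;
 *	while (done < n)
 *		done += qbman_swp_enqueue_multiple(swp, &eqd, &fds[done],
 *						   NULL, n - done);
 */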

static int qbman_swp_enqueue_multiple_fd_direct(struct qbman_swp *s,
						const struct qbman_eq_desc *d,
						struct qbman_fd **fd,
						uint32_t *flags,
						int num_frames)
{
	uint32_t *p = NULL;
	const uint32_t *cl = qb_cl(d);
	uint32_t eqcr_ci, eqcr_pi, half_mask, full_mask;
	int i, num_enqueued = 0;
	uint64_t addr_cena;

	half_mask = (s->eqcr.pi_ci_mask>>1);
	full_mask = s->eqcr.pi_ci_mask;
	if (!s->eqcr.available) {
		eqcr_ci = s->eqcr.ci;
		s->eqcr.ci = qbman_cena_read_reg(&s->sys,
				QBMAN_CENA_SWP_EQCR_CI) & full_mask;
		s->eqcr.available = qm_cyc_diff(s->eqcr.pi_ring_size,
				eqcr_ci, s->eqcr.ci);
		if (!s->eqcr.available)
			return 0;
	}

	eqcr_pi = s->eqcr.pi;
	num_enqueued = (s->eqcr.available < num_frames) ?
			s->eqcr.available : num_frames;
	s->eqcr.available -= num_enqueued;
	/* Fill in the EQCR ring */
	for (i = 0; i < num_enqueued; i++) {
		p = qbman_cena_write_start_wo_shadow(&s->sys,
				QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
		memcpy(&p[1], &cl[1], 28);
		memcpy(&p[8], fd[i], sizeof(struct qbman_fd));
		eqcr_pi++;
	}

	lwsync();

	/* Set the verb byte, have to substitute in the valid-bit */
	eqcr_pi = s->eqcr.pi;
	for (i = 0; i < num_enqueued; i++) {
		p = qbman_cena_write_start_wo_shadow(&s->sys,
				QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
		p[0] = cl[0] | s->eqcr.pi_vb;
		if (flags && (flags[i] & QBMAN_ENQUEUE_FLAG_DCA)) {
			struct qbman_eq_desc *d = (struct qbman_eq_desc *)p;

			d->eq.dca = (1 << QB_ENQUEUE_CMD_DCA_EN_SHIFT) |
				((flags[i]) & QBMAN_EQCR_DCA_IDXMASK);
		}
		eqcr_pi++;
		if (!(eqcr_pi & half_mask))
			s->eqcr.pi_vb ^= QB_VALID_BIT;
	}

	/* Flush all the cachelines without load/store in between */
	eqcr_pi = s->eqcr.pi;
	addr_cena = (size_t)s->sys.addr_cena;
	for (i = 0; i < num_enqueued; i++) {
		dcbf(addr_cena +
			QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
		eqcr_pi++;
	}
	s->eqcr.pi = eqcr_pi & full_mask;

	return num_enqueued;
}

static int qbman_swp_enqueue_multiple_fd_mem_back(struct qbman_swp *s,
						  const struct qbman_eq_desc *d,
						  struct qbman_fd **fd,
						  uint32_t *flags,
						  int num_frames)
{
	uint32_t *p = NULL;
	const uint32_t *cl = qb_cl(d);
	uint32_t eqcr_ci, eqcr_pi, half_mask, full_mask;
	int i, num_enqueued = 0;

	half_mask = (s->eqcr.pi_ci_mask>>1);
	full_mask = s->eqcr.pi_ci_mask;
	if (!s->eqcr.available) {
		eqcr_ci = s->eqcr.ci;
		s->eqcr.ci = qbman_cena_read_reg(&s->sys,
				QBMAN_CENA_SWP_EQCR_CI_MEMBACK) & full_mask;
		s->eqcr.available = qm_cyc_diff(s->eqcr.pi_ring_size,
					eqcr_ci, s->eqcr.ci);
		if (!s->eqcr.available)
			return 0;
	}

	eqcr_pi = s->eqcr.pi;
	num_enqueued = (s->eqcr.available < num_frames) ?
			s->eqcr.available : num_frames;
	s->eqcr.available -= num_enqueued;
	/* Fill in the EQCR ring */
	for (i = 0; i < num_enqueued; i++) {
		p = qbman_cena_write_start_wo_shadow(&s->sys,
				QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
		memcpy(&p[1], &cl[1], 28);
		memcpy(&p[8], fd[i], sizeof(struct qbman_fd));
		eqcr_pi++;
	}

	/* Set the verb byte, have to substitute in the valid-bit */
	eqcr_pi = s->eqcr.pi;
	for (i = 0; i < num_enqueued; i++) {
		p = qbman_cena_write_start_wo_shadow(&s->sys,
				QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
		p[0] = cl[0] | s->eqcr.pi_vb;
		if (flags && (flags[i] & QBMAN_ENQUEUE_FLAG_DCA)) {
			struct qbman_eq_desc *d = (struct qbman_eq_desc *)p;

			d->eq.dca = (1 << QB_ENQUEUE_CMD_DCA_EN_SHIFT) |
				((flags[i]) & QBMAN_EQCR_DCA_IDXMASK);
		}
		eqcr_pi++;
		if (!(eqcr_pi & half_mask))
			s->eqcr.pi_vb ^= QB_VALID_BIT;
	}
	s->eqcr.pi = eqcr_pi & full_mask;

	dma_wmb();
	qbman_cinh_write(&s->sys, QBMAN_CINH_SWP_EQCR_PI,
				(QB_RT_BIT)|(s->eqcr.pi)|s->eqcr.pi_vb);
	return num_enqueued;
}

inline int qbman_swp_enqueue_multiple_fd(struct qbman_swp *s,
					 const struct qbman_eq_desc *d,
					 struct qbman_fd **fd,
					 uint32_t *flags,
					 int num_frames)
{
	return qbman_swp_enqueue_multiple_fd_ptr(s, d, fd, flags, num_frames);
}

static int qbman_swp_enqueue_multiple_desc_direct(struct qbman_swp *s,
					const struct qbman_eq_desc *d,
					const struct qbman_fd *fd,
					int num_frames)
{
	uint32_t *p;
	const uint32_t *cl;
	uint32_t eqcr_ci, eqcr_pi, half_mask, full_mask;
	int i, num_enqueued = 0;
	uint64_t addr_cena;

	half_mask = (s->eqcr.pi_ci_mask>>1);
	full_mask = s->eqcr.pi_ci_mask;
	if (!s->eqcr.available) {
		eqcr_ci = s->eqcr.ci;
		s->eqcr.ci = qbman_cena_read_reg(&s->sys,
				QBMAN_CENA_SWP_EQCR_CI) & full_mask;
		s->eqcr.available = qm_cyc_diff(s->eqcr.pi_ring_size,
					eqcr_ci, s->eqcr.ci);
		if (!s->eqcr.available)
			return 0;
	}

	eqcr_pi = s->eqcr.pi;
	num_enqueued = (s->eqcr.available < num_frames) ?
			s->eqcr.available : num_frames;
	s->eqcr.available -= num_enqueued;
	/* Fill in the EQCR ring */
	for (i = 0; i < num_enqueued; i++) {
		p = qbman_cena_write_start_wo_shadow(&s->sys,
				QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
		cl = qb_cl(&d[i]);
		memcpy(&p[1], &cl[1], 28);
		memcpy(&p[8], &fd[i], sizeof(*fd));
		eqcr_pi++;
	}

	lwsync();

	/* Set the verb byte, have to substitute in the valid-bit */
	eqcr_pi = s->eqcr.pi;
	for (i = 0; i < num_enqueued; i++) {
		p = qbman_cena_write_start_wo_shadow(&s->sys,
				QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
		cl = qb_cl(&d[i]);
		p[0] = cl[0] | s->eqcr.pi_vb;
		eqcr_pi++;
		if (!(eqcr_pi & half_mask))
			s->eqcr.pi_vb ^= QB_VALID_BIT;
	}

	/* Flush all the cachelines without load/store in between */
	eqcr_pi = s->eqcr.pi;
	addr_cena = (size_t)s->sys.addr_cena;
	for (i = 0; i < num_enqueued; i++) {
		dcbf((uintptr_t)(addr_cena +
			QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask)));
		eqcr_pi++;
	}
	s->eqcr.pi = eqcr_pi & full_mask;

	return num_enqueued;
}

static int qbman_swp_enqueue_multiple_desc_mem_back(struct qbman_swp *s,
					const struct qbman_eq_desc *d,
					const struct qbman_fd *fd,
					int num_frames)
{
	uint32_t *p;
	const uint32_t *cl;
	uint32_t eqcr_ci, eqcr_pi, half_mask, full_mask;
	int i, num_enqueued = 0;

	half_mask = (s->eqcr.pi_ci_mask>>1);
	full_mask = s->eqcr.pi_ci_mask;
	if (!s->eqcr.available) {
		eqcr_ci = s->eqcr.ci;
		s->eqcr.ci = qbman_cena_read_reg(&s->sys,
				QBMAN_CENA_SWP_EQCR_CI_MEMBACK) & full_mask;
		s->eqcr.available = qm_cyc_diff(s->eqcr.pi_ring_size,
					eqcr_ci, s->eqcr.ci);
		if (!s->eqcr.available)
			return 0;
	}

	eqcr_pi = s->eqcr.pi;
	num_enqueued = (s->eqcr.available < num_frames) ?
			s->eqcr.available : num_frames;
	s->eqcr.available -= num_enqueued;
	/* Fill in the EQCR ring */
	for (i = 0; i < num_enqueued; i++) {
		p = qbman_cena_write_start_wo_shadow(&s->sys,
				QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
		cl = qb_cl(&d[i]);
		memcpy(&p[1], &cl[1], 28);
		memcpy(&p[8], &fd[i], sizeof(*fd));
		eqcr_pi++;
	}

	/* Set the verb byte, have to substitute in the valid-bit */
	eqcr_pi = s->eqcr.pi;
	for (i = 0; i < num_enqueued; i++) {
		p = qbman_cena_write_start_wo_shadow(&s->sys,
				QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
		cl = qb_cl(&d[i]);
		p[0] = cl[0] | s->eqcr.pi_vb;
		eqcr_pi++;
		if (!(eqcr_pi & half_mask))
			s->eqcr.pi_vb ^= QB_VALID_BIT;
	}

	s->eqcr.pi = eqcr_pi & full_mask;

	dma_wmb();
	qbman_cinh_write(&s->sys, QBMAN_CINH_SWP_EQCR_PI,
				(QB_RT_BIT)|(s->eqcr.pi)|s->eqcr.pi_vb);

	return num_enqueued;
}
inline int qbman_swp_enqueue_multiple_desc(struct qbman_swp *s,
					   const struct qbman_eq_desc *d,
					   const struct qbman_fd *fd,
					   int num_frames)
{
	return qbman_swp_enqueue_multiple_desc_ptr(s, d, fd, num_frames);
}

/*************************/
/* Static (push) dequeue */
/*************************/

void qbman_swp_push_get(struct qbman_swp *s, uint8_t channel_idx, int *enabled)
{
	uint16_t src = (s->sdq >> QB_SDQCR_SRC_SHIFT) & QB_SDQCR_SRC_MASK;

	QBMAN_BUG_ON(channel_idx > 15);
	*enabled = !!(src & (1u << channel_idx));
}

void qbman_swp_push_set(struct qbman_swp *s, uint8_t channel_idx, int enable)
{
	uint16_t dqsrc;

	QBMAN_BUG_ON(channel_idx > 15);
	if (enable)
		s->sdq |= 1 << channel_idx;
	else
		s->sdq &= ~(1 << channel_idx);

	/* Recompute the complete src map.  If no channels are enabled
	 * the SDQCR must be 0 or else QMan will assert errors
	 */
	dqsrc = (s->sdq >> QB_SDQCR_SRC_SHIFT) & QB_SDQCR_SRC_MASK;
	if (dqsrc != 0)
		qbman_cinh_write(&s->sys, QBMAN_CINH_SWP_SDQCR, s->sdq);
	else
		qbman_cinh_write(&s->sys, QBMAN_CINH_SWP_SDQCR, 0);
}

/***************************/
/* Volatile (pull) dequeue */
/***************************/

/* These should be const, eventually */
#define QB_VDQCR_VERB_DCT_SHIFT    0
#define QB_VDQCR_VERB_DT_SHIFT     2
#define QB_VDQCR_VERB_RLS_SHIFT    4
#define QB_VDQCR_VERB_WAE_SHIFT    5
#define QB_VDQCR_VERB_RAD_SHIFT    6

enum qb_pull_dt_e {
	qb_pull_dt_channel,
	qb_pull_dt_workqueue,
	qb_pull_dt_framequeue
};

void qbman_pull_desc_clear(struct qbman_pull_desc *d)
{
	memset(d, 0, sizeof(*d));
}

void qbman_pull_desc_set_storage(struct qbman_pull_desc *d,
				 struct qbman_result *storage,
				 dma_addr_t storage_phys,
				 int stash)
{
	d->pull.rsp_addr_virt = (size_t)storage;

	if (!storage) {
		d->pull.verb &= ~(1 << QB_VDQCR_VERB_RLS_SHIFT);
		return;
	}
	d->pull.verb |= 1 << QB_VDQCR_VERB_RLS_SHIFT;
	if (stash)
		d->pull.verb |= 1 << QB_VDQCR_VERB_WAE_SHIFT;
	else
		d->pull.verb &= ~(1 << QB_VDQCR_VERB_WAE_SHIFT);

	d->pull.rsp_addr = storage_phys;
}

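/* Note (informational): the hardware numf field stores numframes - 1, so
 * the valid range for numframes here is presumably 1..16; a value of 0
 * would underflow the subtraction below.
 */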
void qbman_pull_desc_set_numframes(struct qbman_pull_desc *d,
				   uint8_t numframes)
{
	d->pull.numf = numframes - 1;
}

void qbman_pull_desc_set_token(struct qbman_pull_desc *d, uint8_t token)
{
	d->pull.tok = token;
}

void qbman_pull_desc_set_fq(struct qbman_pull_desc *d, uint32_t fqid)
{
	d->pull.verb |= 1 << QB_VDQCR_VERB_DCT_SHIFT;
	d->pull.verb |= qb_pull_dt_framequeue << QB_VDQCR_VERB_DT_SHIFT;
	d->pull.dq_src = fqid;
}

void qbman_pull_desc_set_wq(struct qbman_pull_desc *d, uint32_t wqid,
			    enum qbman_pull_type_e dct)
{
	d->pull.verb |= dct << QB_VDQCR_VERB_DCT_SHIFT;
	d->pull.verb |= qb_pull_dt_workqueue << QB_VDQCR_VERB_DT_SHIFT;
	d->pull.dq_src = wqid;
}

void qbman_pull_desc_set_channel(struct qbman_pull_desc *d, uint32_t chid,
				 enum qbman_pull_type_e dct)
{
	d->pull.verb |= dct << QB_VDQCR_VERB_DCT_SHIFT;
	d->pull.verb |= qb_pull_dt_channel << QB_VDQCR_VERB_DT_SHIFT;
	d->pull.dq_src = chid;
}

void qbman_pull_desc_set_rad(struct qbman_pull_desc *d, int rad)
{
	if (d->pull.verb & (1 << QB_VDQCR_VERB_RLS_SHIFT)) {
		if (rad)
			d->pull.verb |= 1 << QB_VDQCR_VERB_RAD_SHIFT;
		else
			d->pull.verb &= ~(1 << QB_VDQCR_VERB_RAD_SHIFT);
	} else {
		printf("The RAD feature is not valid when RLS = 0\n");
	}
}
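
/* Usage sketch (informational; 'swp', 'storage', 'storage_phys' and 'fqid'
 * are caller state, not defined here): a volatile dequeue of up to four
 * frames from a frame queue is issued as
 *
 *	struct qbman_pull_desc pd;
 *
 *	qbman_pull_desc_clear(&pd);
 *	qbman_pull_desc_set_storage(&pd, storage, storage_phys, 1);
 *	qbman_pull_desc_set_numframes(&pd, 4);
 *	qbman_pull_desc_set_fq(&pd, fqid);
 *	while (qbman_swp_pull(swp, &pd) == -EBUSY)
 *		;	// a previous volatile dequeue is still in flight
 */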

static int qbman_swp_pull_direct(struct qbman_swp *s,
				 struct qbman_pull_desc *d)
{
	uint32_t *p;
	uint32_t *cl = qb_cl(d);

	if (!atomic_dec_and_test(&s->vdq.busy)) {
		atomic_inc(&s->vdq.busy);
		return -EBUSY;
	}

	d->pull.tok = s->sys.idx + 1;
	s->vdq.storage = (void *)(size_t)d->pull.rsp_addr_virt;
	p = qbman_cena_write_start_wo_shadow(&s->sys, QBMAN_CENA_SWP_VDQCR);
	memcpy(&p[1], &cl[1], 12);

	/* Set the verb byte, have to substitute in the valid-bit */
	lwsync();
	p[0] = cl[0] | s->vdq.valid_bit;
	s->vdq.valid_bit ^= QB_VALID_BIT;
	qbman_cena_write_complete_wo_shadow(&s->sys, QBMAN_CENA_SWP_VDQCR);

	return 0;
}

static int qbman_swp_pull_mem_back(struct qbman_swp *s,
				   struct qbman_pull_desc *d)
{
	uint32_t *p;
	uint32_t *cl = qb_cl(d);

	if (!atomic_dec_and_test(&s->vdq.busy)) {
		atomic_inc(&s->vdq.busy);
		return -EBUSY;
	}

	d->pull.tok = s->sys.idx + 1;
	s->vdq.storage = (void *)(size_t)d->pull.rsp_addr_virt;
	p = qbman_cena_write_start_wo_shadow(&s->sys, QBMAN_CENA_SWP_VDQCR_MEM);
	memcpy(&p[1], &cl[1], 12);

	/* Set the verb byte, have to substitute in the valid-bit */
	p[0] = cl[0] | s->vdq.valid_bit;
	s->vdq.valid_bit ^= QB_VALID_BIT;
	dma_wmb();
	qbman_cinh_write(&s->sys, QBMAN_CINH_SWP_VDQCR_RT, QMAN_RT_MODE);

	return 0;
}

inline int qbman_swp_pull(struct qbman_swp *s, struct qbman_pull_desc *d)
{
	return qbman_swp_pull_ptr(s, d);
}

/****************/
/* Polling DQRR */
/****************/

#define QMAN_DQRR_PI_MASK              0xf

#define QBMAN_RESULT_DQ        0x60
#define QBMAN_RESULT_FQRN      0x21
#define QBMAN_RESULT_FQRNI     0x22
#define QBMAN_RESULT_FQPN      0x24
#define QBMAN_RESULT_FQDAN     0x25
#define QBMAN_RESULT_CDAN      0x26
#define QBMAN_RESULT_CSCN_MEM  0x27
#define QBMAN_RESULT_CGCU      0x28
#define QBMAN_RESULT_BPSCN     0x29
#define QBMAN_RESULT_CSCN_WQ   0x2a

#include <rte_prefetch.h>

void qbman_swp_prefetch_dqrr_next(struct qbman_swp *s)
{
	const struct qbman_result *p;

	p = qbman_cena_read_wo_shadow(&s->sys,
		QBMAN_CENA_SWP_DQRR(s->dqrr.next_idx));
	rte_prefetch0(p);
}

/* NULL return if there are no unconsumed DQRR entries. Returns a DQRR entry
 * only once, so repeated calls can return a sequence of DQRR entries, without
 * requiring they be consumed immediately or in any particular order.
 */
inline const struct qbman_result *qbman_swp_dqrr_next(struct qbman_swp *s)
{
	return qbman_swp_dqrr_next_ptr(s);
}
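
/* Poll-loop sketch (informational; 'swp' is caller state, and handing the
 * ring entry back is assumed to be done via the DQRR-consume helper provided
 * elsewhere in this driver):
 *
 *	const struct qbman_result *dq;
 *
 *	while ((dq = qbman_swp_dqrr_next(swp)) != NULL) {
 *		// ... process the dequeue result ...
 *		// ... then consume the DQRR entry ...
 *	}
 */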

const struct qbman_result *qbman_swp_dqrr_next_direct(struct qbman_swp *s)
{
	uint32_t verb;
	uint32_t response_verb;
	uint32_t flags;
	const struct qbman_result *p;

	/* Before using valid-bit to detect if something is there, we have to
	 * handle the case of the DQRR reset bug...
	 */
	if (s->dqrr.reset_bug) {
		/* We pick up new entries by cache-inhibited producer index,
		 * which means that a non-coherent mapping would require us to
		 * invalidate and read *only* once that PI has indicated that
		 * there's an entry here. The first trip around the DQRR ring
		 * will be much less efficient than all subsequent trips around
		 * it...
		 */
		uint8_t pi = qbman_cinh_read(&s->sys, QBMAN_CINH_SWP_DQPI) &
			     QMAN_DQRR_PI_MASK;

		/* there are new entries if pi != next_idx */
		if (pi == s->dqrr.next_idx)
			return NULL;

		/* if next_idx is/was the last ring index, and 'pi' is
		 * different, we can disable the workaround as all the ring
		 * entries have now been DMA'd to so valid-bit checking is
		 * repaired. Note: this logic needs to be based on next_idx
		 * (which increments one at a time), rather than on pi (which
		 * can burst and wrap-around between our snapshots of it).
		 */
		QBMAN_BUG_ON((s->dqrr.dqrr_size - 1) < 0);
		if (s->dqrr.next_idx == (s->dqrr.dqrr_size - 1u)) {
			pr_debug("DEBUG: next_idx=%d, pi=%d, clear reset bug\n",
				 s->dqrr.next_idx, pi);
			s->dqrr.reset_bug = 0;
		}
		qbman_cena_invalidate_prefetch(&s->sys,
					QBMAN_CENA_SWP_DQRR(s->dqrr.next_idx));
	}
	p = qbman_cena_read_wo_shadow(&s->sys,
			QBMAN_CENA_SWP_DQRR(s->dqrr.next_idx));

	verb = p->dq.verb;

1412         /* If the valid-bit isn't of the expected polarity, nothing is there.
1413          * Note: in the DQRR reset bug workaround, we shouldn't need to skip
1414          * this check, because we've already determined that a new entry is
1415          * available and we've invalidated the cacheline before reading it, so
1416          * the valid-bit behaviour is repaired and should tell us what we
1417          * already knew from reading PI.
1418          */
1419         if ((verb & QB_VALID_BIT) != s->dqrr.valid_bit)
1420                 return NULL;
1421
1422         /* There's something there. Move "next_idx" attention to the next ring
1423          * entry before returning what we found.
1424          */
1425         s->dqrr.next_idx++;
1426         if (s->dqrr.next_idx == s->dqrr.dqrr_size) {
1427                 s->dqrr.next_idx = 0;
1428                 s->dqrr.valid_bit ^= QB_VALID_BIT;
1429         }
1430         /* If this is the final response to a volatile dequeue command,
1431          * indicate that the vdq is no longer busy.
1432          */
1433         flags = p->dq.stat;
1434         response_verb = verb & QBMAN_RESPONSE_VERB_MASK;
1435         if ((response_verb == QBMAN_RESULT_DQ) &&
1436             (flags & QBMAN_DQ_STAT_VOLATILE) &&
1437             (flags & QBMAN_DQ_STAT_EXPIRED))
1438                 atomic_inc(&s->vdq.busy);
1439
1440         return p;
1441 }
1442
1443 const struct qbman_result *qbman_swp_dqrr_next_mem_back(struct qbman_swp *s)
1444 {
1445         uint32_t verb;
1446         uint32_t response_verb;
1447         uint32_t flags;
1448         const struct qbman_result *p;
1449
1450         p = qbman_cena_read_wo_shadow(&s->sys,
1451                         QBMAN_CENA_SWP_DQRR_MEM(s->dqrr.next_idx));
1452
1453         verb = p->dq.verb;
1454
1455         /* If the valid-bit isn't of the expected polarity, nothing is there.
1456          * Note: in the DQRR reset bug workaround, we shouldn't need to skip
1457          * this check, because we've already determined that a new entry is
1458          * available and we've invalidated the cacheline before reading it, so
1459          * the valid-bit behaviour is repaired and should tell us what we
1460          * already knew from reading PI.
1461          */
1462         if ((verb & QB_VALID_BIT) != s->dqrr.valid_bit)
1463                 return NULL;
1464
1465         /* There's something there. Move "next_idx" attention to the next ring
1466          * entry before returning what we found.
1467          */
1468         s->dqrr.next_idx++;
1469         if (s->dqrr.next_idx == s->dqrr.dqrr_size) {
1470                 s->dqrr.next_idx = 0;
1471                 s->dqrr.valid_bit ^= QB_VALID_BIT;
1472         }
1473         /* If this is the final response to a volatile dequeue command,
1474          * indicate that the vdq is no longer busy.
1475          */
1476         flags = p->dq.stat;
1477         response_verb = verb & QBMAN_RESPONSE_VERB_MASK;
1478         if ((response_verb == QBMAN_RESULT_DQ)
1479                         && (flags & QBMAN_DQ_STAT_VOLATILE)
1480                         && (flags & QBMAN_DQ_STAT_EXPIRED))
1481                 atomic_inc(&s->vdq.busy);
1482         return p;
1483 }
1484
1485 /* Consume DQRR entries previously returned from qbman_swp_dqrr_next(). */
1486 void qbman_swp_dqrr_consume(struct qbman_swp *s,
1487                             const struct qbman_result *dq)
1488 {
1489         qbman_cinh_write(&s->sys,
1490                         QBMAN_CINH_SWP_DCAP, QBMAN_IDX_FROM_DQRR(dq));
1491 }
1492
1493 /* Consume a DQRR entry identified by its ring index rather than by pointer. */
1494 void qbman_swp_dqrr_idx_consume(struct qbman_swp *s,
1495                             uint8_t dqrr_index)
1496 {
1497         qbman_cinh_write(&s->sys, QBMAN_CINH_SWP_DCAP, dqrr_index);
1498 }
1499
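/*
 * Illustrative usage sketch: a typical DQRR polling loop built from the
 * APIs above. The "handler" callback and the example_ name are assumptions
 * standing in for caller-specific processing of each entry.
 */
static inline unsigned int example_drain_dqrr(struct qbman_swp *s,
		void (*handler)(const struct qbman_result *dq))
{
	const struct qbman_result *dq;
	unsigned int cnt = 0;

	/* Each entry is returned exactly once and must be consumed once */
	while ((dq = qbman_swp_dqrr_next(s)) != NULL) {
		handler(dq);
		qbman_swp_dqrr_consume(s, dq);
		cnt++;
	}
	return cnt;
}
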
1500 /*********************************/
1501 /* Polling user-provided storage */
1502 /*********************************/
1503
1504 int qbman_result_has_new_result(struct qbman_swp *s,
1505                                 struct qbman_result *dq)
1506 {
1507         if (dq->dq.tok == 0)
1508                 return 0;
1509
1510         /*
1511          * Set the token to 0 so that we will detect a change back to 1 the
1512          * next time this loop is traversed. The cast is explicit because
1513          * users are expected to treat dequeue responses as read-only.
1514          */
1515         ((struct qbman_result *)dq)->dq.tok = 0;
1516
1517         /*
1518          * VDQCR "no longer busy" hook - not quite the same as DQRR, because
1519          * the fact that "VDQCR" shows busy doesn't mean that we hold the
1520          * result that makes it available. E.g. we may be looking at our 10th
1521          * dequeue result, having released VDQCR after the 1st result and it is
1522          * now busy due to some other command!
1523          */
1524         if (s->vdq.storage == dq) {
1525                 s->vdq.storage = NULL;
1526                 atomic_inc(&s->vdq.busy);
1527         }
1528
1529         return 1;
1530 }
1531
1532 int qbman_check_new_result(struct qbman_result *dq)
1533 {
1534         if (dq->dq.tok == 0)
1535                 return 0;
1536
1537         /*
1538          * Set the token to 0 so that we will detect a change back to 1 the
1539          * next time this loop is traversed. The cast is explicit because
1540          * users are expected to treat dequeue responses as read-only.
1541          */
1542         ((struct qbman_result *)dq)->dq.tok = 0;
1543
1544         return 1;
1545 }
1546
1547 int qbman_check_command_complete(struct qbman_result *dq)
1548 {
1549         struct qbman_swp *s;
1550
1551         if (dq->dq.tok == 0)
1552                 return 0;
1553
1554         s = portal_idx_map[dq->dq.tok - 1];
1555         /*
1556          * VDQCR "no longer busy" hook - not quite the same as DQRR, because
1557          * the fact that "VDQCR" shows busy doesn't mean that we hold the
1558          * result that makes it available. E.g. we may be looking at our 10th
1559          * dequeue result, having released VDQCR after the 1st result and it is
1560          * now busy due to some other command!
1561          */
1562         if (s->vdq.storage == dq) {
1563                 s->vdq.storage = NULL;
1564                 atomic_inc(&s->vdq.busy);
1565         }
1566
1567         return 1;
1568 }
1569
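/*
 * Illustrative usage sketch: busy-wait on a user-provided storage slot for
 * the result of a previously issued volatile dequeue. Assumes the storage
 * was registered in the pull descriptor with its token cleared, so the
 * hardware-written token signals arrival.
 */
static inline struct qbman_result *
example_wait_vdq_result(struct qbman_result *storage)
{
	while (!qbman_check_new_result(storage))
		;	/* spin until the token flips to non-zero */
	return storage;
}
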
1570 /********************************/
1571 /* Categorising qbman results   */
1572 /********************************/
1573
1574 static inline int __qbman_result_is_x(const struct qbman_result *dq,
1575                                       uint8_t x)
1576 {
1577         uint8_t response_verb = dq->dq.verb & QBMAN_RESPONSE_VERB_MASK;
1578
1579         return (response_verb == x);
1580 }
1581
1582 int qbman_result_is_DQ(const struct qbman_result *dq)
1583 {
1584         return __qbman_result_is_x(dq, QBMAN_RESULT_DQ);
1585 }
1586
1587 int qbman_result_is_FQDAN(const struct qbman_result *dq)
1588 {
1589         return __qbman_result_is_x(dq, QBMAN_RESULT_FQDAN);
1590 }
1591
1592 int qbman_result_is_CDAN(const struct qbman_result *dq)
1593 {
1594         return __qbman_result_is_x(dq, QBMAN_RESULT_CDAN);
1595 }
1596
1597 int qbman_result_is_CSCN(const struct qbman_result *dq)
1598 {
1599         return __qbman_result_is_x(dq, QBMAN_RESULT_CSCN_MEM) ||
1600                 __qbman_result_is_x(dq, QBMAN_RESULT_CSCN_WQ);
1601 }
1602
1603 int qbman_result_is_BPSCN(const struct qbman_result *dq)
1604 {
1605         return __qbman_result_is_x(dq, QBMAN_RESULT_BPSCN);
1606 }
1607
1608 int qbman_result_is_CGCU(const struct qbman_result *dq)
1609 {
1610         return __qbman_result_is_x(dq, QBMAN_RESULT_CGCU);
1611 }
1612
1613 int qbman_result_is_FQRN(const struct qbman_result *dq)
1614 {
1615         return __qbman_result_is_x(dq, QBMAN_RESULT_FQRN);
1616 }
1617
1618 int qbman_result_is_FQRNI(const struct qbman_result *dq)
1619 {
1620         return __qbman_result_is_x(dq, QBMAN_RESULT_FQRNI);
1621 }
1622
1623 int qbman_result_is_FQPN(const struct qbman_result *dq)
1624 {
1625         return __qbman_result_is_x(dq, QBMAN_RESULT_FQPN);
1626 }
1627
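/*
 * Illustrative usage sketch: compose the predicates above into a simple
 * classifier. The string labels are for demonstration only.
 */
static inline const char *example_classify(const struct qbman_result *dq)
{
	if (qbman_result_is_DQ(dq))
		return "frame dequeue";
	if (qbman_result_is_FQDAN(dq) || qbman_result_is_CDAN(dq))
		return "data availability notification";
	if (qbman_result_is_CSCN(dq))
		return "congestion state change";
	if (qbman_result_is_BPSCN(dq))
		return "buffer pool state change";
	return "other notification";
}
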
1628 /*********************************/
1629 /* Parsing frame dequeue results */
1630 /*********************************/
1631
1632 /* These APIs assume qbman_result_is_DQ() is TRUE */
1633
1634 uint8_t qbman_result_DQ_flags(const struct qbman_result *dq)
1635 {
1636         return dq->dq.stat;
1637 }
1638
1639 uint16_t qbman_result_DQ_seqnum(const struct qbman_result *dq)
1640 {
1641         return dq->dq.seqnum;
1642 }
1643
1644 uint16_t qbman_result_DQ_odpid(const struct qbman_result *dq)
1645 {
1646         return dq->dq.oprid;
1647 }
1648
1649 uint32_t qbman_result_DQ_fqid(const struct qbman_result *dq)
1650 {
1651         return dq->dq.fqid;
1652 }
1653
1654 uint32_t qbman_result_DQ_byte_count(const struct qbman_result *dq)
1655 {
1656         return dq->dq.fq_byte_cnt;
1657 }
1658
1659 uint32_t qbman_result_DQ_frame_count(const struct qbman_result *dq)
1660 {
1661         return dq->dq.fq_frm_cnt;
1662 }
1663
1664 uint64_t qbman_result_DQ_fqd_ctx(const struct qbman_result *dq)
1665 {
1666         return dq->dq.fqd_ctx;
1667 }
1668
1669 const struct qbman_fd *qbman_result_DQ_fd(const struct qbman_result *dq)
1670 {
1671         return (const struct qbman_fd *)&dq->dq.fd[0];
1672 }
1673
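/*
 * Illustrative usage sketch: guard the accessors above with the
 * qbman_result_is_DQ() check they assume, returning the frame descriptor
 * and, optionally, the originating FQID.
 */
static inline const struct qbman_fd *
example_dq_to_fd(const struct qbman_result *dq, uint32_t *fqid)
{
	if (!qbman_result_is_DQ(dq))
		return NULL;
	if (fqid != NULL)
		*fqid = qbman_result_DQ_fqid(dq);
	return qbman_result_DQ_fd(dq);
}
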
1674 /**************************************/
1675 /* Parsing state-change notifications */
1676 /**************************************/
1677 uint8_t qbman_result_SCN_state(const struct qbman_result *scn)
1678 {
1679         return scn->scn.state;
1680 }
1681
1682 uint32_t qbman_result_SCN_rid(const struct qbman_result *scn)
1683 {
1684         return scn->scn.rid_tok;
1685 }
1686
1687 uint64_t qbman_result_SCN_ctx(const struct qbman_result *scn)
1688 {
1689         return scn->scn.ctx;
1690 }
1691
1692 /*****************/
1693 /* Parsing BPSCN */
1694 /*****************/
1695 uint16_t qbman_result_bpscn_bpid(const struct qbman_result *scn)
1696 {
1697         return (uint16_t)qbman_result_SCN_rid(scn) & 0x3FFF;
1698 }
1699
1700 int qbman_result_bpscn_has_free_bufs(const struct qbman_result *scn)
1701 {
1702         return !(int)(qbman_result_SCN_state(scn) & 0x1);
1703 }
1704
1705 int qbman_result_bpscn_is_depleted(const struct qbman_result *scn)
1706 {
1707         return (int)(qbman_result_SCN_state(scn) & 0x2);
1708 }
1709
1710 int qbman_result_bpscn_is_surplus(const struct qbman_result *scn)
1711 {
1712         return (int)(qbman_result_SCN_state(scn) & 0x4);
1713 }
1714
1715 uint64_t qbman_result_bpscn_ctx(const struct qbman_result *scn)
1716 {
1717         return qbman_result_SCN_ctx(scn);
1718 }
1719
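/*
 * Illustrative usage sketch: decode a buffer pool state change
 * notification into a debug trace, assuming the caller has already
 * verified qbman_result_is_BPSCN().
 */
static inline void example_trace_bpscn(const struct qbman_result *scn)
{
	pr_debug("BPSCN: bpid=%u depleted=%d surplus=%d free_bufs=%d\n",
		 qbman_result_bpscn_bpid(scn),
		 qbman_result_bpscn_is_depleted(scn),
		 qbman_result_bpscn_is_surplus(scn),
		 qbman_result_bpscn_has_free_bufs(scn));
}
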
1720 /*****************/
1721 /* Parsing CGCU  */
1722 /*****************/
1723 uint16_t qbman_result_cgcu_cgid(const struct qbman_result *scn)
1724 {
1725         return (uint16_t)qbman_result_SCN_rid(scn) & 0xFFFF;
1726 }
1727
1728 uint64_t qbman_result_cgcu_icnt(const struct qbman_result *scn)
1729 {
1730         return qbman_result_SCN_ctx(scn);
1731 }
1732
1733 /********************/
1734 /* Parsing EQ RESP  */
1735 /********************/
1736 struct qbman_fd *qbman_result_eqresp_fd(struct qbman_result *eqresp)
1737 {
1738         return (struct qbman_fd *)&eqresp->eq_resp.fd[0];
1739 }
1740
1741 void qbman_result_eqresp_set_rspid(struct qbman_result *eqresp, uint8_t val)
1742 {
1743         eqresp->eq_resp.rspid = val;
1744 }
1745
1746 uint8_t qbman_result_eqresp_rspid(struct qbman_result *eqresp)
1747 {
1748         return eqresp->eq_resp.rspid;
1749 }
1750
1751 uint8_t qbman_result_eqresp_rc(struct qbman_result *eqresp)
1752 {
1753         if (eqresp->eq_resp.rc == 0xE)
1754                 return 0;
1755         else
1756                 return 0xFF; /* -1 would be truncated by the uint8_t return */
1757 }
1758
1759 /******************/
1760 /* Buffer release */
1761 /******************/
1762 #define QB_BR_RC_VALID_SHIFT  5
1763 #define QB_BR_RCDI_SHIFT      6
1764
1765 void qbman_release_desc_clear(struct qbman_release_desc *d)
1766 {
1767         memset(d, 0, sizeof(*d));
1768         d->br.verb = 1 << QB_BR_RC_VALID_SHIFT;
1769 }
1770
1771 void qbman_release_desc_set_bpid(struct qbman_release_desc *d, uint16_t bpid)
1772 {
1773         d->br.bpid = bpid;
1774 }
1775
1776 void qbman_release_desc_set_rcdi(struct qbman_release_desc *d, int enable)
1777 {
1778         if (enable)
1779                 d->br.verb |= 1 << QB_BR_RCDI_SHIFT;
1780         else
1781                 d->br.verb &= ~(1 << QB_BR_RCDI_SHIFT);
1782 }
1783
1784 #define RAR_IDX(rar)     ((rar) & 0x7)
1785 #define RAR_VB(rar)      ((rar) & 0x80)
1786 #define RAR_SUCCESS(rar) ((rar) & 0x100)
1787
1788 static int qbman_swp_release_direct(struct qbman_swp *s,
1789                                     const struct qbman_release_desc *d,
1790                                     const uint64_t *buffers,
1791                                     unsigned int num_buffers)
1792 {
1793         uint32_t *p;
1794         const uint32_t *cl = qb_cl(d);
1795         uint32_t rar = qbman_cinh_read(&s->sys, QBMAN_CINH_SWP_RAR);
1796
1797         pr_debug("RAR=%08x\n", rar);
1798         if (!RAR_SUCCESS(rar))
1799                 return -EBUSY;
1800
1801         QBMAN_BUG_ON(!num_buffers || (num_buffers > 7));
1802
1803         /* Start the release command */
1804         p = qbman_cena_write_start_wo_shadow(&s->sys,
1805                                      QBMAN_CENA_SWP_RCR(RAR_IDX(rar)));
1806
1807         /* Copy the caller's buffer pointers to the command */
1808         u64_to_le32_copy(&p[2], buffers, num_buffers);
1809
1810         /* Set the verb byte; we have to substitute in the valid-bit and the
1811          * number of buffers.
1812          */
1813         lwsync();
1814         p[0] = cl[0] | RAR_VB(rar) | num_buffers;
1815         qbman_cena_write_complete_wo_shadow(&s->sys,
1816                                     QBMAN_CENA_SWP_RCR(RAR_IDX(rar)));
1817
1818         return 0;
1819 }
1820
1821 static int qbman_swp_release_mem_back(struct qbman_swp *s,
1822                                       const struct qbman_release_desc *d,
1823                                       const uint64_t *buffers,
1824                                       unsigned int num_buffers)
1825 {
1826         uint32_t *p;
1827         const uint32_t *cl = qb_cl(d);
1828         uint32_t rar = qbman_cinh_read(&s->sys, QBMAN_CINH_SWP_RAR);
1829
1830         pr_debug("RAR=%08x\n", rar);
1831         if (!RAR_SUCCESS(rar))
1832                 return -EBUSY;
1833
1834         QBMAN_BUG_ON(!num_buffers || (num_buffers > 7));
1835
1836         /* Start the release command */
1837         p = qbman_cena_write_start_wo_shadow(&s->sys,
1838                 QBMAN_CENA_SWP_RCR_MEM(RAR_IDX(rar)));
1839
1840         /* Copy the caller's buffer pointers to the command */
1841         u64_to_le32_copy(&p[2], buffers, num_buffers);
1842
1843         /* Set the verb byte; we have to substitute in the valid-bit and the
1844          * number of buffers.
1845          */
1846         p[0] = cl[0] | RAR_VB(rar) | num_buffers;
1847         lwsync();
1848         qbman_cinh_write(&s->sys, QBMAN_CINH_SWP_RCR_AM_RT +
1849                 RAR_IDX(rar) * 4, QMAN_RT_MODE);
1850
1851         return 0;
1852 }
1853
1854 inline int qbman_swp_release(struct qbman_swp *s,
1855                              const struct qbman_release_desc *d,
1856                              const uint64_t *buffers,
1857                              unsigned int num_buffers)
1858 {
1859         return qbman_swp_release_ptr(s, d, buffers, num_buffers);
1860 }
1861
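/*
 * Illustrative usage sketch: release a batch of 1..7 buffers back to a
 * pool, retrying while the release command ring is full (-EBUSY).
 */
static inline int example_release_bufs(struct qbman_swp *s, uint16_t bpid,
		const uint64_t *buffers, unsigned int num)
{
	struct qbman_release_desc d;
	int ret;

	qbman_release_desc_clear(&d);
	qbman_release_desc_set_bpid(&d, bpid);
	do {
		ret = qbman_swp_release(s, &d, buffers, num);
	} while (ret == -EBUSY);
	return ret;
}
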
1862 /*******************/
1863 /* Buffer acquires */
1864 /*******************/
1865 struct qbman_acquire_desc {
1866         uint8_t verb;
1867         uint8_t reserved;
1868         uint16_t bpid;
1869         uint8_t num;
1870         uint8_t reserved2[59];
1871 };
1872
1873 struct qbman_acquire_rslt {
1874         uint8_t verb;
1875         uint8_t rslt;
1876         uint16_t reserved;
1877         uint8_t num;
1878         uint8_t reserved2[3];
1879         uint64_t buf[7];
1880 };
1881
1882 int qbman_swp_acquire(struct qbman_swp *s, uint16_t bpid, uint64_t *buffers,
1883                       unsigned int num_buffers)
1884 {
1885         struct qbman_acquire_desc *p;
1886         struct qbman_acquire_rslt *r;
1887
1888         if (!num_buffers || (num_buffers > 7))
1889                 return -EINVAL;
1890
1891         /* Start the management command */
1892         p = qbman_swp_mc_start(s);
1893
1894         if (!p)
1895                 return -EBUSY;
1896
1897         /* Encode the caller-provided attributes */
1898         p->bpid = bpid;
1899         p->num = num_buffers;
1900
1901         /* Complete the management command */
1902         r = qbman_swp_mc_complete(s, p, QBMAN_MC_ACQUIRE);
1903         if (!r) {
1904                 pr_err("qbman: acquire from BPID %d failed, no response\n",
1905                        bpid);
1906                 return -EIO;
1907         }
1908
1909         /* Decode the outcome */
1910         QBMAN_BUG_ON((r->verb & QBMAN_RESPONSE_VERB_MASK) != QBMAN_MC_ACQUIRE);
1911
1912         /* Determine success or failure */
1913         if (r->rslt != QBMAN_MC_RSLT_OK) {
1914                 pr_err("Acquire buffers from BPID 0x%x failed, code=0x%02x\n",
1915                        bpid, r->rslt);
1916                 return -EIO;
1917         }
1918
1919         QBMAN_BUG_ON(r->num > num_buffers);
1920
1921         /* Copy the acquired buffers to the caller's array */
1922         u64_from_le32_copy(buffers, &r->buf[0], r->num);
1923
1924         return (int)r->num;
1925 }
1926
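/*
 * Illustrative usage sketch: acquire up to seven buffers from a pool,
 * retrying while the management command interface is busy. A non-negative
 * return is the number of buffers actually granted, which may be fewer
 * than requested.
 */
static inline int example_acquire_bufs(struct qbman_swp *s, uint16_t bpid,
		uint64_t *buffers, unsigned int num)
{
	int ret;

	do {
		ret = qbman_swp_acquire(s, bpid, buffers, num);
	} while (ret == -EBUSY);
	return ret;	/* on success, 0..num addresses written to buffers[] */
}
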
1927 /*****************/
1928 /* FQ management */
1929 /*****************/
1930 struct qbman_alt_fq_state_desc {
1931         uint8_t verb;
1932         uint8_t reserved[3];
1933         uint32_t fqid;
1934         uint8_t reserved2[56];
1935 };
1936
1937 struct qbman_alt_fq_state_rslt {
1938         uint8_t verb;
1939         uint8_t rslt;
1940         uint8_t reserved[62];
1941 };
1942
1943 #define ALT_FQ_FQID_MASK 0x00FFFFFF
1944
1945 static int qbman_swp_alt_fq_state(struct qbman_swp *s, uint32_t fqid,
1946                                   uint8_t alt_fq_verb)
1947 {
1948         struct qbman_alt_fq_state_desc *p;
1949         struct qbman_alt_fq_state_rslt *r;
1950
1951         /* Start the management command */
1952         p = qbman_swp_mc_start(s);
1953         if (!p)
1954                 return -EBUSY;
1955
1956         p->fqid = fqid & ALT_FQ_FQID_MASK;
1957
1958         /* Complete the management command */
1959         r = qbman_swp_mc_complete(s, p, alt_fq_verb);
1960         if (!r) {
1961                 pr_err("qbman: mgmt cmd failed, no response (verb=0x%x)\n",
1962                        alt_fq_verb);
1963                 return -EIO;
1964         }
1965
1966         /* Decode the outcome */
1967         QBMAN_BUG_ON((r->verb & QBMAN_RESPONSE_VERB_MASK) != alt_fq_verb);
1968
1969         /* Determine success or failure */
1970         if (r->rslt != QBMAN_MC_RSLT_OK) {
1971                 pr_err("ALT FQID %d failed: verb = 0x%08x, code = 0x%02x\n",
1972                        fqid, alt_fq_verb, r->rslt);
1973                 return -EIO;
1974         }
1975
1976         return 0;
1977 }
1978
1979 int qbman_swp_fq_schedule(struct qbman_swp *s, uint32_t fqid)
1980 {
1981         return qbman_swp_alt_fq_state(s, fqid, QBMAN_FQ_SCHEDULE);
1982 }
1983
1984 int qbman_swp_fq_force(struct qbman_swp *s, uint32_t fqid)
1985 {
1986         return qbman_swp_alt_fq_state(s, fqid, QBMAN_FQ_FORCE);
1987 }
1988
1989 int qbman_swp_fq_xon(struct qbman_swp *s, uint32_t fqid)
1990 {
1991         return qbman_swp_alt_fq_state(s, fqid, QBMAN_FQ_XON);
1992 }
1993
1994 int qbman_swp_fq_xoff(struct qbman_swp *s, uint32_t fqid)
1995 {
1996         return qbman_swp_alt_fq_state(s, fqid, QBMAN_FQ_XOFF);
1997 }
1998
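/*
 * Illustrative usage sketch: pause a frame queue with XOFF, then resume it
 * with XON, propagating any management command failure.
 */
static inline int example_pause_resume_fq(struct qbman_swp *s, uint32_t fqid)
{
	int ret = qbman_swp_fq_xoff(s, fqid);

	if (ret)
		return ret;
	/* ... the queue no longer schedules frames; do maintenance ... */
	return qbman_swp_fq_xon(s, fqid);
}
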
1999 /**********************/
2000 /* Channel management */
2001 /**********************/
2002
2003 struct qbman_cdan_ctrl_desc {
2004         uint8_t verb;
2005         uint8_t reserved;
2006         uint16_t ch;
2007         uint8_t we;
2008         uint8_t ctrl;
2009         uint16_t reserved2;
2010         uint64_t cdan_ctx;
2011         uint8_t reserved3[48];
2013 };
2014
2015 struct qbman_cdan_ctrl_rslt {
2016         uint8_t verb;
2017         uint8_t rslt;
2018         uint16_t ch;
2019         uint8_t reserved[60];
2020 };
2021
2022 /* Hide "ICD" for now as we don't use it, don't set it, and don't test it, so it
2023  * would be irresponsible to expose it.
2024  */
2025 #define CODE_CDAN_WE_EN    0x1
2026 #define CODE_CDAN_WE_CTX   0x4
2027
2028 static int qbman_swp_CDAN_set(struct qbman_swp *s, uint16_t channelid,
2029                               uint8_t we_mask, uint8_t cdan_en,
2030                               uint64_t ctx)
2031 {
2032         struct qbman_cdan_ctrl_desc *p;
2033         struct qbman_cdan_ctrl_rslt *r;
2034
2035         /* Start the management command */
2036         p = qbman_swp_mc_start(s);
2037         if (!p)
2038                 return -EBUSY;
2039
2040         /* Encode the caller-provided attributes */
2041         p->ch = channelid;
2042         p->we = we_mask;
2043         if (cdan_en)
2044                 p->ctrl = 1;
2045         else
2046                 p->ctrl = 0;
2047         p->cdan_ctx = ctx;
2048
2049         /* Complete the management command */
2050         r = qbman_swp_mc_complete(s, p, QBMAN_WQCHAN_CONFIGURE);
2051         if (!r) {
2052                 pr_err("qbman: wqchan config failed, no response\n");
2053                 return -EIO;
2054         }
2055
2056         /* Decode the outcome */
2057         QBMAN_BUG_ON((r->verb & QBMAN_RESPONSE_VERB_MASK)
2058                      != QBMAN_WQCHAN_CONFIGURE);
2059
2060         /* Determine success or failure */
2061         if (r->rslt != QBMAN_MC_RSLT_OK) {
2062                 pr_err("CDAN cQID %d failed: code = 0x%02x\n",
2063                        channelid, r->rslt);
2064                 return -EIO;
2065         }
2066
2067         return 0;
2068 }
2069
2070 int qbman_swp_CDAN_set_context(struct qbman_swp *s, uint16_t channelid,
2071                                uint64_t ctx)
2072 {
2073         return qbman_swp_CDAN_set(s, channelid,
2074                                   CODE_CDAN_WE_CTX,
2075                                   0, ctx);
2076 }
2077
2078 int qbman_swp_CDAN_enable(struct qbman_swp *s, uint16_t channelid)
2079 {
2080         return qbman_swp_CDAN_set(s, channelid,
2081                                   CODE_CDAN_WE_EN,
2082                                   1, 0);
2083 }
2084
2085 int qbman_swp_CDAN_disable(struct qbman_swp *s, uint16_t channelid)
2086 {
2087         return qbman_swp_CDAN_set(s, channelid,
2088                                   CODE_CDAN_WE_EN,
2089                                   0, 0);
2090 }
2091
2092 int qbman_swp_CDAN_set_context_enable(struct qbman_swp *s, uint16_t channelid,
2093                                       uint64_t ctx)
2094 {
2095         return qbman_swp_CDAN_set(s, channelid,
2096                                   CODE_CDAN_WE_EN | CODE_CDAN_WE_CTX,
2097                                   1, ctx);
2098 }
2099
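/*
 * Illustrative usage sketch: install a notification context and enable
 * CDANs for a channel in one management command, rather than two round
 * trips through the interface.
 */
static inline int example_arm_channel(struct qbman_swp *s, uint16_t channelid,
		uint64_t ctx)
{
	return qbman_swp_CDAN_set_context_enable(s, channelid, ctx);
}
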
2100 uint8_t qbman_get_dqrr_idx(const struct qbman_result *dqrr)
2101 {
2102         return QBMAN_IDX_FROM_DQRR(dqrr);
2103 }
2104
2105 struct qbman_result *qbman_get_dqrr_from_idx(struct qbman_swp *s, uint8_t idx)
2106 {
2107         struct qbman_result *dq;
2108
2109         dq = qbman_cena_read(&s->sys, QBMAN_CENA_SWP_DQRR(idx));
2110         return dq;
2111 }
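
/*
 * Illustrative usage sketch: defer consumption of a DQRR entry by holding
 * only its ring index, then consume it by index later.
 */
static inline void example_deferred_consume(struct qbman_swp *s,
		const struct qbman_result *dq)
{
	uint8_t idx = qbman_get_dqrr_idx(dq);

	/* ... later, when processing of the entry is complete ... */
	qbman_swp_dqrr_idx_consume(s, idx);
}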