1 /* SPDX-License-Identifier: BSD-3-Clause
3 * Copyright (C) 2014-2016 Freescale Semiconductor, Inc.
4 * Copyright 2018-2020 NXP
9 #include "qbman_portal.h"
11 /* QBMan portal management command codes */
12 #define QBMAN_MC_ACQUIRE 0x30
13 #define QBMAN_WQCHAN_CONFIGURE 0x46
15 /* Reverse mapping of QBMAN_CENA_SWP_DQRR() */
16 #define QBMAN_IDX_FROM_DQRR(p) (((unsigned long)p & 0x1ff) >> 6)
18 /* QBMan FQ management command codes */
19 #define QBMAN_FQ_SCHEDULE 0x48
20 #define QBMAN_FQ_FORCE 0x49
21 #define QBMAN_FQ_XON 0x4d
22 #define QBMAN_FQ_XOFF 0x4e
24 /*******************************/
25 /* Pre-defined attribute codes */
26 /*******************************/
28 #define QBMAN_RESPONSE_VERB_MASK 0x7f
30 /*************************/
31 /* SDQCR attribute codes */
32 /*************************/
33 #define QB_SDQCR_FC_SHIFT 29
34 #define QB_SDQCR_FC_MASK 0x1
35 #define QB_SDQCR_DCT_SHIFT 24
36 #define QB_SDQCR_DCT_MASK 0x3
37 #define QB_SDQCR_TOK_SHIFT 16
38 #define QB_SDQCR_TOK_MASK 0xff
39 #define QB_SDQCR_SRC_SHIFT 0
40 #define QB_SDQCR_SRC_MASK 0xffff
42 /* opaque token for static dequeues */
43 #define QMAN_SDQCR_TOKEN 0xbb
45 enum qbman_sdqcr_dct {
46 qbman_sdqcr_dct_null = 0,
47 qbman_sdqcr_dct_prio_ics,
48 qbman_sdqcr_dct_active_ics,
49 qbman_sdqcr_dct_active
53 qbman_sdqcr_fc_one = 0,
54 qbman_sdqcr_fc_up_to_3 = 1
57 /* We need to keep track of which SWP triggered a pull command
58 * so keep an array of portal IDs and use the token field to
59 * be able to find the proper portal
61 #define MAX_QBMAN_PORTALS 64
62 static struct qbman_swp *portal_idx_map[MAX_QBMAN_PORTALS];
64 uint32_t qman_version;
66 /* Internal Function declaration */
68 qbman_swp_enqueue_array_mode_direct(struct qbman_swp *s,
69 const struct qbman_eq_desc *d,
70 const struct qbman_fd *fd);
72 qbman_swp_enqueue_array_mode_mem_back(struct qbman_swp *s,
73 const struct qbman_eq_desc *d,
74 const struct qbman_fd *fd);
77 qbman_swp_enqueue_ring_mode_direct(struct qbman_swp *s,
78 const struct qbman_eq_desc *d,
79 const struct qbman_fd *fd);
81 qbman_swp_enqueue_ring_mode_cinh_read_direct(struct qbman_swp *s,
82 const struct qbman_eq_desc *d,
83 const struct qbman_fd *fd);
85 qbman_swp_enqueue_ring_mode_cinh_direct(struct qbman_swp *s,
86 const struct qbman_eq_desc *d,
87 const struct qbman_fd *fd);
89 qbman_swp_enqueue_ring_mode_mem_back(struct qbman_swp *s,
90 const struct qbman_eq_desc *d,
91 const struct qbman_fd *fd);
94 qbman_swp_enqueue_multiple_direct(struct qbman_swp *s,
95 const struct qbman_eq_desc *d,
96 const struct qbman_fd *fd,
100 qbman_swp_enqueue_multiple_cinh_read_direct(struct qbman_swp *s,
101 const struct qbman_eq_desc *d,
102 const struct qbman_fd *fd,
106 qbman_swp_enqueue_multiple_cinh_direct(struct qbman_swp *s,
107 const struct qbman_eq_desc *d,
108 const struct qbman_fd *fd,
112 qbman_swp_enqueue_multiple_mem_back(struct qbman_swp *s,
113 const struct qbman_eq_desc *d,
114 const struct qbman_fd *fd,
119 qbman_swp_enqueue_multiple_fd_direct(struct qbman_swp *s,
120 const struct qbman_eq_desc *d,
121 struct qbman_fd **fd,
125 qbman_swp_enqueue_multiple_fd_cinh_read_direct(struct qbman_swp *s,
126 const struct qbman_eq_desc *d,
127 struct qbman_fd **fd,
131 qbman_swp_enqueue_multiple_fd_cinh_direct(struct qbman_swp *s,
132 const struct qbman_eq_desc *d,
133 struct qbman_fd **fd,
137 qbman_swp_enqueue_multiple_fd_mem_back(struct qbman_swp *s,
138 const struct qbman_eq_desc *d,
139 struct qbman_fd **fd,
144 qbman_swp_enqueue_multiple_desc_direct(struct qbman_swp *s,
145 const struct qbman_eq_desc *d,
146 const struct qbman_fd *fd,
149 qbman_swp_enqueue_multiple_desc_cinh_read_direct(struct qbman_swp *s,
150 const struct qbman_eq_desc *d,
151 const struct qbman_fd *fd,
154 qbman_swp_enqueue_multiple_desc_cinh_direct(struct qbman_swp *s,
155 const struct qbman_eq_desc *d,
156 const struct qbman_fd *fd,
159 qbman_swp_enqueue_multiple_desc_mem_back(struct qbman_swp *s,
160 const struct qbman_eq_desc *d,
161 const struct qbman_fd *fd,
165 qbman_swp_pull_direct(struct qbman_swp *s, struct qbman_pull_desc *d);
167 qbman_swp_pull_cinh_direct(struct qbman_swp *s, struct qbman_pull_desc *d);
169 qbman_swp_pull_mem_back(struct qbman_swp *s, struct qbman_pull_desc *d);
171 const struct qbman_result *qbman_swp_dqrr_next_direct(struct qbman_swp *s);
172 const struct qbman_result *qbman_swp_dqrr_next_cinh_direct(struct qbman_swp *s);
173 const struct qbman_result *qbman_swp_dqrr_next_mem_back(struct qbman_swp *s);
176 qbman_swp_release_direct(struct qbman_swp *s,
177 const struct qbman_release_desc *d,
178 const uint64_t *buffers, unsigned int num_buffers);
180 qbman_swp_release_cinh_direct(struct qbman_swp *s,
181 const struct qbman_release_desc *d,
182 const uint64_t *buffers, unsigned int num_buffers);
184 qbman_swp_release_mem_back(struct qbman_swp *s,
185 const struct qbman_release_desc *d,
186 const uint64_t *buffers, unsigned int num_buffers);
188 /* Function pointers */
189 static int (*qbman_swp_enqueue_array_mode_ptr)(struct qbman_swp *s,
190 const struct qbman_eq_desc *d,
191 const struct qbman_fd *fd)
192 = qbman_swp_enqueue_array_mode_direct;
194 static int (*qbman_swp_enqueue_ring_mode_ptr)(struct qbman_swp *s,
195 const struct qbman_eq_desc *d,
196 const struct qbman_fd *fd)
197 = qbman_swp_enqueue_ring_mode_direct;
199 static int (*qbman_swp_enqueue_multiple_ptr)(struct qbman_swp *s,
200 const struct qbman_eq_desc *d,
201 const struct qbman_fd *fd,
204 = qbman_swp_enqueue_multiple_direct;
206 static int (*qbman_swp_enqueue_multiple_fd_ptr)(struct qbman_swp *s,
207 const struct qbman_eq_desc *d,
208 struct qbman_fd **fd,
211 = qbman_swp_enqueue_multiple_fd_direct;
213 static int (*qbman_swp_enqueue_multiple_desc_ptr)(struct qbman_swp *s,
214 const struct qbman_eq_desc *d,
215 const struct qbman_fd *fd,
217 = qbman_swp_enqueue_multiple_desc_direct;
219 static int (*qbman_swp_pull_ptr)(struct qbman_swp *s,
220 struct qbman_pull_desc *d)
221 = qbman_swp_pull_direct;
223 const struct qbman_result *(*qbman_swp_dqrr_next_ptr)(struct qbman_swp *s)
224 = qbman_swp_dqrr_next_direct;
226 static int (*qbman_swp_release_ptr)(struct qbman_swp *s,
227 const struct qbman_release_desc *d,
228 const uint64_t *buffers, unsigned int num_buffers)
229 = qbman_swp_release_direct;
231 /*********************************/
232 /* Portal constructor/destructor */
233 /*********************************/
235 /* Software portals should always be in the power-on state when we initialise,
236 * due to the CCSR-based portal reset functionality that MC has.
238 * Erk! Turns out that QMan versions prior to 4.1 do not correctly reset DQRR
239 * valid-bits, so we need to support a workaround where we don't trust
240 * valid-bits when detecting new entries until any stale ring entries have been
241 * overwritten at least once. The idea is that we read PI for the first few
242 * entries, then switch to valid-bit after that. The trick is to clear the
243 * bug-work-around boolean once the PI wraps around the ring for the first time.
245 * Note: this still carries a slight additional cost once the decrementer hits
/*
 * qbman_swp_init() - build a software-portal object from its descriptor.
 * Allocates and zeroes the portal, seeds the SDQCR template word, sizes the
 * DQRR, initialises the hardware via qbman_swp_sys_init(), selects the
 * function-pointer variants for this QMan revision, derives the EQCR
 * PI/CI masks and registers the portal in portal_idx_map[].
 * NOTE(review): several original lines (error paths, braces, returns) are
 * elided in this view; comments below cover only the visible statements.
 */
248 struct qbman_swp *qbman_swp_init(const struct qbman_swp_desc *d)
253 struct qbman_swp *p = malloc(sizeof(*p));
258 memset(p, 0, sizeof(struct qbman_swp));
261 #ifdef QBMAN_CHECKING
262 p->mc.check = swp_mc_can_start;
264 p->mc.valid_bit = QB_VALID_BIT;
/* Pre-computed static-dequeue command word; written to HW only when
 * channel dequeues are later enabled (see SDQCR comment below).
 */
265 p->sdq |= qbman_sdqcr_dct_prio_ics << QB_SDQCR_DCT_SHIFT;
266 p->sdq |= qbman_sdqcr_fc_up_to_3 << QB_SDQCR_FC_SHIFT;
267 p->sdq |= QMAN_SDQCR_TOKEN << QB_SDQCR_TOK_SHIFT;
/* The MR valid-bit is only used on rev >= 5000 portals with the fastest
 * (memory-backed) CENA access mode.
 */
268 if ((d->qman_version & QMAN_REV_MASK) >= QMAN_REV_5000
269 && (d->cena_access_mode == qman_cena_fastest_access))
270 p->mr.valid_bit = QB_VALID_BIT;
272 atomic_set(&p->vdq.busy, 1);
273 p->vdq.valid_bit = QB_VALID_BIT;
274 p->dqrr.valid_bit = QB_VALID_BIT;
275 qman_version = p->desc.qman_version;
/* QMan before rev 4100: 4-entry DQRR plus the DQRR valid-bit reset-bug
 * workaround described in the header comment above this function.
 */
276 if ((qman_version & QMAN_REV_MASK) < QMAN_REV_4100) {
277 p->dqrr.dqrr_size = 4;
278 p->dqrr.reset_bug = 1;
280 p->dqrr.dqrr_size = 8;
281 p->dqrr.reset_bug = 0;
284 ret = qbman_swp_sys_init(&p->sys, d, p->dqrr.dqrr_size);
287 pr_err("qbman_swp_sys_init() failed %d\n", ret);
291 /* Verify that the DQRRPI is 0 - if it is not the portal isn't
292 * in default state which is an error
294 if (qbman_cinh_read(&p->sys, QBMAN_CINH_SWP_DQPI) & 0xF) {
295 pr_err("qbman DQRR PI is not zero, portal is not clean\n");
300 /* SDQCR needs to be initialized to 0 when no channels are
301 * being dequeued from or else the QMan HW will indicate an
302 * error. The values that were calculated above will be
303 * applied when dequeues from a specific channel are enabled.
305 qbman_cinh_write(&p->sys, QBMAN_CINH_SWP_SDQCR, 0);
307 p->eqcr.pi_ring_size = 8;
/* rev >= 5000 fast-access portals: 32-deep EQCR and memory-backed
 * implementations for enqueue/pull/dqrr/release.
 */
308 if ((qman_version & QMAN_REV_MASK) >= QMAN_REV_5000
309 && (d->cena_access_mode == qman_cena_fastest_access)) {
310 p->eqcr.pi_ring_size = 32;
311 qbman_swp_enqueue_array_mode_ptr =
312 qbman_swp_enqueue_array_mode_mem_back;
313 qbman_swp_enqueue_ring_mode_ptr =
314 qbman_swp_enqueue_ring_mode_mem_back;
315 qbman_swp_enqueue_multiple_ptr =
316 qbman_swp_enqueue_multiple_mem_back;
317 qbman_swp_enqueue_multiple_fd_ptr =
318 qbman_swp_enqueue_multiple_fd_mem_back;
319 qbman_swp_enqueue_multiple_desc_ptr =
320 qbman_swp_enqueue_multiple_desc_mem_back;
321 qbman_swp_pull_ptr = qbman_swp_pull_mem_back;
322 qbman_swp_dqrr_next_ptr = qbman_swp_dqrr_next_mem_back;
323 qbman_swp_release_ptr = qbman_swp_release_mem_back;
/* LS1080A uses the cinh-read enqueue variants instead of the plain
 * direct ones.
 */
326 if (dpaa2_svr_family == SVR_LS1080A) {
327 qbman_swp_enqueue_ring_mode_ptr =
328 qbman_swp_enqueue_ring_mode_cinh_read_direct;
329 qbman_swp_enqueue_multiple_ptr =
330 qbman_swp_enqueue_multiple_cinh_read_direct;
331 qbman_swp_enqueue_multiple_fd_ptr =
332 qbman_swp_enqueue_multiple_fd_cinh_read_direct;
333 qbman_swp_enqueue_multiple_desc_ptr =
334 qbman_swp_enqueue_multiple_desc_cinh_read_direct;
/* Build the PI/CI mask: one set bit per halving of the ring size, i.e.
 * the mask ends up as (2 * pi_ring_size - 1).
 */
337 for (mask_size = p->eqcr.pi_ring_size; mask_size > 0; mask_size >>= 1)
338 p->eqcr.pi_ci_mask = (p->eqcr.pi_ci_mask<<1) + 1;
339 eqcr_pi = qbman_cinh_read(&p->sys, QBMAN_CINH_SWP_EQCR_PI);
340 p->eqcr.pi = eqcr_pi & p->eqcr.pi_ci_mask;
341 p->eqcr.pi_vb = eqcr_pi & QB_VALID_BIT;
342 p->eqcr.ci = qbman_cinh_read(&p->sys, QBMAN_CINH_SWP_EQCR_CI)
343 & p->eqcr.pi_ci_mask;
344 p->eqcr.available = p->eqcr.pi_ring_size;
/* Make the portal findable from the token carried in pull responses. */
346 portal_idx_map[p->desc.idx] = p;
350 int qbman_swp_update(struct qbman_swp *p, int stash_off)
352 const struct qbman_swp_desc *d = &p->desc;
353 struct qbman_swp_sys *s = &p->sys;
356 /* Nothing needs to be done for QBMAN rev > 5000 with fast access */
357 if ((qman_version & QMAN_REV_MASK) >= QMAN_REV_5000
358 && (d->cena_access_mode == qman_cena_fastest_access))
361 ret = qbman_swp_sys_update(s, d, p->dqrr.dqrr_size, stash_off);
363 pr_err("qbman_swp_sys_init() failed %d\n", ret);
367 p->stash_off = stash_off;
/*
 * qbman_swp_finish() - tear down a software portal created by
 * qbman_swp_init(): release the sys layer and drop the portal from
 * portal_idx_map[].  NOTE(review): the free of 'p' is presumably on an
 * elided line after the visible ones — confirm against the full source.
 */
372 void qbman_swp_finish(struct qbman_swp *p)
374 #ifdef QBMAN_CHECKING
375 QBMAN_BUG_ON(p->mc.check != swp_mc_can_start);
377 qbman_swp_sys_finish(&p->sys);
378 portal_idx_map[p->desc.idx] = NULL;
382 const struct qbman_swp_desc *qbman_swp_get_desc(struct qbman_swp *p)
391 uint32_t qbman_swp_interrupt_get_vanish(struct qbman_swp *p)
393 return qbman_cinh_read(&p->sys, QBMAN_CINH_SWP_ISDR);
396 void qbman_swp_interrupt_set_vanish(struct qbman_swp *p, uint32_t mask)
398 qbman_cinh_write(&p->sys, QBMAN_CINH_SWP_ISDR, mask);
401 uint32_t qbman_swp_interrupt_read_status(struct qbman_swp *p)
403 return qbman_cinh_read(&p->sys, QBMAN_CINH_SWP_ISR);
406 void qbman_swp_interrupt_clear_status(struct qbman_swp *p, uint32_t mask)
408 qbman_cinh_write(&p->sys, QBMAN_CINH_SWP_ISR, mask);
411 uint32_t qbman_swp_dqrr_thrshld_read_status(struct qbman_swp *p)
413 return qbman_cinh_read(&p->sys, QBMAN_CINH_SWP_DQRR_ITR);
416 void qbman_swp_dqrr_thrshld_write(struct qbman_swp *p, uint32_t mask)
418 qbman_cinh_write(&p->sys, QBMAN_CINH_SWP_DQRR_ITR, mask);
421 uint32_t qbman_swp_intr_timeout_read_status(struct qbman_swp *p)
423 return qbman_cinh_read(&p->sys, QBMAN_CINH_SWP_ITPR);
426 void qbman_swp_intr_timeout_write(struct qbman_swp *p, uint32_t mask)
428 qbman_cinh_write(&p->sys, QBMAN_CINH_SWP_ITPR, mask);
431 uint32_t qbman_swp_interrupt_get_trigger(struct qbman_swp *p)
433 return qbman_cinh_read(&p->sys, QBMAN_CINH_SWP_IER);
436 void qbman_swp_interrupt_set_trigger(struct qbman_swp *p, uint32_t mask)
438 qbman_cinh_write(&p->sys, QBMAN_CINH_SWP_IER, mask);
441 int qbman_swp_interrupt_get_inhibit(struct qbman_swp *p)
443 return qbman_cinh_read(&p->sys, QBMAN_CINH_SWP_IIR);
446 void qbman_swp_interrupt_set_inhibit(struct qbman_swp *p, int inhibit)
448 qbman_cinh_write(&p->sys, QBMAN_CINH_SWP_IIR,
449 inhibit ? 0xffffffff : 0);
452 /***********************/
453 /* Management commands */
454 /***********************/
457 * Internal code common to all types of management commands.
/*
 * qbman_swp_mc_start() - begin composing a management command.
 * Returns a write cursor into the command ring: CR_MEM for rev >= 5000
 * portals with fastest CENA access, otherwise the classic CR.
 */
460 void *qbman_swp_mc_start(struct qbman_swp *p)
463 #ifdef QBMAN_CHECKING
464 QBMAN_BUG_ON(p->mc.check != swp_mc_can_start)
466 if ((p->desc.qman_version & QMAN_REV_MASK) >= QMAN_REV_5000
467 && (p->desc.cena_access_mode == qman_cena_fastest_access))
468 ret = qbman_cena_write_start(&p->sys, QBMAN_CENA_SWP_CR_MEM);
470 ret = qbman_cena_write_start(&p->sys, QBMAN_CENA_SWP_CR);
471 #ifdef QBMAN_CHECKING
/* Advance the debug state machine: next legal step is submit. */
473 p->mc.check = swp_mc_can_submit;
/*
 * qbman_swp_mc_submit() - fire off a management command started with
 * qbman_swp_mc_start().  The verb byte gets the portal's current
 * valid-bit OR'ed in (mr.valid_bit on rev >= 5000 fast-access portals,
 * mc.valid_bit otherwise) and the command is pushed to hardware.
 */
478 void qbman_swp_mc_submit(struct qbman_swp *p, void *cmd, uint8_t cmd_verb)
481 #ifdef QBMAN_CHECKING
/* NOTE(review): the '!(... != ...)' double negation makes this fire when
 * the state IS swp_mc_can_submit, which looks inverted — verify intent.
 */
482 QBMAN_BUG_ON(!(p->mc.check != swp_mc_can_submit));
484 /* TBD: "|=" is going to hurt performance. Need to move as many fields
485 * out of word zero, and for those that remain, the "OR" needs to occur
486 * at the caller side. This debug check helps to catch cases where the
487 * caller wants to OR but has forgotten to do so.
489 QBMAN_BUG_ON((*v & cmd_verb) != *v);
490 if ((p->desc.qman_version & QMAN_REV_MASK) >= QMAN_REV_5000
491 && (p->desc.cena_access_mode == qman_cena_fastest_access)) {
492 *v = cmd_verb | p->mr.valid_bit;
493 qbman_cena_write_complete(&p->sys, QBMAN_CENA_SWP_CR_MEM, cmd);
/* Ring the real-time doorbell so HW consumes the mem-back command. */
495 qbman_cinh_write(&p->sys, QBMAN_CINH_SWP_CR_RT, QMAN_RT_MODE);
498 *v = cmd_verb | p->mc.valid_bit;
499 qbman_cena_write_complete(&p->sys, QBMAN_CENA_SWP_CR, cmd);
502 #ifdef QBMAN_CHECKING
503 p->mc.check = swp_mc_can_poll;
/*
 * qbman_swp_mc_submit_cinh() - as qbman_swp_mc_submit(), but the command
 * is written entirely through the cache-inhibited interface.
 */
507 void qbman_swp_mc_submit_cinh(struct qbman_swp *p, void *cmd, uint8_t cmd_verb)
510 #ifdef QBMAN_CHECKING
/* NOTE(review): same suspicious double negation as qbman_swp_mc_submit()
 * — fires when the state IS swp_mc_can_submit; verify intent.
 */
511 QBMAN_BUG_ON(!(p->mc.check != swp_mc_can_submit));
513 /* TBD: "|=" is going to hurt performance. Need to move as many fields
514 * out of word zero, and for those that remain, the "OR" needs to occur
515 * at the caller side. This debug check helps to catch cases where the
516 * caller wants to OR but has forgotten to do so.
518 QBMAN_BUG_ON((*v & cmd_verb) != *v);
520 *v = cmd_verb | p->mc.valid_bit;
521 qbman_cinh_write_complete(&p->sys, QBMAN_CENA_SWP_CR, cmd);
523 #ifdef QBMAN_CHECKING
524 p->mc.check = swp_mc_can_poll;
/*
 * qbman_swp_mc_result() - poll for the response to a submitted management
 * command.  Rev >= 5000 fast-access portals read RR_MEM and detect
 * completion by a toggled valid bit; older portals read RR(valid_bit) and
 * treat a non-zero verb (valid bit stripped) as completion.  The relevant
 * valid-bit is flipped ready for the next command.  Returns the response
 * buffer (elided branches presumably return NULL while still pending).
 */
528 void *qbman_swp_mc_result(struct qbman_swp *p)
531 #ifdef QBMAN_CHECKING
532 QBMAN_BUG_ON(p->mc.check != swp_mc_can_poll);
534 if ((p->desc.qman_version & QMAN_REV_MASK) >= QMAN_REV_5000
535 && (p->desc.cena_access_mode == qman_cena_fastest_access)) {
536 ret = qbman_cena_read(&p->sys, QBMAN_CENA_SWP_RR_MEM);
537 /* Command completed if the valid bit is toggled */
538 if (p->mr.valid_bit != (ret[0] & QB_VALID_BIT))
540 /* Remove the valid-bit -
541 * command completed iff the rest is non-zero
543 verb = ret[0] & ~QB_VALID_BIT;
/* Flip so the next command uses the opposite valid bit. */
546 p->mr.valid_bit ^= QB_VALID_BIT;
/* Classic path: invalidate/prefetch then read the RR slot selected by
 * the current mc valid-bit.
 */
548 qbman_cena_invalidate_prefetch(&p->sys,
549 QBMAN_CENA_SWP_RR(p->mc.valid_bit));
550 ret = qbman_cena_read(&p->sys,
551 QBMAN_CENA_SWP_RR(p->mc.valid_bit));
552 /* Remove the valid-bit -
553 * command completed iff the rest is non-zero
555 verb = ret[0] & ~QB_VALID_BIT;
558 p->mc.valid_bit ^= QB_VALID_BIT;
560 #ifdef QBMAN_CHECKING
561 p->mc.check = swp_mc_can_start;
/*
 * qbman_swp_mc_result_cinh() - poll for a management-command response via
 * the cache-inhibited shadow.  Completion is a non-zero verb after the
 * valid bit is stripped; the mc valid-bit is flipped for the next command.
 */
566 void *qbman_swp_mc_result_cinh(struct qbman_swp *p)
569 #ifdef QBMAN_CHECKING
570 QBMAN_BUG_ON(p->mc.check != swp_mc_can_poll);
572 ret = qbman_cinh_read_shadow(&p->sys,
573 QBMAN_CENA_SWP_RR(p->mc.valid_bit));
574 /* Remove the valid-bit -
575 * command completed iff the rest is non-zero
577 verb = ret[0] & ~QB_VALID_BIT;
580 p->mc.valid_bit ^= QB_VALID_BIT;
581 #ifdef QBMAN_CHECKING
582 p->mc.check = swp_mc_can_start;
591 #define QB_ENQUEUE_CMD_OPTIONS_SHIFT 0
592 enum qb_enqueue_commands {
594 enqueue_response_always = 1,
595 enqueue_rejects_to_fq = 2
598 #define QB_ENQUEUE_CMD_EC_OPTION_MASK 0x3
599 #define QB_ENQUEUE_CMD_ORP_ENABLE_SHIFT 2
600 #define QB_ENQUEUE_CMD_IRQ_ON_DISPATCH_SHIFT 3
601 #define QB_ENQUEUE_CMD_TARGET_TYPE_SHIFT 4
602 #define QB_ENQUEUE_CMD_DCA_PK_SHIFT 6
603 #define QB_ENQUEUE_CMD_DCA_EN_SHIFT 7
604 #define QB_ENQUEUE_CMD_NLIS_SHIFT 14
605 #define QB_ENQUEUE_CMD_IS_NESN_SHIFT 15
607 void qbman_eq_desc_clear(struct qbman_eq_desc *d)
609 memset(d, 0, sizeof(*d));
/*
 * Configure a non-order-restoration enqueue: ORP disabled, response mode
 * "always" when respond_success is set, otherwise "rejects to FQ"
 * (the if/else selecting between the two is on elided lines).
 */
612 void qbman_eq_desc_set_no_orp(struct qbman_eq_desc *d, int respond_success)
614 d->eq.verb &= ~(1 << QB_ENQUEUE_CMD_ORP_ENABLE_SHIFT);
616 d->eq.verb |= enqueue_response_always;
618 d->eq.verb |= enqueue_rejects_to_fq;
/*
 * Configure an order-restoration enqueue: ORP enabled with the given
 * ORP id and sequence number; 'incomplete' controls the NLIS bit in the
 * seqnum field (set on elided condition, cleared otherwise).
 */
621 void qbman_eq_desc_set_orp(struct qbman_eq_desc *d, int respond_success,
622 uint16_t opr_id, uint16_t seqnum, int incomplete)
624 d->eq.verb |= 1 << QB_ENQUEUE_CMD_ORP_ENABLE_SHIFT;
626 d->eq.verb |= enqueue_response_always;
628 d->eq.verb |= enqueue_rejects_to_fq;
630 d->eq.orpid = opr_id;
631 d->eq.seqnum = seqnum;
633 d->eq.seqnum |= 1 << QB_ENQUEUE_CMD_NLIS_SHIFT;
635 d->eq.seqnum &= ~(1 << QB_ENQUEUE_CMD_NLIS_SHIFT);
/*
 * Advance an ORP sequence past a "hole" (a sequence number that will
 * never be enqueued): ORP on, no enqueue action, NLIS and NESN cleared.
 */
638 void qbman_eq_desc_set_orp_hole(struct qbman_eq_desc *d, uint16_t opr_id,
641 d->eq.verb |= 1 << QB_ENQUEUE_CMD_ORP_ENABLE_SHIFT;
642 d->eq.verb &= ~QB_ENQUEUE_CMD_EC_OPTION_MASK;
643 d->eq.orpid = opr_id;
644 d->eq.seqnum = seqnum;
645 d->eq.seqnum &= ~(1 << QB_ENQUEUE_CMD_NLIS_SHIFT);
646 d->eq.seqnum &= ~(1 << QB_ENQUEUE_CMD_IS_NESN_SHIFT);
/*
 * Advance an ORP sequence with NESN (Next Expected Sequence Number)
 * semantics: ORP on, no enqueue action, NLIS cleared, NESN set.
 */
649 void qbman_eq_desc_set_orp_nesn(struct qbman_eq_desc *d, uint16_t opr_id,
652 d->eq.verb |= 1 << QB_ENQUEUE_CMD_ORP_ENABLE_SHIFT;
653 d->eq.verb &= ~QB_ENQUEUE_CMD_EC_OPTION_MASK;
654 d->eq.orpid = opr_id;
655 d->eq.seqnum = seqnum;
656 d->eq.seqnum &= ~(1 << QB_ENQUEUE_CMD_NLIS_SHIFT);
657 d->eq.seqnum |= 1 << QB_ENQUEUE_CMD_IS_NESN_SHIFT;
/*
 * Set the physical address where the enqueue response is written
 * (remaining fields of the response setup are on elided lines).
 */
660 void qbman_eq_desc_set_response(struct qbman_eq_desc *d,
661 dma_addr_t storage_phys,
664 d->eq.rsp_addr = storage_phys;
668 void qbman_eq_desc_set_token(struct qbman_eq_desc *d, uint8_t token)
/*
 * Target a frame queue: clear the target-type bit to select FQ mode
 * (the fqid assignment is on an elided line).
 */
673 void qbman_eq_desc_set_fq(struct qbman_eq_desc *d, uint32_t fqid)
675 d->eq.verb &= ~(1 << QB_ENQUEUE_CMD_TARGET_TYPE_SHIFT);
/*
 * Target a queuing destination: set the target-type bit to QD mode and
 * record the bin and priority (the qdid assignment is on an elided line).
 */
679 void qbman_eq_desc_set_qd(struct qbman_eq_desc *d, uint32_t qdid,
680 uint16_t qd_bin, uint8_t qd_prio)
682 d->eq.verb |= 1 << QB_ENQUEUE_CMD_TARGET_TYPE_SHIFT;
684 d->eq.qdbin = qd_bin;
685 d->eq.qpri = qd_prio;
/*
 * Enable/disable "interrupt on dispatch" for this enqueue (the if/else
 * selecting between set and clear is on elided lines).
 */
688 void qbman_eq_desc_set_eqdi(struct qbman_eq_desc *d, int enable)
691 d->eq.verb |= 1 << QB_ENQUEUE_CMD_IRQ_ON_DISPATCH_SHIFT;
693 d->eq.verb &= ~(1 << QB_ENQUEUE_CMD_IRQ_ON_DISPATCH_SHIFT);
/*
 * Configure DCA (discrete consumption acknowledgement): when enabled,
 * record the DQRR index, set/clear the park bit per 'park', and set the
 * DCA-enable bit; otherwise clear DCA-enable (the guarding if/else lines
 * are elided).
 */
696 void qbman_eq_desc_set_dca(struct qbman_eq_desc *d, int enable,
697 uint8_t dqrr_idx, int park)
700 d->eq.dca = dqrr_idx;
702 d->eq.dca |= 1 << QB_ENQUEUE_CMD_DCA_PK_SHIFT;
704 d->eq.dca &= ~(1 << QB_ENQUEUE_CMD_DCA_PK_SHIFT);
705 d->eq.dca |= 1 << QB_ENQUEUE_CMD_DCA_EN_SHIFT;
707 d->eq.dca &= ~(1 << QB_ENQUEUE_CMD_DCA_EN_SHIFT);
711 #define EQAR_IDX(eqar) ((eqar) & 0x1f)
712 #define EQAR_VB(eqar) ((eqar) & 0x80)
713 #define EQAR_SUCCESS(eqar) ((eqar) & 0x100)
/*
 * Ring the per-entry EQCR "array mode real-time" doorbell for slot 'idx'.
 * Low-index slots use EQCR_AM_RT, higher ones EQCR_AM_RT2 (the index
 * comparison and second register's operand lines are elided).
 */
715 static inline void qbman_write_eqcr_am_rt_register(struct qbman_swp *p,
719 qbman_cinh_write(&p->sys, QBMAN_CINH_SWP_EQCR_AM_RT + idx * 4,
722 qbman_cinh_write(&p->sys, QBMAN_CINH_SWP_EQCR_AM_RT2 +
/*
 * Copy 'n' bytes one at a time.  Used for cache-inhibited (cinh) target
 * memory: 'dest' is volatile so the compiler cannot widen, merge, or
 * reorder the single-byte stores the hardware interface requires.
 */
static void memcpy_byte_by_byte(void *to, const void *from, size_t n)
{
	const uint8_t *src = from;
	volatile uint8_t *dest = to;
	size_t i;

	for (i = 0; i < n; i++)
		dest[i] = src[i];
}
/*
 * Array-mode enqueue, direct CENA access: claim a slot via the EQAR
 * register, copy descriptor and frame descriptor into the EQCR entry,
 * then publish by writing the verb byte with the EQAR valid-bit.
 * Returns non-success (elided line) when no EQCR slot is available.
 */
738 static int qbman_swp_enqueue_array_mode_direct(struct qbman_swp *s,
739 const struct qbman_eq_desc *d,
740 const struct qbman_fd *fd)
743 const uint32_t *cl = qb_cl(d);
744 uint32_t eqar = qbman_cinh_read(&s->sys, QBMAN_CINH_SWP_EQAR);
746 pr_debug("EQAR=%08x\n", eqar);
747 if (!EQAR_SUCCESS(eqar))
749 p = qbman_cena_write_start_wo_shadow(&s->sys,
750 QBMAN_CENA_SWP_EQCR(EQAR_IDX(eqar)));
/* Words 1..7 from the descriptor, words 8.. from the FD; word 0 (the
 * verb) is written last so hardware sees a complete entry.
 */
751 memcpy(&p[1], &cl[1], 28);
752 memcpy(&p[8], fd, sizeof(*fd));
754 /* Set the verb byte, have to substitute in the valid-bit */
756 p[0] = cl[0] | EQAR_VB(eqar);
757 qbman_cena_write_complete_wo_shadow(&s->sys,
758 QBMAN_CENA_SWP_EQCR(EQAR_IDX(eqar)));
/*
 * Array-mode enqueue, memory-backed portals (rev >= 5000 fast access):
 * same fill sequence as the direct variant, but publication is via the
 * per-slot EQCR_AM_RT doorbell instead of a CENA write-complete.
 */
761 static int qbman_swp_enqueue_array_mode_mem_back(struct qbman_swp *s,
762 const struct qbman_eq_desc *d,
763 const struct qbman_fd *fd)
766 const uint32_t *cl = qb_cl(d);
767 uint32_t eqar = qbman_cinh_read(&s->sys, QBMAN_CINH_SWP_EQAR);
769 pr_debug("EQAR=%08x\n", eqar);
770 if (!EQAR_SUCCESS(eqar))
772 p = qbman_cena_write_start_wo_shadow(&s->sys,
773 QBMAN_CENA_SWP_EQCR(EQAR_IDX(eqar)));
774 memcpy(&p[1], &cl[1], 28);
775 memcpy(&p[8], fd, sizeof(*fd));
777 /* Set the verb byte, have to substitute in the valid-bit */
778 p[0] = cl[0] | EQAR_VB(eqar);
780 qbman_write_eqcr_am_rt_register(s, EQAR_IDX(eqar));
/*
 * Dispatch array-mode enqueue through the function pointer selected at
 * portal init (direct vs. mem-back).
 */
784 static inline int qbman_swp_enqueue_array_mode(struct qbman_swp *s,
785 const struct qbman_eq_desc *d,
786 const struct qbman_fd *fd)
788 return qbman_swp_enqueue_array_mode_ptr(s, d, fd);
/*
 * Ring-mode enqueue, direct CENA access: if no local credit, refresh the
 * consumer index from hardware to reclaim completed entries; then copy
 * descriptor + FD into the slot at PI, publish with the producer
 * valid-bit, and advance PI (flipping the valid-bit each half-ring wrap).
 * Returns non-success (elided line) when the ring is full.
 */
791 static int qbman_swp_enqueue_ring_mode_direct(struct qbman_swp *s,
792 const struct qbman_eq_desc *d,
793 const struct qbman_fd *fd)
796 const uint32_t *cl = qb_cl(d);
797 uint32_t eqcr_ci, full_mask, half_mask;
/* half_mask indexes the ring; full_mask covers PI/CI wrap detection. */
799 half_mask = (s->eqcr.pi_ci_mask>>1);
800 full_mask = s->eqcr.pi_ci_mask;
801 if (!s->eqcr.available) {
802 eqcr_ci = s->eqcr.ci;
803 s->eqcr.ci = qbman_cena_read_reg(&s->sys,
804 QBMAN_CENA_SWP_EQCR_CI) & full_mask;
805 s->eqcr.available = qm_cyc_diff(s->eqcr.pi_ring_size,
806 eqcr_ci, s->eqcr.ci);
807 if (!s->eqcr.available)
811 p = qbman_cena_write_start_wo_shadow(&s->sys,
812 QBMAN_CENA_SWP_EQCR(s->eqcr.pi & half_mask));
813 memcpy(&p[1], &cl[1], 28);
814 memcpy(&p[8], fd, sizeof(*fd));
817 /* Set the verb byte, have to substitute in the valid-bit */
818 p[0] = cl[0] | s->eqcr.pi_vb;
819 qbman_cena_write_complete_wo_shadow(&s->sys,
820 QBMAN_CENA_SWP_EQCR(s->eqcr.pi & half_mask));
822 s->eqcr.pi &= full_mask;
824 if (!(s->eqcr.pi & half_mask))
825 s->eqcr.pi_vb ^= QB_VALID_BIT;
/*
 * Ring-mode enqueue variant for LS1080A: the consumer index is read
 * through the cache-inhibited interface (CINH) rather than CENA, and the
 * entry is written via the cinh write-start window; otherwise identical
 * to qbman_swp_enqueue_ring_mode_direct().
 */
830 static int qbman_swp_enqueue_ring_mode_cinh_read_direct(
832 const struct qbman_eq_desc *d,
833 const struct qbman_fd *fd)
836 const uint32_t *cl = qb_cl(d);
837 uint32_t eqcr_ci, full_mask, half_mask;
839 half_mask = (s->eqcr.pi_ci_mask>>1);
840 full_mask = s->eqcr.pi_ci_mask;
841 if (!s->eqcr.available) {
842 eqcr_ci = s->eqcr.ci;
843 s->eqcr.ci = qbman_cinh_read(&s->sys,
844 QBMAN_CINH_SWP_EQCR_CI) & full_mask;
845 s->eqcr.available = qm_cyc_diff(s->eqcr.pi_ring_size,
846 eqcr_ci, s->eqcr.ci);
847 if (!s->eqcr.available)
851 p = qbman_cinh_write_start_wo_shadow(&s->sys,
852 QBMAN_CENA_SWP_EQCR(s->eqcr.pi & half_mask));
853 memcpy(&p[1], &cl[1], 28);
854 memcpy(&p[8], fd, sizeof(*fd));
857 /* Set the verb byte, have to substitute in the valid-bit */
858 p[0] = cl[0] | s->eqcr.pi_vb;
860 s->eqcr.pi &= full_mask;
862 if (!(s->eqcr.pi & half_mask))
863 s->eqcr.pi_vb ^= QB_VALID_BIT;
/*
 * Fully cache-inhibited ring-mode enqueue: both the CI read and the entry
 * write go through CINH, and the payload is copied byte-by-byte
 * (memcpy_byte_by_byte) because wide stores are not safe on this mapping.
 */
868 static int qbman_swp_enqueue_ring_mode_cinh_direct(
870 const struct qbman_eq_desc *d,
871 const struct qbman_fd *fd)
874 const uint32_t *cl = qb_cl(d);
875 uint32_t eqcr_ci, full_mask, half_mask;
877 half_mask = (s->eqcr.pi_ci_mask>>1);
878 full_mask = s->eqcr.pi_ci_mask;
879 if (!s->eqcr.available) {
880 eqcr_ci = s->eqcr.ci;
881 s->eqcr.ci = qbman_cinh_read(&s->sys,
882 QBMAN_CINH_SWP_EQCR_CI) & full_mask;
883 s->eqcr.available = qm_cyc_diff(s->eqcr.pi_ring_size,
884 eqcr_ci, s->eqcr.ci);
885 if (!s->eqcr.available)
889 p = qbman_cinh_write_start_wo_shadow(&s->sys,
890 QBMAN_CENA_SWP_EQCR(s->eqcr.pi & half_mask));
891 memcpy_byte_by_byte(&p[1], &cl[1], 28);
892 memcpy_byte_by_byte(&p[8], fd, sizeof(*fd));
895 /* Set the verb byte, have to substitute in the valid-bit */
896 p[0] = cl[0] | s->eqcr.pi_vb;
898 s->eqcr.pi &= full_mask;
900 if (!(s->eqcr.pi & half_mask))
901 s->eqcr.pi_vb ^= QB_VALID_BIT;
/*
 * Ring-mode enqueue for memory-backed portals: CI comes from the
 * EQCR_CI_MEMBACK register, and publication is an explicit EQCR_PI
 * doorbell write carrying the RT bit, new PI, and valid-bit.
 */
906 static int qbman_swp_enqueue_ring_mode_mem_back(struct qbman_swp *s,
907 const struct qbman_eq_desc *d,
908 const struct qbman_fd *fd)
911 const uint32_t *cl = qb_cl(d);
912 uint32_t eqcr_ci, full_mask, half_mask;
914 half_mask = (s->eqcr.pi_ci_mask>>1);
915 full_mask = s->eqcr.pi_ci_mask;
916 if (!s->eqcr.available) {
917 eqcr_ci = s->eqcr.ci;
918 s->eqcr.ci = qbman_cena_read_reg(&s->sys,
919 QBMAN_CENA_SWP_EQCR_CI_MEMBACK) & full_mask;
920 s->eqcr.available = qm_cyc_diff(s->eqcr.pi_ring_size,
921 eqcr_ci, s->eqcr.ci);
922 if (!s->eqcr.available)
926 p = qbman_cena_write_start_wo_shadow(&s->sys,
927 QBMAN_CENA_SWP_EQCR(s->eqcr.pi & half_mask));
928 memcpy(&p[1], &cl[1], 28);
929 memcpy(&p[8], fd, sizeof(*fd));
931 /* Set the verb byte, have to substitute in the valid-bit */
932 p[0] = cl[0] | s->eqcr.pi_vb;
934 s->eqcr.pi &= full_mask;
936 if (!(s->eqcr.pi & half_mask))
937 s->eqcr.pi_vb ^= QB_VALID_BIT;
/* Doorbell: notify hardware of the new producer index. */
939 qbman_cinh_write(&s->sys, QBMAN_CINH_SWP_EQCR_PI,
940 (QB_RT_BIT)|(s->eqcr.pi)|s->eqcr.pi_vb);
/*
 * Dispatch ring-mode enqueue: normally through the function pointer set
 * at init; the alternate (elided) condition routes to the fully
 * cache-inhibited variant — presumably when stashing is off.
 */
944 static int qbman_swp_enqueue_ring_mode(struct qbman_swp *s,
945 const struct qbman_eq_desc *d,
946 const struct qbman_fd *fd)
949 return qbman_swp_enqueue_ring_mode_ptr(s, d, fd);
951 return qbman_swp_enqueue_ring_mode_cinh_direct(s, d, fd);
/*
 * qbman_swp_enqueue() - public single-frame enqueue entry point.
 * Selects array mode when the portal's EQCR is configured for
 * valid-bit-array operation, otherwise ring mode.
 */
954 int qbman_swp_enqueue(struct qbman_swp *s, const struct qbman_eq_desc *d,
955 const struct qbman_fd *fd)
957 if (s->sys.eqcr_mode == qman_eqcr_vb_array)
958 return qbman_swp_enqueue_array_mode(s, d, fd);
959 else /* Use ring mode by default */
960 return qbman_swp_enqueue_ring_mode(s, d, fd);
/*
 * Batched enqueue, direct CENA access.  Three passes over the claimed
 * slots: (1) copy descriptor + per-frame FD into each entry, (2) write
 * the verb bytes with valid-bits and optional per-frame DCA, (3) flush
 * the cachelines.  Returns the number of frames actually enqueued, which
 * may be less than num_frames when the ring lacks space.
 */
963 static int qbman_swp_enqueue_multiple_direct(struct qbman_swp *s,
964 const struct qbman_eq_desc *d,
965 const struct qbman_fd *fd,
970 const uint32_t *cl = qb_cl(d);
971 uint32_t eqcr_ci, eqcr_pi, half_mask, full_mask;
972 int i, num_enqueued = 0;
975 half_mask = (s->eqcr.pi_ci_mask>>1);
976 full_mask = s->eqcr.pi_ci_mask;
/* Out of local credit: refresh CI from hardware to reclaim entries. */
977 if (!s->eqcr.available) {
978 eqcr_ci = s->eqcr.ci;
979 s->eqcr.ci = qbman_cena_read_reg(&s->sys,
980 QBMAN_CENA_SWP_EQCR_CI) & full_mask;
981 s->eqcr.available = qm_cyc_diff(s->eqcr.pi_ring_size,
982 eqcr_ci, s->eqcr.ci);
983 if (!s->eqcr.available)
987 eqcr_pi = s->eqcr.pi;
988 num_enqueued = (s->eqcr.available < num_frames) ?
989 s->eqcr.available : num_frames;
990 s->eqcr.available -= num_enqueued;
991 /* Fill in the EQCR ring */
992 for (i = 0; i < num_enqueued; i++) {
993 p = qbman_cena_write_start_wo_shadow(&s->sys,
994 QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
995 memcpy(&p[1], &cl[1], 28);
996 memcpy(&p[8], &fd[i], sizeof(*fd));
1002 /* Set the verb byte, have to substitute in the valid-bit */
1003 eqcr_pi = s->eqcr.pi;
1004 for (i = 0; i < num_enqueued; i++) {
1005 p = qbman_cena_write_start_wo_shadow(&s->sys,
1006 QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
1007 p[0] = cl[0] | s->eqcr.pi_vb;
1008 if (flags && (flags[i] & QBMAN_ENQUEUE_FLAG_DCA)) {
1009 struct qbman_eq_desc *d = (struct qbman_eq_desc *)p;
1011 d->eq.dca = (1 << QB_ENQUEUE_CMD_DCA_EN_SHIFT) |
1012 ((flags[i]) & QBMAN_EQCR_DCA_IDXMASK);
1015 if (!(eqcr_pi & half_mask))
1016 s->eqcr.pi_vb ^= QB_VALID_BIT;
1019 /* Flush all the cacheline without load/store in between */
1020 eqcr_pi = s->eqcr.pi;
1021 addr_cena = (size_t)s->sys.addr_cena;
1022 for (i = 0; i < num_enqueued; i++) {
1023 dcbf((uintptr_t)(addr_cena +
1024 QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask)));
1027 s->eqcr.pi = eqcr_pi & full_mask;
1029 return num_enqueued;
/*
 * Batched enqueue for LS1080A: identical structure to the direct variant
 * but the consumer index is read through the cache-inhibited interface.
 * Returns the number of frames enqueued (<= num_frames).
 */
1032 static int qbman_swp_enqueue_multiple_cinh_read_direct(
1033 struct qbman_swp *s,
1034 const struct qbman_eq_desc *d,
1035 const struct qbman_fd *fd,
1040 const uint32_t *cl = qb_cl(d);
1041 uint32_t eqcr_ci, eqcr_pi, half_mask, full_mask;
1042 int i, num_enqueued = 0;
1045 half_mask = (s->eqcr.pi_ci_mask>>1);
1046 full_mask = s->eqcr.pi_ci_mask;
1047 if (!s->eqcr.available) {
1048 eqcr_ci = s->eqcr.ci;
1049 s->eqcr.ci = qbman_cinh_read(&s->sys,
1050 QBMAN_CINH_SWP_EQCR_CI) & full_mask;
1051 s->eqcr.available = qm_cyc_diff(s->eqcr.pi_ring_size,
1052 eqcr_ci, s->eqcr.ci);
1053 if (!s->eqcr.available)
1057 eqcr_pi = s->eqcr.pi;
1058 num_enqueued = (s->eqcr.available < num_frames) ?
1059 s->eqcr.available : num_frames;
1060 s->eqcr.available -= num_enqueued;
1061 /* Fill in the EQCR ring */
1062 for (i = 0; i < num_enqueued; i++) {
1063 p = qbman_cena_write_start_wo_shadow(&s->sys,
1064 QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
1065 memcpy(&p[1], &cl[1], 28);
1066 memcpy(&p[8], &fd[i], sizeof(*fd));
1072 /* Set the verb byte, have to substitute in the valid-bit */
1073 eqcr_pi = s->eqcr.pi;
1074 for (i = 0; i < num_enqueued; i++) {
1075 p = qbman_cena_write_start_wo_shadow(&s->sys,
1076 QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
1077 p[0] = cl[0] | s->eqcr.pi_vb;
1078 if (flags && (flags[i] & QBMAN_ENQUEUE_FLAG_DCA)) {
1079 struct qbman_eq_desc *d = (struct qbman_eq_desc *)p;
1081 d->eq.dca = (1 << QB_ENQUEUE_CMD_DCA_EN_SHIFT) |
1082 ((flags[i]) & QBMAN_EQCR_DCA_IDXMASK);
1085 if (!(eqcr_pi & half_mask))
1086 s->eqcr.pi_vb ^= QB_VALID_BIT;
1089 /* Flush all the cacheline without load/store in between */
1090 eqcr_pi = s->eqcr.pi;
1091 addr_cena = (size_t)s->sys.addr_cena;
1092 for (i = 0; i < num_enqueued; i++) {
1094 QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
1097 s->eqcr.pi = eqcr_pi & full_mask;
1099 return num_enqueued;
/*
 * Fully cache-inhibited batched enqueue: CI read, entry writes, and the
 * payload copies all go through CINH, the latter byte-by-byte
 * (memcpy_byte_by_byte).  No cacheline flush pass is needed on this
 * mapping.  Returns the number of frames enqueued (<= num_frames).
 */
1102 static int qbman_swp_enqueue_multiple_cinh_direct(
1103 struct qbman_swp *s,
1104 const struct qbman_eq_desc *d,
1105 const struct qbman_fd *fd,
1110 const uint32_t *cl = qb_cl(d);
1111 uint32_t eqcr_ci, eqcr_pi, half_mask, full_mask;
1112 int i, num_enqueued = 0;
1114 half_mask = (s->eqcr.pi_ci_mask>>1);
1115 full_mask = s->eqcr.pi_ci_mask;
1116 if (!s->eqcr.available) {
1117 eqcr_ci = s->eqcr.ci;
1118 s->eqcr.ci = qbman_cinh_read(&s->sys,
1119 QBMAN_CINH_SWP_EQCR_CI) & full_mask;
1120 s->eqcr.available = qm_cyc_diff(s->eqcr.pi_ring_size,
1121 eqcr_ci, s->eqcr.ci);
1122 if (!s->eqcr.available)
1126 eqcr_pi = s->eqcr.pi;
1127 num_enqueued = (s->eqcr.available < num_frames) ?
1128 s->eqcr.available : num_frames;
1129 s->eqcr.available -= num_enqueued;
1130 /* Fill in the EQCR ring */
1131 for (i = 0; i < num_enqueued; i++) {
1132 p = qbman_cinh_write_start_wo_shadow(&s->sys,
1133 QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
1134 memcpy_byte_by_byte(&p[1], &cl[1], 28);
1135 memcpy_byte_by_byte(&p[8], &fd[i], sizeof(*fd));
1141 /* Set the verb byte, have to substitute in the valid-bit */
1142 eqcr_pi = s->eqcr.pi;
1143 for (i = 0; i < num_enqueued; i++) {
1144 p = qbman_cinh_write_start_wo_shadow(&s->sys,
1145 QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
1146 p[0] = cl[0] | s->eqcr.pi_vb;
1147 if (flags && (flags[i] & QBMAN_ENQUEUE_FLAG_DCA)) {
1148 struct qbman_eq_desc *d = (struct qbman_eq_desc *)p;
1150 d->eq.dca = (1 << QB_ENQUEUE_CMD_DCA_EN_SHIFT) |
1151 ((flags[i]) & QBMAN_EQCR_DCA_IDXMASK);
1154 if (!(eqcr_pi & half_mask))
1155 s->eqcr.pi_vb ^= QB_VALID_BIT;
1158 s->eqcr.pi = eqcr_pi & full_mask;
1160 return num_enqueued;
/*
 * Batched enqueue for memory-backed portals: CI comes from
 * EQCR_CI_MEMBACK, the verb is written in the same pass as the payload,
 * and the batch is published with a single EQCR_PI doorbell (RT bit +
 * new PI + valid-bit).  Returns the number of frames enqueued.
 */
1163 static int qbman_swp_enqueue_multiple_mem_back(struct qbman_swp *s,
1164 const struct qbman_eq_desc *d,
1165 const struct qbman_fd *fd,
1170 const uint32_t *cl = qb_cl(d);
1171 uint32_t eqcr_ci, eqcr_pi, half_mask, full_mask;
1172 int i, num_enqueued = 0;
1174 half_mask = (s->eqcr.pi_ci_mask>>1);
1175 full_mask = s->eqcr.pi_ci_mask;
1176 if (!s->eqcr.available) {
1177 eqcr_ci = s->eqcr.ci;
1178 s->eqcr.ci = qbman_cena_read_reg(&s->sys,
1179 QBMAN_CENA_SWP_EQCR_CI_MEMBACK) & full_mask;
1180 s->eqcr.available = qm_cyc_diff(s->eqcr.pi_ring_size,
1181 eqcr_ci, s->eqcr.ci);
1182 if (!s->eqcr.available)
1186 eqcr_pi = s->eqcr.pi;
1187 num_enqueued = (s->eqcr.available < num_frames) ?
1188 s->eqcr.available : num_frames;
1189 s->eqcr.available -= num_enqueued;
1190 /* Fill in the EQCR ring */
1191 for (i = 0; i < num_enqueued; i++) {
1192 p = qbman_cena_write_start_wo_shadow(&s->sys,
1193 QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
1194 memcpy(&p[1], &cl[1], 28);
1195 memcpy(&p[8], &fd[i], sizeof(*fd));
1196 p[0] = cl[0] | s->eqcr.pi_vb;
1198 if (flags && (flags[i] & QBMAN_ENQUEUE_FLAG_DCA)) {
1199 struct qbman_eq_desc *d = (struct qbman_eq_desc *)p;
1201 d->eq.dca = (1 << QB_ENQUEUE_CMD_DCA_EN_SHIFT) |
1202 ((flags[i]) & QBMAN_EQCR_DCA_IDXMASK);
1206 if (!(eqcr_pi & half_mask))
1207 s->eqcr.pi_vb ^= QB_VALID_BIT;
1209 s->eqcr.pi = eqcr_pi & full_mask;
/* Single doorbell for the whole batch. */
1212 qbman_cinh_write(&s->sys, QBMAN_CINH_SWP_EQCR_PI,
1213 (QB_RT_BIT)|(s->eqcr.pi)|s->eqcr.pi_vb);
1214 return num_enqueued;
1217 int qbman_swp_enqueue_multiple(struct qbman_swp *s,
1218 const struct qbman_eq_desc *d,
1219 const struct qbman_fd *fd,
1224 return qbman_swp_enqueue_multiple_ptr(s, d, fd, flags,
1227 return qbman_swp_enqueue_multiple_cinh_direct(s, d, fd, flags,
1231 static int qbman_swp_enqueue_multiple_fd_direct(struct qbman_swp *s,
1232 const struct qbman_eq_desc *d,
1233 struct qbman_fd **fd,
1238 const uint32_t *cl = qb_cl(d);
1239 uint32_t eqcr_ci, eqcr_pi, half_mask, full_mask;
1240 int i, num_enqueued = 0;
1243 half_mask = (s->eqcr.pi_ci_mask>>1);
1244 full_mask = s->eqcr.pi_ci_mask;
1245 if (!s->eqcr.available) {
1246 eqcr_ci = s->eqcr.ci;
1247 s->eqcr.ci = qbman_cena_read_reg(&s->sys,
1248 QBMAN_CENA_SWP_EQCR_CI) & full_mask;
1249 s->eqcr.available = qm_cyc_diff(s->eqcr.pi_ring_size,
1250 eqcr_ci, s->eqcr.ci);
1251 if (!s->eqcr.available)
1255 eqcr_pi = s->eqcr.pi;
1256 num_enqueued = (s->eqcr.available < num_frames) ?
1257 s->eqcr.available : num_frames;
1258 s->eqcr.available -= num_enqueued;
1259 /* Fill in the EQCR ring */
1260 for (i = 0; i < num_enqueued; i++) {
1261 p = qbman_cena_write_start_wo_shadow(&s->sys,
1262 QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
1263 memcpy(&p[1], &cl[1], 28);
1264 memcpy(&p[8], fd[i], sizeof(struct qbman_fd));
1270 /* Set the verb byte, have to substitute in the valid-bit */
1271 eqcr_pi = s->eqcr.pi;
1272 for (i = 0; i < num_enqueued; i++) {
1273 p = qbman_cena_write_start_wo_shadow(&s->sys,
1274 QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
1275 p[0] = cl[0] | s->eqcr.pi_vb;
1276 if (flags && (flags[i] & QBMAN_ENQUEUE_FLAG_DCA)) {
1277 struct qbman_eq_desc *d = (struct qbman_eq_desc *)p;
1279 d->eq.dca = (1 << QB_ENQUEUE_CMD_DCA_EN_SHIFT) |
1280 ((flags[i]) & QBMAN_EQCR_DCA_IDXMASK);
1283 if (!(eqcr_pi & half_mask))
1284 s->eqcr.pi_vb ^= QB_VALID_BIT;
1287 /* Flush all the cacheline without load/store in between */
1288 eqcr_pi = s->eqcr.pi;
1289 addr_cena = (size_t)s->sys.addr_cena;
1290 for (i = 0; i < num_enqueued; i++) {
1292 QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
1295 s->eqcr.pi = eqcr_pi & full_mask;
1297 return num_enqueued;
1300 static int qbman_swp_enqueue_multiple_fd_cinh_read_direct(
1301 struct qbman_swp *s,
1302 const struct qbman_eq_desc *d,
1303 struct qbman_fd **fd,
1308 const uint32_t *cl = qb_cl(d);
1309 uint32_t eqcr_ci, eqcr_pi, half_mask, full_mask;
1310 int i, num_enqueued = 0;
1313 half_mask = (s->eqcr.pi_ci_mask>>1);
1314 full_mask = s->eqcr.pi_ci_mask;
1315 if (!s->eqcr.available) {
1316 eqcr_ci = s->eqcr.ci;
1317 s->eqcr.ci = qbman_cinh_read(&s->sys,
1318 QBMAN_CINH_SWP_EQCR_CI) & full_mask;
1319 s->eqcr.available = qm_cyc_diff(s->eqcr.pi_ring_size,
1320 eqcr_ci, s->eqcr.ci);
1321 if (!s->eqcr.available)
1325 eqcr_pi = s->eqcr.pi;
1326 num_enqueued = (s->eqcr.available < num_frames) ?
1327 s->eqcr.available : num_frames;
1328 s->eqcr.available -= num_enqueued;
1329 /* Fill in the EQCR ring */
1330 for (i = 0; i < num_enqueued; i++) {
1331 p = qbman_cena_write_start_wo_shadow(&s->sys,
1332 QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
1333 memcpy(&p[1], &cl[1], 28);
1334 memcpy(&p[8], fd[i], sizeof(struct qbman_fd));
1340 /* Set the verb byte, have to substitute in the valid-bit */
1341 eqcr_pi = s->eqcr.pi;
1342 for (i = 0; i < num_enqueued; i++) {
1343 p = qbman_cena_write_start_wo_shadow(&s->sys,
1344 QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
1345 p[0] = cl[0] | s->eqcr.pi_vb;
1346 if (flags && (flags[i] & QBMAN_ENQUEUE_FLAG_DCA)) {
1347 struct qbman_eq_desc *d = (struct qbman_eq_desc *)p;
1349 d->eq.dca = (1 << QB_ENQUEUE_CMD_DCA_EN_SHIFT) |
1350 ((flags[i]) & QBMAN_EQCR_DCA_IDXMASK);
1353 if (!(eqcr_pi & half_mask))
1354 s->eqcr.pi_vb ^= QB_VALID_BIT;
1357 /* Flush all the cacheline without load/store in between */
1358 eqcr_pi = s->eqcr.pi;
1359 addr_cena = (size_t)s->sys.addr_cena;
1360 for (i = 0; i < num_enqueued; i++) {
1362 QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
1365 s->eqcr.pi = eqcr_pi & full_mask;
1367 return num_enqueued;
1370 static int qbman_swp_enqueue_multiple_fd_cinh_direct(
1371 struct qbman_swp *s,
1372 const struct qbman_eq_desc *d,
1373 struct qbman_fd **fd,
1378 const uint32_t *cl = qb_cl(d);
1379 uint32_t eqcr_ci, eqcr_pi, half_mask, full_mask;
1380 int i, num_enqueued = 0;
1382 half_mask = (s->eqcr.pi_ci_mask>>1);
1383 full_mask = s->eqcr.pi_ci_mask;
1384 if (!s->eqcr.available) {
1385 eqcr_ci = s->eqcr.ci;
1386 s->eqcr.ci = qbman_cinh_read(&s->sys,
1387 QBMAN_CINH_SWP_EQCR_CI) & full_mask;
1388 s->eqcr.available = qm_cyc_diff(s->eqcr.pi_ring_size,
1389 eqcr_ci, s->eqcr.ci);
1390 if (!s->eqcr.available)
1394 eqcr_pi = s->eqcr.pi;
1395 num_enqueued = (s->eqcr.available < num_frames) ?
1396 s->eqcr.available : num_frames;
1397 s->eqcr.available -= num_enqueued;
1398 /* Fill in the EQCR ring */
1399 for (i = 0; i < num_enqueued; i++) {
1400 p = qbman_cinh_write_start_wo_shadow(&s->sys,
1401 QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
1402 memcpy_byte_by_byte(&p[1], &cl[1], 28);
1403 memcpy_byte_by_byte(&p[8], fd[i], sizeof(struct qbman_fd));
1409 /* Set the verb byte, have to substitute in the valid-bit */
1410 eqcr_pi = s->eqcr.pi;
1411 for (i = 0; i < num_enqueued; i++) {
1412 p = qbman_cinh_write_start_wo_shadow(&s->sys,
1413 QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
1414 p[0] = cl[0] | s->eqcr.pi_vb;
1415 if (flags && (flags[i] & QBMAN_ENQUEUE_FLAG_DCA)) {
1416 struct qbman_eq_desc *d = (struct qbman_eq_desc *)p;
1418 d->eq.dca = (1 << QB_ENQUEUE_CMD_DCA_EN_SHIFT) |
1419 ((flags[i]) & QBMAN_EQCR_DCA_IDXMASK);
1422 if (!(eqcr_pi & half_mask))
1423 s->eqcr.pi_vb ^= QB_VALID_BIT;
1426 s->eqcr.pi = eqcr_pi & full_mask;
1428 return num_enqueued;
1431 static int qbman_swp_enqueue_multiple_fd_mem_back(struct qbman_swp *s,
1432 const struct qbman_eq_desc *d,
1433 struct qbman_fd **fd,
1438 const uint32_t *cl = qb_cl(d);
1439 uint32_t eqcr_ci, eqcr_pi, half_mask, full_mask;
1440 int i, num_enqueued = 0;
1442 half_mask = (s->eqcr.pi_ci_mask>>1);
1443 full_mask = s->eqcr.pi_ci_mask;
1444 if (!s->eqcr.available) {
1445 eqcr_ci = s->eqcr.ci;
1446 s->eqcr.ci = qbman_cena_read_reg(&s->sys,
1447 QBMAN_CENA_SWP_EQCR_CI_MEMBACK) & full_mask;
1448 s->eqcr.available = qm_cyc_diff(s->eqcr.pi_ring_size,
1449 eqcr_ci, s->eqcr.ci);
1450 if (!s->eqcr.available)
1454 eqcr_pi = s->eqcr.pi;
1455 num_enqueued = (s->eqcr.available < num_frames) ?
1456 s->eqcr.available : num_frames;
1457 s->eqcr.available -= num_enqueued;
1458 /* Fill in the EQCR ring */
1459 for (i = 0; i < num_enqueued; i++) {
1460 p = qbman_cena_write_start_wo_shadow(&s->sys,
1461 QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
1462 memcpy(&p[1], &cl[1], 28);
1463 memcpy(&p[8], fd[i], sizeof(struct qbman_fd));
1467 /* Set the verb byte, have to substitute in the valid-bit */
1468 eqcr_pi = s->eqcr.pi;
1469 for (i = 0; i < num_enqueued; i++) {
1470 p = qbman_cena_write_start_wo_shadow(&s->sys,
1471 QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
1472 p[0] = cl[0] | s->eqcr.pi_vb;
1473 if (flags && (flags[i] & QBMAN_ENQUEUE_FLAG_DCA)) {
1474 struct qbman_eq_desc *d = (struct qbman_eq_desc *)p;
1476 d->eq.dca = (1 << QB_ENQUEUE_CMD_DCA_EN_SHIFT) |
1477 ((flags[i]) & QBMAN_EQCR_DCA_IDXMASK);
1480 if (!(eqcr_pi & half_mask))
1481 s->eqcr.pi_vb ^= QB_VALID_BIT;
1483 s->eqcr.pi = eqcr_pi & full_mask;
1486 qbman_cinh_write(&s->sys, QBMAN_CINH_SWP_EQCR_PI,
1487 (QB_RT_BIT)|(s->eqcr.pi)|s->eqcr.pi_vb);
1488 return num_enqueued;
1491 int qbman_swp_enqueue_multiple_fd(struct qbman_swp *s,
1492 const struct qbman_eq_desc *d,
1493 struct qbman_fd **fd,
1498 return qbman_swp_enqueue_multiple_fd_ptr(s, d, fd, flags,
1501 return qbman_swp_enqueue_multiple_fd_cinh_direct(s, d, fd,
1505 static int qbman_swp_enqueue_multiple_desc_direct(struct qbman_swp *s,
1506 const struct qbman_eq_desc *d,
1507 const struct qbman_fd *fd,
1512 uint32_t eqcr_ci, eqcr_pi, half_mask, full_mask;
1513 int i, num_enqueued = 0;
1516 half_mask = (s->eqcr.pi_ci_mask>>1);
1517 full_mask = s->eqcr.pi_ci_mask;
1518 if (!s->eqcr.available) {
1519 eqcr_ci = s->eqcr.ci;
1520 s->eqcr.ci = qbman_cena_read_reg(&s->sys,
1521 QBMAN_CENA_SWP_EQCR_CI) & full_mask;
1522 s->eqcr.available = qm_cyc_diff(s->eqcr.pi_ring_size,
1523 eqcr_ci, s->eqcr.ci);
1524 if (!s->eqcr.available)
1528 eqcr_pi = s->eqcr.pi;
1529 num_enqueued = (s->eqcr.available < num_frames) ?
1530 s->eqcr.available : num_frames;
1531 s->eqcr.available -= num_enqueued;
1532 /* Fill in the EQCR ring */
1533 for (i = 0; i < num_enqueued; i++) {
1534 p = qbman_cena_write_start_wo_shadow(&s->sys,
1535 QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
1537 memcpy(&p[1], &cl[1], 28);
1538 memcpy(&p[8], &fd[i], sizeof(*fd));
1544 /* Set the verb byte, have to substitute in the valid-bit */
1545 eqcr_pi = s->eqcr.pi;
1546 for (i = 0; i < num_enqueued; i++) {
1547 p = qbman_cena_write_start_wo_shadow(&s->sys,
1548 QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
1550 p[0] = cl[0] | s->eqcr.pi_vb;
1552 if (!(eqcr_pi & half_mask))
1553 s->eqcr.pi_vb ^= QB_VALID_BIT;
1556 /* Flush all the cacheline without load/store in between */
1557 eqcr_pi = s->eqcr.pi;
1558 addr_cena = (size_t)s->sys.addr_cena;
1559 for (i = 0; i < num_enqueued; i++) {
1560 dcbf((uintptr_t)(addr_cena +
1561 QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask)));
1564 s->eqcr.pi = eqcr_pi & full_mask;
1566 return num_enqueued;
1569 static int qbman_swp_enqueue_multiple_desc_cinh_read_direct(
1570 struct qbman_swp *s,
1571 const struct qbman_eq_desc *d,
1572 const struct qbman_fd *fd,
1577 uint32_t eqcr_ci, eqcr_pi, half_mask, full_mask;
1578 int i, num_enqueued = 0;
1581 half_mask = (s->eqcr.pi_ci_mask>>1);
1582 full_mask = s->eqcr.pi_ci_mask;
1583 if (!s->eqcr.available) {
1584 eqcr_ci = s->eqcr.ci;
1585 s->eqcr.ci = qbman_cinh_read(&s->sys,
1586 QBMAN_CINH_SWP_EQCR_CI) & full_mask;
1587 s->eqcr.available = qm_cyc_diff(s->eqcr.pi_ring_size,
1588 eqcr_ci, s->eqcr.ci);
1589 if (!s->eqcr.available)
1593 eqcr_pi = s->eqcr.pi;
1594 num_enqueued = (s->eqcr.available < num_frames) ?
1595 s->eqcr.available : num_frames;
1596 s->eqcr.available -= num_enqueued;
1597 /* Fill in the EQCR ring */
1598 for (i = 0; i < num_enqueued; i++) {
1599 p = qbman_cena_write_start_wo_shadow(&s->sys,
1600 QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
1602 memcpy(&p[1], &cl[1], 28);
1603 memcpy(&p[8], &fd[i], sizeof(*fd));
1609 /* Set the verb byte, have to substitute in the valid-bit */
1610 eqcr_pi = s->eqcr.pi;
1611 for (i = 0; i < num_enqueued; i++) {
1612 p = qbman_cena_write_start_wo_shadow(&s->sys,
1613 QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
1615 p[0] = cl[0] | s->eqcr.pi_vb;
1617 if (!(eqcr_pi & half_mask))
1618 s->eqcr.pi_vb ^= QB_VALID_BIT;
1621 /* Flush all the cacheline without load/store in between */
1622 eqcr_pi = s->eqcr.pi;
1623 addr_cena = (size_t)s->sys.addr_cena;
1624 for (i = 0; i < num_enqueued; i++) {
1626 QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
1629 s->eqcr.pi = eqcr_pi & full_mask;
1631 return num_enqueued;
1634 static int qbman_swp_enqueue_multiple_desc_cinh_direct(
1635 struct qbman_swp *s,
1636 const struct qbman_eq_desc *d,
1637 const struct qbman_fd *fd,
1642 uint32_t eqcr_ci, eqcr_pi, half_mask, full_mask;
1643 int i, num_enqueued = 0;
1645 half_mask = (s->eqcr.pi_ci_mask>>1);
1646 full_mask = s->eqcr.pi_ci_mask;
1647 if (!s->eqcr.available) {
1648 eqcr_ci = s->eqcr.ci;
1649 s->eqcr.ci = qbman_cinh_read(&s->sys,
1650 QBMAN_CINH_SWP_EQCR_CI) & full_mask;
1651 s->eqcr.available = qm_cyc_diff(s->eqcr.pi_ring_size,
1652 eqcr_ci, s->eqcr.ci);
1653 if (!s->eqcr.available)
1657 eqcr_pi = s->eqcr.pi;
1658 num_enqueued = (s->eqcr.available < num_frames) ?
1659 s->eqcr.available : num_frames;
1660 s->eqcr.available -= num_enqueued;
1661 /* Fill in the EQCR ring */
1662 for (i = 0; i < num_enqueued; i++) {
1663 p = qbman_cinh_write_start_wo_shadow(&s->sys,
1664 QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
1666 memcpy_byte_by_byte(&p[1], &cl[1], 28);
1667 memcpy_byte_by_byte(&p[8], &fd[i], sizeof(*fd));
1673 /* Set the verb byte, have to substitute in the valid-bit */
1674 eqcr_pi = s->eqcr.pi;
1675 for (i = 0; i < num_enqueued; i++) {
1676 p = qbman_cinh_write_start_wo_shadow(&s->sys,
1677 QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
1679 p[0] = cl[0] | s->eqcr.pi_vb;
1681 if (!(eqcr_pi & half_mask))
1682 s->eqcr.pi_vb ^= QB_VALID_BIT;
1685 s->eqcr.pi = eqcr_pi & full_mask;
1687 return num_enqueued;
1690 static int qbman_swp_enqueue_multiple_desc_mem_back(struct qbman_swp *s,
1691 const struct qbman_eq_desc *d,
1692 const struct qbman_fd *fd,
1697 uint32_t eqcr_ci, eqcr_pi, half_mask, full_mask;
1698 int i, num_enqueued = 0;
1700 half_mask = (s->eqcr.pi_ci_mask>>1);
1701 full_mask = s->eqcr.pi_ci_mask;
1702 if (!s->eqcr.available) {
1703 eqcr_ci = s->eqcr.ci;
1704 s->eqcr.ci = qbman_cena_read_reg(&s->sys,
1705 QBMAN_CENA_SWP_EQCR_CI_MEMBACK) & full_mask;
1706 s->eqcr.available = qm_cyc_diff(s->eqcr.pi_ring_size,
1707 eqcr_ci, s->eqcr.ci);
1708 if (!s->eqcr.available)
1712 eqcr_pi = s->eqcr.pi;
1713 num_enqueued = (s->eqcr.available < num_frames) ?
1714 s->eqcr.available : num_frames;
1715 s->eqcr.available -= num_enqueued;
1716 /* Fill in the EQCR ring */
1717 for (i = 0; i < num_enqueued; i++) {
1718 p = qbman_cena_write_start_wo_shadow(&s->sys,
1719 QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
1721 memcpy(&p[1], &cl[1], 28);
1722 memcpy(&p[8], &fd[i], sizeof(*fd));
1726 /* Set the verb byte, have to substitute in the valid-bit */
1727 eqcr_pi = s->eqcr.pi;
1728 for (i = 0; i < num_enqueued; i++) {
1729 p = qbman_cena_write_start_wo_shadow(&s->sys,
1730 QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
1732 p[0] = cl[0] | s->eqcr.pi_vb;
1734 if (!(eqcr_pi & half_mask))
1735 s->eqcr.pi_vb ^= QB_VALID_BIT;
1738 s->eqcr.pi = eqcr_pi & full_mask;
1741 qbman_cinh_write(&s->sys, QBMAN_CINH_SWP_EQCR_PI,
1742 (QB_RT_BIT)|(s->eqcr.pi)|s->eqcr.pi_vb);
1744 return num_enqueued;
1746 int qbman_swp_enqueue_multiple_desc(struct qbman_swp *s,
1747 const struct qbman_eq_desc *d,
1748 const struct qbman_fd *fd,
1752 return qbman_swp_enqueue_multiple_desc_ptr(s, d, fd,
1755 return qbman_swp_enqueue_multiple_desc_cinh_direct(s, d, fd,
1760 /*************************/
1761 /* Static (push) dequeue */
1762 /*************************/
1764 void qbman_swp_push_get(struct qbman_swp *s, uint8_t channel_idx, int *enabled)
1766 uint16_t src = (s->sdq >> QB_SDQCR_SRC_SHIFT) & QB_SDQCR_SRC_MASK;
1768 QBMAN_BUG_ON(channel_idx > 15);
1769 *enabled = src | (1 << channel_idx);
1772 void qbman_swp_push_set(struct qbman_swp *s, uint8_t channel_idx, int enable)
1776 QBMAN_BUG_ON(channel_idx > 15);
1778 s->sdq |= 1 << channel_idx;
1780 s->sdq &= ~(1 << channel_idx);
1782 /* Read make the complete src map. If no channels are enabled
1783 * the SDQCR must be 0 or else QMan will assert errors
1785 dqsrc = (s->sdq >> QB_SDQCR_SRC_SHIFT) & QB_SDQCR_SRC_MASK;
1787 qbman_cinh_write(&s->sys, QBMAN_CINH_SWP_SDQCR, s->sdq);
1789 qbman_cinh_write(&s->sys, QBMAN_CINH_SWP_SDQCR, 0);
1792 /***************************/
1793 /* Volatile (pull) dequeue */
1794 /***************************/
1796 /* These should be const, eventually */
1797 #define QB_VDQCR_VERB_DCT_SHIFT 0
1798 #define QB_VDQCR_VERB_DT_SHIFT 2
1799 #define QB_VDQCR_VERB_RLS_SHIFT 4
1800 #define QB_VDQCR_VERB_WAE_SHIFT 5
1801 #define QB_VDQCR_VERB_RAD_SHIFT 6
1805 qb_pull_dt_workqueue,
1806 qb_pull_dt_framequeue
1809 void qbman_pull_desc_clear(struct qbman_pull_desc *d)
1811 memset(d, 0, sizeof(*d));
1814 void qbman_pull_desc_set_storage(struct qbman_pull_desc *d,
1815 struct qbman_result *storage,
1816 dma_addr_t storage_phys,
1819 d->pull.rsp_addr_virt = (size_t)storage;
1822 d->pull.verb &= ~(1 << QB_VDQCR_VERB_RLS_SHIFT);
1825 d->pull.verb |= 1 << QB_VDQCR_VERB_RLS_SHIFT;
1827 d->pull.verb |= 1 << QB_VDQCR_VERB_WAE_SHIFT;
1829 d->pull.verb &= ~(1 << QB_VDQCR_VERB_WAE_SHIFT);
1831 d->pull.rsp_addr = storage_phys;
1834 void qbman_pull_desc_set_numframes(struct qbman_pull_desc *d,
1837 d->pull.numf = numframes - 1;
1840 void qbman_pull_desc_set_token(struct qbman_pull_desc *d, uint8_t token)
1842 d->pull.tok = token;
1845 void qbman_pull_desc_set_fq(struct qbman_pull_desc *d, uint32_t fqid)
1847 d->pull.verb |= 1 << QB_VDQCR_VERB_DCT_SHIFT;
1848 d->pull.verb |= qb_pull_dt_framequeue << QB_VDQCR_VERB_DT_SHIFT;
1849 d->pull.dq_src = fqid;
1852 void qbman_pull_desc_set_wq(struct qbman_pull_desc *d, uint32_t wqid,
1853 enum qbman_pull_type_e dct)
1855 d->pull.verb |= dct << QB_VDQCR_VERB_DCT_SHIFT;
1856 d->pull.verb |= qb_pull_dt_workqueue << QB_VDQCR_VERB_DT_SHIFT;
1857 d->pull.dq_src = wqid;
1860 void qbman_pull_desc_set_channel(struct qbman_pull_desc *d, uint32_t chid,
1861 enum qbman_pull_type_e dct)
1863 d->pull.verb |= dct << QB_VDQCR_VERB_DCT_SHIFT;
1864 d->pull.verb |= qb_pull_dt_channel << QB_VDQCR_VERB_DT_SHIFT;
1865 d->pull.dq_src = chid;
1868 void qbman_pull_desc_set_rad(struct qbman_pull_desc *d, int rad)
1870 if (d->pull.verb & (1 << QB_VDQCR_VERB_RLS_SHIFT)) {
1872 d->pull.verb |= 1 << QB_VDQCR_VERB_RAD_SHIFT;
1874 d->pull.verb &= ~(1 << QB_VDQCR_VERB_RAD_SHIFT);
1876 printf("The RAD feature is not valid when RLS = 0\n");
1880 static int qbman_swp_pull_direct(struct qbman_swp *s,
1881 struct qbman_pull_desc *d)
1884 uint32_t *cl = qb_cl(d);
1886 if (!atomic_dec_and_test(&s->vdq.busy)) {
1887 atomic_inc(&s->vdq.busy);
1891 d->pull.tok = s->sys.idx + 1;
1892 s->vdq.storage = (void *)(size_t)d->pull.rsp_addr_virt;
1893 p = qbman_cena_write_start_wo_shadow(&s->sys, QBMAN_CENA_SWP_VDQCR);
1894 memcpy(&p[1], &cl[1], 12);
1896 /* Set the verb byte, have to substitute in the valid-bit */
1898 p[0] = cl[0] | s->vdq.valid_bit;
1899 s->vdq.valid_bit ^= QB_VALID_BIT;
1900 qbman_cena_write_complete_wo_shadow(&s->sys, QBMAN_CENA_SWP_VDQCR);
1905 static int qbman_swp_pull_cinh_direct(struct qbman_swp *s,
1906 struct qbman_pull_desc *d)
1909 uint32_t *cl = qb_cl(d);
1911 if (!atomic_dec_and_test(&s->vdq.busy)) {
1912 atomic_inc(&s->vdq.busy);
1916 d->pull.tok = s->sys.idx + 1;
1917 s->vdq.storage = (void *)(size_t)d->pull.rsp_addr_virt;
1918 p = qbman_cinh_write_start_wo_shadow(&s->sys, QBMAN_CENA_SWP_VDQCR);
1919 memcpy_byte_by_byte(&p[1], &cl[1], 12);
1921 /* Set the verb byte, have to substitute in the valid-bit */
1923 p[0] = cl[0] | s->vdq.valid_bit;
1924 s->vdq.valid_bit ^= QB_VALID_BIT;
1929 static int qbman_swp_pull_mem_back(struct qbman_swp *s,
1930 struct qbman_pull_desc *d)
1933 uint32_t *cl = qb_cl(d);
1935 if (!atomic_dec_and_test(&s->vdq.busy)) {
1936 atomic_inc(&s->vdq.busy);
1940 d->pull.tok = s->sys.idx + 1;
1941 s->vdq.storage = (void *)(size_t)d->pull.rsp_addr_virt;
1942 p = qbman_cena_write_start_wo_shadow(&s->sys, QBMAN_CENA_SWP_VDQCR_MEM);
1943 memcpy(&p[1], &cl[1], 12);
1945 /* Set the verb byte, have to substitute in the valid-bit */
1946 p[0] = cl[0] | s->vdq.valid_bit;
1947 s->vdq.valid_bit ^= QB_VALID_BIT;
1949 qbman_cinh_write(&s->sys, QBMAN_CINH_SWP_VDQCR_RT, QMAN_RT_MODE);
1954 int qbman_swp_pull(struct qbman_swp *s, struct qbman_pull_desc *d)
1957 return qbman_swp_pull_ptr(s, d);
1959 return qbman_swp_pull_cinh_direct(s, d);
1966 #define QMAN_DQRR_PI_MASK 0xf
1968 #define QBMAN_RESULT_DQ 0x60
1969 #define QBMAN_RESULT_FQRN 0x21
1970 #define QBMAN_RESULT_FQRNI 0x22
1971 #define QBMAN_RESULT_FQPN 0x24
1972 #define QBMAN_RESULT_FQDAN 0x25
1973 #define QBMAN_RESULT_CDAN 0x26
1974 #define QBMAN_RESULT_CSCN_MEM 0x27
1975 #define QBMAN_RESULT_CGCU 0x28
1976 #define QBMAN_RESULT_BPSCN 0x29
1977 #define QBMAN_RESULT_CSCN_WQ 0x2a
1979 #include <rte_prefetch.h>
1981 void qbman_swp_prefetch_dqrr_next(struct qbman_swp *s)
1983 const struct qbman_result *p;
1985 p = qbman_cena_read_wo_shadow(&s->sys,
1986 QBMAN_CENA_SWP_DQRR(s->dqrr.next_idx));
1990 /* NULL return if there are no unconsumed DQRR entries. Returns a DQRR entry
1991 * only once, so repeated calls can return a sequence of DQRR entries, without
1992 * requiring they be consumed immediately or in any particular order.
1994 const struct qbman_result *qbman_swp_dqrr_next(struct qbman_swp *s)
1997 return qbman_swp_dqrr_next_ptr(s);
1999 return qbman_swp_dqrr_next_cinh_direct(s);
2002 const struct qbman_result *qbman_swp_dqrr_next_direct(struct qbman_swp *s)
2005 uint32_t response_verb;
2007 const struct qbman_result *p;
2009 /* Before using valid-bit to detect if something is there, we have to
2010 * handle the case of the DQRR reset bug...
2012 if (s->dqrr.reset_bug) {
2013 /* We pick up new entries by cache-inhibited producer index,
2014 * which means that a non-coherent mapping would require us to
2015 * invalidate and read *only* once that PI has indicated that
2016 * there's an entry here. The first trip around the DQRR ring
2017 * will be much less efficient than all subsequent trips around
2020 uint8_t pi = qbman_cinh_read(&s->sys, QBMAN_CINH_SWP_DQPI) &
2023 /* there are new entries if pi != next_idx */
2024 if (pi == s->dqrr.next_idx)
2027 /* if next_idx is/was the last ring index, and 'pi' is
2028 * different, we can disable the workaround as all the ring
2029 * entries have now been DMA'd to so valid-bit checking is
2030 * repaired. Note: this logic needs to be based on next_idx
2031 * (which increments one at a time), rather than on pi (which
2032 * can burst and wrap-around between our snapshots of it).
2034 QBMAN_BUG_ON((s->dqrr.dqrr_size - 1) < 0);
2035 if (s->dqrr.next_idx == (s->dqrr.dqrr_size - 1u)) {
2036 pr_debug("DEBUG: next_idx=%d, pi=%d, clear reset bug\n",
2037 s->dqrr.next_idx, pi);
2038 s->dqrr.reset_bug = 0;
2040 qbman_cena_invalidate_prefetch(&s->sys,
2041 QBMAN_CENA_SWP_DQRR(s->dqrr.next_idx));
2043 p = qbman_cena_read_wo_shadow(&s->sys,
2044 QBMAN_CENA_SWP_DQRR(s->dqrr.next_idx));
2048 /* If the valid-bit isn't of the expected polarity, nothing there. Note,
2049 * in the DQRR reset bug workaround, we shouldn't need to skip these
2050 * check, because we've already determined that a new entry is available
2051 * and we've invalidated the cacheline before reading it, so the
2052 * valid-bit behaviour is repaired and should tell us what we already
2053 * knew from reading PI.
2055 if ((verb & QB_VALID_BIT) != s->dqrr.valid_bit)
2058 /* There's something there. Move "next_idx" attention to the next ring
2059 * entry (and prefetch it) before returning what we found.
2062 if (s->dqrr.next_idx == s->dqrr.dqrr_size) {
2063 s->dqrr.next_idx = 0;
2064 s->dqrr.valid_bit ^= QB_VALID_BIT;
2066 /* If this is the final response to a volatile dequeue command
2067 * indicate that the vdq is no longer busy
2070 response_verb = verb & QBMAN_RESPONSE_VERB_MASK;
2071 if ((response_verb == QBMAN_RESULT_DQ) &&
2072 (flags & QBMAN_DQ_STAT_VOLATILE) &&
2073 (flags & QBMAN_DQ_STAT_EXPIRED))
2074 atomic_inc(&s->vdq.busy);
2079 const struct qbman_result *qbman_swp_dqrr_next_cinh_direct(struct qbman_swp *s)
2082 uint32_t response_verb;
2084 const struct qbman_result *p;
2086 /* Before using valid-bit to detect if something is there, we have to
2087 * handle the case of the DQRR reset bug...
2089 if (s->dqrr.reset_bug) {
2090 /* We pick up new entries by cache-inhibited producer index,
2091 * which means that a non-coherent mapping would require us to
2092 * invalidate and read *only* once that PI has indicated that
2093 * there's an entry here. The first trip around the DQRR ring
2094 * will be much less efficient than all subsequent trips around
2097 uint8_t pi = qbman_cinh_read(&s->sys, QBMAN_CINH_SWP_DQPI) &
2100 /* there are new entries if pi != next_idx */
2101 if (pi == s->dqrr.next_idx)
2104 /* if next_idx is/was the last ring index, and 'pi' is
2105 * different, we can disable the workaround as all the ring
2106 * entries have now been DMA'd to so valid-bit checking is
2107 * repaired. Note: this logic needs to be based on next_idx
2108 * (which increments one at a time), rather than on pi (which
2109 * can burst and wrap-around between our snapshots of it).
2111 QBMAN_BUG_ON((s->dqrr.dqrr_size - 1) < 0);
2112 if (s->dqrr.next_idx == (s->dqrr.dqrr_size - 1u)) {
2113 pr_debug("DEBUG: next_idx=%d, pi=%d, clear reset bug\n",
2114 s->dqrr.next_idx, pi);
2115 s->dqrr.reset_bug = 0;
2118 p = qbman_cinh_read_wo_shadow(&s->sys,
2119 QBMAN_CENA_SWP_DQRR(s->dqrr.next_idx));
2123 /* If the valid-bit isn't of the expected polarity, nothing there. Note,
2124 * in the DQRR reset bug workaround, we shouldn't need to skip these
2125 * check, because we've already determined that a new entry is available
2126 * and we've invalidated the cacheline before reading it, so the
2127 * valid-bit behaviour is repaired and should tell us what we already
2128 * knew from reading PI.
2130 if ((verb & QB_VALID_BIT) != s->dqrr.valid_bit)
2133 /* There's something there. Move "next_idx" attention to the next ring
2134 * entry (and prefetch it) before returning what we found.
2137 if (s->dqrr.next_idx == s->dqrr.dqrr_size) {
2138 s->dqrr.next_idx = 0;
2139 s->dqrr.valid_bit ^= QB_VALID_BIT;
2141 /* If this is the final response to a volatile dequeue command
2142 * indicate that the vdq is no longer busy
2145 response_verb = verb & QBMAN_RESPONSE_VERB_MASK;
2146 if ((response_verb == QBMAN_RESULT_DQ) &&
2147 (flags & QBMAN_DQ_STAT_VOLATILE) &&
2148 (flags & QBMAN_DQ_STAT_EXPIRED))
2149 atomic_inc(&s->vdq.busy);
2154 const struct qbman_result *qbman_swp_dqrr_next_mem_back(struct qbman_swp *s)
2157 uint32_t response_verb;
2159 const struct qbman_result *p;
2161 p = qbman_cena_read_wo_shadow(&s->sys,
2162 QBMAN_CENA_SWP_DQRR_MEM(s->dqrr.next_idx));
2166 /* If the valid-bit isn't of the expected polarity, nothing there. Note,
2167 * in the DQRR reset bug workaround, we shouldn't need to skip these
2168 * check, because we've already determined that a new entry is available
2169 * and we've invalidated the cacheline before reading it, so the
2170 * valid-bit behaviour is repaired and should tell us what we already
2171 * knew from reading PI.
2173 if ((verb & QB_VALID_BIT) != s->dqrr.valid_bit)
2176 /* There's something there. Move "next_idx" attention to the next ring
2177 * entry (and prefetch it) before returning what we found.
2180 if (s->dqrr.next_idx == s->dqrr.dqrr_size) {
2181 s->dqrr.next_idx = 0;
2182 s->dqrr.valid_bit ^= QB_VALID_BIT;
2184 /* If this is the final response to a volatile dequeue command
2185 * indicate that the vdq is no longer busy
2188 response_verb = verb & QBMAN_RESPONSE_VERB_MASK;
2189 if ((response_verb == QBMAN_RESULT_DQ)
2190 && (flags & QBMAN_DQ_STAT_VOLATILE)
2191 && (flags & QBMAN_DQ_STAT_EXPIRED))
2192 atomic_inc(&s->vdq.busy);
2196 /* Consume DQRR entries previously returned from qbman_swp_dqrr_next(). */
2197 void qbman_swp_dqrr_consume(struct qbman_swp *s,
2198 const struct qbman_result *dq)
2200 qbman_cinh_write(&s->sys,
2201 QBMAN_CINH_SWP_DCAP, QBMAN_IDX_FROM_DQRR(dq));
2204 /* Consume DQRR entries previously returned from qbman_swp_dqrr_next(). */
2205 void qbman_swp_dqrr_idx_consume(struct qbman_swp *s,
2208 qbman_cinh_write(&s->sys, QBMAN_CINH_SWP_DCAP, dqrr_index);
2211 /*********************************/
2212 /* Polling user-provided storage */
2213 /*********************************/
2215 int qbman_result_has_new_result(struct qbman_swp *s,
2216 struct qbman_result *dq)
2218 if (dq->dq.tok == 0)
2222 * Set token to be 0 so we will detect change back to 1
2223 * next time the looping is traversed. Const is cast away here
2224 * as we want users to treat the dequeue responses as read only.
2226 ((struct qbman_result *)dq)->dq.tok = 0;
2229 * VDQCR "no longer busy" hook - not quite the same as DQRR, because
2230 * the fact "VDQCR" shows busy doesn't mean that we hold the result
2231 * that makes it available. Eg. we may be looking at our 10th dequeue
2232 * result, having released VDQCR after the 1st result and it is now
2233 * busy due to some other command!
2235 if (s->vdq.storage == dq) {
2236 s->vdq.storage = NULL;
2237 atomic_inc(&s->vdq.busy);
2243 int qbman_check_new_result(struct qbman_result *dq)
2245 if (dq->dq.tok == 0)
2249 * Set token to be 0 so we will detect change back to 1
2250 * next time the looping is traversed. Const is cast away here
2251 * as we want users to treat the dequeue responses as read only.
2253 ((struct qbman_result *)dq)->dq.tok = 0;
2258 int qbman_check_command_complete(struct qbman_result *dq)
2260 struct qbman_swp *s;
2262 if (dq->dq.tok == 0)
2265 s = portal_idx_map[dq->dq.tok - 1];
2267 * VDQCR "no longer busy" hook - not quite the same as DQRR, because
2268 * the fact "VDQCR" shows busy doesn't mean that we hold the result
2269 * that makes it available. Eg. we may be looking at our 10th dequeue
2270 * result, having released VDQCR after the 1st result and it is now
2271 * busy due to some other command!
2273 if (s->vdq.storage == dq) {
2274 s->vdq.storage = NULL;
2275 atomic_inc(&s->vdq.busy);
2281 /********************************/
2282 /* Categorising qbman results */
2283 /********************************/
2285 static inline int __qbman_result_is_x(const struct qbman_result *dq,
2288 uint8_t response_verb = dq->dq.verb & QBMAN_RESPONSE_VERB_MASK;
2290 return (response_verb == x);
2293 int qbman_result_is_DQ(const struct qbman_result *dq)
2295 return __qbman_result_is_x(dq, QBMAN_RESULT_DQ);
2298 int qbman_result_is_FQDAN(const struct qbman_result *dq)
2300 return __qbman_result_is_x(dq, QBMAN_RESULT_FQDAN);
2303 int qbman_result_is_CDAN(const struct qbman_result *dq)
2305 return __qbman_result_is_x(dq, QBMAN_RESULT_CDAN);
2308 int qbman_result_is_CSCN(const struct qbman_result *dq)
2310 return __qbman_result_is_x(dq, QBMAN_RESULT_CSCN_MEM) ||
2311 __qbman_result_is_x(dq, QBMAN_RESULT_CSCN_WQ);
2314 int qbman_result_is_BPSCN(const struct qbman_result *dq)
2316 return __qbman_result_is_x(dq, QBMAN_RESULT_BPSCN);
2319 int qbman_result_is_CGCU(const struct qbman_result *dq)
2321 return __qbman_result_is_x(dq, QBMAN_RESULT_CGCU);
2324 int qbman_result_is_FQRN(const struct qbman_result *dq)
2326 return __qbman_result_is_x(dq, QBMAN_RESULT_FQRN);
2329 int qbman_result_is_FQRNI(const struct qbman_result *dq)
2331 return __qbman_result_is_x(dq, QBMAN_RESULT_FQRNI);
2334 int qbman_result_is_FQPN(const struct qbman_result *dq)
2336 return __qbman_result_is_x(dq, QBMAN_RESULT_FQPN);
2339 /*********************************/
2340 /* Parsing frame dequeue results */
2341 /*********************************/
2343 /* These APIs assume qbman_result_is_DQ() is TRUE */
2345 uint8_t qbman_result_DQ_flags(const struct qbman_result *dq)
2350 uint16_t qbman_result_DQ_seqnum(const struct qbman_result *dq)
2352 return dq->dq.seqnum;
2355 uint16_t qbman_result_DQ_odpid(const struct qbman_result *dq)
2357 return dq->dq.oprid;
2360 uint32_t qbman_result_DQ_fqid(const struct qbman_result *dq)
2365 uint32_t qbman_result_DQ_byte_count(const struct qbman_result *dq)
2367 return dq->dq.fq_byte_cnt;
2370 uint32_t qbman_result_DQ_frame_count(const struct qbman_result *dq)
2372 return dq->dq.fq_frm_cnt;
2375 uint64_t qbman_result_DQ_fqd_ctx(const struct qbman_result *dq)
2377 return dq->dq.fqd_ctx;
2380 const struct qbman_fd *qbman_result_DQ_fd(const struct qbman_result *dq)
2382 return (const struct qbman_fd *)&dq->dq.fd[0];
2385 /**************************************/
2386 /* Parsing state-change notifications */
2387 /**************************************/
2388 uint8_t qbman_result_SCN_state(const struct qbman_result *scn)
2390 return scn->scn.state;
2393 uint32_t qbman_result_SCN_rid(const struct qbman_result *scn)
2395 return scn->scn.rid_tok;
2398 uint64_t qbman_result_SCN_ctx(const struct qbman_result *scn)
2400 return scn->scn.ctx;
2406 uint16_t qbman_result_bpscn_bpid(const struct qbman_result *scn)
2408 return (uint16_t)qbman_result_SCN_rid(scn) & 0x3FFF;
/* True iff the pool still has free buffers (state bit 0 clear). */
int qbman_result_bpscn_has_free_bufs(const struct qbman_result *scn)
{
	return !(int)(qbman_result_SCN_state(scn) & 0x1);
}
/* Return nonzero if the pool is depleted: state bit 1 set. */
int qbman_result_bpscn_is_depleted(const struct qbman_result *scn)
{
	return (int)(qbman_result_SCN_state(scn) & 0x2);
}
/* Return nonzero if the pool is in surplus: state bit 2 set. */
int qbman_result_bpscn_is_surplus(const struct qbman_result *scn)
{
	return (int)(qbman_result_SCN_state(scn) & 0x4);
}
2426 uint64_t qbman_result_bpscn_ctx(const struct qbman_result *scn)
2428 return qbman_result_SCN_ctx(scn);
2434 uint16_t qbman_result_cgcu_cgid(const struct qbman_result *scn)
2436 return (uint16_t)qbman_result_SCN_rid(scn) & 0xFFFF;
2439 uint64_t qbman_result_cgcu_icnt(const struct qbman_result *scn)
2441 return qbman_result_SCN_ctx(scn);
/********************/
/* Parsing EQ RESP  */
/********************/
2447 struct qbman_fd *qbman_result_eqresp_fd(struct qbman_result *eqresp)
2449 return (struct qbman_fd *)&eqresp->eq_resp.fd[0];
2452 void qbman_result_eqresp_set_rspid(struct qbman_result *eqresp, uint8_t val)
2454 eqresp->eq_resp.rspid = val;
2457 uint8_t qbman_result_eqresp_rspid(struct qbman_result *eqresp)
2459 return eqresp->eq_resp.rspid;
2462 uint8_t qbman_result_eqresp_rc(struct qbman_result *eqresp)
2464 if (eqresp->eq_resp.rc == 0xE)
/******************/
/* Buffer release */
/******************/

/* Bit positions inside the release descriptor's verb byte. */
#define QB_BR_RC_VALID_SHIFT  5	/* release-command-valid bit */
#define QB_BR_RCDI_SHIFT      6	/* release-completion-drain-interrupt bit */
2476 void qbman_release_desc_clear(struct qbman_release_desc *d)
2478 memset(d, 0, sizeof(*d));
2479 d->br.verb = 1 << QB_BR_RC_VALID_SHIFT;
2482 void qbman_release_desc_set_bpid(struct qbman_release_desc *d, uint16_t bpid)
2487 void qbman_release_desc_set_rcdi(struct qbman_release_desc *d, int enable)
2490 d->br.verb |= 1 << QB_BR_RCDI_SHIFT;
2492 d->br.verb &= ~(1 << QB_BR_RCDI_SHIFT);
/* Decode the Release Array Allocation (RAR) register:
 * low 3 bits = ring index, bit 7 = valid bit to stamp into the verb,
 * bit 8 = allocation success.
 */
#define RAR_IDX(rar)     ((rar) & 0x7)
#define RAR_VB(rar)      ((rar) & 0x80)
#define RAR_SUCCESS(rar) ((rar) & 0x100)
2499 static int qbman_swp_release_direct(struct qbman_swp *s,
2500 const struct qbman_release_desc *d,
2501 const uint64_t *buffers,
2502 unsigned int num_buffers)
2505 const uint32_t *cl = qb_cl(d);
2506 uint32_t rar = qbman_cinh_read(&s->sys, QBMAN_CINH_SWP_RAR);
2508 pr_debug("RAR=%08x\n", rar);
2509 if (!RAR_SUCCESS(rar))
2512 QBMAN_BUG_ON(!num_buffers || (num_buffers > 7));
2514 /* Start the release command */
2515 p = qbman_cena_write_start_wo_shadow(&s->sys,
2516 QBMAN_CENA_SWP_RCR(RAR_IDX(rar)));
2518 /* Copy the caller's buffer pointers to the command */
2519 u64_to_le32_copy(&p[2], buffers, num_buffers);
2521 /* Set the verb byte, have to substitute in the valid-bit and the
2522 * number of buffers.
2525 p[0] = cl[0] | RAR_VB(rar) | num_buffers;
2526 qbman_cena_write_complete_wo_shadow(&s->sys,
2527 QBMAN_CENA_SWP_RCR(RAR_IDX(rar)));
2532 static int qbman_swp_release_cinh_direct(struct qbman_swp *s,
2533 const struct qbman_release_desc *d,
2534 const uint64_t *buffers,
2535 unsigned int num_buffers)
2538 const uint32_t *cl = qb_cl(d);
2539 uint32_t rar = qbman_cinh_read(&s->sys, QBMAN_CINH_SWP_RAR);
2541 pr_debug("RAR=%08x\n", rar);
2542 if (!RAR_SUCCESS(rar))
2545 QBMAN_BUG_ON(!num_buffers || (num_buffers > 7));
2547 /* Start the release command */
2548 p = qbman_cinh_write_start_wo_shadow(&s->sys,
2549 QBMAN_CENA_SWP_RCR(RAR_IDX(rar)));
2551 /* Copy the caller's buffer pointers to the command */
2552 memcpy_byte_by_byte(&p[2], buffers, num_buffers * sizeof(uint64_t));
2554 /* Set the verb byte, have to substitute in the valid-bit and the
2555 * number of buffers.
2558 p[0] = cl[0] | RAR_VB(rar) | num_buffers;
2563 static int qbman_swp_release_mem_back(struct qbman_swp *s,
2564 const struct qbman_release_desc *d,
2565 const uint64_t *buffers,
2566 unsigned int num_buffers)
2569 const uint32_t *cl = qb_cl(d);
2570 uint32_t rar = qbman_cinh_read(&s->sys, QBMAN_CINH_SWP_RAR);
2572 pr_debug("RAR=%08x\n", rar);
2573 if (!RAR_SUCCESS(rar))
2576 QBMAN_BUG_ON(!num_buffers || (num_buffers > 7));
2578 /* Start the release command */
2579 p = qbman_cena_write_start_wo_shadow(&s->sys,
2580 QBMAN_CENA_SWP_RCR_MEM(RAR_IDX(rar)));
2582 /* Copy the caller's buffer pointers to the command */
2583 u64_to_le32_copy(&p[2], buffers, num_buffers);
2585 /* Set the verb byte, have to substitute in the valid-bit and the
2586 * number of buffers.
2588 p[0] = cl[0] | RAR_VB(rar) | num_buffers;
2590 qbman_cinh_write(&s->sys, QBMAN_CINH_SWP_RCR_AM_RT +
2591 RAR_IDX(rar) * 4, QMAN_RT_MODE);
2596 int qbman_swp_release(struct qbman_swp *s,
2597 const struct qbman_release_desc *d,
2598 const uint64_t *buffers,
2599 unsigned int num_buffers)
2602 return qbman_swp_release_ptr(s, d, buffers, num_buffers);
2604 return qbman_swp_release_cinh_direct(s, d, buffers,
/*******************/
/* Buffer acquires */
/*******************/
2611 struct qbman_acquire_desc {
2616 uint8_t reserved2[59];
2619 struct qbman_acquire_rslt {
2624 uint8_t reserved2[3];
2628 static int qbman_swp_acquire_direct(struct qbman_swp *s, uint16_t bpid,
2629 uint64_t *buffers, unsigned int num_buffers)
2631 struct qbman_acquire_desc *p;
2632 struct qbman_acquire_rslt *r;
2634 if (!num_buffers || (num_buffers > 7))
2637 /* Start the management command */
2638 p = qbman_swp_mc_start(s);
2643 /* Encode the caller-provided attributes */
2645 p->num = num_buffers;
2647 /* Complete the management command */
2648 r = qbman_swp_mc_complete(s, p, QBMAN_MC_ACQUIRE);
2650 pr_err("qbman: acquire from BPID %d failed, no response\n",
2655 /* Decode the outcome */
2656 QBMAN_BUG_ON((r->verb & QBMAN_RESPONSE_VERB_MASK) != QBMAN_MC_ACQUIRE);
2658 /* Determine success or failure */
2659 if (r->rslt != QBMAN_MC_RSLT_OK) {
2660 pr_err("Acquire buffers from BPID 0x%x failed, code=0x%02x\n",
2665 QBMAN_BUG_ON(r->num > num_buffers);
2667 /* Copy the acquired buffers to the caller's array */
2668 u64_from_le32_copy(buffers, &r->buf[0], r->num);
2673 static int qbman_swp_acquire_cinh_direct(struct qbman_swp *s, uint16_t bpid,
2674 uint64_t *buffers, unsigned int num_buffers)
2676 struct qbman_acquire_desc *p;
2677 struct qbman_acquire_rslt *r;
2679 if (!num_buffers || (num_buffers > 7))
2682 /* Start the management command */
2683 p = qbman_swp_mc_start(s);
2688 /* Encode the caller-provided attributes */
2690 p->num = num_buffers;
2692 /* Complete the management command */
2693 r = qbman_swp_mc_complete_cinh(s, p, QBMAN_MC_ACQUIRE);
2695 pr_err("qbman: acquire from BPID %d failed, no response\n",
2700 /* Decode the outcome */
2701 QBMAN_BUG_ON((r->verb & QBMAN_RESPONSE_VERB_MASK) != QBMAN_MC_ACQUIRE);
2703 /* Determine success or failure */
2704 if (r->rslt != QBMAN_MC_RSLT_OK) {
2705 pr_err("Acquire buffers from BPID 0x%x failed, code=0x%02x\n",
2710 QBMAN_BUG_ON(r->num > num_buffers);
2712 /* Copy the acquired buffers to the caller's array */
2713 u64_from_le32_copy(buffers, &r->buf[0], r->num);
2718 int qbman_swp_acquire(struct qbman_swp *s, uint16_t bpid, uint64_t *buffers,
2719 unsigned int num_buffers)
2722 return qbman_swp_acquire_direct(s, bpid, buffers, num_buffers);
2724 return qbman_swp_acquire_cinh_direct(s, bpid, buffers,
2731 struct qbman_alt_fq_state_desc {
2733 uint8_t reserved[3];
2735 uint8_t reserved2[56];
2738 struct qbman_alt_fq_state_rslt {
2741 uint8_t reserved[62];
2744 #define ALT_FQ_FQID_MASK 0x00FFFFFF
2746 static int qbman_swp_alt_fq_state(struct qbman_swp *s, uint32_t fqid,
2747 uint8_t alt_fq_verb)
2749 struct qbman_alt_fq_state_desc *p;
2750 struct qbman_alt_fq_state_rslt *r;
2752 /* Start the management command */
2753 p = qbman_swp_mc_start(s);
2757 p->fqid = fqid & ALT_FQ_FQID_MASK;
2759 /* Complete the management command */
2760 r = qbman_swp_mc_complete(s, p, alt_fq_verb);
2762 pr_err("qbman: mgmt cmd failed, no response (verb=0x%x)\n",
2767 /* Decode the outcome */
2768 QBMAN_BUG_ON((r->verb & QBMAN_RESPONSE_VERB_MASK) != alt_fq_verb);
2770 /* Determine success or failure */
2771 if (r->rslt != QBMAN_MC_RSLT_OK) {
2772 pr_err("ALT FQID %d failed: verb = 0x%08x, code = 0x%02x\n",
2773 fqid, alt_fq_verb, r->rslt);
2780 int qbman_swp_fq_schedule(struct qbman_swp *s, uint32_t fqid)
2782 return qbman_swp_alt_fq_state(s, fqid, QBMAN_FQ_SCHEDULE);
2785 int qbman_swp_fq_force(struct qbman_swp *s, uint32_t fqid)
2787 return qbman_swp_alt_fq_state(s, fqid, QBMAN_FQ_FORCE);
2790 int qbman_swp_fq_xon(struct qbman_swp *s, uint32_t fqid)
2792 return qbman_swp_alt_fq_state(s, fqid, QBMAN_FQ_XON);
2795 int qbman_swp_fq_xoff(struct qbman_swp *s, uint32_t fqid)
2797 return qbman_swp_alt_fq_state(s, fqid, QBMAN_FQ_XOFF);
/**********************/
/* Channel management */
/**********************/
2804 struct qbman_cdan_ctrl_desc {
2812 uint8_t reserved3[48];
2816 struct qbman_cdan_ctrl_rslt {
2820 uint8_t reserved[60];
/* Hide "ICD" for now as we don't use it, don't set it, and don't test it, so it
 * would be irresponsible to expose it.
 */
#define CODE_CDAN_WE_EN  0x1	/* write-enable for the enable/disable bit */
#define CODE_CDAN_WE_CTX 0x4	/* write-enable for the context field */
2829 static int qbman_swp_CDAN_set(struct qbman_swp *s, uint16_t channelid,
2830 uint8_t we_mask, uint8_t cdan_en,
2833 struct qbman_cdan_ctrl_desc *p;
2834 struct qbman_cdan_ctrl_rslt *r;
2836 /* Start the management command */
2837 p = qbman_swp_mc_start(s);
2841 /* Encode the caller-provided attributes */
2850 /* Complete the management command */
2851 r = qbman_swp_mc_complete(s, p, QBMAN_WQCHAN_CONFIGURE);
2853 pr_err("qbman: wqchan config failed, no response\n");
2857 /* Decode the outcome */
2858 QBMAN_BUG_ON((r->verb & QBMAN_RESPONSE_VERB_MASK)
2859 != QBMAN_WQCHAN_CONFIGURE);
2861 /* Determine success or failure */
2862 if (r->rslt != QBMAN_MC_RSLT_OK) {
2863 pr_err("CDAN cQID %d failed: code = 0x%02x\n",
2864 channelid, r->rslt);
2871 int qbman_swp_CDAN_set_context(struct qbman_swp *s, uint16_t channelid,
2874 return qbman_swp_CDAN_set(s, channelid,
2879 int qbman_swp_CDAN_enable(struct qbman_swp *s, uint16_t channelid)
2881 return qbman_swp_CDAN_set(s, channelid,
2886 int qbman_swp_CDAN_disable(struct qbman_swp *s, uint16_t channelid)
2888 return qbman_swp_CDAN_set(s, channelid,
2893 int qbman_swp_CDAN_set_context_enable(struct qbman_swp *s, uint16_t channelid,
2896 return qbman_swp_CDAN_set(s, channelid,
2897 CODE_CDAN_WE_EN | CODE_CDAN_WE_CTX,
2901 uint8_t qbman_get_dqrr_idx(const struct qbman_result *dqrr)
2903 return QBMAN_IDX_FROM_DQRR(dqrr);
2906 struct qbman_result *qbman_get_dqrr_from_idx(struct qbman_swp *s, uint8_t idx)
2908 struct qbman_result *dq;
2910 dq = qbman_cena_read(&s->sys, QBMAN_CENA_SWP_DQRR(idx));