1 /* SPDX-License-Identifier: BSD-3-Clause
3 * Copyright (C) 2014-2016 Freescale Semiconductor, Inc.
4 * Copyright 2018-2020 NXP
9 #include "qbman_portal.h"
11 /* QBMan portal management command codes */
12 #define QBMAN_MC_ACQUIRE 0x30
13 #define QBMAN_WQCHAN_CONFIGURE 0x46
15 /* Reverse mapping of QBMAN_CENA_SWP_DQRR() */
16 #define QBMAN_IDX_FROM_DQRR(p) (((unsigned long)p & 0x1ff) >> 6)
18 /* QBMan FQ management command codes */
19 #define QBMAN_FQ_SCHEDULE 0x48
20 #define QBMAN_FQ_FORCE 0x49
21 #define QBMAN_FQ_XON 0x4d
22 #define QBMAN_FQ_XOFF 0x4e
24 /*******************************/
25 /* Pre-defined attribute codes */
26 /*******************************/
28 #define QBMAN_RESPONSE_VERB_MASK 0x7f
30 /*************************/
31 /* SDQCR attribute codes */
32 /*************************/
33 #define QB_SDQCR_FC_SHIFT 29
34 #define QB_SDQCR_FC_MASK 0x1
35 #define QB_SDQCR_DCT_SHIFT 24
36 #define QB_SDQCR_DCT_MASK 0x3
37 #define QB_SDQCR_TOK_SHIFT 16
38 #define QB_SDQCR_TOK_MASK 0xff
39 #define QB_SDQCR_SRC_SHIFT 0
40 #define QB_SDQCR_SRC_MASK 0xffff
42 /* opaque token for static dequeues */
43 #define QMAN_SDQCR_TOKEN 0xbb
45 enum qbman_sdqcr_dct {
46 qbman_sdqcr_dct_null = 0,
47 qbman_sdqcr_dct_prio_ics,
48 qbman_sdqcr_dct_active_ics,
49 qbman_sdqcr_dct_active
53 qbman_sdqcr_fc_one = 0,
54 qbman_sdqcr_fc_up_to_3 = 1
57 /* We need to keep track of which SWP triggered a pull command
58 * so keep an array of portal IDs and use the token field to
59 * be able to find the proper portal
61 #define MAX_QBMAN_PORTALS 64
62 static struct qbman_swp *portal_idx_map[MAX_QBMAN_PORTALS];
64 uint32_t qman_version;
66 /* Internal Function declaration */
68 qbman_swp_enqueue_array_mode_direct(struct qbman_swp *s,
69 const struct qbman_eq_desc *d,
70 const struct qbman_fd *fd);
72 qbman_swp_enqueue_array_mode_mem_back(struct qbman_swp *s,
73 const struct qbman_eq_desc *d,
74 const struct qbman_fd *fd);
77 qbman_swp_enqueue_ring_mode_direct(struct qbman_swp *s,
78 const struct qbman_eq_desc *d,
79 const struct qbman_fd *fd);
81 qbman_swp_enqueue_ring_mode_cinh_read_direct(struct qbman_swp *s,
82 const struct qbman_eq_desc *d,
83 const struct qbman_fd *fd);
85 qbman_swp_enqueue_ring_mode_cinh_direct(struct qbman_swp *s,
86 const struct qbman_eq_desc *d,
87 const struct qbman_fd *fd);
89 qbman_swp_enqueue_ring_mode_mem_back(struct qbman_swp *s,
90 const struct qbman_eq_desc *d,
91 const struct qbman_fd *fd);
94 qbman_swp_enqueue_multiple_direct(struct qbman_swp *s,
95 const struct qbman_eq_desc *d,
96 const struct qbman_fd *fd,
100 qbman_swp_enqueue_multiple_cinh_read_direct(struct qbman_swp *s,
101 const struct qbman_eq_desc *d,
102 const struct qbman_fd *fd,
106 qbman_swp_enqueue_multiple_cinh_direct(struct qbman_swp *s,
107 const struct qbman_eq_desc *d,
108 const struct qbman_fd *fd,
112 qbman_swp_enqueue_multiple_mem_back(struct qbman_swp *s,
113 const struct qbman_eq_desc *d,
114 const struct qbman_fd *fd,
119 qbman_swp_enqueue_multiple_fd_direct(struct qbman_swp *s,
120 const struct qbman_eq_desc *d,
121 struct qbman_fd **fd,
125 qbman_swp_enqueue_multiple_fd_cinh_read_direct(struct qbman_swp *s,
126 const struct qbman_eq_desc *d,
127 struct qbman_fd **fd,
131 qbman_swp_enqueue_multiple_fd_cinh_direct(struct qbman_swp *s,
132 const struct qbman_eq_desc *d,
133 struct qbman_fd **fd,
137 qbman_swp_enqueue_multiple_fd_mem_back(struct qbman_swp *s,
138 const struct qbman_eq_desc *d,
139 struct qbman_fd **fd,
144 qbman_swp_enqueue_multiple_desc_direct(struct qbman_swp *s,
145 const struct qbman_eq_desc *d,
146 const struct qbman_fd *fd,
149 qbman_swp_enqueue_multiple_desc_cinh_read_direct(struct qbman_swp *s,
150 const struct qbman_eq_desc *d,
151 const struct qbman_fd *fd,
154 qbman_swp_enqueue_multiple_desc_cinh_direct(struct qbman_swp *s,
155 const struct qbman_eq_desc *d,
156 const struct qbman_fd *fd,
159 qbman_swp_enqueue_multiple_desc_mem_back(struct qbman_swp *s,
160 const struct qbman_eq_desc *d,
161 const struct qbman_fd *fd,
165 qbman_swp_pull_direct(struct qbman_swp *s, struct qbman_pull_desc *d);
167 qbman_swp_pull_cinh_direct(struct qbman_swp *s, struct qbman_pull_desc *d);
169 qbman_swp_pull_mem_back(struct qbman_swp *s, struct qbman_pull_desc *d);
171 const struct qbman_result *qbman_swp_dqrr_next_direct(struct qbman_swp *s);
172 const struct qbman_result *qbman_swp_dqrr_next_cinh_direct(struct qbman_swp *s);
173 const struct qbman_result *qbman_swp_dqrr_next_mem_back(struct qbman_swp *s);
176 qbman_swp_release_direct(struct qbman_swp *s,
177 const struct qbman_release_desc *d,
178 const uint64_t *buffers, unsigned int num_buffers);
180 qbman_swp_release_cinh_direct(struct qbman_swp *s,
181 const struct qbman_release_desc *d,
182 const uint64_t *buffers, unsigned int num_buffers);
184 qbman_swp_release_mem_back(struct qbman_swp *s,
185 const struct qbman_release_desc *d,
186 const uint64_t *buffers, unsigned int num_buffers);
188 /* Function pointers */
/* Datapath dispatch table. Every pointer defaults to the "direct" CENA
 * implementation; qbman_swp_init() re-targets them to the mem-back
 * variants on QMan rev >= 5000 fast-access portals, or to CINH-read
 * variants on LS1080A. NOTE(review): this extract elides some parameter
 * lines (e.g. num_frames/flags) from the pointer prototypes. */
189 static int (*qbman_swp_enqueue_array_mode_ptr)(struct qbman_swp *s,
190 const struct qbman_eq_desc *d,
191 const struct qbman_fd *fd)
192 = qbman_swp_enqueue_array_mode_direct;
194 static int (*qbman_swp_enqueue_ring_mode_ptr)(struct qbman_swp *s,
195 const struct qbman_eq_desc *d,
196 const struct qbman_fd *fd)
197 = qbman_swp_enqueue_ring_mode_direct;
199 static int (*qbman_swp_enqueue_multiple_ptr)(struct qbman_swp *s,
200 const struct qbman_eq_desc *d,
201 const struct qbman_fd *fd,
204 = qbman_swp_enqueue_multiple_direct;
206 static int (*qbman_swp_enqueue_multiple_fd_ptr)(struct qbman_swp *s,
207 const struct qbman_eq_desc *d,
208 struct qbman_fd **fd,
211 = qbman_swp_enqueue_multiple_fd_direct;
213 static int (*qbman_swp_enqueue_multiple_desc_ptr)(struct qbman_swp *s,
214 const struct qbman_eq_desc *d,
215 const struct qbman_fd *fd,
217 = qbman_swp_enqueue_multiple_desc_direct;
219 static int (*qbman_swp_pull_ptr)(struct qbman_swp *s,
220 struct qbman_pull_desc *d)
221 = qbman_swp_pull_direct;
/* Non-static: also consulted by inline DQRR helpers elsewhere - TODO confirm. */
223 const struct qbman_result *(*qbman_swp_dqrr_next_ptr)(struct qbman_swp *s)
224 = qbman_swp_dqrr_next_direct;
226 static int (*qbman_swp_release_ptr)(struct qbman_swp *s,
227 const struct qbman_release_desc *d,
228 const uint64_t *buffers, unsigned int num_buffers)
229 = qbman_swp_release_direct;
231 /*********************************/
232 /* Portal constructor/destructor */
233 /*********************************/
235 /* Software portals should always be in the power-on state when we initialise,
236 * due to the CCSR-based portal reset functionality that MC has.
238 * Erk! Turns out that QMan versions prior to 4.1 do not correctly reset DQRR
239 * valid-bits, so we need to support a workaround where we don't trust
240 * valid-bits when detecting new entries until any stale ring entries have been
241 * overwritten at least once. The idea is that we read PI for the first few
242 * entries, then switch to valid-bit after that. The trick is to clear the
243 * bug-work-around boolean once the PI wraps around the ring for the first time.
245 * Note: this still carries a slight additional cost once the decrementer hits
/*
 * qbman_swp_init() - construct a software-portal object from descriptor 'd'.
 * Allocates and zeroes the portal state, programs the default SDQCR word,
 * applies the pre-4.1 DQRR reset-bug workaround, selects direct vs. mem-back
 * vs. CINH datapath functions, seeds the EQCR producer/consumer indices,
 * and registers the portal in portal_idx_map[] for token-based lookup.
 * NOTE(review): this extract elides several lines (malloc-failure path,
 * else branches, cleanup, final return); code lines below are unchanged.
 */
248 struct qbman_swp *qbman_swp_init(const struct qbman_swp_desc *d)
253 struct qbman_swp *p = malloc(sizeof(*p));
258 memset(p, 0, sizeof(struct qbman_swp));
261 #ifdef QBMAN_CHECKING
262 p->mc.check = swp_mc_can_start;
264 p->mc.valid_bit = QB_VALID_BIT;
/* Default SDQCR: priority-ICS dequeue type, up to 3 frames per command,
 * and the fixed token used to recognise our own static-dequeue entries. */
265 p->sdq |= qbman_sdqcr_dct_prio_ics << QB_SDQCR_DCT_SHIFT;
266 p->sdq |= qbman_sdqcr_fc_up_to_3 << QB_SDQCR_FC_SHIFT;
267 p->sdq |= QMAN_SDQCR_TOKEN << QB_SDQCR_TOK_SHIFT;
/* Management-response valid bit is only tracked for rev >= 5000 portals
 * using the mem-back (fastest) access mode. */
268 if ((d->qman_version & QMAN_REV_MASK) >= QMAN_REV_5000
269 && (d->cena_access_mode == qman_cena_fastest_access))
270 p->mr.valid_bit = QB_VALID_BIT;
/* vdq.busy == 1 here marks the VDQCR as free to claim (atomic token). */
272 atomic_set(&p->vdq.busy, 1);
273 p->vdq.valid_bit = QB_VALID_BIT;
274 p->dqrr.valid_bit = QB_VALID_BIT;
275 qman_version = p->desc.qman_version;
/* Pre-4.1 QMan: 4-entry DQRR plus the valid-bit reset-bug workaround
 * described in the comment block above this function. */
276 if ((qman_version & QMAN_REV_MASK) < QMAN_REV_4100) {
277 p->dqrr.dqrr_size = 4;
278 p->dqrr.reset_bug = 1;
280 p->dqrr.dqrr_size = 8;
281 p->dqrr.reset_bug = 0;
284 ret = qbman_swp_sys_init(&p->sys, d, p->dqrr.dqrr_size);
287 pr_err("qbman_swp_sys_init() failed %d\n", ret);
291 /* Verify that the DQRRPI is 0 - if it is not the portal isn't
292 * in default state which is an error
294 if (qbman_cinh_read(&p->sys, QBMAN_CINH_SWP_DQPI) & 0xF) {
295 pr_err("qbman DQRR PI is not zero, portal is not clean\n");
300 /* SDQCR needs to be initialized to 0 when no channels are
301 * being dequeued from or else the QMan HW will indicate an
302 * error. The values that were calculated above will be
303 * applied when dequeues from a specific channel are enabled.
305 qbman_cinh_write(&p->sys, QBMAN_CINH_SWP_SDQCR, 0);
307 p->eqcr.pi_ring_size = 8;
/* Rev >= 5000 fast access: 32-entry EQCR and mem-back variants of all
 * datapath operations. */
308 if ((qman_version & QMAN_REV_MASK) >= QMAN_REV_5000
309 && (d->cena_access_mode == qman_cena_fastest_access)) {
310 p->eqcr.pi_ring_size = 32;
311 qbman_swp_enqueue_array_mode_ptr =
312 qbman_swp_enqueue_array_mode_mem_back;
313 qbman_swp_enqueue_ring_mode_ptr =
314 qbman_swp_enqueue_ring_mode_mem_back;
315 qbman_swp_enqueue_multiple_ptr =
316 qbman_swp_enqueue_multiple_mem_back;
317 qbman_swp_enqueue_multiple_fd_ptr =
318 qbman_swp_enqueue_multiple_fd_mem_back;
319 qbman_swp_enqueue_multiple_desc_ptr =
320 qbman_swp_enqueue_multiple_desc_mem_back;
321 qbman_swp_pull_ptr = qbman_swp_pull_mem_back;
322 qbman_swp_dqrr_next_ptr = qbman_swp_dqrr_next_mem_back;
323 qbman_swp_release_ptr = qbman_swp_release_mem_back;
/* LS1080A: EQCR consumer index must be read through the CINH window. */
326 if (dpaa2_svr_family == SVR_LS1080A) {
327 qbman_swp_enqueue_ring_mode_ptr =
328 qbman_swp_enqueue_ring_mode_cinh_read_direct;
329 qbman_swp_enqueue_multiple_ptr =
330 qbman_swp_enqueue_multiple_cinh_read_direct;
331 qbman_swp_enqueue_multiple_fd_ptr =
332 qbman_swp_enqueue_multiple_fd_cinh_read_direct;
333 qbman_swp_enqueue_multiple_desc_ptr =
334 qbman_swp_enqueue_multiple_desc_cinh_read_direct;
/* pi_ci_mask becomes (2 * pi_ring_size - 1): one bit wider than the ring
 * index so the wrap (valid-bit) state can be compared alongside it. */
337 for (mask_size = p->eqcr.pi_ring_size; mask_size > 0; mask_size >>= 1)
338 p->eqcr.pi_ci_mask = (p->eqcr.pi_ci_mask<<1) + 1;
339 eqcr_pi = qbman_cinh_read(&p->sys, QBMAN_CINH_SWP_EQCR_PI);
340 p->eqcr.pi = eqcr_pi & p->eqcr.pi_ci_mask;
341 p->eqcr.pi_vb = eqcr_pi & QB_VALID_BIT;
/* NOTE(review): on rev >= 5000 fast-access portals the CI is seeded from
 * the EQCR_PI register rather than EQCR_CI - presumably intentional for
 * mem-back portals; confirm against the QBMan reference manual. */
342 if ((p->desc.qman_version & QMAN_REV_MASK) >= QMAN_REV_5000
343 && (d->cena_access_mode == qman_cena_fastest_access))
344 p->eqcr.ci = qbman_cinh_read(&p->sys, QBMAN_CINH_SWP_EQCR_PI)
345 & p->eqcr.pi_ci_mask;
347 p->eqcr.ci = qbman_cinh_read(&p->sys, QBMAN_CINH_SWP_EQCR_CI)
348 & p->eqcr.pi_ci_mask;
349 p->eqcr.available = p->eqcr.pi_ring_size -
350 qm_cyc_diff(p->eqcr.pi_ring_size,
351 p->eqcr.ci & (p->eqcr.pi_ci_mask<<1),
352 p->eqcr.pi & (p->eqcr.pi_ci_mask<<1));
/* Remember which swp owns this portal index for token-based lookups. */
354 portal_idx_map[p->desc.idx] = p;
/*
 * qbman_swp_update() - update the portal's system mapping, enabling or
 * disabling cacheline stashing per 'stash_off'. Returns 0 on success
 * (early-out lines elided in this extract). No-op for rev >= 5000
 * portals using the fastest (mem-back) access mode.
 */
358 int qbman_swp_update(struct qbman_swp *p, int stash_off)
360 const struct qbman_swp_desc *d = &p->desc;
361 struct qbman_swp_sys *s = &p->sys;
364 /* Nothing needs to be done for QBMAN rev > 5000 with fast access */
365 if ((qman_version & QMAN_REV_MASK) >= QMAN_REV_5000
366 && (d->cena_access_mode == qman_cena_fastest_access))
369 ret = qbman_swp_sys_update(s, d, p->dqrr.dqrr_size, stash_off);
/* NOTE(review): message names qbman_swp_sys_init() but the failing call
 * above is qbman_swp_sys_update() - misleading copy/paste; fix when the
 * full function body is in view. */
371 pr_err("qbman_swp_sys_init() failed %d\n", ret);
/* Remember the stashing setting so datapath helpers can honour it. */
375 p->stash_off = stash_off;
/*
 * qbman_swp_finish() - tear down a portal created by qbman_swp_init():
 * release the system resources and clear the portal_idx_map[] slot.
 * The caller must not use 'p' afterwards.
 */
380 void qbman_swp_finish(struct qbman_swp *p)
382 #ifdef QBMAN_CHECKING
/* No management command may be in flight at teardown. */
383 QBMAN_BUG_ON(p->mc.check != swp_mc_can_start);
385 qbman_swp_sys_finish(&p->sys);
386 portal_idx_map[p->desc.idx] = NULL;
/* Return the descriptor the portal was created from (body elided here). */
390 const struct qbman_swp_desc *qbman_swp_get_desc(struct qbman_swp *p)
/* Thin accessors over the portal's cache-inhibited (CINH) interrupt and
 * threshold registers. Each is a single register read or write:
 *   ISDR - interrupt status disable ("vanish"),
 *   ISR  - interrupt status (write-1-to-clear via set),
 *   DQRR_ITR - DQRR interrupt threshold,
 *   ITPR - interrupt timeout period,
 *   IER  - interrupt enable ("trigger"),
 *   IIR  - interrupt inhibit (all-ones inhibits, zero un-inhibits). */
399 uint32_t qbman_swp_interrupt_get_vanish(struct qbman_swp *p)
401 return qbman_cinh_read(&p->sys, QBMAN_CINH_SWP_ISDR);
404 void qbman_swp_interrupt_set_vanish(struct qbman_swp *p, uint32_t mask)
406 qbman_cinh_write(&p->sys, QBMAN_CINH_SWP_ISDR, mask);
409 uint32_t qbman_swp_interrupt_read_status(struct qbman_swp *p)
411 return qbman_cinh_read(&p->sys, QBMAN_CINH_SWP_ISR);
414 void qbman_swp_interrupt_clear_status(struct qbman_swp *p, uint32_t mask)
416 qbman_cinh_write(&p->sys, QBMAN_CINH_SWP_ISR, mask);
419 uint32_t qbman_swp_dqrr_thrshld_read_status(struct qbman_swp *p)
421 return qbman_cinh_read(&p->sys, QBMAN_CINH_SWP_DQRR_ITR);
424 void qbman_swp_dqrr_thrshld_write(struct qbman_swp *p, uint32_t mask)
426 qbman_cinh_write(&p->sys, QBMAN_CINH_SWP_DQRR_ITR, mask);
429 uint32_t qbman_swp_intr_timeout_read_status(struct qbman_swp *p)
431 return qbman_cinh_read(&p->sys, QBMAN_CINH_SWP_ITPR);
434 void qbman_swp_intr_timeout_write(struct qbman_swp *p, uint32_t mask)
436 qbman_cinh_write(&p->sys, QBMAN_CINH_SWP_ITPR, mask);
439 uint32_t qbman_swp_interrupt_get_trigger(struct qbman_swp *p)
441 return qbman_cinh_read(&p->sys, QBMAN_CINH_SWP_IER);
444 void qbman_swp_interrupt_set_trigger(struct qbman_swp *p, uint32_t mask)
446 qbman_cinh_write(&p->sys, QBMAN_CINH_SWP_IER, mask);
449 int qbman_swp_interrupt_get_inhibit(struct qbman_swp *p)
451 return qbman_cinh_read(&p->sys, QBMAN_CINH_SWP_IIR);
454 void qbman_swp_interrupt_set_inhibit(struct qbman_swp *p, int inhibit)
456 qbman_cinh_write(&p->sys, QBMAN_CINH_SWP_IIR,
457 inhibit ? 0xffffffff : 0);
460 /***********************/
461 /* Management commands */
462 /***********************/
465 * Internal code common to all types of management commands.
/*
 * qbman_swp_mc_start() - begin a management command. Returns a pointer to
 * the command-register (CR) cacheline for the caller to fill in; rev >=
 * 5000 fast-access portals use the mem-back CR window instead.
 * ('ret' declaration and the final return are elided in this extract.)
 */
468 void *qbman_swp_mc_start(struct qbman_swp *p)
471 #ifdef QBMAN_CHECKING
472 QBMAN_BUG_ON(p->mc.check != swp_mc_can_start);
474 if ((p->desc.qman_version & QMAN_REV_MASK) >= QMAN_REV_5000
475 && (p->desc.cena_access_mode == qman_cena_fastest_access))
476 ret = qbman_cena_write_start(&p->sys, QBMAN_CENA_SWP_CR_MEM);
478 ret = qbman_cena_write_start(&p->sys, QBMAN_CENA_SWP_CR);
479 #ifdef QBMAN_CHECKING
/* Debug state machine: next legal step is submit. */
481 p->mc.check = swp_mc_can_submit;
/*
 * qbman_swp_mc_submit() - finalise and submit a management command begun
 * with qbman_swp_mc_start(). 'cmd_verb' is OR'ed with the portal's valid
 * bit into word 0 of the command. (Declaration of 'v' is elided in this
 * extract - presumably a byte pointer aliasing 'cmd'; confirm.)
 */
486 void qbman_swp_mc_submit(struct qbman_swp *p, void *cmd, uint8_t cmd_verb)
489 #ifdef QBMAN_CHECKING
/* NOTE(review): the outer '!' means this fires when the state IS
 * swp_mc_can_submit, i.e. the expected state - looks inverted relative
 * to the asserts in mc_start/mc_result. Verify intent before fixing. */
490 QBMAN_BUG_ON(!(p->mc.check != swp_mc_can_submit));
492 /* TBD: "|=" is going to hurt performance. Need to move as many fields
493 * out of word zero, and for those that remain, the "OR" needs to occur
494 * at the caller side. This debug check helps to catch cases where the
495 * caller wants to OR but has forgotten to do so.
497 QBMAN_BUG_ON((*v & cmd_verb) != *v);
498 if ((p->desc.qman_version & QMAN_REV_MASK) >= QMAN_REV_5000
499 && (p->desc.cena_access_mode == qman_cena_fastest_access)) {
500 *v = cmd_verb | p->mr.valid_bit;
501 qbman_cena_write_complete(&p->sys, QBMAN_CENA_SWP_CR_MEM, cmd);
/* Ring the CR doorbell so hardware consumes the mem-back command. */
503 qbman_cinh_write(&p->sys, QBMAN_CINH_SWP_CR_RT, QMAN_RT_MODE);
506 *v = cmd_verb | p->mc.valid_bit;
507 qbman_cena_write_complete(&p->sys, QBMAN_CENA_SWP_CR, cmd);
510 #ifdef QBMAN_CHECKING
511 p->mc.check = swp_mc_can_poll;
/*
 * qbman_swp_mc_submit_cinh() - as qbman_swp_mc_submit(), but writes the
 * command through the cache-inhibited (CINH) window only; used when the
 * CENA mapping cannot be relied upon. ('v' declaration elided here.)
 */
515 void qbman_swp_mc_submit_cinh(struct qbman_swp *p, void *cmd, uint8_t cmd_verb)
518 #ifdef QBMAN_CHECKING
/* NOTE(review): same double-negated assert as qbman_swp_mc_submit() -
 * fires when the state equals swp_mc_can_submit; looks inverted. */
519 QBMAN_BUG_ON(!(p->mc.check != swp_mc_can_submit));
521 /* TBD: "|=" is going to hurt performance. Need to move as many fields
522 * out of word zero, and for those that remain, the "OR" needs to occur
523 * at the caller side. This debug check helps to catch cases where the
524 * caller wants to OR but has forgotten to do so.
526 QBMAN_BUG_ON((*v & cmd_verb) != *v);
528 *v = cmd_verb | p->mc.valid_bit;
529 qbman_cinh_write_complete(&p->sys, QBMAN_CENA_SWP_CR, cmd);
531 #ifdef QBMAN_CHECKING
532 p->mc.check = swp_mc_can_poll;
/*
 * qbman_swp_mc_result() - poll for the response to the last submitted
 * management command. Returns the response cacheline, or NULL while the
 * command is still in flight ('ret'/'verb' declarations and the early
 * NULL-return lines are elided in this extract).
 */
536 void *qbman_swp_mc_result(struct qbman_swp *p)
539 #ifdef QBMAN_CHECKING
540 QBMAN_BUG_ON(p->mc.check != swp_mc_can_poll);
542 if ((p->desc.qman_version & QMAN_REV_MASK) >= QMAN_REV_5000
543 && (p->desc.cena_access_mode == qman_cena_fastest_access)) {
/* Mem-back path: a single fixed RR window; completion is signalled by
 * the hardware toggling the valid bit. */
544 ret = qbman_cena_read(&p->sys, QBMAN_CENA_SWP_RR_MEM);
545 /* Command completed if the valid bit is toggled */
546 if (p->mr.valid_bit != (ret[0] & QB_VALID_BIT))
548 /* Remove the valid-bit -
549 * command completed iff the rest is non-zero
551 verb = ret[0] & ~QB_VALID_BIT;
/* Flip the expected valid bit for the next mem-back command. */
554 p->mr.valid_bit ^= QB_VALID_BIT;
/* Direct path: two RR slots selected by mc.valid_bit; invalidate any
 * stale prefetched copy before reading. */
556 qbman_cena_invalidate_prefetch(&p->sys,
557 QBMAN_CENA_SWP_RR(p->mc.valid_bit));
558 ret = qbman_cena_read(&p->sys,
559 QBMAN_CENA_SWP_RR(p->mc.valid_bit));
560 /* Remove the valid-bit -
561 * command completed iff the rest is non-zero
563 verb = ret[0] & ~QB_VALID_BIT;
566 p->mc.valid_bit ^= QB_VALID_BIT;
568 #ifdef QBMAN_CHECKING
569 p->mc.check = swp_mc_can_start;
/*
 * qbman_swp_mc_result_cinh() - as qbman_swp_mc_result(), but reads the
 * response through the CINH shadow; pairs with qbman_swp_mc_submit_cinh().
 * ('ret'/'verb' declarations and early-return lines elided here.)
 */
574 void *qbman_swp_mc_result_cinh(struct qbman_swp *p)
577 #ifdef QBMAN_CHECKING
578 QBMAN_BUG_ON(p->mc.check != swp_mc_can_poll);
580 ret = qbman_cinh_read_shadow(&p->sys,
581 QBMAN_CENA_SWP_RR(p->mc.valid_bit));
582 /* Remove the valid-bit -
583 * command completed iff the rest is non-zero
585 verb = ret[0] & ~QB_VALID_BIT;
/* Alternate RR slots for the next command. */
588 p->mc.valid_bit ^= QB_VALID_BIT;
589 #ifdef QBMAN_CHECKING
590 p->mc.check = swp_mc_can_start;
599 #define QB_ENQUEUE_CMD_OPTIONS_SHIFT 0
600 enum qb_enqueue_commands {
602 enqueue_response_always = 1,
603 enqueue_rejects_to_fq = 2
606 #define QB_ENQUEUE_CMD_EC_OPTION_MASK 0x3
607 #define QB_ENQUEUE_CMD_ORP_ENABLE_SHIFT 2
608 #define QB_ENQUEUE_CMD_IRQ_ON_DISPATCH_SHIFT 3
609 #define QB_ENQUEUE_CMD_TARGET_TYPE_SHIFT 4
610 #define QB_ENQUEUE_CMD_DCA_PK_SHIFT 6
611 #define QB_ENQUEUE_CMD_DCA_EN_SHIFT 7
612 #define QB_ENQUEUE_CMD_NLIS_SHIFT 14
613 #define QB_ENQUEUE_CMD_IS_NESN_SHIFT 15
/* Zero-initialise an enqueue descriptor before configuration. */
615 void qbman_eq_desc_clear(struct qbman_eq_desc *d)
617 memset(d, 0, sizeof(*d));
/* Configure a non-order-restored enqueue; 'respond_success' selects
 * "response always" vs. "rejections routed to FQ" (branches elided). */
620 void qbman_eq_desc_set_no_orp(struct qbman_eq_desc *d, int respond_success)
622 d->eq.verb &= ~(1 << QB_ENQUEUE_CMD_ORP_ENABLE_SHIFT);
624 d->eq.verb |= enqueue_response_always;
626 d->eq.verb |= enqueue_rejects_to_fq;
/* Configure an order-restoration-point enqueue with 'opr_id'/'seqnum';
 * 'incomplete' sets/clears the NLIS (not-last-in-sequence) bit. */
629 void qbman_eq_desc_set_orp(struct qbman_eq_desc *d, int respond_success,
630 uint16_t opr_id, uint16_t seqnum, int incomplete)
632 d->eq.verb |= 1 << QB_ENQUEUE_CMD_ORP_ENABLE_SHIFT;
634 d->eq.verb |= enqueue_response_always;
636 d->eq.verb |= enqueue_rejects_to_fq;
638 d->eq.orpid = opr_id;
639 d->eq.seqnum = seqnum;
641 d->eq.seqnum |= 1 << QB_ENQUEUE_CMD_NLIS_SHIFT;
643 d->eq.seqnum &= ~(1 << QB_ENQUEUE_CMD_NLIS_SHIFT);
/* ORP "hole" notification: fill a sequence gap without enqueuing a frame
 * (NLIS and NESN both cleared). */
646 void qbman_eq_desc_set_orp_hole(struct qbman_eq_desc *d, uint16_t opr_id,
649 d->eq.verb |= 1 << QB_ENQUEUE_CMD_ORP_ENABLE_SHIFT;
650 d->eq.verb &= ~QB_ENQUEUE_CMD_EC_OPTION_MASK;
651 d->eq.orpid = opr_id;
652 d->eq.seqnum = seqnum;
653 d->eq.seqnum &= ~(1 << QB_ENQUEUE_CMD_NLIS_SHIFT);
654 d->eq.seqnum &= ~(1 << QB_ENQUEUE_CMD_IS_NESN_SHIFT);
/* ORP NESN (advance next-expected-sequence-number) notification. */
657 void qbman_eq_desc_set_orp_nesn(struct qbman_eq_desc *d, uint16_t opr_id,
660 d->eq.verb |= 1 << QB_ENQUEUE_CMD_ORP_ENABLE_SHIFT;
661 d->eq.verb &= ~QB_ENQUEUE_CMD_EC_OPTION_MASK;
662 d->eq.orpid = opr_id;
663 d->eq.seqnum = seqnum;
664 d->eq.seqnum &= ~(1 << QB_ENQUEUE_CMD_NLIS_SHIFT);
665 d->eq.seqnum |= 1 << QB_ENQUEUE_CMD_IS_NESN_SHIFT;
/* Set the IOVA where the enqueue response is to be written. */
668 void qbman_eq_desc_set_response(struct qbman_eq_desc *d,
669 dma_addr_t storage_phys,
672 d->eq.rsp_addr = storage_phys;
/* Set the caller token echoed back in the enqueue response (body elided). */
676 void qbman_eq_desc_set_token(struct qbman_eq_desc *d, uint8_t token)
/* Target a frame queue directly (fqid assignment elided in this extract). */
681 void qbman_eq_desc_set_fq(struct qbman_eq_desc *d, uint32_t fqid)
683 d->eq.verb &= ~(1 << QB_ENQUEUE_CMD_TARGET_TYPE_SHIFT);
/* Target a queuing destination: qdid + bin + priority. */
687 void qbman_eq_desc_set_qd(struct qbman_eq_desc *d, uint32_t qdid,
688 uint16_t qd_bin, uint8_t qd_prio)
690 d->eq.verb |= 1 << QB_ENQUEUE_CMD_TARGET_TYPE_SHIFT;
692 d->eq.qdbin = qd_bin;
693 d->eq.qpri = qd_prio;
/* Enable/disable interrupt-on-dispatch for this enqueue. */
696 void qbman_eq_desc_set_eqdi(struct qbman_eq_desc *d, int enable)
699 d->eq.verb |= 1 << QB_ENQUEUE_CMD_IRQ_ON_DISPATCH_SHIFT;
701 d->eq.verb &= ~(1 << QB_ENQUEUE_CMD_IRQ_ON_DISPATCH_SHIFT);
/* Configure DQRR consumption acknowledgement (DCA): consume entry
 * 'dqrr_idx' on enqueue, optionally parking the FQ. */
704 void qbman_eq_desc_set_dca(struct qbman_eq_desc *d, int enable,
705 uint8_t dqrr_idx, int park)
708 d->eq.dca = dqrr_idx;
710 d->eq.dca |= 1 << QB_ENQUEUE_CMD_DCA_PK_SHIFT;
712 d->eq.dca &= ~(1 << QB_ENQUEUE_CMD_DCA_PK_SHIFT);
713 d->eq.dca |= 1 << QB_ENQUEUE_CMD_DCA_EN_SHIFT;
715 d->eq.dca &= ~(1 << QB_ENQUEUE_CMD_DCA_EN_SHIFT);
719 #define EQAR_IDX(eqar) ((eqar) & 0x1f)
720 #define EQAR_VB(eqar) ((eqar) & 0x80)
721 #define EQAR_SUCCESS(eqar) ((eqar) & 0x100)
/* Ring the per-entry EQCR array-mode doorbell for slot 'idx'; slots
 * beyond the first register bank go through EQCR_AM_RT2 (the selecting
 * branch and written value are elided in this extract). */
723 static inline void qbman_write_eqcr_am_rt_register(struct qbman_swp *p,
727 qbman_cinh_write(&p->sys, QBMAN_CINH_SWP_EQCR_AM_RT + idx * 4,
730 qbman_cinh_write(&p->sys, QBMAN_CINH_SWP_EQCR_AM_RT2 +
/* Byte-wise copy through a volatile destination - used for CINH windows
 * where wide or reordered stores must be avoided ('i' declaration and
 * the loop body are elided in this extract). */
735 static void memcpy_byte_by_byte(void *to, const void *from, size_t n)
737 const uint8_t *src = from;
738 volatile uint8_t *dest = to;
741 for (i = 0; i < n; i++)
/*
 * Array-mode enqueue, direct CENA access: claim a slot via the EQAR
 * register, copy descriptor + frame descriptor into the EQCR entry, then
 * publish by writing the verb byte with the EQAR-provided valid bit.
 * Returns -EBUSY when no slot is available (error/return lines elided).
 */
746 static int qbman_swp_enqueue_array_mode_direct(struct qbman_swp *s,
747 const struct qbman_eq_desc *d,
748 const struct qbman_fd *fd)
751 const uint32_t *cl = qb_cl(d);
752 uint32_t eqar = qbman_cinh_read(&s->sys, QBMAN_CINH_SWP_EQAR);
754 pr_debug("EQAR=%08x\n", eqar);
755 if (!EQAR_SUCCESS(eqar))
757 p = qbman_cena_write_start_wo_shadow(&s->sys,
758 QBMAN_CENA_SWP_EQCR(EQAR_IDX(eqar)));
/* Words 1..7 carry the descriptor (28 bytes); words 8.. carry the FD. */
759 memcpy(&p[1], &cl[1], 28);
760 memcpy(&p[8], fd, sizeof(*fd));
762 /* Set the verb byte, have to substitute in the valid-bit */
764 p[0] = cl[0] | EQAR_VB(eqar);
765 qbman_cena_write_complete_wo_shadow(&s->sys,
766 QBMAN_CENA_SWP_EQCR(EQAR_IDX(eqar)));
/*
 * Array-mode enqueue, mem-back variant: same fill sequence, but the
 * entry is published by ringing the per-slot EQCR_AM_RT doorbell.
 */
769 static int qbman_swp_enqueue_array_mode_mem_back(struct qbman_swp *s,
770 const struct qbman_eq_desc *d,
771 const struct qbman_fd *fd)
774 const uint32_t *cl = qb_cl(d);
775 uint32_t eqar = qbman_cinh_read(&s->sys, QBMAN_CINH_SWP_EQAR);
777 pr_debug("EQAR=%08x\n", eqar);
778 if (!EQAR_SUCCESS(eqar))
780 p = qbman_cena_write_start_wo_shadow(&s->sys,
781 QBMAN_CENA_SWP_EQCR(EQAR_IDX(eqar)));
782 memcpy(&p[1], &cl[1], 28);
783 memcpy(&p[8], fd, sizeof(*fd));
785 /* Set the verb byte, have to substitute in the valid-bit */
786 p[0] = cl[0] | EQAR_VB(eqar);
788 qbman_write_eqcr_am_rt_register(s, EQAR_IDX(eqar));
/* Dispatch through the pointer installed by qbman_swp_init(). */
792 static inline int qbman_swp_enqueue_array_mode(struct qbman_swp *s,
793 const struct qbman_eq_desc *d,
794 const struct qbman_fd *fd)
796 return qbman_swp_enqueue_array_mode_ptr(s, d, fd);
/*
 * Ring-mode enqueue, direct CENA access: if the cached 'available' count
 * is exhausted, refresh the consumer index from hardware; then fill the
 * EQCR entry at the producer index and publish via the verb/valid-bit.
 * Returns -EBUSY when the ring is full (return lines elided).
 */
799 static int qbman_swp_enqueue_ring_mode_direct(struct qbman_swp *s,
800 const struct qbman_eq_desc *d,
801 const struct qbman_fd *fd)
804 const uint32_t *cl = qb_cl(d);
805 uint32_t eqcr_ci, full_mask, half_mask;
/* half_mask indexes the ring; full_mask additionally keeps the wrap bit. */
807 half_mask = (s->eqcr.pi_ci_mask>>1);
808 full_mask = s->eqcr.pi_ci_mask;
809 if (!s->eqcr.available) {
810 eqcr_ci = s->eqcr.ci;
811 s->eqcr.ci = qbman_cena_read_reg(&s->sys,
812 QBMAN_CENA_SWP_EQCR_CI) & full_mask;
813 s->eqcr.available = qm_cyc_diff(s->eqcr.pi_ring_size,
814 eqcr_ci, s->eqcr.ci);
815 if (!s->eqcr.available)
819 p = qbman_cena_write_start_wo_shadow(&s->sys,
820 QBMAN_CENA_SWP_EQCR(s->eqcr.pi & half_mask));
821 memcpy(&p[1], &cl[1], 28);
822 memcpy(&p[8], fd, sizeof(*fd));
825 /* Set the verb byte, have to substitute in the valid-bit */
826 p[0] = cl[0] | s->eqcr.pi_vb;
827 qbman_cena_write_complete_wo_shadow(&s->sys,
828 QBMAN_CENA_SWP_EQCR(s->eqcr.pi & half_mask));
830 s->eqcr.pi &= full_mask;
/* Toggle the valid bit each time the producer index wraps the ring. */
832 if (!(s->eqcr.pi & half_mask))
833 s->eqcr.pi_vb ^= QB_VALID_BIT;
/*
 * Ring-mode enqueue for LS1080A: consumer index is read through the
 * CINH window (erratum workaround) and the entry is written through the
 * CINH mapping; otherwise identical to the direct variant.
 */
838 static int qbman_swp_enqueue_ring_mode_cinh_read_direct(
840 const struct qbman_eq_desc *d,
841 const struct qbman_fd *fd)
844 const uint32_t *cl = qb_cl(d);
845 uint32_t eqcr_ci, full_mask, half_mask;
847 half_mask = (s->eqcr.pi_ci_mask>>1);
848 full_mask = s->eqcr.pi_ci_mask;
849 if (!s->eqcr.available) {
850 eqcr_ci = s->eqcr.ci;
851 s->eqcr.ci = qbman_cinh_read(&s->sys,
852 QBMAN_CINH_SWP_EQCR_CI) & full_mask;
853 s->eqcr.available = qm_cyc_diff(s->eqcr.pi_ring_size,
854 eqcr_ci, s->eqcr.ci);
855 if (!s->eqcr.available)
859 p = qbman_cinh_write_start_wo_shadow(&s->sys,
860 QBMAN_CENA_SWP_EQCR(s->eqcr.pi & half_mask));
861 memcpy(&p[1], &cl[1], 28);
862 memcpy(&p[8], fd, sizeof(*fd));
865 /* Set the verb byte, have to substitute in the valid-bit */
866 p[0] = cl[0] | s->eqcr.pi_vb;
868 s->eqcr.pi &= full_mask;
870 if (!(s->eqcr.pi & half_mask))
871 s->eqcr.pi_vb ^= QB_VALID_BIT;
/*
 * Ring-mode enqueue, fully CINH path: both the CI read and the entry
 * write go through the cache-inhibited window, and the payload copy is
 * byte-by-byte (memcpy_byte_by_byte) to keep device stores ordered.
 */
876 static int qbman_swp_enqueue_ring_mode_cinh_direct(
878 const struct qbman_eq_desc *d,
879 const struct qbman_fd *fd)
882 const uint32_t *cl = qb_cl(d);
883 uint32_t eqcr_ci, full_mask, half_mask;
885 half_mask = (s->eqcr.pi_ci_mask>>1);
886 full_mask = s->eqcr.pi_ci_mask;
887 if (!s->eqcr.available) {
888 eqcr_ci = s->eqcr.ci;
889 s->eqcr.ci = qbman_cinh_read(&s->sys,
890 QBMAN_CINH_SWP_EQCR_CI) & full_mask;
891 s->eqcr.available = qm_cyc_diff(s->eqcr.pi_ring_size,
892 eqcr_ci, s->eqcr.ci);
893 if (!s->eqcr.available)
897 p = qbman_cinh_write_start_wo_shadow(&s->sys,
898 QBMAN_CENA_SWP_EQCR(s->eqcr.pi & half_mask));
899 memcpy_byte_by_byte(&p[1], &cl[1], 28);
900 memcpy_byte_by_byte(&p[8], fd, sizeof(*fd));
903 /* Set the verb byte, have to substitute in the valid-bit */
904 p[0] = cl[0] | s->eqcr.pi_vb;
906 s->eqcr.pi &= full_mask;
908 if (!(s->eqcr.pi & half_mask))
909 s->eqcr.pi_vb ^= QB_VALID_BIT;
/*
 * Ring-mode enqueue, mem-back variant (rev >= 5000 fast access): CI comes
 * from the mem-back CI register, and the entry is published by writing
 * the new producer index (with RT bit and valid bit) to EQCR_PI.
 */
914 static int qbman_swp_enqueue_ring_mode_mem_back(struct qbman_swp *s,
915 const struct qbman_eq_desc *d,
916 const struct qbman_fd *fd)
919 const uint32_t *cl = qb_cl(d);
920 uint32_t eqcr_ci, full_mask, half_mask;
922 half_mask = (s->eqcr.pi_ci_mask>>1);
923 full_mask = s->eqcr.pi_ci_mask;
924 if (!s->eqcr.available) {
925 eqcr_ci = s->eqcr.ci;
926 s->eqcr.ci = qbman_cena_read_reg(&s->sys,
927 QBMAN_CENA_SWP_EQCR_CI_MEMBACK) & full_mask;
928 s->eqcr.available = qm_cyc_diff(s->eqcr.pi_ring_size,
929 eqcr_ci, s->eqcr.ci);
930 if (!s->eqcr.available)
934 p = qbman_cena_write_start_wo_shadow(&s->sys,
935 QBMAN_CENA_SWP_EQCR(s->eqcr.pi & half_mask));
936 memcpy(&p[1], &cl[1], 28);
937 memcpy(&p[8], fd, sizeof(*fd));
939 /* Set the verb byte, have to substitute in the valid-bit */
940 p[0] = cl[0] | s->eqcr.pi_vb;
942 s->eqcr.pi &= full_mask;
944 if (!(s->eqcr.pi & half_mask))
945 s->eqcr.pi_vb ^= QB_VALID_BIT;
/* Doorbell: publish the updated PI with the real-time and valid bits. */
947 qbman_cinh_write(&s->sys, QBMAN_CINH_SWP_EQCR_PI,
948 (QB_RT_BIT)|(s->eqcr.pi)|s->eqcr.pi_vb);
/* Ring-mode dispatcher: normally uses the pointer installed by
 * qbman_swp_init(); the second return suggests a stash-off fallback to
 * the pure-CINH variant (selecting condition elided in this extract). */
952 static int qbman_swp_enqueue_ring_mode(struct qbman_swp *s,
953 const struct qbman_eq_desc *d,
954 const struct qbman_fd *fd)
957 return qbman_swp_enqueue_ring_mode_ptr(s, d, fd);
959 return qbman_swp_enqueue_ring_mode_cinh_direct(s, d, fd);
/*
 * qbman_swp_enqueue() - enqueue a single frame descriptor per descriptor
 * 'd', choosing array mode or ring mode from the portal configuration.
 */
962 int qbman_swp_enqueue(struct qbman_swp *s, const struct qbman_eq_desc *d,
963 const struct qbman_fd *fd)
965 if (s->sys.eqcr_mode == qman_eqcr_vb_array)
966 return qbman_swp_enqueue_array_mode(s, d, fd);
967 else /* Use ring mode by default */
968 return qbman_swp_enqueue_ring_mode(s, d, fd);
/*
 * Batch ring-mode enqueue, direct CENA access: enqueue up to 'num_frames'
 * FDs sharing one descriptor. Three passes over the claimed entries:
 * (1) copy descriptor+FD, (2) write verb/valid-bit and optional per-frame
 * DCA from 'flags', (3) flush the cachelines. Returns the number actually
 * enqueued (may be less than requested). Some declarations and loop
 * increments are elided in this extract.
 */
971 static int qbman_swp_enqueue_multiple_direct(struct qbman_swp *s,
972 const struct qbman_eq_desc *d,
973 const struct qbman_fd *fd,
978 const uint32_t *cl = qb_cl(d);
979 uint32_t eqcr_ci, eqcr_pi, half_mask, full_mask;
980 int i, num_enqueued = 0;
983 half_mask = (s->eqcr.pi_ci_mask>>1);
984 full_mask = s->eqcr.pi_ci_mask;
985 if (!s->eqcr.available) {
986 eqcr_ci = s->eqcr.ci;
987 s->eqcr.ci = qbman_cena_read_reg(&s->sys,
988 QBMAN_CENA_SWP_EQCR_CI) & full_mask;
989 s->eqcr.available = qm_cyc_diff(s->eqcr.pi_ring_size,
990 eqcr_ci, s->eqcr.ci);
991 if (!s->eqcr.available)
995 eqcr_pi = s->eqcr.pi;
996 num_enqueued = (s->eqcr.available < num_frames) ?
997 s->eqcr.available : num_frames;
998 s->eqcr.available -= num_enqueued;
999 /* Fill in the EQCR ring */
1000 for (i = 0; i < num_enqueued; i++) {
1001 p = qbman_cena_write_start_wo_shadow(&s->sys,
1002 QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
1003 memcpy(&p[1], &cl[1], 28);
1004 memcpy(&p[8], &fd[i], sizeof(*fd));
1010 /* Set the verb byte, have to substitute in the valid-bit */
1011 eqcr_pi = s->eqcr.pi;
1012 for (i = 0; i < num_enqueued; i++) {
1013 p = qbman_cena_write_start_wo_shadow(&s->sys,
1014 QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
1015 p[0] = cl[0] | s->eqcr.pi_vb;
/* Optional per-frame DQRR consumption ack, index taken from flags[i]. */
1016 if (flags && (flags[i] & QBMAN_ENQUEUE_FLAG_DCA)) {
1017 struct qbman_eq_desc *d = (struct qbman_eq_desc *)p;
1019 d->eq.dca = (1 << QB_ENQUEUE_CMD_DCA_EN_SHIFT) |
1020 ((flags[i]) & QBMAN_EQCR_DCA_IDXMASK);
1023 if (!(eqcr_pi & half_mask))
1024 s->eqcr.pi_vb ^= QB_VALID_BIT;
1027 /* Flush all the cacheline without load/store in between */
1028 eqcr_pi = s->eqcr.pi;
1029 addr_cena = (size_t)s->sys.addr_cena;
1030 for (i = 0; i < num_enqueued; i++) {
1031 dcbf((uintptr_t)(addr_cena +
1032 QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask)));
1035 s->eqcr.pi = eqcr_pi & full_mask;
1037 return num_enqueued;
/*
 * Batch ring-mode enqueue for LS1080A: identical to the direct variant
 * except the EQCR consumer index is read through the CINH window
 * (erratum workaround). Entries are still written via CENA and flushed.
 */
1040 static int qbman_swp_enqueue_multiple_cinh_read_direct(
1041 struct qbman_swp *s,
1042 const struct qbman_eq_desc *d,
1043 const struct qbman_fd *fd,
1048 const uint32_t *cl = qb_cl(d);
1049 uint32_t eqcr_ci, eqcr_pi, half_mask, full_mask;
1050 int i, num_enqueued = 0;
1053 half_mask = (s->eqcr.pi_ci_mask>>1);
1054 full_mask = s->eqcr.pi_ci_mask;
1055 if (!s->eqcr.available) {
1056 eqcr_ci = s->eqcr.ci;
1057 s->eqcr.ci = qbman_cinh_read(&s->sys,
1058 QBMAN_CINH_SWP_EQCR_CI) & full_mask;
1059 s->eqcr.available = qm_cyc_diff(s->eqcr.pi_ring_size,
1060 eqcr_ci, s->eqcr.ci);
1061 if (!s->eqcr.available)
1065 eqcr_pi = s->eqcr.pi;
1066 num_enqueued = (s->eqcr.available < num_frames) ?
1067 s->eqcr.available : num_frames;
1068 s->eqcr.available -= num_enqueued;
1069 /* Fill in the EQCR ring */
1070 for (i = 0; i < num_enqueued; i++) {
1071 p = qbman_cena_write_start_wo_shadow(&s->sys,
1072 QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
1073 memcpy(&p[1], &cl[1], 28);
1074 memcpy(&p[8], &fd[i], sizeof(*fd));
1080 /* Set the verb byte, have to substitute in the valid-bit */
1081 eqcr_pi = s->eqcr.pi;
1082 for (i = 0; i < num_enqueued; i++) {
1083 p = qbman_cena_write_start_wo_shadow(&s->sys,
1084 QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
1085 p[0] = cl[0] | s->eqcr.pi_vb;
1086 if (flags && (flags[i] & QBMAN_ENQUEUE_FLAG_DCA)) {
1087 struct qbman_eq_desc *d = (struct qbman_eq_desc *)p;
1089 d->eq.dca = (1 << QB_ENQUEUE_CMD_DCA_EN_SHIFT) |
1090 ((flags[i]) & QBMAN_EQCR_DCA_IDXMASK);
1093 if (!(eqcr_pi & half_mask))
1094 s->eqcr.pi_vb ^= QB_VALID_BIT;
1097 /* Flush all the cacheline without load/store in between */
1098 eqcr_pi = s->eqcr.pi;
1099 addr_cena = (size_t)s->sys.addr_cena;
1100 for (i = 0; i < num_enqueued; i++) {
1102 QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
1105 s->eqcr.pi = eqcr_pi & full_mask;
1107 return num_enqueued;
/*
 * Batch ring-mode enqueue, fully CINH path: CI read, entry writes and the
 * payload copies all go through the cache-inhibited window, using the
 * volatile byte-wise copier; no cacheline flush pass is needed.
 */
1110 static int qbman_swp_enqueue_multiple_cinh_direct(
1111 struct qbman_swp *s,
1112 const struct qbman_eq_desc *d,
1113 const struct qbman_fd *fd,
1118 const uint32_t *cl = qb_cl(d);
1119 uint32_t eqcr_ci, eqcr_pi, half_mask, full_mask;
1120 int i, num_enqueued = 0;
1122 half_mask = (s->eqcr.pi_ci_mask>>1);
1123 full_mask = s->eqcr.pi_ci_mask;
1124 if (!s->eqcr.available) {
1125 eqcr_ci = s->eqcr.ci;
1126 s->eqcr.ci = qbman_cinh_read(&s->sys,
1127 QBMAN_CINH_SWP_EQCR_CI) & full_mask;
1128 s->eqcr.available = qm_cyc_diff(s->eqcr.pi_ring_size,
1129 eqcr_ci, s->eqcr.ci);
1130 if (!s->eqcr.available)
1134 eqcr_pi = s->eqcr.pi;
1135 num_enqueued = (s->eqcr.available < num_frames) ?
1136 s->eqcr.available : num_frames;
1137 s->eqcr.available -= num_enqueued;
1138 /* Fill in the EQCR ring */
1139 for (i = 0; i < num_enqueued; i++) {
1140 p = qbman_cinh_write_start_wo_shadow(&s->sys,
1141 QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
1142 memcpy_byte_by_byte(&p[1], &cl[1], 28);
1143 memcpy_byte_by_byte(&p[8], &fd[i], sizeof(*fd));
1149 /* Set the verb byte, have to substitute in the valid-bit */
1150 eqcr_pi = s->eqcr.pi;
1151 for (i = 0; i < num_enqueued; i++) {
1152 p = qbman_cinh_write_start_wo_shadow(&s->sys,
1153 QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
1154 p[0] = cl[0] | s->eqcr.pi_vb;
1155 if (flags && (flags[i] & QBMAN_ENQUEUE_FLAG_DCA)) {
1156 struct qbman_eq_desc *d = (struct qbman_eq_desc *)p;
1158 d->eq.dca = (1 << QB_ENQUEUE_CMD_DCA_EN_SHIFT) |
1159 ((flags[i]) & QBMAN_EQCR_DCA_IDXMASK);
1162 if (!(eqcr_pi & half_mask))
1163 s->eqcr.pi_vb ^= QB_VALID_BIT;
1166 s->eqcr.pi = eqcr_pi & full_mask;
1168 return num_enqueued;
/*
 * Batch ring-mode enqueue, mem-back variant: single fill pass (verb and
 * optional DCA written together with the payload), then one EQCR_PI
 * doorbell write with the RT and valid bits publishes the whole batch.
 */
1171 static int qbman_swp_enqueue_multiple_mem_back(struct qbman_swp *s,
1172 const struct qbman_eq_desc *d,
1173 const struct qbman_fd *fd,
1178 const uint32_t *cl = qb_cl(d);
1179 uint32_t eqcr_ci, eqcr_pi, half_mask, full_mask;
1180 int i, num_enqueued = 0;
1182 half_mask = (s->eqcr.pi_ci_mask>>1);
1183 full_mask = s->eqcr.pi_ci_mask;
1184 if (!s->eqcr.available) {
1185 eqcr_ci = s->eqcr.ci;
1186 s->eqcr.ci = qbman_cena_read_reg(&s->sys,
1187 QBMAN_CENA_SWP_EQCR_CI_MEMBACK) & full_mask;
1188 s->eqcr.available = qm_cyc_diff(s->eqcr.pi_ring_size,
1189 eqcr_ci, s->eqcr.ci);
1190 if (!s->eqcr.available)
1194 eqcr_pi = s->eqcr.pi;
1195 num_enqueued = (s->eqcr.available < num_frames) ?
1196 s->eqcr.available : num_frames;
1197 s->eqcr.available -= num_enqueued;
1198 /* Fill in the EQCR ring */
1199 for (i = 0; i < num_enqueued; i++) {
1200 p = qbman_cena_write_start_wo_shadow(&s->sys,
1201 QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
1202 memcpy(&p[1], &cl[1], 28);
1203 memcpy(&p[8], &fd[i], sizeof(*fd));
1204 if (flags && (flags[i] & QBMAN_ENQUEUE_FLAG_DCA)) {
1205 struct qbman_eq_desc *d = (struct qbman_eq_desc *)p;
1207 d->eq.dca = (1 << QB_ENQUEUE_CMD_DCA_EN_SHIFT) |
1208 ((flags[i]) & QBMAN_EQCR_DCA_IDXMASK);
1211 p[0] = cl[0] | s->eqcr.pi_vb;
1213 if (!(eqcr_pi & half_mask))
1214 s->eqcr.pi_vb ^= QB_VALID_BIT;
1216 s->eqcr.pi = eqcr_pi & full_mask;
/* One doorbell for the whole batch. */
1219 qbman_cinh_write(&s->sys, QBMAN_CINH_SWP_EQCR_PI,
1220 (QB_RT_BIT)|(s->eqcr.pi)|s->eqcr.pi_vb);
1221 return num_enqueued;
1224 int qbman_swp_enqueue_multiple(struct qbman_swp *s,
1225 const struct qbman_eq_desc *d,
1226 const struct qbman_fd *fd,
1231 return qbman_swp_enqueue_multiple_ptr(s, d, fd, flags,
1234 return qbman_swp_enqueue_multiple_cinh_direct(s, d, fd, flags,
1238 static int qbman_swp_enqueue_multiple_fd_direct(struct qbman_swp *s,
1239 const struct qbman_eq_desc *d,
1240 struct qbman_fd **fd,
1245 const uint32_t *cl = qb_cl(d);
1246 uint32_t eqcr_ci, eqcr_pi, half_mask, full_mask;
1247 int i, num_enqueued = 0;
1250 half_mask = (s->eqcr.pi_ci_mask>>1);
1251 full_mask = s->eqcr.pi_ci_mask;
1252 if (!s->eqcr.available) {
1253 eqcr_ci = s->eqcr.ci;
1254 s->eqcr.ci = qbman_cena_read_reg(&s->sys,
1255 QBMAN_CENA_SWP_EQCR_CI) & full_mask;
1256 s->eqcr.available = qm_cyc_diff(s->eqcr.pi_ring_size,
1257 eqcr_ci, s->eqcr.ci);
1258 if (!s->eqcr.available)
1262 eqcr_pi = s->eqcr.pi;
1263 num_enqueued = (s->eqcr.available < num_frames) ?
1264 s->eqcr.available : num_frames;
1265 s->eqcr.available -= num_enqueued;
1266 /* Fill in the EQCR ring */
1267 for (i = 0; i < num_enqueued; i++) {
1268 p = qbman_cena_write_start_wo_shadow(&s->sys,
1269 QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
1270 memcpy(&p[1], &cl[1], 28);
1271 memcpy(&p[8], fd[i], sizeof(struct qbman_fd));
1277 /* Set the verb byte, have to substitute in the valid-bit */
1278 eqcr_pi = s->eqcr.pi;
1279 for (i = 0; i < num_enqueued; i++) {
1280 p = qbman_cena_write_start_wo_shadow(&s->sys,
1281 QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
1282 p[0] = cl[0] | s->eqcr.pi_vb;
1283 if (flags && (flags[i] & QBMAN_ENQUEUE_FLAG_DCA)) {
1284 struct qbman_eq_desc *d = (struct qbman_eq_desc *)p;
1286 d->eq.dca = (1 << QB_ENQUEUE_CMD_DCA_EN_SHIFT) |
1287 ((flags[i]) & QBMAN_EQCR_DCA_IDXMASK);
1290 if (!(eqcr_pi & half_mask))
1291 s->eqcr.pi_vb ^= QB_VALID_BIT;
1294 /* Flush all the cacheline without load/store in between */
1295 eqcr_pi = s->eqcr.pi;
1296 addr_cena = (size_t)s->sys.addr_cena;
1297 for (i = 0; i < num_enqueued; i++) {
1299 QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
1302 s->eqcr.pi = eqcr_pi & full_mask;
1304 return num_enqueued;
1307 static int qbman_swp_enqueue_multiple_fd_cinh_read_direct(
1308 struct qbman_swp *s,
1309 const struct qbman_eq_desc *d,
1310 struct qbman_fd **fd,
1315 const uint32_t *cl = qb_cl(d);
1316 uint32_t eqcr_ci, eqcr_pi, half_mask, full_mask;
1317 int i, num_enqueued = 0;
1320 half_mask = (s->eqcr.pi_ci_mask>>1);
1321 full_mask = s->eqcr.pi_ci_mask;
1322 if (!s->eqcr.available) {
1323 eqcr_ci = s->eqcr.ci;
1324 s->eqcr.ci = qbman_cinh_read(&s->sys,
1325 QBMAN_CINH_SWP_EQCR_CI) & full_mask;
1326 s->eqcr.available = qm_cyc_diff(s->eqcr.pi_ring_size,
1327 eqcr_ci, s->eqcr.ci);
1328 if (!s->eqcr.available)
1332 eqcr_pi = s->eqcr.pi;
1333 num_enqueued = (s->eqcr.available < num_frames) ?
1334 s->eqcr.available : num_frames;
1335 s->eqcr.available -= num_enqueued;
1336 /* Fill in the EQCR ring */
1337 for (i = 0; i < num_enqueued; i++) {
1338 p = qbman_cena_write_start_wo_shadow(&s->sys,
1339 QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
1340 memcpy(&p[1], &cl[1], 28);
1341 memcpy(&p[8], fd[i], sizeof(struct qbman_fd));
1347 /* Set the verb byte, have to substitute in the valid-bit */
1348 eqcr_pi = s->eqcr.pi;
1349 for (i = 0; i < num_enqueued; i++) {
1350 p = qbman_cena_write_start_wo_shadow(&s->sys,
1351 QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
1352 p[0] = cl[0] | s->eqcr.pi_vb;
1353 if (flags && (flags[i] & QBMAN_ENQUEUE_FLAG_DCA)) {
1354 struct qbman_eq_desc *d = (struct qbman_eq_desc *)p;
1356 d->eq.dca = (1 << QB_ENQUEUE_CMD_DCA_EN_SHIFT) |
1357 ((flags[i]) & QBMAN_EQCR_DCA_IDXMASK);
1360 if (!(eqcr_pi & half_mask))
1361 s->eqcr.pi_vb ^= QB_VALID_BIT;
1364 /* Flush all the cacheline without load/store in between */
1365 eqcr_pi = s->eqcr.pi;
1366 addr_cena = (size_t)s->sys.addr_cena;
1367 for (i = 0; i < num_enqueued; i++) {
1369 QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
1372 s->eqcr.pi = eqcr_pi & full_mask;
1374 return num_enqueued;
1377 static int qbman_swp_enqueue_multiple_fd_cinh_direct(
1378 struct qbman_swp *s,
1379 const struct qbman_eq_desc *d,
1380 struct qbman_fd **fd,
1385 const uint32_t *cl = qb_cl(d);
1386 uint32_t eqcr_ci, eqcr_pi, half_mask, full_mask;
1387 int i, num_enqueued = 0;
1389 half_mask = (s->eqcr.pi_ci_mask>>1);
1390 full_mask = s->eqcr.pi_ci_mask;
1391 if (!s->eqcr.available) {
1392 eqcr_ci = s->eqcr.ci;
1393 s->eqcr.ci = qbman_cinh_read(&s->sys,
1394 QBMAN_CINH_SWP_EQCR_CI) & full_mask;
1395 s->eqcr.available = qm_cyc_diff(s->eqcr.pi_ring_size,
1396 eqcr_ci, s->eqcr.ci);
1397 if (!s->eqcr.available)
1401 eqcr_pi = s->eqcr.pi;
1402 num_enqueued = (s->eqcr.available < num_frames) ?
1403 s->eqcr.available : num_frames;
1404 s->eqcr.available -= num_enqueued;
1405 /* Fill in the EQCR ring */
1406 for (i = 0; i < num_enqueued; i++) {
1407 p = qbman_cinh_write_start_wo_shadow(&s->sys,
1408 QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
1409 memcpy_byte_by_byte(&p[1], &cl[1], 28);
1410 memcpy_byte_by_byte(&p[8], fd[i], sizeof(struct qbman_fd));
1416 /* Set the verb byte, have to substitute in the valid-bit */
1417 eqcr_pi = s->eqcr.pi;
1418 for (i = 0; i < num_enqueued; i++) {
1419 p = qbman_cinh_write_start_wo_shadow(&s->sys,
1420 QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
1421 p[0] = cl[0] | s->eqcr.pi_vb;
1422 if (flags && (flags[i] & QBMAN_ENQUEUE_FLAG_DCA)) {
1423 struct qbman_eq_desc *d = (struct qbman_eq_desc *)p;
1425 d->eq.dca = (1 << QB_ENQUEUE_CMD_DCA_EN_SHIFT) |
1426 ((flags[i]) & QBMAN_EQCR_DCA_IDXMASK);
1429 if (!(eqcr_pi & half_mask))
1430 s->eqcr.pi_vb ^= QB_VALID_BIT;
1433 s->eqcr.pi = eqcr_pi & full_mask;
1435 return num_enqueued;
1438 static int qbman_swp_enqueue_multiple_fd_mem_back(struct qbman_swp *s,
1439 const struct qbman_eq_desc *d,
1440 struct qbman_fd **fd,
1445 const uint32_t *cl = qb_cl(d);
1446 uint32_t eqcr_ci, eqcr_pi, half_mask, full_mask;
1447 int i, num_enqueued = 0;
1449 half_mask = (s->eqcr.pi_ci_mask>>1);
1450 full_mask = s->eqcr.pi_ci_mask;
1451 if (!s->eqcr.available) {
1452 eqcr_ci = s->eqcr.ci;
1453 s->eqcr.ci = qbman_cena_read_reg(&s->sys,
1454 QBMAN_CENA_SWP_EQCR_CI_MEMBACK) & full_mask;
1455 s->eqcr.available = qm_cyc_diff(s->eqcr.pi_ring_size,
1456 eqcr_ci, s->eqcr.ci);
1457 if (!s->eqcr.available)
1461 eqcr_pi = s->eqcr.pi;
1462 num_enqueued = (s->eqcr.available < num_frames) ?
1463 s->eqcr.available : num_frames;
1464 s->eqcr.available -= num_enqueued;
1465 /* Fill in the EQCR ring */
1466 for (i = 0; i < num_enqueued; i++) {
1467 p = qbman_cena_write_start_wo_shadow(&s->sys,
1468 QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
1469 memcpy(&p[1], &cl[1], 28);
1470 memcpy(&p[8], fd[i], sizeof(struct qbman_fd));
1474 /* Set the verb byte, have to substitute in the valid-bit */
1475 eqcr_pi = s->eqcr.pi;
1476 for (i = 0; i < num_enqueued; i++) {
1477 p = qbman_cena_write_start_wo_shadow(&s->sys,
1478 QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
1479 p[0] = cl[0] | s->eqcr.pi_vb;
1480 if (flags && (flags[i] & QBMAN_ENQUEUE_FLAG_DCA)) {
1481 struct qbman_eq_desc *d = (struct qbman_eq_desc *)p;
1483 d->eq.dca = (1 << QB_ENQUEUE_CMD_DCA_EN_SHIFT) |
1484 ((flags[i]) & QBMAN_EQCR_DCA_IDXMASK);
1487 if (!(eqcr_pi & half_mask))
1488 s->eqcr.pi_vb ^= QB_VALID_BIT;
1490 s->eqcr.pi = eqcr_pi & full_mask;
1493 qbman_cinh_write(&s->sys, QBMAN_CINH_SWP_EQCR_PI,
1494 (QB_RT_BIT)|(s->eqcr.pi)|s->eqcr.pi_vb);
1495 return num_enqueued;
1498 int qbman_swp_enqueue_multiple_fd(struct qbman_swp *s,
1499 const struct qbman_eq_desc *d,
1500 struct qbman_fd **fd,
1505 return qbman_swp_enqueue_multiple_fd_ptr(s, d, fd, flags,
1508 return qbman_swp_enqueue_multiple_fd_cinh_direct(s, d, fd,
1512 static int qbman_swp_enqueue_multiple_desc_direct(struct qbman_swp *s,
1513 const struct qbman_eq_desc *d,
1514 const struct qbman_fd *fd,
1519 uint32_t eqcr_ci, eqcr_pi, half_mask, full_mask;
1520 int i, num_enqueued = 0;
1523 half_mask = (s->eqcr.pi_ci_mask>>1);
1524 full_mask = s->eqcr.pi_ci_mask;
1525 if (!s->eqcr.available) {
1526 eqcr_ci = s->eqcr.ci;
1527 s->eqcr.ci = qbman_cena_read_reg(&s->sys,
1528 QBMAN_CENA_SWP_EQCR_CI) & full_mask;
1529 s->eqcr.available = qm_cyc_diff(s->eqcr.pi_ring_size,
1530 eqcr_ci, s->eqcr.ci);
1531 if (!s->eqcr.available)
1535 eqcr_pi = s->eqcr.pi;
1536 num_enqueued = (s->eqcr.available < num_frames) ?
1537 s->eqcr.available : num_frames;
1538 s->eqcr.available -= num_enqueued;
1539 /* Fill in the EQCR ring */
1540 for (i = 0; i < num_enqueued; i++) {
1541 p = qbman_cena_write_start_wo_shadow(&s->sys,
1542 QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
1544 memcpy(&p[1], &cl[1], 28);
1545 memcpy(&p[8], &fd[i], sizeof(*fd));
1551 /* Set the verb byte, have to substitute in the valid-bit */
1552 eqcr_pi = s->eqcr.pi;
1553 for (i = 0; i < num_enqueued; i++) {
1554 p = qbman_cena_write_start_wo_shadow(&s->sys,
1555 QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
1557 p[0] = cl[0] | s->eqcr.pi_vb;
1559 if (!(eqcr_pi & half_mask))
1560 s->eqcr.pi_vb ^= QB_VALID_BIT;
1563 /* Flush all the cacheline without load/store in between */
1564 eqcr_pi = s->eqcr.pi;
1565 addr_cena = (size_t)s->sys.addr_cena;
1566 for (i = 0; i < num_enqueued; i++) {
1567 dcbf((uintptr_t)(addr_cena +
1568 QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask)));
1571 s->eqcr.pi = eqcr_pi & full_mask;
1573 return num_enqueued;
1576 static int qbman_swp_enqueue_multiple_desc_cinh_read_direct(
1577 struct qbman_swp *s,
1578 const struct qbman_eq_desc *d,
1579 const struct qbman_fd *fd,
1584 uint32_t eqcr_ci, eqcr_pi, half_mask, full_mask;
1585 int i, num_enqueued = 0;
1588 half_mask = (s->eqcr.pi_ci_mask>>1);
1589 full_mask = s->eqcr.pi_ci_mask;
1590 if (!s->eqcr.available) {
1591 eqcr_ci = s->eqcr.ci;
1592 s->eqcr.ci = qbman_cinh_read(&s->sys,
1593 QBMAN_CINH_SWP_EQCR_CI) & full_mask;
1594 s->eqcr.available = qm_cyc_diff(s->eqcr.pi_ring_size,
1595 eqcr_ci, s->eqcr.ci);
1596 if (!s->eqcr.available)
1600 eqcr_pi = s->eqcr.pi;
1601 num_enqueued = (s->eqcr.available < num_frames) ?
1602 s->eqcr.available : num_frames;
1603 s->eqcr.available -= num_enqueued;
1604 /* Fill in the EQCR ring */
1605 for (i = 0; i < num_enqueued; i++) {
1606 p = qbman_cena_write_start_wo_shadow(&s->sys,
1607 QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
1609 memcpy(&p[1], &cl[1], 28);
1610 memcpy(&p[8], &fd[i], sizeof(*fd));
1616 /* Set the verb byte, have to substitute in the valid-bit */
1617 eqcr_pi = s->eqcr.pi;
1618 for (i = 0; i < num_enqueued; i++) {
1619 p = qbman_cena_write_start_wo_shadow(&s->sys,
1620 QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
1622 p[0] = cl[0] | s->eqcr.pi_vb;
1624 if (!(eqcr_pi & half_mask))
1625 s->eqcr.pi_vb ^= QB_VALID_BIT;
1628 /* Flush all the cacheline without load/store in between */
1629 eqcr_pi = s->eqcr.pi;
1630 addr_cena = (size_t)s->sys.addr_cena;
1631 for (i = 0; i < num_enqueued; i++) {
1633 QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
1636 s->eqcr.pi = eqcr_pi & full_mask;
1638 return num_enqueued;
1641 static int qbman_swp_enqueue_multiple_desc_cinh_direct(
1642 struct qbman_swp *s,
1643 const struct qbman_eq_desc *d,
1644 const struct qbman_fd *fd,
1649 uint32_t eqcr_ci, eqcr_pi, half_mask, full_mask;
1650 int i, num_enqueued = 0;
1652 half_mask = (s->eqcr.pi_ci_mask>>1);
1653 full_mask = s->eqcr.pi_ci_mask;
1654 if (!s->eqcr.available) {
1655 eqcr_ci = s->eqcr.ci;
1656 s->eqcr.ci = qbman_cinh_read(&s->sys,
1657 QBMAN_CINH_SWP_EQCR_CI) & full_mask;
1658 s->eqcr.available = qm_cyc_diff(s->eqcr.pi_ring_size,
1659 eqcr_ci, s->eqcr.ci);
1660 if (!s->eqcr.available)
1664 eqcr_pi = s->eqcr.pi;
1665 num_enqueued = (s->eqcr.available < num_frames) ?
1666 s->eqcr.available : num_frames;
1667 s->eqcr.available -= num_enqueued;
1668 /* Fill in the EQCR ring */
1669 for (i = 0; i < num_enqueued; i++) {
1670 p = qbman_cinh_write_start_wo_shadow(&s->sys,
1671 QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
1673 memcpy_byte_by_byte(&p[1], &cl[1], 28);
1674 memcpy_byte_by_byte(&p[8], &fd[i], sizeof(*fd));
1680 /* Set the verb byte, have to substitute in the valid-bit */
1681 eqcr_pi = s->eqcr.pi;
1682 for (i = 0; i < num_enqueued; i++) {
1683 p = qbman_cinh_write_start_wo_shadow(&s->sys,
1684 QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
1686 p[0] = cl[0] | s->eqcr.pi_vb;
1688 if (!(eqcr_pi & half_mask))
1689 s->eqcr.pi_vb ^= QB_VALID_BIT;
1692 s->eqcr.pi = eqcr_pi & full_mask;
1694 return num_enqueued;
1697 static int qbman_swp_enqueue_multiple_desc_mem_back(struct qbman_swp *s,
1698 const struct qbman_eq_desc *d,
1699 const struct qbman_fd *fd,
1704 uint32_t eqcr_ci, eqcr_pi, half_mask, full_mask;
1705 int i, num_enqueued = 0;
1707 half_mask = (s->eqcr.pi_ci_mask>>1);
1708 full_mask = s->eqcr.pi_ci_mask;
1709 if (!s->eqcr.available) {
1710 eqcr_ci = s->eqcr.ci;
1711 s->eqcr.ci = qbman_cena_read_reg(&s->sys,
1712 QBMAN_CENA_SWP_EQCR_CI_MEMBACK) & full_mask;
1713 s->eqcr.available = qm_cyc_diff(s->eqcr.pi_ring_size,
1714 eqcr_ci, s->eqcr.ci);
1715 if (!s->eqcr.available)
1719 eqcr_pi = s->eqcr.pi;
1720 num_enqueued = (s->eqcr.available < num_frames) ?
1721 s->eqcr.available : num_frames;
1722 s->eqcr.available -= num_enqueued;
1723 /* Fill in the EQCR ring */
1724 for (i = 0; i < num_enqueued; i++) {
1725 p = qbman_cena_write_start_wo_shadow(&s->sys,
1726 QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
1728 memcpy(&p[1], &cl[1], 28);
1729 memcpy(&p[8], &fd[i], sizeof(*fd));
1733 /* Set the verb byte, have to substitute in the valid-bit */
1734 eqcr_pi = s->eqcr.pi;
1735 for (i = 0; i < num_enqueued; i++) {
1736 p = qbman_cena_write_start_wo_shadow(&s->sys,
1737 QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
1739 p[0] = cl[0] | s->eqcr.pi_vb;
1741 if (!(eqcr_pi & half_mask))
1742 s->eqcr.pi_vb ^= QB_VALID_BIT;
1745 s->eqcr.pi = eqcr_pi & full_mask;
1748 qbman_cinh_write(&s->sys, QBMAN_CINH_SWP_EQCR_PI,
1749 (QB_RT_BIT)|(s->eqcr.pi)|s->eqcr.pi_vb);
1751 return num_enqueued;
1753 int qbman_swp_enqueue_multiple_desc(struct qbman_swp *s,
1754 const struct qbman_eq_desc *d,
1755 const struct qbman_fd *fd,
1759 return qbman_swp_enqueue_multiple_desc_ptr(s, d, fd,
1762 return qbman_swp_enqueue_multiple_desc_cinh_direct(s, d, fd,
1767 /*************************/
1768 /* Static (push) dequeue */
1769 /*************************/
1771 void qbman_swp_push_get(struct qbman_swp *s, uint8_t channel_idx, int *enabled)
1773 uint16_t src = (s->sdq >> QB_SDQCR_SRC_SHIFT) & QB_SDQCR_SRC_MASK;
1775 QBMAN_BUG_ON(channel_idx > 15);
1776 *enabled = src | (1 << channel_idx);
1779 void qbman_swp_push_set(struct qbman_swp *s, uint8_t channel_idx, int enable)
1783 QBMAN_BUG_ON(channel_idx > 15);
1785 s->sdq |= 1 << channel_idx;
1787 s->sdq &= ~(1 << channel_idx);
1789 /* Read make the complete src map. If no channels are enabled
1790 * the SDQCR must be 0 or else QMan will assert errors
1792 dqsrc = (s->sdq >> QB_SDQCR_SRC_SHIFT) & QB_SDQCR_SRC_MASK;
1794 qbman_cinh_write(&s->sys, QBMAN_CINH_SWP_SDQCR, s->sdq);
1796 qbman_cinh_write(&s->sys, QBMAN_CINH_SWP_SDQCR, 0);
1799 /***************************/
1800 /* Volatile (pull) dequeue */
1801 /***************************/
1803 /* These should be const, eventually */
1804 #define QB_VDQCR_VERB_DCT_SHIFT 0
1805 #define QB_VDQCR_VERB_DT_SHIFT 2
1806 #define QB_VDQCR_VERB_RLS_SHIFT 4
1807 #define QB_VDQCR_VERB_WAE_SHIFT 5
1808 #define QB_VDQCR_VERB_RAD_SHIFT 6
1812 qb_pull_dt_workqueue,
1813 qb_pull_dt_framequeue
1816 void qbman_pull_desc_clear(struct qbman_pull_desc *d)
1818 memset(d, 0, sizeof(*d));
1821 void qbman_pull_desc_set_storage(struct qbman_pull_desc *d,
1822 struct qbman_result *storage,
1823 dma_addr_t storage_phys,
1826 d->pull.rsp_addr_virt = (size_t)storage;
1829 d->pull.verb &= ~(1 << QB_VDQCR_VERB_RLS_SHIFT);
1832 d->pull.verb |= 1 << QB_VDQCR_VERB_RLS_SHIFT;
1834 d->pull.verb |= 1 << QB_VDQCR_VERB_WAE_SHIFT;
1836 d->pull.verb &= ~(1 << QB_VDQCR_VERB_WAE_SHIFT);
1838 d->pull.rsp_addr = storage_phys;
1841 void qbman_pull_desc_set_numframes(struct qbman_pull_desc *d,
1844 d->pull.numf = numframes - 1;
1847 void qbman_pull_desc_set_token(struct qbman_pull_desc *d, uint8_t token)
1849 d->pull.tok = token;
1852 void qbman_pull_desc_set_fq(struct qbman_pull_desc *d, uint32_t fqid)
1854 d->pull.verb |= 1 << QB_VDQCR_VERB_DCT_SHIFT;
1855 d->pull.verb |= qb_pull_dt_framequeue << QB_VDQCR_VERB_DT_SHIFT;
1856 d->pull.dq_src = fqid;
1859 void qbman_pull_desc_set_wq(struct qbman_pull_desc *d, uint32_t wqid,
1860 enum qbman_pull_type_e dct)
1862 d->pull.verb |= dct << QB_VDQCR_VERB_DCT_SHIFT;
1863 d->pull.verb |= qb_pull_dt_workqueue << QB_VDQCR_VERB_DT_SHIFT;
1864 d->pull.dq_src = wqid;
1867 void qbman_pull_desc_set_channel(struct qbman_pull_desc *d, uint32_t chid,
1868 enum qbman_pull_type_e dct)
1870 d->pull.verb |= dct << QB_VDQCR_VERB_DCT_SHIFT;
1871 d->pull.verb |= qb_pull_dt_channel << QB_VDQCR_VERB_DT_SHIFT;
1872 d->pull.dq_src = chid;
1875 void qbman_pull_desc_set_rad(struct qbman_pull_desc *d, int rad)
1877 if (d->pull.verb & (1 << QB_VDQCR_VERB_RLS_SHIFT)) {
1879 d->pull.verb |= 1 << QB_VDQCR_VERB_RAD_SHIFT;
1881 d->pull.verb &= ~(1 << QB_VDQCR_VERB_RAD_SHIFT);
1883 printf("The RAD feature is not valid when RLS = 0\n");
1887 static int qbman_swp_pull_direct(struct qbman_swp *s,
1888 struct qbman_pull_desc *d)
1891 uint32_t *cl = qb_cl(d);
1893 if (!atomic_dec_and_test(&s->vdq.busy)) {
1894 atomic_inc(&s->vdq.busy);
1898 d->pull.tok = s->sys.idx + 1;
1899 s->vdq.storage = (void *)(size_t)d->pull.rsp_addr_virt;
1900 p = qbman_cena_write_start_wo_shadow(&s->sys, QBMAN_CENA_SWP_VDQCR);
1901 memcpy(&p[1], &cl[1], 12);
1903 /* Set the verb byte, have to substitute in the valid-bit */
1905 p[0] = cl[0] | s->vdq.valid_bit;
1906 s->vdq.valid_bit ^= QB_VALID_BIT;
1907 qbman_cena_write_complete_wo_shadow(&s->sys, QBMAN_CENA_SWP_VDQCR);
1912 static int qbman_swp_pull_cinh_direct(struct qbman_swp *s,
1913 struct qbman_pull_desc *d)
1916 uint32_t *cl = qb_cl(d);
1918 if (!atomic_dec_and_test(&s->vdq.busy)) {
1919 atomic_inc(&s->vdq.busy);
1923 d->pull.tok = s->sys.idx + 1;
1924 s->vdq.storage = (void *)(size_t)d->pull.rsp_addr_virt;
1925 p = qbman_cinh_write_start_wo_shadow(&s->sys, QBMAN_CENA_SWP_VDQCR);
1926 memcpy_byte_by_byte(&p[1], &cl[1], 12);
1928 /* Set the verb byte, have to substitute in the valid-bit */
1930 p[0] = cl[0] | s->vdq.valid_bit;
1931 s->vdq.valid_bit ^= QB_VALID_BIT;
1936 static int qbman_swp_pull_mem_back(struct qbman_swp *s,
1937 struct qbman_pull_desc *d)
1940 uint32_t *cl = qb_cl(d);
1942 if (!atomic_dec_and_test(&s->vdq.busy)) {
1943 atomic_inc(&s->vdq.busy);
1947 d->pull.tok = s->sys.idx + 1;
1948 s->vdq.storage = (void *)(size_t)d->pull.rsp_addr_virt;
1949 p = qbman_cena_write_start_wo_shadow(&s->sys, QBMAN_CENA_SWP_VDQCR_MEM);
1950 memcpy(&p[1], &cl[1], 12);
1952 /* Set the verb byte, have to substitute in the valid-bit */
1953 p[0] = cl[0] | s->vdq.valid_bit;
1954 s->vdq.valid_bit ^= QB_VALID_BIT;
1956 qbman_cinh_write(&s->sys, QBMAN_CINH_SWP_VDQCR_RT, QMAN_RT_MODE);
1961 int qbman_swp_pull(struct qbman_swp *s, struct qbman_pull_desc *d)
1964 return qbman_swp_pull_ptr(s, d);
1966 return qbman_swp_pull_cinh_direct(s, d);
1973 #define QMAN_DQRR_PI_MASK 0xf
1975 #define QBMAN_RESULT_DQ 0x60
1976 #define QBMAN_RESULT_FQRN 0x21
1977 #define QBMAN_RESULT_FQRNI 0x22
1978 #define QBMAN_RESULT_FQPN 0x24
1979 #define QBMAN_RESULT_FQDAN 0x25
1980 #define QBMAN_RESULT_CDAN 0x26
1981 #define QBMAN_RESULT_CSCN_MEM 0x27
1982 #define QBMAN_RESULT_CGCU 0x28
1983 #define QBMAN_RESULT_BPSCN 0x29
1984 #define QBMAN_RESULT_CSCN_WQ 0x2a
1986 #include <rte_prefetch.h>
1988 void qbman_swp_prefetch_dqrr_next(struct qbman_swp *s)
1990 const struct qbman_result *p;
1992 p = qbman_cena_read_wo_shadow(&s->sys,
1993 QBMAN_CENA_SWP_DQRR(s->dqrr.next_idx));
1997 /* NULL return if there are no unconsumed DQRR entries. Returns a DQRR entry
1998 * only once, so repeated calls can return a sequence of DQRR entries, without
1999 * requiring they be consumed immediately or in any particular order.
2001 const struct qbman_result *qbman_swp_dqrr_next(struct qbman_swp *s)
2004 return qbman_swp_dqrr_next_ptr(s);
2006 return qbman_swp_dqrr_next_cinh_direct(s);
2009 const struct qbman_result *qbman_swp_dqrr_next_direct(struct qbman_swp *s)
2012 uint32_t response_verb;
2014 const struct qbman_result *p;
2016 /* Before using valid-bit to detect if something is there, we have to
2017 * handle the case of the DQRR reset bug...
2019 if (s->dqrr.reset_bug) {
2020 /* We pick up new entries by cache-inhibited producer index,
2021 * which means that a non-coherent mapping would require us to
2022 * invalidate and read *only* once that PI has indicated that
2023 * there's an entry here. The first trip around the DQRR ring
2024 * will be much less efficient than all subsequent trips around
2027 uint8_t pi = qbman_cinh_read(&s->sys, QBMAN_CINH_SWP_DQPI) &
2030 /* there are new entries if pi != next_idx */
2031 if (pi == s->dqrr.next_idx)
2034 /* if next_idx is/was the last ring index, and 'pi' is
2035 * different, we can disable the workaround as all the ring
2036 * entries have now been DMA'd to so valid-bit checking is
2037 * repaired. Note: this logic needs to be based on next_idx
2038 * (which increments one at a time), rather than on pi (which
2039 * can burst and wrap-around between our snapshots of it).
2041 QBMAN_BUG_ON((s->dqrr.dqrr_size - 1) < 0);
2042 if (s->dqrr.next_idx == (s->dqrr.dqrr_size - 1u)) {
2043 pr_debug("DEBUG: next_idx=%d, pi=%d, clear reset bug\n",
2044 s->dqrr.next_idx, pi);
2045 s->dqrr.reset_bug = 0;
2047 qbman_cena_invalidate_prefetch(&s->sys,
2048 QBMAN_CENA_SWP_DQRR(s->dqrr.next_idx));
2050 p = qbman_cena_read_wo_shadow(&s->sys,
2051 QBMAN_CENA_SWP_DQRR(s->dqrr.next_idx));
2055 /* If the valid-bit isn't of the expected polarity, nothing there. Note,
2056 * in the DQRR reset bug workaround, we shouldn't need to skip these
2057 * check, because we've already determined that a new entry is available
2058 * and we've invalidated the cacheline before reading it, so the
2059 * valid-bit behaviour is repaired and should tell us what we already
2060 * knew from reading PI.
2062 if ((verb & QB_VALID_BIT) != s->dqrr.valid_bit)
2065 /* There's something there. Move "next_idx" attention to the next ring
2066 * entry (and prefetch it) before returning what we found.
2069 if (s->dqrr.next_idx == s->dqrr.dqrr_size) {
2070 s->dqrr.next_idx = 0;
2071 s->dqrr.valid_bit ^= QB_VALID_BIT;
2073 /* If this is the final response to a volatile dequeue command
2074 * indicate that the vdq is no longer busy
2077 response_verb = verb & QBMAN_RESPONSE_VERB_MASK;
2078 if ((response_verb == QBMAN_RESULT_DQ) &&
2079 (flags & QBMAN_DQ_STAT_VOLATILE) &&
2080 (flags & QBMAN_DQ_STAT_EXPIRED))
2081 atomic_inc(&s->vdq.busy);
2086 const struct qbman_result *qbman_swp_dqrr_next_cinh_direct(struct qbman_swp *s)
2089 uint32_t response_verb;
2091 const struct qbman_result *p;
2093 /* Before using valid-bit to detect if something is there, we have to
2094 * handle the case of the DQRR reset bug...
2096 if (s->dqrr.reset_bug) {
2097 /* We pick up new entries by cache-inhibited producer index,
2098 * which means that a non-coherent mapping would require us to
2099 * invalidate and read *only* once that PI has indicated that
2100 * there's an entry here. The first trip around the DQRR ring
2101 * will be much less efficient than all subsequent trips around
2104 uint8_t pi = qbman_cinh_read(&s->sys, QBMAN_CINH_SWP_DQPI) &
2107 /* there are new entries if pi != next_idx */
2108 if (pi == s->dqrr.next_idx)
2111 /* if next_idx is/was the last ring index, and 'pi' is
2112 * different, we can disable the workaround as all the ring
2113 * entries have now been DMA'd to so valid-bit checking is
2114 * repaired. Note: this logic needs to be based on next_idx
2115 * (which increments one at a time), rather than on pi (which
2116 * can burst and wrap-around between our snapshots of it).
2118 QBMAN_BUG_ON((s->dqrr.dqrr_size - 1) < 0);
2119 if (s->dqrr.next_idx == (s->dqrr.dqrr_size - 1u)) {
2120 pr_debug("DEBUG: next_idx=%d, pi=%d, clear reset bug\n",
2121 s->dqrr.next_idx, pi);
2122 s->dqrr.reset_bug = 0;
2125 p = qbman_cinh_read_wo_shadow(&s->sys,
2126 QBMAN_CENA_SWP_DQRR(s->dqrr.next_idx));
2130 /* If the valid-bit isn't of the expected polarity, nothing there. Note,
2131 * in the DQRR reset bug workaround, we shouldn't need to skip these
2132 * check, because we've already determined that a new entry is available
2133 * and we've invalidated the cacheline before reading it, so the
2134 * valid-bit behaviour is repaired and should tell us what we already
2135 * knew from reading PI.
2137 if ((verb & QB_VALID_BIT) != s->dqrr.valid_bit)
2140 /* There's something there. Move "next_idx" attention to the next ring
2141 * entry (and prefetch it) before returning what we found.
2144 if (s->dqrr.next_idx == s->dqrr.dqrr_size) {
2145 s->dqrr.next_idx = 0;
2146 s->dqrr.valid_bit ^= QB_VALID_BIT;
2148 /* If this is the final response to a volatile dequeue command
2149 * indicate that the vdq is no longer busy
2152 response_verb = verb & QBMAN_RESPONSE_VERB_MASK;
2153 if ((response_verb == QBMAN_RESULT_DQ) &&
2154 (flags & QBMAN_DQ_STAT_VOLATILE) &&
2155 (flags & QBMAN_DQ_STAT_EXPIRED))
2156 atomic_inc(&s->vdq.busy);
2161 const struct qbman_result *qbman_swp_dqrr_next_mem_back(struct qbman_swp *s)
2164 uint32_t response_verb;
2166 const struct qbman_result *p;
2168 p = qbman_cena_read_wo_shadow(&s->sys,
2169 QBMAN_CENA_SWP_DQRR_MEM(s->dqrr.next_idx));
2173 /* If the valid-bit isn't of the expected polarity, nothing there. Note,
2174 * in the DQRR reset bug workaround, we shouldn't need to skip these
2175 * check, because we've already determined that a new entry is available
2176 * and we've invalidated the cacheline before reading it, so the
2177 * valid-bit behaviour is repaired and should tell us what we already
2178 * knew from reading PI.
2180 if ((verb & QB_VALID_BIT) != s->dqrr.valid_bit)
2183 /* There's something there. Move "next_idx" attention to the next ring
2184 * entry (and prefetch it) before returning what we found.
2187 if (s->dqrr.next_idx == s->dqrr.dqrr_size) {
2188 s->dqrr.next_idx = 0;
2189 s->dqrr.valid_bit ^= QB_VALID_BIT;
2191 /* If this is the final response to a volatile dequeue command
2192 * indicate that the vdq is no longer busy
2195 response_verb = verb & QBMAN_RESPONSE_VERB_MASK;
2196 if ((response_verb == QBMAN_RESULT_DQ)
2197 && (flags & QBMAN_DQ_STAT_VOLATILE)
2198 && (flags & QBMAN_DQ_STAT_EXPIRED))
2199 atomic_inc(&s->vdq.busy);
2203 /* Consume DQRR entries previously returned from qbman_swp_dqrr_next(). */
2204 void qbman_swp_dqrr_consume(struct qbman_swp *s,
2205 const struct qbman_result *dq)
2207 qbman_cinh_write(&s->sys,
2208 QBMAN_CINH_SWP_DCAP, QBMAN_IDX_FROM_DQRR(dq));
2211 /* Consume DQRR entries previously returned from qbman_swp_dqrr_next(). */
2212 void qbman_swp_dqrr_idx_consume(struct qbman_swp *s,
2215 qbman_cinh_write(&s->sys, QBMAN_CINH_SWP_DCAP, dqrr_index);
2218 /*********************************/
2219 /* Polling user-provided storage */
2220 /*********************************/
2222 int qbman_result_has_new_result(struct qbman_swp *s,
2223 struct qbman_result *dq)
2225 if (dq->dq.tok == 0)
2229 * Set token to be 0 so we will detect change back to 1
2230 * next time the looping is traversed. Const is cast away here
2231 * as we want users to treat the dequeue responses as read only.
2233 ((struct qbman_result *)dq)->dq.tok = 0;
2236 * VDQCR "no longer busy" hook - not quite the same as DQRR, because
2237 * the fact "VDQCR" shows busy doesn't mean that we hold the result
2238 * that makes it available. Eg. we may be looking at our 10th dequeue
2239 * result, having released VDQCR after the 1st result and it is now
2240 * busy due to some other command!
2242 if (s->vdq.storage == dq) {
2243 s->vdq.storage = NULL;
2244 atomic_inc(&s->vdq.busy);
2250 int qbman_check_new_result(struct qbman_result *dq)
2252 if (dq->dq.tok == 0)
2256 * Set token to be 0 so we will detect change back to 1
2257 * next time the looping is traversed. Const is cast away here
2258 * as we want users to treat the dequeue responses as read only.
2260 ((struct qbman_result *)dq)->dq.tok = 0;
2265 int qbman_check_command_complete(struct qbman_result *dq)
2267 struct qbman_swp *s;
2269 if (dq->dq.tok == 0)
2272 s = portal_idx_map[dq->dq.tok - 1];
2274 * VDQCR "no longer busy" hook - not quite the same as DQRR, because
2275 * the fact "VDQCR" shows busy doesn't mean that we hold the result
2276 * that makes it available. Eg. we may be looking at our 10th dequeue
2277 * result, having released VDQCR after the 1st result and it is now
2278 * busy due to some other command!
2280 if (s->vdq.storage == dq) {
2281 s->vdq.storage = NULL;
2282 atomic_inc(&s->vdq.busy);
2288 /********************************/
2289 /* Categorising qbman results */
2290 /********************************/
2292 static inline int __qbman_result_is_x(const struct qbman_result *dq,
2295 uint8_t response_verb = dq->dq.verb & QBMAN_RESPONSE_VERB_MASK;
2297 return (response_verb == x);
2300 int qbman_result_is_DQ(const struct qbman_result *dq)
2302 return __qbman_result_is_x(dq, QBMAN_RESULT_DQ);
2305 int qbman_result_is_FQDAN(const struct qbman_result *dq)
2307 return __qbman_result_is_x(dq, QBMAN_RESULT_FQDAN);
2310 int qbman_result_is_CDAN(const struct qbman_result *dq)
2312 return __qbman_result_is_x(dq, QBMAN_RESULT_CDAN);
2315 int qbman_result_is_CSCN(const struct qbman_result *dq)
2317 return __qbman_result_is_x(dq, QBMAN_RESULT_CSCN_MEM) ||
2318 __qbman_result_is_x(dq, QBMAN_RESULT_CSCN_WQ);
2321 int qbman_result_is_BPSCN(const struct qbman_result *dq)
2323 return __qbman_result_is_x(dq, QBMAN_RESULT_BPSCN);
2326 int qbman_result_is_CGCU(const struct qbman_result *dq)
2328 return __qbman_result_is_x(dq, QBMAN_RESULT_CGCU);
2331 int qbman_result_is_FQRN(const struct qbman_result *dq)
2333 return __qbman_result_is_x(dq, QBMAN_RESULT_FQRN);
2336 int qbman_result_is_FQRNI(const struct qbman_result *dq)
2338 return __qbman_result_is_x(dq, QBMAN_RESULT_FQRNI);
2341 int qbman_result_is_FQPN(const struct qbman_result *dq)
2343 return __qbman_result_is_x(dq, QBMAN_RESULT_FQPN);
2346 /*********************************/
2347 /* Parsing frame dequeue results */
2348 /*********************************/
2350 /* These APIs assume qbman_result_is_DQ() is TRUE */
2352 uint8_t qbman_result_DQ_flags(const struct qbman_result *dq)
2357 uint16_t qbman_result_DQ_seqnum(const struct qbman_result *dq)
2359 return dq->dq.seqnum;
2362 uint16_t qbman_result_DQ_odpid(const struct qbman_result *dq)
2364 return dq->dq.oprid;
2367 uint32_t qbman_result_DQ_fqid(const struct qbman_result *dq)
2372 uint32_t qbman_result_DQ_byte_count(const struct qbman_result *dq)
2374 return dq->dq.fq_byte_cnt;
2377 uint32_t qbman_result_DQ_frame_count(const struct qbman_result *dq)
2379 return dq->dq.fq_frm_cnt;
2382 uint64_t qbman_result_DQ_fqd_ctx(const struct qbman_result *dq)
2384 return dq->dq.fqd_ctx;
2387 const struct qbman_fd *qbman_result_DQ_fd(const struct qbman_result *dq)
2389 return (const struct qbman_fd *)&dq->dq.fd[0];
2392 /**************************************/
2393 /* Parsing state-change notifications */
2394 /**************************************/
2395 uint8_t qbman_result_SCN_state(const struct qbman_result *scn)
2397 return scn->scn.state;
2400 uint32_t qbman_result_SCN_rid(const struct qbman_result *scn)
2402 return scn->scn.rid_tok;
2405 uint64_t qbman_result_SCN_ctx(const struct qbman_result *scn)
2407 return scn->scn.ctx;
2413 uint16_t qbman_result_bpscn_bpid(const struct qbman_result *scn)
2415 return (uint16_t)qbman_result_SCN_rid(scn) & 0x3FFF;
/* Non-zero when the pool still has free buffers (state bit 0 clear). */
int qbman_result_bpscn_has_free_bufs(const struct qbman_result *scn)
{
	return !(int)(qbman_result_SCN_state(scn) & 0x1);
}
/* Non-zero when the pool is depleted (state bit 1). */
int qbman_result_bpscn_is_depleted(const struct qbman_result *scn)
{
	return (int)(qbman_result_SCN_state(scn) & 0x2);
}
/* Non-zero when the pool is in surplus (state bit 2). */
int qbman_result_bpscn_is_surplus(const struct qbman_result *scn)
{
	return (int)(qbman_result_SCN_state(scn) & 0x4);
}
2433 uint64_t qbman_result_bpscn_ctx(const struct qbman_result *scn)
2435 return qbman_result_SCN_ctx(scn);
2441 uint16_t qbman_result_cgcu_cgid(const struct qbman_result *scn)
2443 return (uint16_t)qbman_result_SCN_rid(scn) & 0xFFFF;
2446 uint64_t qbman_result_cgcu_icnt(const struct qbman_result *scn)
2448 return qbman_result_SCN_ctx(scn);
/********************/
/* Parsing EQ RESP  */
/********************/
2454 struct qbman_fd *qbman_result_eqresp_fd(struct qbman_result *eqresp)
2456 return (struct qbman_fd *)&eqresp->eq_resp.fd[0];
2459 void qbman_result_eqresp_set_rspid(struct qbman_result *eqresp, uint8_t val)
2461 eqresp->eq_resp.rspid = val;
2464 uint8_t qbman_result_eqresp_rspid(struct qbman_result *eqresp)
2466 return eqresp->eq_resp.rspid;
2469 uint8_t qbman_result_eqresp_rc(struct qbman_result *eqresp)
2471 if (eqresp->eq_resp.rc == 0xE)
/******************/
/* Buffer release */
/******************/

/* Bit positions inside the release-descriptor verb byte */
#define QB_BR_RC_VALID_SHIFT  5
#define QB_BR_RCDI_SHIFT      6
2483 void qbman_release_desc_clear(struct qbman_release_desc *d)
2485 memset(d, 0, sizeof(*d));
2486 d->br.verb = 1 << QB_BR_RC_VALID_SHIFT;
2489 void qbman_release_desc_set_bpid(struct qbman_release_desc *d, uint16_t bpid)
2494 void qbman_release_desc_set_rcdi(struct qbman_release_desc *d, int enable)
2497 d->br.verb |= 1 << QB_BR_RCDI_SHIFT;
2499 d->br.verb &= ~(1 << QB_BR_RCDI_SHIFT);
/* Fields of the Release Acceptance Register (RAR) read below:
 * ring index, valid bit, and command-accepted flag.
 */
#define RAR_IDX(rar)     ((rar) & 0x7)
#define RAR_VB(rar)      ((rar) & 0x80)
#define RAR_SUCCESS(rar) ((rar) & 0x100)
2506 static int qbman_swp_release_direct(struct qbman_swp *s,
2507 const struct qbman_release_desc *d,
2508 const uint64_t *buffers,
2509 unsigned int num_buffers)
2512 const uint32_t *cl = qb_cl(d);
2513 uint32_t rar = qbman_cinh_read(&s->sys, QBMAN_CINH_SWP_RAR);
2515 pr_debug("RAR=%08x\n", rar);
2516 if (!RAR_SUCCESS(rar))
2519 QBMAN_BUG_ON(!num_buffers || (num_buffers > 7));
2521 /* Start the release command */
2522 p = qbman_cena_write_start_wo_shadow(&s->sys,
2523 QBMAN_CENA_SWP_RCR(RAR_IDX(rar)));
2525 /* Copy the caller's buffer pointers to the command */
2526 u64_to_le32_copy(&p[2], buffers, num_buffers);
2528 /* Set the verb byte, have to substitute in the valid-bit and the
2529 * number of buffers.
2532 p[0] = cl[0] | RAR_VB(rar) | num_buffers;
2533 qbman_cena_write_complete_wo_shadow(&s->sys,
2534 QBMAN_CENA_SWP_RCR(RAR_IDX(rar)));
2539 static int qbman_swp_release_cinh_direct(struct qbman_swp *s,
2540 const struct qbman_release_desc *d,
2541 const uint64_t *buffers,
2542 unsigned int num_buffers)
2545 const uint32_t *cl = qb_cl(d);
2546 uint32_t rar = qbman_cinh_read(&s->sys, QBMAN_CINH_SWP_RAR);
2548 pr_debug("RAR=%08x\n", rar);
2549 if (!RAR_SUCCESS(rar))
2552 QBMAN_BUG_ON(!num_buffers || (num_buffers > 7));
2554 /* Start the release command */
2555 p = qbman_cinh_write_start_wo_shadow(&s->sys,
2556 QBMAN_CENA_SWP_RCR(RAR_IDX(rar)));
2558 /* Copy the caller's buffer pointers to the command */
2559 memcpy_byte_by_byte(&p[2], buffers, num_buffers * sizeof(uint64_t));
2561 /* Set the verb byte, have to substitute in the valid-bit and the
2562 * number of buffers.
2565 p[0] = cl[0] | RAR_VB(rar) | num_buffers;
2570 static int qbman_swp_release_mem_back(struct qbman_swp *s,
2571 const struct qbman_release_desc *d,
2572 const uint64_t *buffers,
2573 unsigned int num_buffers)
2576 const uint32_t *cl = qb_cl(d);
2577 uint32_t rar = qbman_cinh_read(&s->sys, QBMAN_CINH_SWP_RAR);
2579 pr_debug("RAR=%08x\n", rar);
2580 if (!RAR_SUCCESS(rar))
2583 QBMAN_BUG_ON(!num_buffers || (num_buffers > 7));
2585 /* Start the release command */
2586 p = qbman_cena_write_start_wo_shadow(&s->sys,
2587 QBMAN_CENA_SWP_RCR_MEM(RAR_IDX(rar)));
2589 /* Copy the caller's buffer pointers to the command */
2590 u64_to_le32_copy(&p[2], buffers, num_buffers);
2592 /* Set the verb byte, have to substitute in the valid-bit and the
2593 * number of buffers.
2595 p[0] = cl[0] | RAR_VB(rar) | num_buffers;
2597 qbman_cinh_write(&s->sys, QBMAN_CINH_SWP_RCR_AM_RT +
2598 RAR_IDX(rar) * 4, QMAN_RT_MODE);
2603 int qbman_swp_release(struct qbman_swp *s,
2604 const struct qbman_release_desc *d,
2605 const uint64_t *buffers,
2606 unsigned int num_buffers)
2609 return qbman_swp_release_ptr(s, d, buffers, num_buffers);
2611 return qbman_swp_release_cinh_direct(s, d, buffers,
/*******************/
/* Buffer acquires */
/*******************/
2618 struct qbman_acquire_desc {
2623 uint8_t reserved2[59];
2626 struct qbman_acquire_rslt {
2631 uint8_t reserved2[3];
2635 static int qbman_swp_acquire_direct(struct qbman_swp *s, uint16_t bpid,
2636 uint64_t *buffers, unsigned int num_buffers)
2638 struct qbman_acquire_desc *p;
2639 struct qbman_acquire_rslt *r;
2641 if (!num_buffers || (num_buffers > 7))
2644 /* Start the management command */
2645 p = qbman_swp_mc_start(s);
2650 /* Encode the caller-provided attributes */
2652 p->num = num_buffers;
2654 /* Complete the management command */
2655 r = qbman_swp_mc_complete(s, p, QBMAN_MC_ACQUIRE);
2657 pr_err("qbman: acquire from BPID %d failed, no response\n",
2662 /* Decode the outcome */
2663 QBMAN_BUG_ON((r->verb & QBMAN_RESPONSE_VERB_MASK) != QBMAN_MC_ACQUIRE);
2665 /* Determine success or failure */
2666 if (r->rslt != QBMAN_MC_RSLT_OK) {
2667 pr_err("Acquire buffers from BPID 0x%x failed, code=0x%02x\n",
2672 QBMAN_BUG_ON(r->num > num_buffers);
2674 /* Copy the acquired buffers to the caller's array */
2675 u64_from_le32_copy(buffers, &r->buf[0], r->num);
2680 static int qbman_swp_acquire_cinh_direct(struct qbman_swp *s, uint16_t bpid,
2681 uint64_t *buffers, unsigned int num_buffers)
2683 struct qbman_acquire_desc *p;
2684 struct qbman_acquire_rslt *r;
2686 if (!num_buffers || (num_buffers > 7))
2689 /* Start the management command */
2690 p = qbman_swp_mc_start(s);
2695 /* Encode the caller-provided attributes */
2697 p->num = num_buffers;
2699 /* Complete the management command */
2700 r = qbman_swp_mc_complete_cinh(s, p, QBMAN_MC_ACQUIRE);
2702 pr_err("qbman: acquire from BPID %d failed, no response\n",
2707 /* Decode the outcome */
2708 QBMAN_BUG_ON((r->verb & QBMAN_RESPONSE_VERB_MASK) != QBMAN_MC_ACQUIRE);
2710 /* Determine success or failure */
2711 if (r->rslt != QBMAN_MC_RSLT_OK) {
2712 pr_err("Acquire buffers from BPID 0x%x failed, code=0x%02x\n",
2717 QBMAN_BUG_ON(r->num > num_buffers);
2719 /* Copy the acquired buffers to the caller's array */
2720 u64_from_le32_copy(buffers, &r->buf[0], r->num);
2725 int qbman_swp_acquire(struct qbman_swp *s, uint16_t bpid, uint64_t *buffers,
2726 unsigned int num_buffers)
2729 return qbman_swp_acquire_direct(s, bpid, buffers, num_buffers);
2731 return qbman_swp_acquire_cinh_direct(s, bpid, buffers,
2738 struct qbman_alt_fq_state_desc {
2740 uint8_t reserved[3];
2742 uint8_t reserved2[56];
2745 struct qbman_alt_fq_state_rslt {
2748 uint8_t reserved[62];
2751 #define ALT_FQ_FQID_MASK 0x00FFFFFF
2753 static int qbman_swp_alt_fq_state(struct qbman_swp *s, uint32_t fqid,
2754 uint8_t alt_fq_verb)
2756 struct qbman_alt_fq_state_desc *p;
2757 struct qbman_alt_fq_state_rslt *r;
2759 /* Start the management command */
2760 p = qbman_swp_mc_start(s);
2764 p->fqid = fqid & ALT_FQ_FQID_MASK;
2766 /* Complete the management command */
2767 r = qbman_swp_mc_complete(s, p, alt_fq_verb);
2769 pr_err("qbman: mgmt cmd failed, no response (verb=0x%x)\n",
2774 /* Decode the outcome */
2775 QBMAN_BUG_ON((r->verb & QBMAN_RESPONSE_VERB_MASK) != alt_fq_verb);
2777 /* Determine success or failure */
2778 if (r->rslt != QBMAN_MC_RSLT_OK) {
2779 pr_err("ALT FQID %d failed: verb = 0x%08x, code = 0x%02x\n",
2780 fqid, alt_fq_verb, r->rslt);
2787 int qbman_swp_fq_schedule(struct qbman_swp *s, uint32_t fqid)
2789 return qbman_swp_alt_fq_state(s, fqid, QBMAN_FQ_SCHEDULE);
2792 int qbman_swp_fq_force(struct qbman_swp *s, uint32_t fqid)
2794 return qbman_swp_alt_fq_state(s, fqid, QBMAN_FQ_FORCE);
2797 int qbman_swp_fq_xon(struct qbman_swp *s, uint32_t fqid)
2799 return qbman_swp_alt_fq_state(s, fqid, QBMAN_FQ_XON);
2802 int qbman_swp_fq_xoff(struct qbman_swp *s, uint32_t fqid)
2804 return qbman_swp_alt_fq_state(s, fqid, QBMAN_FQ_XOFF);
/**********************/
/* Channel management */
/**********************/
2811 struct qbman_cdan_ctrl_desc {
2819 uint8_t reserved3[48];
2823 struct qbman_cdan_ctrl_rslt {
2827 uint8_t reserved[60];
/* Hide "ICD" for now as we don't use it, don't set it, and don't test
 * it, so it would be irresponsible to expose it.
 */
#define CODE_CDAN_WE_EN  0x1
#define CODE_CDAN_WE_CTX 0x4
2836 static int qbman_swp_CDAN_set(struct qbman_swp *s, uint16_t channelid,
2837 uint8_t we_mask, uint8_t cdan_en,
2840 struct qbman_cdan_ctrl_desc *p;
2841 struct qbman_cdan_ctrl_rslt *r;
2843 /* Start the management command */
2844 p = qbman_swp_mc_start(s);
2848 /* Encode the caller-provided attributes */
2857 /* Complete the management command */
2858 r = qbman_swp_mc_complete(s, p, QBMAN_WQCHAN_CONFIGURE);
2860 pr_err("qbman: wqchan config failed, no response\n");
2864 /* Decode the outcome */
2865 QBMAN_BUG_ON((r->verb & QBMAN_RESPONSE_VERB_MASK)
2866 != QBMAN_WQCHAN_CONFIGURE);
2868 /* Determine success or failure */
2869 if (r->rslt != QBMAN_MC_RSLT_OK) {
2870 pr_err("CDAN cQID %d failed: code = 0x%02x\n",
2871 channelid, r->rslt);
2878 int qbman_swp_CDAN_set_context(struct qbman_swp *s, uint16_t channelid,
2881 return qbman_swp_CDAN_set(s, channelid,
2886 int qbman_swp_CDAN_enable(struct qbman_swp *s, uint16_t channelid)
2888 return qbman_swp_CDAN_set(s, channelid,
2893 int qbman_swp_CDAN_disable(struct qbman_swp *s, uint16_t channelid)
2895 return qbman_swp_CDAN_set(s, channelid,
2900 int qbman_swp_CDAN_set_context_enable(struct qbman_swp *s, uint16_t channelid,
2903 return qbman_swp_CDAN_set(s, channelid,
2904 CODE_CDAN_WE_EN | CODE_CDAN_WE_CTX,
2908 uint8_t qbman_get_dqrr_idx(const struct qbman_result *dqrr)
2910 return QBMAN_IDX_FROM_DQRR(dqrr);
2913 struct qbman_result *qbman_get_dqrr_from_idx(struct qbman_swp *s, uint8_t idx)
2915 struct qbman_result *dq;
2917 dq = qbman_cena_read(&s->sys, QBMAN_CENA_SWP_DQRR(idx));