1 /* SPDX-License-Identifier: BSD-3-Clause
3 * Copyright (C) 2014-2016 Freescale Semiconductor, Inc.
4 * Copyright 2018-2020 NXP
9 #include "qbman_portal.h"
11 /* QBMan portal management command codes */
12 #define QBMAN_MC_ACQUIRE 0x30
13 #define QBMAN_WQCHAN_CONFIGURE 0x46
15 /* Reverse mapping of QBMAN_CENA_SWP_DQRR() */
/* 0x1ff covers the DQRR region offset; each DQRR entry is 64 bytes, hence >> 6 */
16 #define QBMAN_IDX_FROM_DQRR(p) (((unsigned long)p & 0x1ff) >> 6)
18 /* QBMan FQ management command codes */
19 #define QBMAN_FQ_SCHEDULE 0x48
20 #define QBMAN_FQ_FORCE 0x49
21 #define QBMAN_FQ_XON 0x4d
22 #define QBMAN_FQ_XOFF 0x4e
24 /*******************************/
25 /* Pre-defined attribute codes */
26 /*******************************/
28 #define QBMAN_RESPONSE_VERB_MASK 0x7f
30 /*************************/
31 /* SDQCR attribute codes */
32 /*************************/
/* Field layout of the Static Dequeue Command Register (SDQCR):
 * FC = frame count, DCT = dequeue command type, TOK = token, SRC = source.
 */
33 #define QB_SDQCR_FC_SHIFT 29
34 #define QB_SDQCR_FC_MASK 0x1
35 #define QB_SDQCR_DCT_SHIFT 24
36 #define QB_SDQCR_DCT_MASK 0x3
37 #define QB_SDQCR_TOK_SHIFT 16
38 #define QB_SDQCR_TOK_MASK 0xff
39 #define QB_SDQCR_SRC_SHIFT 0
40 #define QB_SDQCR_SRC_MASK 0xffff
42 /* opaque token for static dequeues */
43 #define QMAN_SDQCR_TOKEN 0xbb
/* Dequeue command type written into the SDQCR DCT field. */
45 enum qbman_sdqcr_dct {
46 qbman_sdqcr_dct_null = 0,
47 qbman_sdqcr_dct_prio_ics,
48 qbman_sdqcr_dct_active_ics,
49 qbman_sdqcr_dct_active
/* NOTE(review): this extract appears to be missing the closing brace of
 * enum qbman_sdqcr_dct and the opening of enum qbman_sdqcr_fc (frame-count
 * field values) — verify against the original file.
 */
53 qbman_sdqcr_fc_one = 0,
54 qbman_sdqcr_fc_up_to_3 = 1
57 /* We need to keep track of which SWP triggered a pull command
58 * so keep an array of portal IDs and use the token field to
59 * be able to find the proper portal
61 #define MAX_QBMAN_PORTALS 64
/* Portal-index -> software portal lookup, populated by qbman_swp_init() and
 * cleared by qbman_swp_finish().
 */
62 static struct qbman_swp *portal_idx_map[MAX_QBMAN_PORTALS];
/* Cached QMan hardware revision, captured in qbman_swp_init(). */
64 uint32_t qman_version;
66 /* Internal Function declaration */
/* Forward declarations of the per-access-mode implementations selected at
 * runtime via the function pointers below. Three access flavours exist:
 *   *_direct      - cache-enabled (CENA) portal access (default)
 *   *_cinh_*      - cache-inhibited portal access (errata workarounds)
 *   *_mem_back    - memory-backed portal access (QMan rev >= 5000)
 * NOTE(review): the "static int" return-type lines preceding most of these
 * declarations appear to have been dropped by the extraction — verify against
 * the original file.
 */
68 qbman_swp_enqueue_array_mode_direct(struct qbman_swp *s,
69 const struct qbman_eq_desc *d,
70 const struct qbman_fd *fd);
72 qbman_swp_enqueue_array_mode_mem_back(struct qbman_swp *s,
73 const struct qbman_eq_desc *d,
74 const struct qbman_fd *fd);
77 qbman_swp_enqueue_ring_mode_direct(struct qbman_swp *s,
78 const struct qbman_eq_desc *d,
79 const struct qbman_fd *fd);
81 qbman_swp_enqueue_ring_mode_cinh_read_direct(struct qbman_swp *s,
82 const struct qbman_eq_desc *d,
83 const struct qbman_fd *fd);
85 qbman_swp_enqueue_ring_mode_cinh_direct(struct qbman_swp *s,
86 const struct qbman_eq_desc *d,
87 const struct qbman_fd *fd);
89 qbman_swp_enqueue_ring_mode_mem_back(struct qbman_swp *s,
90 const struct qbman_eq_desc *d,
91 const struct qbman_fd *fd);
94 qbman_swp_enqueue_multiple_direct(struct qbman_swp *s,
95 const struct qbman_eq_desc *d,
96 const struct qbman_fd *fd,
100 qbman_swp_enqueue_multiple_cinh_read_direct(struct qbman_swp *s,
101 const struct qbman_eq_desc *d,
102 const struct qbman_fd *fd,
106 qbman_swp_enqueue_multiple_cinh_direct(struct qbman_swp *s,
107 const struct qbman_eq_desc *d,
108 const struct qbman_fd *fd,
112 qbman_swp_enqueue_multiple_mem_back(struct qbman_swp *s,
113 const struct qbman_eq_desc *d,
114 const struct qbman_fd *fd,
119 qbman_swp_enqueue_multiple_fd_direct(struct qbman_swp *s,
120 const struct qbman_eq_desc *d,
121 struct qbman_fd **fd,
125 qbman_swp_enqueue_multiple_fd_cinh_read_direct(struct qbman_swp *s,
126 const struct qbman_eq_desc *d,
127 struct qbman_fd **fd,
131 qbman_swp_enqueue_multiple_fd_cinh_direct(struct qbman_swp *s,
132 const struct qbman_eq_desc *d,
133 struct qbman_fd **fd,
137 qbman_swp_enqueue_multiple_fd_mem_back(struct qbman_swp *s,
138 const struct qbman_eq_desc *d,
139 struct qbman_fd **fd,
144 qbman_swp_enqueue_multiple_desc_direct(struct qbman_swp *s,
145 const struct qbman_eq_desc *d,
146 const struct qbman_fd *fd,
149 qbman_swp_enqueue_multiple_desc_cinh_read_direct(struct qbman_swp *s,
150 const struct qbman_eq_desc *d,
151 const struct qbman_fd *fd,
154 qbman_swp_enqueue_multiple_desc_cinh_direct(struct qbman_swp *s,
155 const struct qbman_eq_desc *d,
156 const struct qbman_fd *fd,
159 qbman_swp_enqueue_multiple_desc_mem_back(struct qbman_swp *s,
160 const struct qbman_eq_desc *d,
161 const struct qbman_fd *fd,
165 qbman_swp_pull_direct(struct qbman_swp *s, struct qbman_pull_desc *d);
167 qbman_swp_pull_cinh_direct(struct qbman_swp *s, struct qbman_pull_desc *d);
169 qbman_swp_pull_mem_back(struct qbman_swp *s, struct qbman_pull_desc *d);
171 const struct qbman_result *qbman_swp_dqrr_next_direct(struct qbman_swp *s);
172 const struct qbman_result *qbman_swp_dqrr_next_cinh_direct(struct qbman_swp *s);
173 const struct qbman_result *qbman_swp_dqrr_next_mem_back(struct qbman_swp *s);
176 qbman_swp_release_direct(struct qbman_swp *s,
177 const struct qbman_release_desc *d,
178 const uint64_t *buffers, unsigned int num_buffers);
180 qbman_swp_release_cinh_direct(struct qbman_swp *s,
181 const struct qbman_release_desc *d,
182 const uint64_t *buffers, unsigned int num_buffers);
184 qbman_swp_release_mem_back(struct qbman_swp *s,
185 const struct qbman_release_desc *d,
186 const uint64_t *buffers, unsigned int num_buffers);
188 /* Function pointers */
/* Runtime dispatch table: each pointer defaults to the *_direct (CENA)
 * implementation and is re-pointed in qbman_swp_init() when the hardware
 * revision / SoC requires the mem-back or cinh variants.
 * NOTE(review): these are file-scope globals, not per-portal — selection is
 * assumed uniform across all portals on the system.
 */
189 static int (*qbman_swp_enqueue_array_mode_ptr)(struct qbman_swp *s,
190 const struct qbman_eq_desc *d,
191 const struct qbman_fd *fd)
192 = qbman_swp_enqueue_array_mode_direct;
194 static int (*qbman_swp_enqueue_ring_mode_ptr)(struct qbman_swp *s,
195 const struct qbman_eq_desc *d,
196 const struct qbman_fd *fd)
197 = qbman_swp_enqueue_ring_mode_direct;
199 static int (*qbman_swp_enqueue_multiple_ptr)(struct qbman_swp *s,
200 const struct qbman_eq_desc *d,
201 const struct qbman_fd *fd,
204 = qbman_swp_enqueue_multiple_direct;
206 static int (*qbman_swp_enqueue_multiple_fd_ptr)(struct qbman_swp *s,
207 const struct qbman_eq_desc *d,
208 struct qbman_fd **fd,
211 = qbman_swp_enqueue_multiple_fd_direct;
213 static int (*qbman_swp_enqueue_multiple_desc_ptr)(struct qbman_swp *s,
214 const struct qbman_eq_desc *d,
215 const struct qbman_fd *fd,
217 = qbman_swp_enqueue_multiple_desc_direct;
219 static int (*qbman_swp_pull_ptr)(struct qbman_swp *s,
220 struct qbman_pull_desc *d)
221 = qbman_swp_pull_direct;
/* Non-static: also consumed outside this file (e.g. by the dpio driver). */
223 const struct qbman_result *(*qbman_swp_dqrr_next_ptr)(struct qbman_swp *s)
224 = qbman_swp_dqrr_next_direct;
226 static int (*qbman_swp_release_ptr)(struct qbman_swp *s,
227 const struct qbman_release_desc *d,
228 const uint64_t *buffers, unsigned int num_buffers)
229 = qbman_swp_release_direct;
231 /*********************************/
232 /* Portal constructor/destructor */
233 /*********************************/
235 /* Software portals should always be in the power-on state when we initialise,
236 * due to the CCSR-based portal reset functionality that MC has.
238 * Erk! Turns out that QMan versions prior to 4.1 do not correctly reset DQRR
239 * valid-bits, so we need to support a workaround where we don't trust
240 * valid-bits when detecting new entries until any stale ring entries have been
241 * overwritten at least once. The idea is that we read PI for the first few
242 * entries, then switch to valid-bit after that. The trick is to clear the
243 * bug-work-around boolean once the PI wraps around the ring for the first time.
245 * Note: this still carries a slight additional cost once the decrementer hits
248 struct qbman_swp *qbman_swp_init(const struct qbman_swp_desc *d)
/* Allocates and initialises a software portal from descriptor @d: sets up
 * valid-bits, the SDQCR template, DQRR geometry (with the <4.1 reset-bug
 * workaround), selects mem-back/cinh dispatch variants per HW revision, and
 * registers the portal in portal_idx_map[]. Caller owns the returned portal
 * and must release it with qbman_swp_finish().
 * NOTE(review): malloc-failure / sys_init-failure error paths appear to have
 * been dropped by this extraction — verify against the original file.
 */
253 struct qbman_swp *p = malloc(sizeof(*p));
258 memset(p, 0, sizeof(struct qbman_swp));
261 #ifdef QBMAN_CHECKING
262 p->mc.check = swp_mc_can_start;
264 p->mc.valid_bit = QB_VALID_BIT;
/* Pre-compute the SDQCR template; it is only written to HW when a channel
 * dequeue is actually enabled (see the zero write further down).
 */
265 p->sdq |= qbman_sdqcr_dct_prio_ics << QB_SDQCR_DCT_SHIFT;
266 p->sdq |= qbman_sdqcr_fc_up_to_3 << QB_SDQCR_FC_SHIFT;
267 p->sdq |= QMAN_SDQCR_TOKEN << QB_SDQCR_TOK_SHIFT;
268 if ((d->qman_version & QMAN_REV_MASK) >= QMAN_REV_5000
269 && (d->cena_access_mode == qman_cena_fastest_access))
270 p->mr.valid_bit = QB_VALID_BIT;
/* Portal starts with no volatile dequeue outstanding. */
272 atomic_set(&p->vdq.busy, 1);
273 p->vdq.valid_bit = QB_VALID_BIT;
274 p->dqrr.valid_bit = QB_VALID_BIT;
275 qman_version = p->desc.qman_version;
/* Pre-4.1 QMan: smaller DQRR and the valid-bit reset bug workaround. */
276 if ((qman_version & QMAN_REV_MASK) < QMAN_REV_4100) {
277 p->dqrr.dqrr_size = 4;
278 p->dqrr.reset_bug = 1;
280 p->dqrr.dqrr_size = 8;
281 p->dqrr.reset_bug = 0;
284 ret = qbman_swp_sys_init(&p->sys, d, p->dqrr.dqrr_size);
287 pr_err("qbman_swp_sys_init() failed %d\n", ret);
291 /* Verify that the DQRRPI is 0 - if it is not the portal isn't
292 * in default state which is an error
294 if (qbman_cinh_read(&p->sys, QBMAN_CINH_SWP_DQPI) & 0xF) {
295 pr_err("qbman DQRR PI is not zero, portal is not clean\n");
300 /* SDQCR needs to be initialized to 0 when no channels are
301 * being dequeued from or else the QMan HW will indicate an
302 * error. The values that were calculated above will be
303 * applied when dequeues from a specific channel are enabled.
305 qbman_cinh_write(&p->sys, QBMAN_CINH_SWP_SDQCR, 0);
307 p->eqcr.pi_ring_size = 8;
/* QMan >= 5000 with fastest (mem-back) access: bigger EQCR and mem-back
 * implementations for all portal operations.
 */
308 if ((qman_version & QMAN_REV_MASK) >= QMAN_REV_5000
309 && (d->cena_access_mode == qman_cena_fastest_access)) {
310 p->eqcr.pi_ring_size = 32;
311 qbman_swp_enqueue_array_mode_ptr =
312 qbman_swp_enqueue_array_mode_mem_back;
313 qbman_swp_enqueue_ring_mode_ptr =
314 qbman_swp_enqueue_ring_mode_mem_back;
315 qbman_swp_enqueue_multiple_ptr =
316 qbman_swp_enqueue_multiple_mem_back;
317 qbman_swp_enqueue_multiple_fd_ptr =
318 qbman_swp_enqueue_multiple_fd_mem_back;
319 qbman_swp_enqueue_multiple_desc_ptr =
320 qbman_swp_enqueue_multiple_desc_mem_back;
321 qbman_swp_pull_ptr = qbman_swp_pull_mem_back;
322 qbman_swp_dqrr_next_ptr = qbman_swp_dqrr_next_mem_back;
323 qbman_swp_release_ptr = qbman_swp_release_mem_back;
/* LS1080A: use cache-inhibited reads for EQCR CI (SoC-specific workaround). */
326 if (dpaa2_svr_family == SVR_LS1080A) {
327 qbman_swp_enqueue_ring_mode_ptr =
328 qbman_swp_enqueue_ring_mode_cinh_read_direct;
329 qbman_swp_enqueue_multiple_ptr =
330 qbman_swp_enqueue_multiple_cinh_read_direct;
331 qbman_swp_enqueue_multiple_fd_ptr =
332 qbman_swp_enqueue_multiple_fd_cinh_read_direct;
333 qbman_swp_enqueue_multiple_desc_ptr =
334 qbman_swp_enqueue_multiple_desc_cinh_read_direct;
/* Build pi_ci_mask = (2 * pi_ring_size) - 1, covering PI plus wrap bit. */
337 for (mask_size = p->eqcr.pi_ring_size; mask_size > 0; mask_size >>= 1)
338 p->eqcr.pi_ci_mask = (p->eqcr.pi_ci_mask<<1) + 1;
339 eqcr_pi = qbman_cinh_read(&p->sys, QBMAN_CINH_SWP_EQCR_PI);
340 p->eqcr.pi = eqcr_pi & p->eqcr.pi_ci_mask;
341 p->eqcr.pi_vb = eqcr_pi & QB_VALID_BIT;
/* On >= 5000 mem-back portals CI is seeded from EQCR_PI; presumably the
 * separate CI register is not used in that mode — TODO confirm against the
 * QBMan block guide.
 */
342 if ((p->desc.qman_version & QMAN_REV_MASK) >= QMAN_REV_5000
343 && (d->cena_access_mode == qman_cena_fastest_access))
344 p->eqcr.ci = qbman_cinh_read(&p->sys, QBMAN_CINH_SWP_EQCR_PI)
345 & p->eqcr.pi_ci_mask;
347 p->eqcr.ci = qbman_cinh_read(&p->sys, QBMAN_CINH_SWP_EQCR_CI)
348 & p->eqcr.pi_ci_mask;
349 p->eqcr.available = p->eqcr.pi_ring_size -
350 qm_cyc_diff(p->eqcr.pi_ring_size,
351 p->eqcr.ci & (p->eqcr.pi_ci_mask<<1),
352 p->eqcr.pi & (p->eqcr.pi_ci_mask<<1));
/* Record the portal so pull-response tokens can be mapped back to it. */
354 portal_idx_map[p->desc.idx] = p;
/* Re-configure an existing portal's stashing attributes (@stash_off).
 * No-op on QMan >= 5000 portals using fastest (mem-back) access.
 */
358 int qbman_swp_update(struct qbman_swp *p, int stash_off)
360 const struct qbman_swp_desc *d = &p->desc;
361 struct qbman_swp_sys *s = &p->sys;
364 /* Nothing needs to be done for QBMAN rev > 5000 with fast access */
365 if ((qman_version & QMAN_REV_MASK) >= QMAN_REV_5000
366 && (d->cena_access_mode == qman_cena_fastest_access))
369 ret = qbman_swp_sys_update(s, d, p->dqrr.dqrr_size, stash_off);
/* NOTE(review): copy-paste defect — the call above is qbman_swp_sys_update()
 * but the error message names qbman_swp_sys_init(); message should read
 * "qbman_swp_sys_update() failed".
 */
371 pr_err("qbman_swp_sys_init() failed %d\n", ret);
375 p->stash_off = stash_off;
/* Tear down a portal created by qbman_swp_init(): releases the sys layer and
 * removes the portal from portal_idx_map[].
 * NOTE(review): the free() of the portal struct itself is not visible in this
 * extract — verify the original frees @p.
 */
380 void qbman_swp_finish(struct qbman_swp *p)
382 #ifdef QBMAN_CHECKING
/* Must not tear down while a management command is in flight. */
383 QBMAN_BUG_ON(p->mc.check != swp_mc_can_start);
385 qbman_swp_sys_finish(&p->sys);
386 portal_idx_map[p->desc.idx] = NULL;
/* Return the descriptor the portal was created from. */
390 const struct qbman_swp_desc *qbman_swp_get_desc(struct qbman_swp *p)
/* Thin register accessors for the portal's cache-inhibited interrupt and
 * threshold registers: ISDR (status disable), ISR (status), DQRR_ITR (DQRR
 * interrupt threshold), ITPR (interrupt timeout period), IER (enable), and
 * IIR (inhibit). Each is a single qbman_cinh_read/qbman_cinh_write.
 */
399 uint32_t qbman_swp_interrupt_get_vanish(struct qbman_swp *p)
401 return qbman_cinh_read(&p->sys, QBMAN_CINH_SWP_ISDR);
404 void qbman_swp_interrupt_set_vanish(struct qbman_swp *p, uint32_t mask)
406 qbman_cinh_write(&p->sys, QBMAN_CINH_SWP_ISDR, mask);
409 uint32_t qbman_swp_interrupt_read_status(struct qbman_swp *p)
411 return qbman_cinh_read(&p->sys, QBMAN_CINH_SWP_ISR);
414 void qbman_swp_interrupt_clear_status(struct qbman_swp *p, uint32_t mask)
416 qbman_cinh_write(&p->sys, QBMAN_CINH_SWP_ISR, mask);
419 uint32_t qbman_swp_dqrr_thrshld_read_status(struct qbman_swp *p)
421 return qbman_cinh_read(&p->sys, QBMAN_CINH_SWP_DQRR_ITR);
424 void qbman_swp_dqrr_thrshld_write(struct qbman_swp *p, uint32_t mask)
426 qbman_cinh_write(&p->sys, QBMAN_CINH_SWP_DQRR_ITR, mask);
429 uint32_t qbman_swp_intr_timeout_read_status(struct qbman_swp *p)
431 return qbman_cinh_read(&p->sys, QBMAN_CINH_SWP_ITPR);
434 void qbman_swp_intr_timeout_write(struct qbman_swp *p, uint32_t mask)
436 qbman_cinh_write(&p->sys, QBMAN_CINH_SWP_ITPR, mask);
439 uint32_t qbman_swp_interrupt_get_trigger(struct qbman_swp *p)
441 return qbman_cinh_read(&p->sys, QBMAN_CINH_SWP_IER);
444 void qbman_swp_interrupt_set_trigger(struct qbman_swp *p, uint32_t mask)
446 qbman_cinh_write(&p->sys, QBMAN_CINH_SWP_IER, mask);
449 int qbman_swp_interrupt_get_inhibit(struct qbman_swp *p)
451 return qbman_cinh_read(&p->sys, QBMAN_CINH_SWP_IIR);
454 void qbman_swp_interrupt_set_inhibit(struct qbman_swp *p, int inhibit)
/* Any nonzero @inhibit writes all-ones to IIR. */
456 qbman_cinh_write(&p->sys, QBMAN_CINH_SWP_IIR,
457 inhibit ? 0xffffffff : 0);
460 /***********************/
461 /* Management commands */
462 /***********************/
465 * Internal code common to all types of management commands.
/* Begin a management command: returns a pointer to the command buffer the
 * caller fills in before qbman_swp_mc_submit(). Uses the mem-back CR region
 * on QMan >= 5000 with fastest access, the CENA CR region otherwise.
 */
468 void *qbman_swp_mc_start(struct qbman_swp *p)
471 #ifdef QBMAN_CHECKING
/* Enforce the start -> submit -> poll state machine in debug builds. */
472 QBMAN_BUG_ON(p->mc.check != swp_mc_can_start)
474 if ((p->desc.qman_version & QMAN_REV_MASK) >= QMAN_REV_5000
475 && (p->desc.cena_access_mode == qman_cena_fastest_access))
476 ret = qbman_cena_write_start(&p->sys, QBMAN_CENA_SWP_CR_MEM);
478 ret = qbman_cena_write_start(&p->sys, QBMAN_CENA_SWP_CR);
479 #ifdef QBMAN_CHECKING
481 p->mc.check = swp_mc_can_submit;
/* Submit a management command started with qbman_swp_mc_start(). Writes the
 * verb (with the appropriate valid-bit substituted) as the last step so the
 * hardware only consumes a fully-written command.
 */
486 void qbman_swp_mc_submit(struct qbman_swp *p, void *cmd, uint8_t cmd_verb)
489 #ifdef QBMAN_CHECKING
490 QBMAN_BUG_ON(!(p->mc.check != swp_mc_can_submit));
492 /* TBD: "|=" is going to hurt performance. Need to move as many fields
493 * out of word zero, and for those that remain, the "OR" needs to occur
494 * at the caller side. This debug check helps to catch cases where the
495 * caller wants to OR but has forgotten to do so.
497 QBMAN_BUG_ON((*v & cmd_verb) != *v);
/* >= 5000 fastest-access portals use the mem-back CR plus a ring-trigger
 * write; older portals complete through the CENA CR with mc.valid_bit.
 */
498 if ((p->desc.qman_version & QMAN_REV_MASK) >= QMAN_REV_5000
499 && (p->desc.cena_access_mode == qman_cena_fastest_access)) {
500 *v = cmd_verb | p->mr.valid_bit;
501 qbman_cena_write_complete(&p->sys, QBMAN_CENA_SWP_CR_MEM, cmd);
503 qbman_cinh_write(&p->sys, QBMAN_CINH_SWP_CR_RT, QMAN_RT_MODE);
506 *v = cmd_verb | p->mc.valid_bit;
507 qbman_cena_write_complete(&p->sys, QBMAN_CENA_SWP_CR, cmd);
510 #ifdef QBMAN_CHECKING
511 p->mc.check = swp_mc_can_poll;
/* Cache-inhibited variant of qbman_swp_mc_submit(): completes the command
 * through the cinh path instead of CENA.
 */
515 void qbman_swp_mc_submit_cinh(struct qbman_swp *p, void *cmd, uint8_t cmd_verb)
518 #ifdef QBMAN_CHECKING
519 QBMAN_BUG_ON(!(p->mc.check != swp_mc_can_submit));
521 /* TBD: "|=" is going to hurt performance. Need to move as many fields
522 * out of word zero, and for those that remain, the "OR" needs to occur
523 * at the caller side. This debug check helps to catch cases where the
524 * caller wants to OR but has forgotten to do so.
526 QBMAN_BUG_ON((*v & cmd_verb) != *v);
/* Verb (with valid-bit) written last so HW sees a complete command. */
528 *v = cmd_verb | p->mc.valid_bit;
529 qbman_cinh_write_complete(&p->sys, QBMAN_CENA_SWP_CR, cmd);
531 #ifdef QBMAN_CHECKING
532 p->mc.check = swp_mc_can_poll;
/* Poll for the result of a previously submitted management command.
 * Returns the response buffer when complete, and toggles the relevant
 * valid-bit for the next command. NOTE(review): the NULL-return paths for
 * "not yet complete" are not visible in this extract — verify the original.
 */
536 void *qbman_swp_mc_result(struct qbman_swp *p)
539 #ifdef QBMAN_CHECKING
540 QBMAN_BUG_ON(p->mc.check != swp_mc_can_poll);
542 if ((p->desc.qman_version & QMAN_REV_MASK) >= QMAN_REV_5000
543 && (p->desc.cena_access_mode == qman_cena_fastest_access)) {
544 ret = qbman_cena_read(&p->sys, QBMAN_CENA_SWP_RR_MEM);
545 /* Command completed if the valid bit is toggled */
546 if (p->mr.valid_bit != (ret[0] & QB_VALID_BIT))
548 /* Remove the valid-bit -
549 * command completed iff the rest is non-zero
551 verb = ret[0] & ~QB_VALID_BIT;
554 p->mr.valid_bit ^= QB_VALID_BIT;
/* Pre-5000 path: invalidate/prefetch then read the RR slot selected by the
 * current mc.valid_bit.
 */
556 qbman_cena_invalidate_prefetch(&p->sys,
557 QBMAN_CENA_SWP_RR(p->mc.valid_bit));
558 ret = qbman_cena_read(&p->sys,
559 QBMAN_CENA_SWP_RR(p->mc.valid_bit));
560 /* Remove the valid-bit -
561 * command completed iff the rest is non-zero
563 verb = ret[0] & ~QB_VALID_BIT;
566 p->mc.valid_bit ^= QB_VALID_BIT;
568 #ifdef QBMAN_CHECKING
569 p->mc.check = swp_mc_can_start;
/* Cache-inhibited variant of qbman_swp_mc_result(): reads the response via
 * the cinh shadow instead of CENA.
 */
574 void *qbman_swp_mc_result_cinh(struct qbman_swp *p)
577 #ifdef QBMAN_CHECKING
578 QBMAN_BUG_ON(p->mc.check != swp_mc_can_poll);
580 ret = qbman_cinh_read_shadow(&p->sys,
581 QBMAN_CENA_SWP_RR(p->mc.valid_bit));
582 /* Remove the valid-bit -
583 * command completed iff the rest is non-zero
585 verb = ret[0] & ~QB_VALID_BIT;
588 p->mc.valid_bit ^= QB_VALID_BIT;
589 #ifdef QBMAN_CHECKING
590 p->mc.check = swp_mc_can_start;
/* Bit layout of the enqueue command verb / seqnum / dca fields. */
599 #define QB_ENQUEUE_CMD_OPTIONS_SHIFT 0
600 enum qb_enqueue_commands {
/* NOTE(review): the first enumerator (value 0) appears to be missing from
 * this extract — verify against the original file.
 */
602 enqueue_response_always = 1,
603 enqueue_rejects_to_fq = 2
606 #define QB_ENQUEUE_CMD_EC_OPTION_MASK 0x3
607 #define QB_ENQUEUE_CMD_ORP_ENABLE_SHIFT 2
608 #define QB_ENQUEUE_CMD_IRQ_ON_DISPATCH_SHIFT 3
609 #define QB_ENQUEUE_CMD_TARGET_TYPE_SHIFT 4
610 #define QB_ENQUEUE_CMD_DCA_PK_SHIFT 6
611 #define QB_ENQUEUE_CMD_DCA_EN_SHIFT 7
612 #define QB_ENQUEUE_CMD_NLIS_SHIFT 14
613 #define QB_ENQUEUE_CMD_IS_NESN_SHIFT 15
/* Reset an enqueue descriptor to all-zero defaults. */
615 void qbman_eq_desc_clear(struct qbman_eq_desc *d)
617 memset(d, 0, sizeof(*d));
/* Configure a non-order-restoration enqueue; @respond_success selects
 * response-always vs rejects-to-FQ completion notification.
 */
620 void qbman_eq_desc_set_no_orp(struct qbman_eq_desc *d, int respond_success)
622 d->eq.verb &= ~(1 << QB_ENQUEUE_CMD_ORP_ENABLE_SHIFT);
624 d->eq.verb |= enqueue_response_always;
626 d->eq.verb |= enqueue_rejects_to_fq;
/* Configure an order-restoration enqueue for ORP @opr_id at @seqnum;
 * @incomplete sets/clears the NLIS (not-last-in-sequence) bit.
 */
629 void qbman_eq_desc_set_orp(struct qbman_eq_desc *d, int respond_success,
630 uint16_t opr_id, uint16_t seqnum, int incomplete)
632 d->eq.verb |= 1 << QB_ENQUEUE_CMD_ORP_ENABLE_SHIFT;
634 d->eq.verb |= enqueue_response_always;
636 d->eq.verb |= enqueue_rejects_to_fq;
638 d->eq.orpid = opr_id;
639 d->eq.seqnum = seqnum;
641 d->eq.seqnum |= 1 << QB_ENQUEUE_CMD_NLIS_SHIFT;
643 d->eq.seqnum &= ~(1 << QB_ENQUEUE_CMD_NLIS_SHIFT);
/* Fill a sequence-number hole in ORP @opr_id without enqueuing a frame. */
646 void qbman_eq_desc_set_orp_hole(struct qbman_eq_desc *d, uint16_t opr_id,
649 d->eq.verb |= 1 << QB_ENQUEUE_CMD_ORP_ENABLE_SHIFT;
650 d->eq.verb &= ~QB_ENQUEUE_CMD_EC_OPTION_MASK;
651 d->eq.orpid = opr_id;
652 d->eq.seqnum = seqnum;
653 d->eq.seqnum &= ~(1 << QB_ENQUEUE_CMD_NLIS_SHIFT);
654 d->eq.seqnum &= ~(1 << QB_ENQUEUE_CMD_IS_NESN_SHIFT);
/* Advance the ORP's Next Expected Sequence Number (NESN) past @seqnum. */
657 void qbman_eq_desc_set_orp_nesn(struct qbman_eq_desc *d, uint16_t opr_id,
660 d->eq.verb |= 1 << QB_ENQUEUE_CMD_ORP_ENABLE_SHIFT;
661 d->eq.verb &= ~QB_ENQUEUE_CMD_EC_OPTION_MASK;
662 d->eq.orpid = opr_id;
663 d->eq.seqnum = seqnum;
664 d->eq.seqnum &= ~(1 << QB_ENQUEUE_CMD_NLIS_SHIFT);
665 d->eq.seqnum |= 1 << QB_ENQUEUE_CMD_IS_NESN_SHIFT;
/* Set the DMA address where the enqueue response is written. */
668 void qbman_eq_desc_set_response(struct qbman_eq_desc *d,
669 dma_addr_t storage_phys,
672 d->eq.rsp_addr = storage_phys;
676 void qbman_eq_desc_set_token(struct qbman_eq_desc *d, uint8_t token)
/* Target a specific FQ (clears the QD target-type bit). */
681 void qbman_eq_desc_set_fq(struct qbman_eq_desc *d, uint32_t fqid)
683 d->eq.verb &= ~(1 << QB_ENQUEUE_CMD_TARGET_TYPE_SHIFT);
/* Target a queuing destination @qdid with bin @qd_bin / priority @qd_prio. */
687 void qbman_eq_desc_set_qd(struct qbman_eq_desc *d, uint32_t qdid,
688 uint16_t qd_bin, uint8_t qd_prio)
690 d->eq.verb |= 1 << QB_ENQUEUE_CMD_TARGET_TYPE_SHIFT;
692 d->eq.qdbin = qd_bin;
693 d->eq.qpri = qd_prio;
/* Enable/disable interrupt-on-dispatch for this enqueue. */
696 void qbman_eq_desc_set_eqdi(struct qbman_eq_desc *d, int enable)
699 d->eq.verb |= 1 << QB_ENQUEUE_CMD_IRQ_ON_DISPATCH_SHIFT;
701 d->eq.verb &= ~(1 << QB_ENQUEUE_CMD_IRQ_ON_DISPATCH_SHIFT);
/* Enable/disable DQRR consumption acknowledgement (DCA) for entry @dqrr_idx;
 * @park additionally parks the FQ after consumption.
 */
704 void qbman_eq_desc_set_dca(struct qbman_eq_desc *d, int enable,
705 uint8_t dqrr_idx, int park)
708 d->eq.dca = dqrr_idx;
710 d->eq.dca |= 1 << QB_ENQUEUE_CMD_DCA_PK_SHIFT;
712 d->eq.dca &= ~(1 << QB_ENQUEUE_CMD_DCA_PK_SHIFT);
713 d->eq.dca |= 1 << QB_ENQUEUE_CMD_DCA_EN_SHIFT;
715 d->eq.dca &= ~(1 << QB_ENQUEUE_CMD_DCA_EN_SHIFT);
/* Decode fields of the EQCR Allocation Register (EQAR) readback. */
719 #define EQAR_IDX(eqar) ((eqar) & 0x1f)
720 #define EQAR_VB(eqar) ((eqar) & 0x80)
721 #define EQAR_SUCCESS(eqar) ((eqar) & 0x100)
/* Ring the EQCR array-mode trigger register for slot @idx; slots >= the
 * first bank's capacity go through the second AM_RT register.
 */
723 static inline void qbman_write_eqcr_am_rt_register(struct qbman_swp *p,
727 qbman_cinh_write(&p->sys, QBMAN_CINH_SWP_EQCR_AM_RT + idx * 4,
730 qbman_cinh_write(&p->sys, QBMAN_CINH_SWP_EQCR_AM_RT2 +
/* Byte-wise copy through a volatile destination — used for cache-inhibited
 * portal memory where wide/reordered stores must be avoided.
 */
735 static void memcpy_byte_by_byte(void *to, const void *from, size_t n)
737 const uint8_t *src = from;
738 volatile uint8_t *dest = to;
741 for (i = 0; i < n; i++)
/* Array-mode enqueue, CENA (direct) access: allocate an EQCR slot via EQAR,
 * copy descriptor + FD into the slot, then write the verb word (with EQAR
 * valid-bit) last so HW consumes a complete entry.
 * NOTE(review): the -EBUSY return on EQAR failure is not visible in this
 * extract — verify the original.
 */
746 static int qbman_swp_enqueue_array_mode_direct(struct qbman_swp *s,
747 const struct qbman_eq_desc *d,
748 const struct qbman_fd *fd)
751 const uint32_t *cl = qb_cl(d);
752 uint32_t eqar = qbman_cinh_read(&s->sys, QBMAN_CINH_SWP_EQAR);
754 pr_debug("EQAR=%08x\n", eqar);
755 if (!EQAR_SUCCESS(eqar))
757 p = qbman_cena_write_start_wo_shadow(&s->sys,
758 QBMAN_CENA_SWP_EQCR(EQAR_IDX(eqar)));
/* Word 0 (verb) is skipped here and written below as the commit point. */
759 memcpy(&p[1], &cl[1], 28);
760 memcpy(&p[8], fd, sizeof(*fd));
762 /* Set the verb byte, have to substitute in the valid-bit */
764 p[0] = cl[0] | EQAR_VB(eqar);
765 qbman_cena_write_complete_wo_shadow(&s->sys,
766 QBMAN_CENA_SWP_EQCR(EQAR_IDX(eqar)));
/* Array-mode enqueue, mem-back access: same fill sequence, but committed by
 * ringing the EQCR AM_RT register instead of a CENA write-complete.
 */
769 static int qbman_swp_enqueue_array_mode_mem_back(struct qbman_swp *s,
770 const struct qbman_eq_desc *d,
771 const struct qbman_fd *fd)
774 const uint32_t *cl = qb_cl(d);
775 uint32_t eqar = qbman_cinh_read(&s->sys, QBMAN_CINH_SWP_EQAR);
777 pr_debug("EQAR=%08x\n", eqar);
778 if (!EQAR_SUCCESS(eqar))
780 p = qbman_cena_write_start_wo_shadow(&s->sys,
781 QBMAN_CENA_SWP_EQCR(EQAR_IDX(eqar)));
782 memcpy(&p[1], &cl[1], 28);
783 memcpy(&p[8], fd, sizeof(*fd));
785 /* Set the verb byte, have to substitute in the valid-bit */
786 p[0] = cl[0] | EQAR_VB(eqar);
788 qbman_write_eqcr_am_rt_register(s, EQAR_IDX(eqar));
/* Dispatch to the array-mode implementation selected at portal init. */
792 static inline int qbman_swp_enqueue_array_mode(struct qbman_swp *s,
793 const struct qbman_eq_desc *d,
794 const struct qbman_fd *fd)
796 return qbman_swp_enqueue_array_mode_ptr(s, d, fd);
/* Ring-mode single-frame enqueue, CENA (direct) access. If no local credit
 * is available, refresh EQCR CI from hardware; the verb (with pi_vb) is
 * written last as the commit point, and pi_vb toggles on each wrap of the
 * half-ring. NOTE(review): the -EBUSY return when still unavailable is not
 * visible in this extract.
 */
799 static int qbman_swp_enqueue_ring_mode_direct(struct qbman_swp *s,
800 const struct qbman_eq_desc *d,
801 const struct qbman_fd *fd)
804 const uint32_t *cl = qb_cl(d);
805 uint32_t eqcr_ci, full_mask, half_mask;
807 half_mask = (s->eqcr.pi_ci_mask>>1);
808 full_mask = s->eqcr.pi_ci_mask;
809 if (!s->eqcr.available) {
810 eqcr_ci = s->eqcr.ci;
811 s->eqcr.ci = qbman_cena_read_reg(&s->sys,
812 QBMAN_CENA_SWP_EQCR_CI) & full_mask;
813 s->eqcr.available = qm_cyc_diff(s->eqcr.pi_ring_size,
814 eqcr_ci, s->eqcr.ci);
815 if (!s->eqcr.available)
819 p = qbman_cena_write_start_wo_shadow(&s->sys,
820 QBMAN_CENA_SWP_EQCR(s->eqcr.pi & half_mask));
821 memcpy(&p[1], &cl[1], 28);
822 memcpy(&p[8], fd, sizeof(*fd));
825 /* Set the verb byte, have to substitute in the valid-bit */
826 p[0] = cl[0] | s->eqcr.pi_vb;
827 qbman_cena_write_complete_wo_shadow(&s->sys,
828 QBMAN_CENA_SWP_EQCR(s->eqcr.pi & half_mask));
830 s->eqcr.pi &= full_mask;
832 if (!(s->eqcr.pi & half_mask))
833 s->eqcr.pi_vb ^= QB_VALID_BIT;
/* As _direct, but EQCR CI is refreshed through a cache-inhibited read
 * (LS1080A workaround); the entry itself is still written via cinh.
 */
838 static int qbman_swp_enqueue_ring_mode_cinh_read_direct(
840 const struct qbman_eq_desc *d,
841 const struct qbman_fd *fd)
844 const uint32_t *cl = qb_cl(d);
845 uint32_t eqcr_ci, full_mask, half_mask;
847 half_mask = (s->eqcr.pi_ci_mask>>1);
848 full_mask = s->eqcr.pi_ci_mask;
849 if (!s->eqcr.available) {
850 eqcr_ci = s->eqcr.ci;
851 s->eqcr.ci = qbman_cinh_read(&s->sys,
852 QBMAN_CINH_SWP_EQCR_CI) & full_mask;
853 s->eqcr.available = qm_cyc_diff(s->eqcr.pi_ring_size,
854 eqcr_ci, s->eqcr.ci);
855 if (!s->eqcr.available)
859 p = qbman_cinh_write_start_wo_shadow(&s->sys,
860 QBMAN_CENA_SWP_EQCR(s->eqcr.pi & half_mask));
861 memcpy(&p[1], &cl[1], 28);
862 memcpy(&p[8], fd, sizeof(*fd));
865 /* Set the verb byte, have to substitute in the valid-bit */
866 p[0] = cl[0] | s->eqcr.pi_vb;
868 s->eqcr.pi &= full_mask;
870 if (!(s->eqcr.pi & half_mask))
871 s->eqcr.pi_vb ^= QB_VALID_BIT;
/* Fully cache-inhibited variant: both the CI refresh and the entry copy go
 * through cinh, using byte-by-byte volatile copies.
 */
876 static int qbman_swp_enqueue_ring_mode_cinh_direct(
878 const struct qbman_eq_desc *d,
879 const struct qbman_fd *fd)
882 const uint32_t *cl = qb_cl(d);
883 uint32_t eqcr_ci, full_mask, half_mask;
885 half_mask = (s->eqcr.pi_ci_mask>>1);
886 full_mask = s->eqcr.pi_ci_mask;
887 if (!s->eqcr.available) {
888 eqcr_ci = s->eqcr.ci;
889 s->eqcr.ci = qbman_cinh_read(&s->sys,
890 QBMAN_CINH_SWP_EQCR_CI) & full_mask;
891 s->eqcr.available = qm_cyc_diff(s->eqcr.pi_ring_size,
892 eqcr_ci, s->eqcr.ci);
893 if (!s->eqcr.available)
897 p = qbman_cinh_write_start_wo_shadow(&s->sys,
898 QBMAN_CENA_SWP_EQCR(s->eqcr.pi & half_mask));
899 memcpy_byte_by_byte(&p[1], &cl[1], 28);
900 memcpy_byte_by_byte(&p[8], fd, sizeof(*fd));
903 /* Set the verb byte, have to substitute in the valid-bit */
904 p[0] = cl[0] | s->eqcr.pi_vb;
906 s->eqcr.pi &= full_mask;
908 if (!(s->eqcr.pi & half_mask))
909 s->eqcr.pi_vb ^= QB_VALID_BIT;
/* Mem-back variant (QMan >= 5000): CI comes from the mem-back CI register
 * and the enqueue is committed by writing EQCR_PI with QB_RT_BIT set.
 */
914 static int qbman_swp_enqueue_ring_mode_mem_back(struct qbman_swp *s,
915 const struct qbman_eq_desc *d,
916 const struct qbman_fd *fd)
919 const uint32_t *cl = qb_cl(d);
920 uint32_t eqcr_ci, full_mask, half_mask;
922 half_mask = (s->eqcr.pi_ci_mask>>1);
923 full_mask = s->eqcr.pi_ci_mask;
924 if (!s->eqcr.available) {
925 eqcr_ci = s->eqcr.ci;
926 s->eqcr.ci = qbman_cena_read_reg(&s->sys,
927 QBMAN_CENA_SWP_EQCR_CI_MEMBACK) & full_mask;
928 s->eqcr.available = qm_cyc_diff(s->eqcr.pi_ring_size,
929 eqcr_ci, s->eqcr.ci);
930 if (!s->eqcr.available)
934 p = qbman_cena_write_start_wo_shadow(&s->sys,
935 QBMAN_CENA_SWP_EQCR(s->eqcr.pi & half_mask));
936 memcpy(&p[1], &cl[1], 28);
937 memcpy(&p[8], fd, sizeof(*fd));
939 /* Set the verb byte, have to substitute in the valid-bit */
940 p[0] = cl[0] | s->eqcr.pi_vb;
942 s->eqcr.pi &= full_mask;
944 if (!(s->eqcr.pi & half_mask))
945 s->eqcr.pi_vb ^= QB_VALID_BIT;
947 qbman_cinh_write(&s->sys, QBMAN_CINH_SWP_EQCR_PI,
948 (QB_RT_BIT)|(s->eqcr.pi)|s->eqcr.pi_vb);
/* Dispatch a ring-mode enqueue: the selected implementation normally, or the
 * fully cache-inhibited path (presumably when stashing is disabled for this
 * portal — the guarding condition is not visible in this extract).
 */
952 static int qbman_swp_enqueue_ring_mode(struct qbman_swp *s,
953 const struct qbman_eq_desc *d,
954 const struct qbman_fd *fd)
957 return qbman_swp_enqueue_ring_mode_ptr(s, d, fd);
959 return qbman_swp_enqueue_ring_mode_cinh_direct(s, d, fd);
/* Public single-frame enqueue: array mode if the portal's EQCR is configured
 * for valid-bit array mode, otherwise ring mode.
 */
962 int qbman_swp_enqueue(struct qbman_swp *s, const struct qbman_eq_desc *d,
963 const struct qbman_fd *fd)
965 if (s->sys.eqcr_mode == qman_eqcr_vb_array)
966 return qbman_swp_enqueue_array_mode(s, d, fd);
967 else /* Use ring mode by default */
968 return qbman_swp_enqueue_ring_mode(s, d, fd);
/* Burst enqueue (one descriptor, many FDs), CENA access. Three passes over
 * the claimed slots: (1) fill payload words, (2) write verbs (+ optional DCA
 * from @flags) — the commit point per entry, (3) flush each cacheline.
 * Returns the number of frames actually enqueued (may be < num_frames when
 * EQCR credit is short). NOTE(review): the early return-0 when no credit is
 * available is not visible in this extract.
 */
971 static int qbman_swp_enqueue_multiple_direct(struct qbman_swp *s,
972 const struct qbman_eq_desc *d,
973 const struct qbman_fd *fd,
978 const uint32_t *cl = qb_cl(d);
979 uint32_t eqcr_ci, eqcr_pi, half_mask, full_mask;
980 int i, num_enqueued = 0;
983 half_mask = (s->eqcr.pi_ci_mask>>1);
984 full_mask = s->eqcr.pi_ci_mask;
985 if (!s->eqcr.available) {
986 eqcr_ci = s->eqcr.ci;
987 s->eqcr.ci = qbman_cena_read_reg(&s->sys,
988 QBMAN_CENA_SWP_EQCR_CI) & full_mask;
989 s->eqcr.available = qm_cyc_diff(s->eqcr.pi_ring_size,
990 eqcr_ci, s->eqcr.ci);
991 if (!s->eqcr.available)
995 eqcr_pi = s->eqcr.pi;
996 num_enqueued = (s->eqcr.available < num_frames) ?
997 s->eqcr.available : num_frames;
998 s->eqcr.available -= num_enqueued;
999 /* Fill in the EQCR ring */
1000 for (i = 0; i < num_enqueued; i++) {
1001 p = qbman_cena_write_start_wo_shadow(&s->sys,
1002 QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
1003 memcpy(&p[1], &cl[1], 28);
1004 memcpy(&p[8], &fd[i], sizeof(*fd));
1010 /* Set the verb byte, have to substitute in the valid-bit */
1011 eqcr_pi = s->eqcr.pi;
1012 for (i = 0; i < num_enqueued; i++) {
1013 p = qbman_cena_write_start_wo_shadow(&s->sys,
1014 QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
1015 p[0] = cl[0] | s->eqcr.pi_vb;
1016 if (flags && (flags[i] & QBMAN_ENQUEUE_FLAG_DCA)) {
/* Local shadowing of @d is deliberate: patch this slot's DCA field only. */
1017 struct qbman_eq_desc *d = (struct qbman_eq_desc *)p;
1019 d->eq.dca = (1 << QB_ENQUEUE_CMD_DCA_EN_SHIFT) |
1020 ((flags[i]) & QBMAN_EQCR_DCA_IDXMASK);
1023 if (!(eqcr_pi & half_mask))
1024 s->eqcr.pi_vb ^= QB_VALID_BIT;
1027 /* Flush all the cacheline without load/store in between */
1028 eqcr_pi = s->eqcr.pi;
1029 addr_cena = (size_t)s->sys.addr_cena;
1030 for (i = 0; i < num_enqueued; i++) {
1031 dcbf((uintptr_t)(addr_cena +
1032 QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask)));
1035 s->eqcr.pi = eqcr_pi & full_mask;
1037 return num_enqueued;
/* Burst enqueue, cinh-read variant (LS1080A): identical to _direct except
 * EQCR CI is refreshed through a cache-inhibited read.
 */
1040 static int qbman_swp_enqueue_multiple_cinh_read_direct(
1041 struct qbman_swp *s,
1042 const struct qbman_eq_desc *d,
1043 const struct qbman_fd *fd,
1048 const uint32_t *cl = qb_cl(d);
1049 uint32_t eqcr_ci, eqcr_pi, half_mask, full_mask;
1050 int i, num_enqueued = 0;
1053 half_mask = (s->eqcr.pi_ci_mask>>1);
1054 full_mask = s->eqcr.pi_ci_mask;
1055 if (!s->eqcr.available) {
1056 eqcr_ci = s->eqcr.ci;
1057 s->eqcr.ci = qbman_cinh_read(&s->sys,
1058 QBMAN_CINH_SWP_EQCR_CI) & full_mask;
1059 s->eqcr.available = qm_cyc_diff(s->eqcr.pi_ring_size,
1060 eqcr_ci, s->eqcr.ci);
1061 if (!s->eqcr.available)
1065 eqcr_pi = s->eqcr.pi;
1066 num_enqueued = (s->eqcr.available < num_frames) ?
1067 s->eqcr.available : num_frames;
1068 s->eqcr.available -= num_enqueued;
1069 /* Fill in the EQCR ring */
1070 for (i = 0; i < num_enqueued; i++) {
1071 p = qbman_cena_write_start_wo_shadow(&s->sys,
1072 QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
1073 memcpy(&p[1], &cl[1], 28);
1074 memcpy(&p[8], &fd[i], sizeof(*fd));
1080 /* Set the verb byte, have to substitute in the valid-bit */
1081 eqcr_pi = s->eqcr.pi;
1082 for (i = 0; i < num_enqueued; i++) {
1083 p = qbman_cena_write_start_wo_shadow(&s->sys,
1084 QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
1085 p[0] = cl[0] | s->eqcr.pi_vb;
1086 if (flags && (flags[i] & QBMAN_ENQUEUE_FLAG_DCA)) {
1087 struct qbman_eq_desc *d = (struct qbman_eq_desc *)p;
1089 d->eq.dca = (1 << QB_ENQUEUE_CMD_DCA_EN_SHIFT) |
1090 ((flags[i]) & QBMAN_EQCR_DCA_IDXMASK);
1093 if (!(eqcr_pi & half_mask))
1094 s->eqcr.pi_vb ^= QB_VALID_BIT;
1097 /* Flush all the cacheline without load/store in between */
1098 eqcr_pi = s->eqcr.pi;
1099 addr_cena = (size_t)s->sys.addr_cena;
1100 for (i = 0; i < num_enqueued; i++) {
1102 QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
1105 s->eqcr.pi = eqcr_pi & full_mask;
1107 return num_enqueued;
/* Burst enqueue, fully cache-inhibited: CI refresh, payload fill and verb
 * write all go through cinh with byte-by-byte volatile copies; no cacheline
 * flush pass is needed.
 */
1110 static int qbman_swp_enqueue_multiple_cinh_direct(
1111 struct qbman_swp *s,
1112 const struct qbman_eq_desc *d,
1113 const struct qbman_fd *fd,
1118 const uint32_t *cl = qb_cl(d);
1119 uint32_t eqcr_ci, eqcr_pi, half_mask, full_mask;
1120 int i, num_enqueued = 0;
1122 half_mask = (s->eqcr.pi_ci_mask>>1);
1123 full_mask = s->eqcr.pi_ci_mask;
1124 if (!s->eqcr.available) {
1125 eqcr_ci = s->eqcr.ci;
1126 s->eqcr.ci = qbman_cinh_read(&s->sys,
1127 QBMAN_CINH_SWP_EQCR_CI) & full_mask;
1128 s->eqcr.available = qm_cyc_diff(s->eqcr.pi_ring_size,
1129 eqcr_ci, s->eqcr.ci);
1130 if (!s->eqcr.available)
1134 eqcr_pi = s->eqcr.pi;
1135 num_enqueued = (s->eqcr.available < num_frames) ?
1136 s->eqcr.available : num_frames;
1137 s->eqcr.available -= num_enqueued;
1138 /* Fill in the EQCR ring */
1139 for (i = 0; i < num_enqueued; i++) {
1140 p = qbman_cinh_write_start_wo_shadow(&s->sys,
1141 QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
1142 memcpy_byte_by_byte(&p[1], &cl[1], 28);
1143 memcpy_byte_by_byte(&p[8], &fd[i], sizeof(*fd));
1149 /* Set the verb byte, have to substitute in the valid-bit */
1150 eqcr_pi = s->eqcr.pi;
1151 for (i = 0; i < num_enqueued; i++) {
1152 p = qbman_cinh_write_start_wo_shadow(&s->sys,
1153 QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
1154 p[0] = cl[0] | s->eqcr.pi_vb;
1155 if (flags && (flags[i] & QBMAN_ENQUEUE_FLAG_DCA)) {
1156 struct qbman_eq_desc *d = (struct qbman_eq_desc *)p;
1158 d->eq.dca = (1 << QB_ENQUEUE_CMD_DCA_EN_SHIFT) |
1159 ((flags[i]) & QBMAN_EQCR_DCA_IDXMASK);
1162 if (!(eqcr_pi & half_mask))
1163 s->eqcr.pi_vb ^= QB_VALID_BIT;
1166 s->eqcr.pi = eqcr_pi & full_mask;
1168 return num_enqueued;
/*
 * Enqueue up to @num_frames frames through the memory-backed portal variant.
 * Returns the number of frames actually enqueued, limited by free EQCR space.
 * NOTE(review): some source lines (braces, ring-index increment) are not
 * visible in this extract; comments describe only what is shown.
 */
1171 static int qbman_swp_enqueue_multiple_mem_back(struct qbman_swp *s,
1172 const struct qbman_eq_desc *d,
1173 const struct qbman_fd *fd,
1178 const uint32_t *cl = qb_cl(d);
1179 uint32_t eqcr_ci, eqcr_pi, half_mask, full_mask;
1180 int i, num_enqueued = 0;
1182 half_mask = (s->eqcr.pi_ci_mask>>1);
1183 full_mask = s->eqcr.pi_ci_mask;
/* Refresh the cached consumer index only when we think the ring is full. */
1184 if (!s->eqcr.available) {
1185 eqcr_ci = s->eqcr.ci;
1186 s->eqcr.ci = qbman_cena_read_reg(&s->sys,
1187 QBMAN_CENA_SWP_EQCR_CI_MEMBACK) & full_mask;
1188 s->eqcr.available = qm_cyc_diff(s->eqcr.pi_ring_size,
1189 eqcr_ci, s->eqcr.ci);
1190 if (!s->eqcr.available)
/* Clamp the burst to the free entries just computed. */
1194 eqcr_pi = s->eqcr.pi;
1195 num_enqueued = (s->eqcr.available < num_frames) ?
1196 s->eqcr.available : num_frames;
1197 s->eqcr.available -= num_enqueued;
1198 /* Fill in the EQCR ring */
1199 for (i = 0; i < num_enqueued; i++) {
1200 p = qbman_cena_write_start_wo_shadow(&s->sys,
1201 QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
/* Copy descriptor (minus verb byte) then the frame descriptor. */
1202 memcpy(&p[1], &cl[1], 28);
1203 memcpy(&p[8], &fd[i], sizeof(*fd));
/* Verb byte carries the valid-bit so hardware sees a complete entry. */
1204 p[0] = cl[0] | s->eqcr.pi_vb;
1206 if (flags && (flags[i] & QBMAN_ENQUEUE_FLAG_DCA)) {
1207 struct qbman_eq_desc *d = (struct qbman_eq_desc *)p;
/* Request discrete consumption acknowledgement for this DQRR index. */
1209 d->eq.dca = (1 << QB_ENQUEUE_CMD_DCA_EN_SHIFT) |
1210 ((flags[i]) & QBMAN_EQCR_DCA_IDXMASK);
/* Valid-bit polarity flips each time the producer index wraps. */
1214 if (!(eqcr_pi & half_mask))
1215 s->eqcr.pi_vb ^= QB_VALID_BIT;
1217 s->eqcr.pi = eqcr_pi & full_mask;
/* Ring the doorbell: publish new PI (with RT bit) to the cache-inhibited reg. */
1220 qbman_cinh_write(&s->sys, QBMAN_CINH_SWP_EQCR_PI,
1221 (QB_RT_BIT)|(s->eqcr.pi)|s->eqcr.pi_vb);
1222 return num_enqueued;
/*
 * Public multi-frame enqueue entry point: dispatch either to the
 * implementation installed in the function pointer (presumably selected at
 * portal init -- confirm against the init code, not visible here) or to the
 * cache-inhibited direct variant.
 */
1225 int qbman_swp_enqueue_multiple(struct qbman_swp *s,
1226 const struct qbman_eq_desc *d,
1227 const struct qbman_fd *fd,
1232 return qbman_swp_enqueue_multiple_ptr(s, d, fd, flags,
1235 return qbman_swp_enqueue_multiple_cinh_direct(s, d, fd, flags,
/*
 * Enqueue up to @num_frames frames given as an array of FD pointers, using
 * cache-enabled portal access. Returns the number actually enqueued.
 * NOTE(review): some lines (braces, eqcr_pi increments, dcbf call) are not
 * visible in this extract.
 */
1239 static int qbman_swp_enqueue_multiple_fd_direct(struct qbman_swp *s,
1240 const struct qbman_eq_desc *d,
1241 struct qbman_fd **fd,
1246 const uint32_t *cl = qb_cl(d);
1247 uint32_t eqcr_ci, eqcr_pi, half_mask, full_mask;
1248 int i, num_enqueued = 0;
1251 half_mask = (s->eqcr.pi_ci_mask>>1);
1252 full_mask = s->eqcr.pi_ci_mask;
/* Refresh cached consumer index only when no free entries are cached. */
1253 if (!s->eqcr.available) {
1254 eqcr_ci = s->eqcr.ci;
1255 s->eqcr.ci = qbman_cena_read_reg(&s->sys,
1256 QBMAN_CENA_SWP_EQCR_CI) & full_mask;
1257 s->eqcr.available = qm_cyc_diff(s->eqcr.pi_ring_size,
1258 eqcr_ci, s->eqcr.ci);
1259 if (!s->eqcr.available)
/* Clamp the burst to available ring space. */
1263 eqcr_pi = s->eqcr.pi;
1264 num_enqueued = (s->eqcr.available < num_frames) ?
1265 s->eqcr.available : num_frames;
1266 s->eqcr.available -= num_enqueued;
1267 /* Fill in the EQCR ring */
1268 for (i = 0; i < num_enqueued; i++) {
1269 p = qbman_cena_write_start_wo_shadow(&s->sys,
1270 QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
1271 memcpy(&p[1], &cl[1], 28);
/* fd[] is an array of pointers here, hence the indirection. */
1272 memcpy(&p[8], fd[i], sizeof(struct qbman_fd));
1278 /* Set the verb byte, have to substitute in the valid-bit */
1279 eqcr_pi = s->eqcr.pi;
1280 for (i = 0; i < num_enqueued; i++) {
1281 p = qbman_cena_write_start_wo_shadow(&s->sys,
1282 QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
1283 p[0] = cl[0] | s->eqcr.pi_vb;
1284 if (flags && (flags[i] & QBMAN_ENQUEUE_FLAG_DCA)) {
1285 struct qbman_eq_desc *d = (struct qbman_eq_desc *)p;
1287 d->eq.dca = (1 << QB_ENQUEUE_CMD_DCA_EN_SHIFT) |
1288 ((flags[i]) & QBMAN_EQCR_DCA_IDXMASK);
/* Valid-bit polarity flips on each wrap of the producer index. */
1291 if (!(eqcr_pi & half_mask))
1292 s->eqcr.pi_vb ^= QB_VALID_BIT;
1295 /* Flush all the cacheline without load/store in between */
1296 eqcr_pi = s->eqcr.pi;
1297 addr_cena = (size_t)s->sys.addr_cena;
1298 for (i = 0; i < num_enqueued; i++) {
1300 QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
1303 s->eqcr.pi = eqcr_pi & full_mask;
1305 return num_enqueued;
/*
 * Same as the fd_direct variant but the EQCR consumer index is read through
 * the cache-inhibited register instead of the cache-enabled region. Ring
 * writes remain cache-enabled. Returns the number of frames enqueued.
 * NOTE(review): some lines (braces, index increments, dcbf call) are missing
 * from this extract.
 */
1308 static int qbman_swp_enqueue_multiple_fd_cinh_read_direct(
1309 struct qbman_swp *s,
1310 const struct qbman_eq_desc *d,
1311 struct qbman_fd **fd,
1316 const uint32_t *cl = qb_cl(d);
1317 uint32_t eqcr_ci, eqcr_pi, half_mask, full_mask;
1318 int i, num_enqueued = 0;
1321 half_mask = (s->eqcr.pi_ci_mask>>1);
1322 full_mask = s->eqcr.pi_ci_mask;
1323 if (!s->eqcr.available) {
1324 eqcr_ci = s->eqcr.ci;
/* CI refresh via cache-inhibited read -- the distinguishing feature. */
1325 s->eqcr.ci = qbman_cinh_read(&s->sys,
1326 QBMAN_CINH_SWP_EQCR_CI) & full_mask;
1327 s->eqcr.available = qm_cyc_diff(s->eqcr.pi_ring_size,
1328 eqcr_ci, s->eqcr.ci);
1329 if (!s->eqcr.available)
1333 eqcr_pi = s->eqcr.pi;
1334 num_enqueued = (s->eqcr.available < num_frames) ?
1335 s->eqcr.available : num_frames;
1336 s->eqcr.available -= num_enqueued;
1337 /* Fill in the EQCR ring */
1338 for (i = 0; i < num_enqueued; i++) {
1339 p = qbman_cena_write_start_wo_shadow(&s->sys,
1340 QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
1341 memcpy(&p[1], &cl[1], 28);
1342 memcpy(&p[8], fd[i], sizeof(struct qbman_fd));
1348 /* Set the verb byte, have to substitute in the valid-bit */
1349 eqcr_pi = s->eqcr.pi;
1350 for (i = 0; i < num_enqueued; i++) {
1351 p = qbman_cena_write_start_wo_shadow(&s->sys,
1352 QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
1353 p[0] = cl[0] | s->eqcr.pi_vb;
1354 if (flags && (flags[i] & QBMAN_ENQUEUE_FLAG_DCA)) {
1355 struct qbman_eq_desc *d = (struct qbman_eq_desc *)p;
1357 d->eq.dca = (1 << QB_ENQUEUE_CMD_DCA_EN_SHIFT) |
1358 ((flags[i]) & QBMAN_EQCR_DCA_IDXMASK);
1361 if (!(eqcr_pi & half_mask))
1362 s->eqcr.pi_vb ^= QB_VALID_BIT;
1365 /* Flush all the cacheline without load/store in between */
1366 eqcr_pi = s->eqcr.pi;
1367 addr_cena = (size_t)s->sys.addr_cena;
1368 for (i = 0; i < num_enqueued; i++) {
1370 QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
1373 s->eqcr.pi = eqcr_pi & full_mask;
1375 return num_enqueued;
/*
 * Fully cache-inhibited variant of the FD-pointer multi-enqueue: both the CI
 * read and the ring writes go through the cache-inhibited mapping, using
 * byte-by-byte copies (cache-inhibited memory cannot take wide memcpy).
 * Returns the number of frames enqueued.
 */
1378 static int qbman_swp_enqueue_multiple_fd_cinh_direct(
1379 struct qbman_swp *s,
1380 const struct qbman_eq_desc *d,
1381 struct qbman_fd **fd,
1386 const uint32_t *cl = qb_cl(d);
1387 uint32_t eqcr_ci, eqcr_pi, half_mask, full_mask;
1388 int i, num_enqueued = 0;
1390 half_mask = (s->eqcr.pi_ci_mask>>1);
1391 full_mask = s->eqcr.pi_ci_mask;
1392 if (!s->eqcr.available) {
1393 eqcr_ci = s->eqcr.ci;
1394 s->eqcr.ci = qbman_cinh_read(&s->sys,
1395 QBMAN_CINH_SWP_EQCR_CI) & full_mask;
1396 s->eqcr.available = qm_cyc_diff(s->eqcr.pi_ring_size,
1397 eqcr_ci, s->eqcr.ci);
1398 if (!s->eqcr.available)
1402 eqcr_pi = s->eqcr.pi;
1403 num_enqueued = (s->eqcr.available < num_frames) ?
1404 s->eqcr.available : num_frames;
1405 s->eqcr.available -= num_enqueued;
1406 /* Fill in the EQCR ring */
1407 for (i = 0; i < num_enqueued; i++) {
1408 p = qbman_cinh_write_start_wo_shadow(&s->sys,
1409 QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
/* Byte-wise copies: required for the cache-inhibited mapping. */
1410 memcpy_byte_by_byte(&p[1], &cl[1], 28);
1411 memcpy_byte_by_byte(&p[8], fd[i], sizeof(struct qbman_fd));
1417 /* Set the verb byte, have to substitute in the valid-bit */
1418 eqcr_pi = s->eqcr.pi;
1419 for (i = 0; i < num_enqueued; i++) {
1420 p = qbman_cinh_write_start_wo_shadow(&s->sys,
1421 QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
1422 p[0] = cl[0] | s->eqcr.pi_vb;
1423 if (flags && (flags[i] & QBMAN_ENQUEUE_FLAG_DCA)) {
1424 struct qbman_eq_desc *d = (struct qbman_eq_desc *)p;
1426 d->eq.dca = (1 << QB_ENQUEUE_CMD_DCA_EN_SHIFT) |
1427 ((flags[i]) & QBMAN_EQCR_DCA_IDXMASK);
1430 if (!(eqcr_pi & half_mask))
1431 s->eqcr.pi_vb ^= QB_VALID_BIT;
1434 s->eqcr.pi = eqcr_pi & full_mask;
1436 return num_enqueued;
/*
 * Memory-backed variant of the FD-pointer multi-enqueue: CI comes from the
 * mem-back register and completion is signalled by a doorbell write of the
 * producer index (with the RT bit). Returns the number of frames enqueued.
 */
1439 static int qbman_swp_enqueue_multiple_fd_mem_back(struct qbman_swp *s,
1440 const struct qbman_eq_desc *d,
1441 struct qbman_fd **fd,
1446 const uint32_t *cl = qb_cl(d);
1447 uint32_t eqcr_ci, eqcr_pi, half_mask, full_mask;
1448 int i, num_enqueued = 0;
1450 half_mask = (s->eqcr.pi_ci_mask>>1);
1451 full_mask = s->eqcr.pi_ci_mask;
1452 if (!s->eqcr.available) {
1453 eqcr_ci = s->eqcr.ci;
1454 s->eqcr.ci = qbman_cena_read_reg(&s->sys,
1455 QBMAN_CENA_SWP_EQCR_CI_MEMBACK) & full_mask;
1456 s->eqcr.available = qm_cyc_diff(s->eqcr.pi_ring_size,
1457 eqcr_ci, s->eqcr.ci);
1458 if (!s->eqcr.available)
1462 eqcr_pi = s->eqcr.pi;
1463 num_enqueued = (s->eqcr.available < num_frames) ?
1464 s->eqcr.available : num_frames;
1465 s->eqcr.available -= num_enqueued;
1466 /* Fill in the EQCR ring */
1467 for (i = 0; i < num_enqueued; i++) {
1468 p = qbman_cena_write_start_wo_shadow(&s->sys,
1469 QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
1470 memcpy(&p[1], &cl[1], 28);
1471 memcpy(&p[8], fd[i], sizeof(struct qbman_fd));
1475 /* Set the verb byte, have to substitute in the valid-bit */
1476 eqcr_pi = s->eqcr.pi;
1477 for (i = 0; i < num_enqueued; i++) {
1478 p = qbman_cena_write_start_wo_shadow(&s->sys,
1479 QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
1480 p[0] = cl[0] | s->eqcr.pi_vb;
1481 if (flags && (flags[i] & QBMAN_ENQUEUE_FLAG_DCA)) {
1482 struct qbman_eq_desc *d = (struct qbman_eq_desc *)p;
1484 d->eq.dca = (1 << QB_ENQUEUE_CMD_DCA_EN_SHIFT) |
1485 ((flags[i]) & QBMAN_EQCR_DCA_IDXMASK);
1488 if (!(eqcr_pi & half_mask))
1489 s->eqcr.pi_vb ^= QB_VALID_BIT;
1491 s->eqcr.pi = eqcr_pi & full_mask;
/* Doorbell: publish new producer index with the RT bit set. */
1494 qbman_cinh_write(&s->sys, QBMAN_CINH_SWP_EQCR_PI,
1495 (QB_RT_BIT)|(s->eqcr.pi)|s->eqcr.pi_vb);
1496 return num_enqueued;
/*
 * Public entry for multi-enqueue of FD pointers: dispatch to the installed
 * implementation pointer or fall back to the cache-inhibited direct variant.
 */
1499 int qbman_swp_enqueue_multiple_fd(struct qbman_swp *s,
1500 const struct qbman_eq_desc *d,
1501 struct qbman_fd **fd,
1506 return qbman_swp_enqueue_multiple_fd_ptr(s, d, fd, flags,
1509 return qbman_swp_enqueue_multiple_fd_cinh_direct(s, d, fd,
/*
 * Multi-enqueue with a distinct enqueue descriptor per frame (d is an array;
 * cl is presumably re-derived per iteration from d[i] on lines not visible
 * here -- confirm against the full source). No DCA/flags handling in the
 * desc variants. Returns the number of frames enqueued.
 */
1513 static int qbman_swp_enqueue_multiple_desc_direct(struct qbman_swp *s,
1514 const struct qbman_eq_desc *d,
1515 const struct qbman_fd *fd,
1520 uint32_t eqcr_ci, eqcr_pi, half_mask, full_mask;
1521 int i, num_enqueued = 0;
1524 half_mask = (s->eqcr.pi_ci_mask>>1);
1525 full_mask = s->eqcr.pi_ci_mask;
1526 if (!s->eqcr.available) {
1527 eqcr_ci = s->eqcr.ci;
1528 s->eqcr.ci = qbman_cena_read_reg(&s->sys,
1529 QBMAN_CENA_SWP_EQCR_CI) & full_mask;
1530 s->eqcr.available = qm_cyc_diff(s->eqcr.pi_ring_size,
1531 eqcr_ci, s->eqcr.ci);
1532 if (!s->eqcr.available)
1536 eqcr_pi = s->eqcr.pi;
1537 num_enqueued = (s->eqcr.available < num_frames) ?
1538 s->eqcr.available : num_frames;
1539 s->eqcr.available -= num_enqueued;
1540 /* Fill in the EQCR ring */
1541 for (i = 0; i < num_enqueued; i++) {
1542 p = qbman_cena_write_start_wo_shadow(&s->sys,
1543 QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
1545 memcpy(&p[1], &cl[1], 28);
1546 memcpy(&p[8], &fd[i], sizeof(*fd));
1552 /* Set the verb byte, have to substitute in the valid-bit */
1553 eqcr_pi = s->eqcr.pi;
1554 for (i = 0; i < num_enqueued; i++) {
1555 p = qbman_cena_write_start_wo_shadow(&s->sys,
1556 QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
1558 p[0] = cl[0] | s->eqcr.pi_vb;
1560 if (!(eqcr_pi & half_mask))
1561 s->eqcr.pi_vb ^= QB_VALID_BIT;
1564 /* Flush all the cacheline without load/store in between */
1565 eqcr_pi = s->eqcr.pi;
1566 addr_cena = (size_t)s->sys.addr_cena;
1567 for (i = 0; i < num_enqueued; i++) {
/* Flush each touched EQCR cacheline to make the entries visible to HW. */
1568 dcbf((uintptr_t)(addr_cena +
1569 QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask)));
1572 s->eqcr.pi = eqcr_pi & full_mask;
1574 return num_enqueued;
/*
 * Per-frame-descriptor multi-enqueue with cache-inhibited CI read and
 * cache-enabled ring writes. Returns the number of frames enqueued.
 */
1577 static int qbman_swp_enqueue_multiple_desc_cinh_read_direct(
1578 struct qbman_swp *s,
1579 const struct qbman_eq_desc *d,
1580 const struct qbman_fd *fd,
1585 uint32_t eqcr_ci, eqcr_pi, half_mask, full_mask;
1586 int i, num_enqueued = 0;
1589 half_mask = (s->eqcr.pi_ci_mask>>1);
1590 full_mask = s->eqcr.pi_ci_mask;
1591 if (!s->eqcr.available) {
1592 eqcr_ci = s->eqcr.ci;
/* CI refresh through the cache-inhibited register. */
1593 s->eqcr.ci = qbman_cinh_read(&s->sys,
1594 QBMAN_CINH_SWP_EQCR_CI) & full_mask;
1595 s->eqcr.available = qm_cyc_diff(s->eqcr.pi_ring_size,
1596 eqcr_ci, s->eqcr.ci);
1597 if (!s->eqcr.available)
1601 eqcr_pi = s->eqcr.pi;
1602 num_enqueued = (s->eqcr.available < num_frames) ?
1603 s->eqcr.available : num_frames;
1604 s->eqcr.available -= num_enqueued;
1605 /* Fill in the EQCR ring */
1606 for (i = 0; i < num_enqueued; i++) {
1607 p = qbman_cena_write_start_wo_shadow(&s->sys,
1608 QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
1610 memcpy(&p[1], &cl[1], 28);
1611 memcpy(&p[8], &fd[i], sizeof(*fd));
1617 /* Set the verb byte, have to substitute in the valid-bit */
1618 eqcr_pi = s->eqcr.pi;
1619 for (i = 0; i < num_enqueued; i++) {
1620 p = qbman_cena_write_start_wo_shadow(&s->sys,
1621 QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
1623 p[0] = cl[0] | s->eqcr.pi_vb;
1625 if (!(eqcr_pi & half_mask))
1626 s->eqcr.pi_vb ^= QB_VALID_BIT;
1629 /* Flush all the cacheline without load/store in between */
1630 eqcr_pi = s->eqcr.pi;
1631 addr_cena = (size_t)s->sys.addr_cena;
1632 for (i = 0; i < num_enqueued; i++) {
1634 QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
1637 s->eqcr.pi = eqcr_pi & full_mask;
1639 return num_enqueued;
/*
 * Fully cache-inhibited per-frame-descriptor multi-enqueue: byte-wise copies
 * into the cache-inhibited mapping; no flush loop is needed for this mapping.
 * Returns the number of frames enqueued.
 */
1642 static int qbman_swp_enqueue_multiple_desc_cinh_direct(
1643 struct qbman_swp *s,
1644 const struct qbman_eq_desc *d,
1645 const struct qbman_fd *fd,
1650 uint32_t eqcr_ci, eqcr_pi, half_mask, full_mask;
1651 int i, num_enqueued = 0;
1653 half_mask = (s->eqcr.pi_ci_mask>>1);
1654 full_mask = s->eqcr.pi_ci_mask;
1655 if (!s->eqcr.available) {
1656 eqcr_ci = s->eqcr.ci;
1657 s->eqcr.ci = qbman_cinh_read(&s->sys,
1658 QBMAN_CINH_SWP_EQCR_CI) & full_mask;
1659 s->eqcr.available = qm_cyc_diff(s->eqcr.pi_ring_size,
1660 eqcr_ci, s->eqcr.ci);
1661 if (!s->eqcr.available)
1665 eqcr_pi = s->eqcr.pi;
1666 num_enqueued = (s->eqcr.available < num_frames) ?
1667 s->eqcr.available : num_frames;
1668 s->eqcr.available -= num_enqueued;
1669 /* Fill in the EQCR ring */
1670 for (i = 0; i < num_enqueued; i++) {
1671 p = qbman_cinh_write_start_wo_shadow(&s->sys,
1672 QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
/* Byte-wise copies: required for the cache-inhibited mapping. */
1674 memcpy_byte_by_byte(&p[1], &cl[1], 28);
1675 memcpy_byte_by_byte(&p[8], &fd[i], sizeof(*fd));
1681 /* Set the verb byte, have to substitute in the valid-bit */
1682 eqcr_pi = s->eqcr.pi;
1683 for (i = 0; i < num_enqueued; i++) {
1684 p = qbman_cinh_write_start_wo_shadow(&s->sys,
1685 QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
1687 p[0] = cl[0] | s->eqcr.pi_vb;
1689 if (!(eqcr_pi & half_mask))
1690 s->eqcr.pi_vb ^= QB_VALID_BIT;
1693 s->eqcr.pi = eqcr_pi & full_mask;
1695 return num_enqueued;
/*
 * Memory-backed per-frame-descriptor multi-enqueue: CI from the mem-back
 * register, completion signalled by a doorbell write of PI with the RT bit.
 * Returns the number of frames enqueued.
 */
1698 static int qbman_swp_enqueue_multiple_desc_mem_back(struct qbman_swp *s,
1699 const struct qbman_eq_desc *d,
1700 const struct qbman_fd *fd,
1705 uint32_t eqcr_ci, eqcr_pi, half_mask, full_mask;
1706 int i, num_enqueued = 0;
1708 half_mask = (s->eqcr.pi_ci_mask>>1);
1709 full_mask = s->eqcr.pi_ci_mask;
1710 if (!s->eqcr.available) {
1711 eqcr_ci = s->eqcr.ci;
1712 s->eqcr.ci = qbman_cena_read_reg(&s->sys,
1713 QBMAN_CENA_SWP_EQCR_CI_MEMBACK) & full_mask;
1714 s->eqcr.available = qm_cyc_diff(s->eqcr.pi_ring_size,
1715 eqcr_ci, s->eqcr.ci);
1716 if (!s->eqcr.available)
1720 eqcr_pi = s->eqcr.pi;
1721 num_enqueued = (s->eqcr.available < num_frames) ?
1722 s->eqcr.available : num_frames;
1723 s->eqcr.available -= num_enqueued;
1724 /* Fill in the EQCR ring */
1725 for (i = 0; i < num_enqueued; i++) {
1726 p = qbman_cena_write_start_wo_shadow(&s->sys,
1727 QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
1729 memcpy(&p[1], &cl[1], 28);
1730 memcpy(&p[8], &fd[i], sizeof(*fd));
1734 /* Set the verb byte, have to substitute in the valid-bit */
1735 eqcr_pi = s->eqcr.pi;
1736 for (i = 0; i < num_enqueued; i++) {
1737 p = qbman_cena_write_start_wo_shadow(&s->sys,
1738 QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
1740 p[0] = cl[0] | s->eqcr.pi_vb;
1742 if (!(eqcr_pi & half_mask))
1743 s->eqcr.pi_vb ^= QB_VALID_BIT;
1746 s->eqcr.pi = eqcr_pi & full_mask;
/* Doorbell: publish new producer index with the RT bit set. */
1749 qbman_cinh_write(&s->sys, QBMAN_CINH_SWP_EQCR_PI,
1750 (QB_RT_BIT)|(s->eqcr.pi)|s->eqcr.pi_vb);
1752 return num_enqueued;
/*
 * Public entry for per-frame-descriptor multi-enqueue: dispatch to the
 * installed implementation pointer or the cache-inhibited direct variant.
 */
1754 int qbman_swp_enqueue_multiple_desc(struct qbman_swp *s,
1755 const struct qbman_eq_desc *d,
1756 const struct qbman_fd *fd,
1760 return qbman_swp_enqueue_multiple_desc_ptr(s, d, fd,
1763 return qbman_swp_enqueue_multiple_desc_cinh_direct(s, d, fd,
1768 /*************************/
1769 /* Static (push) dequeue */
1770 /*************************/
/*
 * Report whether the static dequeue (SDQCR) source bit for @channel_idx is
 * currently enabled in this portal's cached SDQCR value.
 *
 * Bug fix: the original wrote "*enabled = src | (1 << channel_idx)", which
 * unconditionally SETS the probed bit, so *enabled was non-zero for every
 * channel regardless of its actual state. A getter must TEST the bit with
 * "&". NOTE(review): callers that relied on the always-true result should
 * be audited.
 */
1772 void qbman_swp_push_get(struct qbman_swp *s, uint8_t channel_idx, int *enabled)
1774 uint16_t src = (s->sdq >> QB_SDQCR_SRC_SHIFT) & QB_SDQCR_SRC_MASK;
/* The SRC field is 16 bits wide; indexes above 15 are invalid. */
1776 QBMAN_BUG_ON(channel_idx > 15);
/* Non-zero iff the channel's bit is set in the cached source map. */
1777 *enabled = src & (1 << channel_idx);
/*
 * Enable or disable the static dequeue source bit for @channel_idx, then
 * write the resulting SDQCR to the portal register.
 */
1780 void qbman_swp_push_set(struct qbman_swp *s, uint8_t channel_idx, int enable)
/* The SRC field is 16 bits wide; indexes above 15 are invalid. */
1784 QBMAN_BUG_ON(channel_idx > 15);
/* Update the cached SDQCR source bit for this channel. */
1786 s->sdq |= 1 << channel_idx;
1788 s->sdq &= ~(1 << channel_idx);
1790 /* Read back the complete src map. If no channels are enabled,
1791 * the SDQCR written to hardware must be 0 or else QMan will assert errors
1793 dqsrc = (s->sdq >> QB_SDQCR_SRC_SHIFT) & QB_SDQCR_SRC_MASK;
1795 qbman_cinh_write(&s->sys, QBMAN_CINH_SWP_SDQCR, s->sdq);
1797 qbman_cinh_write(&s->sys, QBMAN_CINH_SWP_SDQCR, 0);
1800 /***************************/
1801 /* Volatile (pull) dequeue */
1802 /***************************/
1804 /* These should be const, eventually */
1805 #define QB_VDQCR_VERB_DCT_SHIFT 0
1806 #define QB_VDQCR_VERB_DT_SHIFT 2
1807 #define QB_VDQCR_VERB_RLS_SHIFT 4
1808 #define QB_VDQCR_VERB_WAE_SHIFT 5
1809 #define QB_VDQCR_VERB_RAD_SHIFT 6
1813 qb_pull_dt_workqueue,
1814 qb_pull_dt_framequeue
/* Reset a volatile-dequeue (pull) descriptor to all-zero defaults. */
1817 void qbman_pull_desc_clear(struct qbman_pull_desc *d)
1819 memset(d, 0, sizeof(*d));
/*
 * Configure where dequeue results are delivered: to user-supplied @storage
 * (virtual + physical address) or, when no storage is given, to the DQRR.
 * The RLS verb bit selects stashing to storage; WAE controls write-allocate.
 */
1822 void qbman_pull_desc_set_storage(struct qbman_pull_desc *d,
1823 struct qbman_result *storage,
1824 dma_addr_t storage_phys,
/* Keep the virtual address so the driver can poll the result later. */
1827 d->pull.rsp_addr_virt = (size_t)storage;
1830 d->pull.verb &= ~(1 << QB_VDQCR_VERB_RLS_SHIFT);
1833 d->pull.verb |= 1 << QB_VDQCR_VERB_RLS_SHIFT;
1835 d->pull.verb |= 1 << QB_VDQCR_VERB_WAE_SHIFT;
1837 d->pull.verb &= ~(1 << QB_VDQCR_VERB_WAE_SHIFT);
/* Hardware DMAs the results to this physical address. */
1839 d->pull.rsp_addr = storage_phys;
/* Set the frame count for the pull; the field is encoded as (count - 1). */
1842 void qbman_pull_desc_set_numframes(struct qbman_pull_desc *d,
1845 d->pull.numf = numframes - 1;
/* Set the token echoed back in results to match them to this command. */
1848 void qbman_pull_desc_set_token(struct qbman_pull_desc *d, uint8_t token)
1850 d->pull.tok = token;
/* Target the pull at a single frame queue identified by @fqid. */
1853 void qbman_pull_desc_set_fq(struct qbman_pull_desc *d, uint32_t fqid)
1855 d->pull.verb |= 1 << QB_VDQCR_VERB_DCT_SHIFT;
1856 d->pull.verb |= qb_pull_dt_framequeue << QB_VDQCR_VERB_DT_SHIFT;
1857 d->pull.dq_src = fqid;
/* Target the pull at work queue @wqid with dequeue-command type @dct. */
1860 void qbman_pull_desc_set_wq(struct qbman_pull_desc *d, uint32_t wqid,
1861 enum qbman_pull_type_e dct)
1863 d->pull.verb |= dct << QB_VDQCR_VERB_DCT_SHIFT;
1864 d->pull.verb |= qb_pull_dt_workqueue << QB_VDQCR_VERB_DT_SHIFT;
1865 d->pull.dq_src = wqid;
/* Target the pull at channel @chid with dequeue-command type @dct. */
1868 void qbman_pull_desc_set_channel(struct qbman_pull_desc *d, uint32_t chid,
1869 enum qbman_pull_type_e dct)
1871 d->pull.verb |= dct << QB_VDQCR_VERB_DCT_SHIFT;
1872 d->pull.verb |= qb_pull_dt_channel << QB_VDQCR_VERB_DT_SHIFT;
1873 d->pull.dq_src = chid;
/*
 * Set/clear the RAD (reschedule-after-dequeue) bit. Only legal when the RLS
 * bit is already set (results stashed to storage); otherwise warn and do
 * nothing.
 */
1876 void qbman_pull_desc_set_rad(struct qbman_pull_desc *d, int rad)
1878 if (d->pull.verb & (1 << QB_VDQCR_VERB_RLS_SHIFT)) {
1880 d->pull.verb |= 1 << QB_VDQCR_VERB_RAD_SHIFT;
1882 d->pull.verb &= ~(1 << QB_VDQCR_VERB_RAD_SHIFT);
1884 printf("The RAD feature is not valid when RLS = 0\n");
/*
 * Issue a volatile dequeue command via cache-enabled access. Only one VDQCR
 * may be outstanding per portal: the atomic busy flag serializes issuers.
 */
1888 static int qbman_swp_pull_direct(struct qbman_swp *s,
1889 struct qbman_pull_desc *d)
1892 uint32_t *cl = qb_cl(d);
/* Claim the single VDQCR slot; undo and bail if already busy. */
1894 if (!atomic_dec_and_test(&s->vdq.busy)) {
1895 atomic_inc(&s->vdq.busy);
/* Token encodes portal index + 1 so results can be routed back. */
1899 d->pull.tok = s->sys.idx + 1;
1900 s->vdq.storage = (void *)(size_t)d->pull.rsp_addr_virt;
1901 p = qbman_cena_write_start_wo_shadow(&s->sys, QBMAN_CENA_SWP_VDQCR);
1902 memcpy(&p[1], &cl[1], 12);
1904 /* Set the verb byte, have to substitute in the valid-bit */
1906 p[0] = cl[0] | s->vdq.valid_bit;
1907 s->vdq.valid_bit ^= QB_VALID_BIT;
1908 qbman_cena_write_complete_wo_shadow(&s->sys, QBMAN_CENA_SWP_VDQCR);
/*
 * Cache-inhibited variant of the volatile dequeue command; byte-wise copy
 * into the cache-inhibited mapping, same single-outstanding-VDQCR rule.
 */
1913 static int qbman_swp_pull_cinh_direct(struct qbman_swp *s,
1914 struct qbman_pull_desc *d)
1917 uint32_t *cl = qb_cl(d);
/* Claim the single VDQCR slot; undo and bail if already busy. */
1919 if (!atomic_dec_and_test(&s->vdq.busy)) {
1920 atomic_inc(&s->vdq.busy);
1924 d->pull.tok = s->sys.idx + 1;
1925 s->vdq.storage = (void *)(size_t)d->pull.rsp_addr_virt;
1926 p = qbman_cinh_write_start_wo_shadow(&s->sys, QBMAN_CENA_SWP_VDQCR);
1927 memcpy_byte_by_byte(&p[1], &cl[1], 12);
1929 /* Set the verb byte, have to substitute in the valid-bit */
1931 p[0] = cl[0] | s->vdq.valid_bit;
1932 s->vdq.valid_bit ^= QB_VALID_BIT;
/*
 * Memory-backed variant of the volatile dequeue command: write the command
 * to the mem-back VDQCR area, then ring the RT doorbell register.
 */
1937 static int qbman_swp_pull_mem_back(struct qbman_swp *s,
1938 struct qbman_pull_desc *d)
1941 uint32_t *cl = qb_cl(d);
/* Claim the single VDQCR slot; undo and bail if already busy. */
1943 if (!atomic_dec_and_test(&s->vdq.busy)) {
1944 atomic_inc(&s->vdq.busy);
1948 d->pull.tok = s->sys.idx + 1;
1949 s->vdq.storage = (void *)(size_t)d->pull.rsp_addr_virt;
1950 p = qbman_cena_write_start_wo_shadow(&s->sys, QBMAN_CENA_SWP_VDQCR_MEM);
1951 memcpy(&p[1], &cl[1], 12);
1953 /* Set the verb byte, have to substitute in the valid-bit */
1954 p[0] = cl[0] | s->vdq.valid_bit;
1955 s->vdq.valid_bit ^= QB_VALID_BIT;
/* Doorbell: tell hardware the mem-back command is ready. */
1957 qbman_cinh_write(&s->sys, QBMAN_CINH_SWP_VDQCR_RT, QMAN_RT_MODE);
/*
 * Public volatile-dequeue entry: dispatch to the installed implementation
 * pointer or the cache-inhibited direct variant.
 */
1962 int qbman_swp_pull(struct qbman_swp *s, struct qbman_pull_desc *d)
1965 return qbman_swp_pull_ptr(s, d);
1967 return qbman_swp_pull_cinh_direct(s, d);
1974 #define QMAN_DQRR_PI_MASK 0xf
1976 #define QBMAN_RESULT_DQ 0x60
1977 #define QBMAN_RESULT_FQRN 0x21
1978 #define QBMAN_RESULT_FQRNI 0x22
1979 #define QBMAN_RESULT_FQPN 0x24
1980 #define QBMAN_RESULT_FQDAN 0x25
1981 #define QBMAN_RESULT_CDAN 0x26
1982 #define QBMAN_RESULT_CSCN_MEM 0x27
1983 #define QBMAN_RESULT_CGCU 0x28
1984 #define QBMAN_RESULT_BPSCN 0x29
1985 #define QBMAN_RESULT_CSCN_WQ 0x2a
1987 #include <rte_prefetch.h>
/* Prefetch the next DQRR entry so a following dqrr_next call hits cache. */
1989 void qbman_swp_prefetch_dqrr_next(struct qbman_swp *s)
1991 const struct qbman_result *p;
1993 p = qbman_cena_read_wo_shadow(&s->sys,
1994 QBMAN_CENA_SWP_DQRR(s->dqrr.next_idx));
1998 /* NULL return if there are no unconsumed DQRR entries. Returns a DQRR entry
1999 * only once, so repeated calls can return a sequence of DQRR entries, without
2000 * requiring they be consumed immediately or in any particular order.
/* Dispatch to the installed implementation or the cache-inhibited variant. */
2002 const struct qbman_result *qbman_swp_dqrr_next(struct qbman_swp *s)
2005 return qbman_swp_dqrr_next_ptr(s);
2007 return qbman_swp_dqrr_next_cinh_direct(s);
/*
 * Cache-enabled DQRR consumer: return the next unconsumed DQRR entry, or
 * NULL if none. Handles the DQRR reset-bug workaround on affected hardware.
 */
2010 const struct qbman_result *qbman_swp_dqrr_next_direct(struct qbman_swp *s)
2013 uint32_t response_verb;
2015 const struct qbman_result *p;
2017 /* Before using valid-bit to detect if something is there, we have to
2018 * handle the case of the DQRR reset bug...
2020 if (s->dqrr.reset_bug) {
2021 /* We pick up new entries by cache-inhibited producer index,
2022 * which means that a non-coherent mapping would require us to
2023 * invalidate and read *only* once that PI has indicated that
2024 * there's an entry here. The first trip around the DQRR ring
2025 * will be much less efficient than all subsequent trips around
2028 uint8_t pi = qbman_cinh_read(&s->sys, QBMAN_CINH_SWP_DQPI) &
2031 /* there are new entries if pi != next_idx */
2032 if (pi == s->dqrr.next_idx)
2035 /* if next_idx is/was the last ring index, and 'pi' is
2036 * different, we can disable the workaround as all the ring
2037 * entries have now been DMA'd to so valid-bit checking is
2038 * repaired. Note: this logic needs to be based on next_idx
2039 * (which increments one at a time), rather than on pi (which
2040 * can burst and wrap-around between our snapshots of it).
2042 QBMAN_BUG_ON((s->dqrr.dqrr_size - 1) < 0);
2043 if (s->dqrr.next_idx == (s->dqrr.dqrr_size - 1u)) {
2044 pr_debug("DEBUG: next_idx=%d, pi=%d, clear reset bug\n",
2045 s->dqrr.next_idx, pi);
2046 s->dqrr.reset_bug = 0;
/* Invalidate before the read so we see freshly DMA'd data. */
2048 qbman_cena_invalidate_prefetch(&s->sys,
2049 QBMAN_CENA_SWP_DQRR(s->dqrr.next_idx));
2051 p = qbman_cena_read_wo_shadow(&s->sys,
2052 QBMAN_CENA_SWP_DQRR(s->dqrr.next_idx));
2056 /* If the valid-bit isn't of the expected polarity, nothing there. Note,
2057 * in the DQRR reset bug workaround, we shouldn't need to skip these
2058 * check, because we've already determined that a new entry is available
2059 * and we've invalidated the cacheline before reading it, so the
2060 * valid-bit behaviour is repaired and should tell us what we already
2061 * knew from reading PI.
2063 if ((verb & QB_VALID_BIT) != s->dqrr.valid_bit)
2066 /* There's something there. Move "next_idx" attention to the next ring
2067 * entry (and prefetch it) before returning what we found.
/* Wrap the index and flip expected valid-bit polarity at ring end. */
2070 if (s->dqrr.next_idx == s->dqrr.dqrr_size) {
2071 s->dqrr.next_idx = 0;
2072 s->dqrr.valid_bit ^= QB_VALID_BIT;
2074 /* If this is the final response to a volatile dequeue command
2075 * indicate that the vdq is no longer busy
2078 response_verb = verb & QBMAN_RESPONSE_VERB_MASK;
2079 if ((response_verb == QBMAN_RESULT_DQ) &&
2080 (flags & QBMAN_DQ_STAT_VOLATILE) &&
2081 (flags & QBMAN_DQ_STAT_EXPIRED))
2082 atomic_inc(&s->vdq.busy)2;
/*
 * Cache-inhibited DQRR consumer: same logic as the direct variant but reads
 * the entry through the cache-inhibited mapping (no invalidate needed).
 */
2087 const struct qbman_result *qbman_swp_dqrr_next_cinh_direct(struct qbman_swp *s)
2090 uint32_t response_verb;
2092 const struct qbman_result *p;
2094 /* Before using valid-bit to detect if something is there, we have to
2095 * handle the case of the DQRR reset bug...
2097 if (s->dqrr.reset_bug) {
2098 /* We pick up new entries by cache-inhibited producer index,
2099 * which means that a non-coherent mapping would require us to
2100 * invalidate and read *only* once that PI has indicated that
2101 * there's an entry here. The first trip around the DQRR ring
2102 * will be much less efficient than all subsequent trips around
2105 uint8_t pi = qbman_cinh_read(&s->sys, QBMAN_CINH_SWP_DQPI) &
2108 /* there are new entries if pi != next_idx */
2109 if (pi == s->dqrr.next_idx)
2112 /* if next_idx is/was the last ring index, and 'pi' is
2113 * different, we can disable the workaround as all the ring
2114 * entries have now been DMA'd to so valid-bit checking is
2115 * repaired. Note: this logic needs to be based on next_idx
2116 * (which increments one at a time), rather than on pi (which
2117 * can burst and wrap-around between our snapshots of it).
2119 QBMAN_BUG_ON((s->dqrr.dqrr_size - 1) < 0);
2120 if (s->dqrr.next_idx == (s->dqrr.dqrr_size - 1u)) {
2121 pr_debug("DEBUG: next_idx=%d, pi=%d, clear reset bug\n",
2122 s->dqrr.next_idx, pi);
2123 s->dqrr.reset_bug = 0;
2126 p = qbman_cinh_read_wo_shadow(&s->sys,
2127 QBMAN_CENA_SWP_DQRR(s->dqrr.next_idx));
2131 /* If the valid-bit isn't of the expected polarity, nothing there. Note,
2132 * in the DQRR reset bug workaround, we shouldn't need to skip these
2133 * check, because we've already determined that a new entry is available
2134 * and we've invalidated the cacheline before reading it, so the
2135 * valid-bit behaviour is repaired and should tell us what we already
2136 * knew from reading PI.
2138 if ((verb & QB_VALID_BIT) != s->dqrr.valid_bit)
2141 /* There's something there. Move "next_idx" attention to the next ring
2142 * entry (and prefetch it) before returning what we found.
/* Wrap the index and flip expected valid-bit polarity at ring end. */
2145 if (s->dqrr.next_idx == s->dqrr.dqrr_size) {
2146 s->dqrr.next_idx = 0;
2147 s->dqrr.valid_bit ^= QB_VALID_BIT;
2149 /* If this is the final response to a volatile dequeue command
2150 * indicate that the vdq is no longer busy
2153 response_verb = verb & QBMAN_RESPONSE_VERB_MASK;
2154 if ((response_verb == QBMAN_RESULT_DQ) &&
2155 (flags & QBMAN_DQ_STAT_VOLATILE) &&
2156 (flags & QBMAN_DQ_STAT_EXPIRED))
2157 atomic_inc(&s->vdq.busy);
/*
 * Memory-backed DQRR consumer: no reset-bug workaround needed; read the
 * entry from the mem-back DQRR area and use valid-bit polarity detection.
 */
2162 const struct qbman_result *qbman_swp_dqrr_next_mem_back(struct qbman_swp *s)
2165 uint32_t response_verb;
2167 const struct qbman_result *p;
2169 p = qbman_cena_read_wo_shadow(&s->sys,
2170 QBMAN_CENA_SWP_DQRR_MEM(s->dqrr.next_idx));
2174 /* If the valid-bit isn't of the expected polarity, nothing there. Note,
2175 * in the DQRR reset bug workaround, we shouldn't need to skip these
2176 * check, because we've already determined that a new entry is available
2177 * and we've invalidated the cacheline before reading it, so the
2178 * valid-bit behaviour is repaired and should tell us what we already
2179 * knew from reading PI.
2181 if ((verb & QB_VALID_BIT) != s->dqrr.valid_bit)
2184 /* There's something there. Move "next_idx" attention to the next ring
2185 * entry (and prefetch it) before returning what we found.
/* Wrap the index and flip expected valid-bit polarity at ring end. */
2188 if (s->dqrr.next_idx == s->dqrr.dqrr_size) {
2189 s->dqrr.next_idx = 0;
2190 s->dqrr.valid_bit ^= QB_VALID_BIT;
2192 /* If this is the final response to a volatile dequeue command
2193 * indicate that the vdq is no longer busy
2196 response_verb = verb & QBMAN_RESPONSE_VERB_MASK;
2197 if ((response_verb == QBMAN_RESULT_DQ)
2198 && (flags & QBMAN_DQ_STAT_VOLATILE)
2199 && (flags & QBMAN_DQ_STAT_EXPIRED))
2200 atomic_inc(&s->vdq.busy);
2204 /* Consume DQRR entries previously returned from qbman_swp_dqrr_next(). */
2205 void qbman_swp_dqrr_consume(struct qbman_swp *s,
2206 const struct qbman_result *dq)
/* DCAP takes the ring index, recovered from the entry's address. */
2208 qbman_cinh_write(&s->sys,
2209 QBMAN_CINH_SWP_DCAP, QBMAN_IDX_FROM_DQRR(dq));
2212 /* Consume DQRR entries previously returned from qbman_swp_dqrr_next(). */
/* Variant taking the ring index directly rather than the entry pointer. */
2213 void qbman_swp_dqrr_idx_consume(struct qbman_swp *s,
2216 qbman_cinh_write(&s->sys, QBMAN_CINH_SWP_DCAP, dqrr_index);
2219 /*********************************/
2220 /* Polling user-provided storage */
2221 /*********************************/
/*
 * Poll user-provided storage for a new dequeue result. The hardware writes
 * a non-zero token when a result lands; consume it and release the VDQCR
 * busy flag if this storage belongs to the outstanding pull command.
 */
2223 int qbman_result_has_new_result(struct qbman_swp *s,
2224 struct qbman_result *dq)
/* Token still zero: hardware has not delivered a result yet. */
2226 if (dq->dq.tok == 0)
2230 * Set token to be 0 so we will detect change back to 1
2231 * next time the looping is traversed. Const is cast away here
2232 * as we want users to treat the dequeue responses as read only.
2234 ((struct qbman_result *)dq)->dq.tok = 0;
2237 * VDQCR "no longer busy" hook - not quite the same as DQRR, because
2238 * the fact "VDQCR" shows busy doesn't mean that we hold the result
2239 * that makes it available. Eg. we may be looking at our 10th dequeue
2240 * result, having released VDQCR after the 1st result and it is now
2241 * busy due to some other command!
2243 if (s->vdq.storage == dq) {
2244 s->vdq.storage = NULL;
2245 atomic_inc(&s->vdq.busy);
/*
 * Portal-less check of user storage: true once the hardware has written a
 * non-zero token; clears the token so the next result can be detected.
 */
2251 int qbman_check_new_result(struct qbman_result *dq)
2253 if (dq->dq.tok == 0)
2257 * Set token to be 0 so we will detect change back to 1
2258 * next time the looping is traversed. Const is cast away here
2259 * as we want users to treat the dequeue responses as read only.
2261 ((struct qbman_result *)dq)->dq.tok = 0;
/*
 * Check completion of a command whose result lands in @dq. The token holds
 * (portal index + 1), set at pull time, letting us look up the issuing
 * portal in portal_idx_map and release its VDQCR busy flag.
 */
2266 int qbman_check_command_complete(struct qbman_result *dq)
2268 struct qbman_swp *s;
2270 if (dq->dq.tok == 0)
/* Token was written as idx + 1 by the pull path. */
2273 s = portal_idx_map[dq->dq.tok - 1];
2275 * VDQCR "no longer busy" hook - not quite the same as DQRR, because
2276 * the fact "VDQCR" shows busy doesn't mean that we hold the result
2277 * that makes it available. Eg. we may be looking at our 10th dequeue
2278 * result, having released VDQCR after the 1st result and it is now
2279 * busy due to some other command!
2281 if (s->vdq.storage == dq) {
2282 s->vdq.storage = NULL;
2283 atomic_inc(&s->vdq.busy);
2289 /********************************/
2290 /* Categorising qbman results */
2291 /********************************/
/* True if the result's response verb (masked) equals @x. */
2293 static inline int __qbman_result_is_x(const struct qbman_result *dq,
2296 uint8_t response_verb = dq->dq.verb & QBMAN_RESPONSE_VERB_MASK;
2298 return (response_verb == x);
/* True if @dq is a frame-dequeue result. */
2301 int qbman_result_is_DQ(const struct qbman_result *dq)
2303 return __qbman_result_is_x(dq, QBMAN_RESULT_DQ);
/* True if @dq is a frame-queue data-availability notification. */
2306 int qbman_result_is_FQDAN(const struct qbman_result *dq)
2308 return __qbman_result_is_x(dq, QBMAN_RESULT_FQDAN);
/* True if @dq is a channel data-availability notification. */
2311 int qbman_result_is_CDAN(const struct qbman_result *dq)
2313 return __qbman_result_is_x(dq, QBMAN_RESULT_CDAN);
/* True if @dq is a congestion state-change notification (either form). */
2316 int qbman_result_is_CSCN(const struct qbman_result *dq)
2318 return __qbman_result_is_x(dq, QBMAN_RESULT_CSCN_MEM) ||
2319 __qbman_result_is_x(dq, QBMAN_RESULT_CSCN_WQ);
/* True if @dq is a buffer-pool state-change notification. */
2322 int qbman_result_is_BPSCN(const struct qbman_result *dq)
2324 return __qbman_result_is_x(dq, QBMAN_RESULT_BPSCN);
/* True if @dq is a congestion-group count update. */
2327 int qbman_result_is_CGCU(const struct qbman_result *dq)
2329 return __qbman_result_is_x(dq, QBMAN_RESULT_CGCU);
/* True if @dq is a frame-queue retirement notification. */
2332 int qbman_result_is_FQRN(const struct qbman_result *dq)
2334 return __qbman_result_is_x(dq, QBMAN_RESULT_FQRN);
/* True if @dq is an immediate frame-queue retirement notification. */
2337 int qbman_result_is_FQRNI(const struct qbman_result *dq)
2339 return __qbman_result_is_x(dq, QBMAN_RESULT_FQRNI);
/* True if @dq is a frame-queue park notification. */
2342 int qbman_result_is_FQPN(const struct qbman_result *dq)
2344 return __qbman_result_is_x(dq, QBMAN_RESULT_FQPN);
2347 /*********************************/
2348 /* Parsing frame dequeue results */
2349 /*********************************/
2351 /* These APIs assume qbman_result_is_DQ() is TRUE */
/* Return the dequeue-result status/flags byte (body not visible here). */
2353 uint8_t qbman_result_DQ_flags(const struct qbman_result *dq)
/* Return the sequence number field of a dequeue result. */
2358 uint16_t qbman_result_DQ_seqnum(const struct qbman_result *dq)
2360 return dq->dq.seqnum;
/* Return the order-definition point ID (oprid field) of a dequeue result. */
2363 uint16_t qbman_result_DQ_odpid(const struct qbman_result *dq)
2365 return dq->dq.oprid;
/* Return the frame-queue ID of a dequeue result (body not visible here). */
2368 uint32_t qbman_result_DQ_fqid(const struct qbman_result *dq)
/* Return the FQ byte count reported in a dequeue result. */
2373 uint32_t qbman_result_DQ_byte_count(const struct qbman_result *dq)
2375 return dq->dq.fq_byte_cnt;
/* Return the FQ frame count reported in a dequeue result. */
2378 uint32_t qbman_result_DQ_frame_count(const struct qbman_result *dq)
2380 return dq->dq.fq_frm_cnt;
/* Return the FQ descriptor context of a dequeue result. */
2383 uint64_t qbman_result_DQ_fqd_ctx(const struct qbman_result *dq)
2385 return dq->dq.fqd_ctx;
/* Return a pointer to the frame descriptor embedded in a dequeue result. */
2388 const struct qbman_fd *qbman_result_DQ_fd(const struct qbman_result *dq)
2390 return (const struct qbman_fd *)&dq->dq.fd[0];
2393 /**************************************/
2394 /* Parsing state-change notifications */
2395 /**************************************/
/* Return the state byte of a state-change notification. */
2396 uint8_t qbman_result_SCN_state(const struct qbman_result *scn)
2398 return scn->scn.state;
/* Return the resource ID / token field of a state-change notification. */
2401 uint32_t qbman_result_SCN_rid(const struct qbman_result *scn)
2403 return scn->scn.rid_tok;
2406 uint64_t qbman_result_SCN_ctx(const struct qbman_result *scn)
2408 return scn->scn.ctx;
/* Buffer-pool ID of a BPSCN: low 14 bits of the notification's rid field. */
uint16_t qbman_result_bpscn_bpid(const struct qbman_result *scn)
{
	uint16_t rid = (uint16_t)qbman_result_SCN_rid(scn);

	return rid & 0x3FFF;
}
/* Non-zero when the pool still holds free buffers (state bit 0 clear). */
int qbman_result_bpscn_has_free_bufs(const struct qbman_result *scn)
{
	return (qbman_result_SCN_state(scn) & 0x1) ? 0 : 1;
}
/* Non-zero when the pool crossed its depletion threshold (state bit 1). */
int qbman_result_bpscn_is_depleted(const struct qbman_result *scn)
{
	return (int)(qbman_result_SCN_state(scn) & 0x2);
}
/* Non-zero when the pool crossed its surplus threshold (state bit 2). */
int qbman_result_bpscn_is_surplus(const struct qbman_result *scn)
{
	return (int)(qbman_result_SCN_state(scn) & 0x4);
}
/* BPSCN context word; just the generic SCN context. */
uint64_t qbman_result_bpscn_ctx(const struct qbman_result *scn)
{
	return qbman_result_SCN_ctx(scn);
}
/* Congestion-group ID of a CGCU: low 16 bits of the rid field. */
uint16_t qbman_result_cgcu_cgid(const struct qbman_result *scn)
{
	uint16_t rid = (uint16_t)qbman_result_SCN_rid(scn);

	return rid & 0xFFFF;
}
/* Instantaneous count reported by a CGCU; carried in the SCN context word. */
uint64_t qbman_result_cgcu_icnt(const struct qbman_result *scn)
{
	return qbman_result_SCN_ctx(scn);
}
/********************/
/* Parsing EQ RESP  */
/********************/
2455 struct qbman_fd *qbman_result_eqresp_fd(struct qbman_result *eqresp)
2457 return (struct qbman_fd *)&eqresp->eq_resp.fd[0];
2460 void qbman_result_eqresp_set_rspid(struct qbman_result *eqresp, uint8_t val)
2462 eqresp->eq_resp.rspid = val;
2465 uint8_t qbman_result_eqresp_rspid(struct qbman_result *eqresp)
2467 return eqresp->eq_resp.rspid;
2470 uint8_t qbman_result_eqresp_rc(struct qbman_result *eqresp)
2472 if (eqresp->eq_resp.rc == 0xE)
/******************/
/* Buffer release */
/******************/
2481 #define QB_BR_RC_VALID_SHIFT 5
2482 #define QB_BR_RCDI_SHIFT 6
2484 void qbman_release_desc_clear(struct qbman_release_desc *d)
2486 memset(d, 0, sizeof(*d));
2487 d->br.verb = 1 << QB_BR_RC_VALID_SHIFT;
2490 void qbman_release_desc_set_bpid(struct qbman_release_desc *d, uint16_t bpid)
2495 void qbman_release_desc_set_rcdi(struct qbman_release_desc *d, int enable)
2498 d->br.verb |= 1 << QB_BR_RCDI_SHIFT;
2500 d->br.verb &= ~(1 << QB_BR_RCDI_SHIFT);
2503 #define RAR_IDX(rar) ((rar) & 0x7)
2504 #define RAR_VB(rar) ((rar) & 0x80)
2505 #define RAR_SUCCESS(rar) ((rar) & 0x100)
2507 static int qbman_swp_release_direct(struct qbman_swp *s,
2508 const struct qbman_release_desc *d,
2509 const uint64_t *buffers,
2510 unsigned int num_buffers)
2513 const uint32_t *cl = qb_cl(d);
2514 uint32_t rar = qbman_cinh_read(&s->sys, QBMAN_CINH_SWP_RAR);
2516 pr_debug("RAR=%08x\n", rar);
2517 if (!RAR_SUCCESS(rar))
2520 QBMAN_BUG_ON(!num_buffers || (num_buffers > 7));
2522 /* Start the release command */
2523 p = qbman_cena_write_start_wo_shadow(&s->sys,
2524 QBMAN_CENA_SWP_RCR(RAR_IDX(rar)));
2526 /* Copy the caller's buffer pointers to the command */
2527 u64_to_le32_copy(&p[2], buffers, num_buffers);
2529 /* Set the verb byte, have to substitute in the valid-bit and the
2530 * number of buffers.
2533 p[0] = cl[0] | RAR_VB(rar) | num_buffers;
2534 qbman_cena_write_complete_wo_shadow(&s->sys,
2535 QBMAN_CENA_SWP_RCR(RAR_IDX(rar)));
2540 static int qbman_swp_release_cinh_direct(struct qbman_swp *s,
2541 const struct qbman_release_desc *d,
2542 const uint64_t *buffers,
2543 unsigned int num_buffers)
2546 const uint32_t *cl = qb_cl(d);
2547 uint32_t rar = qbman_cinh_read(&s->sys, QBMAN_CINH_SWP_RAR);
2549 pr_debug("RAR=%08x\n", rar);
2550 if (!RAR_SUCCESS(rar))
2553 QBMAN_BUG_ON(!num_buffers || (num_buffers > 7));
2555 /* Start the release command */
2556 p = qbman_cinh_write_start_wo_shadow(&s->sys,
2557 QBMAN_CENA_SWP_RCR(RAR_IDX(rar)));
2559 /* Copy the caller's buffer pointers to the command */
2560 memcpy_byte_by_byte(&p[2], buffers, num_buffers * sizeof(uint64_t));
2562 /* Set the verb byte, have to substitute in the valid-bit and the
2563 * number of buffers.
2566 p[0] = cl[0] | RAR_VB(rar) | num_buffers;
2571 static int qbman_swp_release_mem_back(struct qbman_swp *s,
2572 const struct qbman_release_desc *d,
2573 const uint64_t *buffers,
2574 unsigned int num_buffers)
2577 const uint32_t *cl = qb_cl(d);
2578 uint32_t rar = qbman_cinh_read(&s->sys, QBMAN_CINH_SWP_RAR);
2580 pr_debug("RAR=%08x\n", rar);
2581 if (!RAR_SUCCESS(rar))
2584 QBMAN_BUG_ON(!num_buffers || (num_buffers > 7));
2586 /* Start the release command */
2587 p = qbman_cena_write_start_wo_shadow(&s->sys,
2588 QBMAN_CENA_SWP_RCR_MEM(RAR_IDX(rar)));
2590 /* Copy the caller's buffer pointers to the command */
2591 u64_to_le32_copy(&p[2], buffers, num_buffers);
2593 /* Set the verb byte, have to substitute in the valid-bit and the
2594 * number of buffers.
2596 p[0] = cl[0] | RAR_VB(rar) | num_buffers;
2598 qbman_cinh_write(&s->sys, QBMAN_CINH_SWP_RCR_AM_RT +
2599 RAR_IDX(rar) * 4, QMAN_RT_MODE);
2604 int qbman_swp_release(struct qbman_swp *s,
2605 const struct qbman_release_desc *d,
2606 const uint64_t *buffers,
2607 unsigned int num_buffers)
2610 return qbman_swp_release_ptr(s, d, buffers, num_buffers);
2612 return qbman_swp_release_cinh_direct(s, d, buffers,
/*******************/
/* Buffer acquires */
/*******************/
/* 64-byte hardware layouts of the buffer-acquire management command and
 * its response.
 * NOTE(review): several field lines were lost in extraction; the layout
 * below is reconstructed from upstream around the visible reserved pads —
 * this is ABI with the hardware, confirm against the original file.
 */
struct qbman_acquire_desc {
	uint8_t verb;
	uint8_t reserved;
	uint16_t bpid;
	uint8_t num;
	uint8_t reserved2[59];
};

struct qbman_acquire_rslt {
	uint8_t verb;
	uint8_t rslt;
	uint16_t reserved;
	uint8_t num;
	uint8_t reserved2[3];
	uint64_t buf[7];
};
2636 static int qbman_swp_acquire_direct(struct qbman_swp *s, uint16_t bpid,
2637 uint64_t *buffers, unsigned int num_buffers)
2639 struct qbman_acquire_desc *p;
2640 struct qbman_acquire_rslt *r;
2642 if (!num_buffers || (num_buffers > 7))
2645 /* Start the management command */
2646 p = qbman_swp_mc_start(s);
2651 /* Encode the caller-provided attributes */
2653 p->num = num_buffers;
2655 /* Complete the management command */
2656 r = qbman_swp_mc_complete(s, p, QBMAN_MC_ACQUIRE);
2658 pr_err("qbman: acquire from BPID %d failed, no response\n",
2663 /* Decode the outcome */
2664 QBMAN_BUG_ON((r->verb & QBMAN_RESPONSE_VERB_MASK) != QBMAN_MC_ACQUIRE);
2666 /* Determine success or failure */
2667 if (r->rslt != QBMAN_MC_RSLT_OK) {
2668 pr_err("Acquire buffers from BPID 0x%x failed, code=0x%02x\n",
2673 QBMAN_BUG_ON(r->num > num_buffers);
2675 /* Copy the acquired buffers to the caller's array */
2676 u64_from_le32_copy(buffers, &r->buf[0], r->num);
2681 static int qbman_swp_acquire_cinh_direct(struct qbman_swp *s, uint16_t bpid,
2682 uint64_t *buffers, unsigned int num_buffers)
2684 struct qbman_acquire_desc *p;
2685 struct qbman_acquire_rslt *r;
2687 if (!num_buffers || (num_buffers > 7))
2690 /* Start the management command */
2691 p = qbman_swp_mc_start(s);
2696 /* Encode the caller-provided attributes */
2698 p->num = num_buffers;
2700 /* Complete the management command */
2701 r = qbman_swp_mc_complete_cinh(s, p, QBMAN_MC_ACQUIRE);
2703 pr_err("qbman: acquire from BPID %d failed, no response\n",
2708 /* Decode the outcome */
2709 QBMAN_BUG_ON((r->verb & QBMAN_RESPONSE_VERB_MASK) != QBMAN_MC_ACQUIRE);
2711 /* Determine success or failure */
2712 if (r->rslt != QBMAN_MC_RSLT_OK) {
2713 pr_err("Acquire buffers from BPID 0x%x failed, code=0x%02x\n",
2718 QBMAN_BUG_ON(r->num > num_buffers);
2720 /* Copy the acquired buffers to the caller's array */
2721 u64_from_le32_copy(buffers, &r->buf[0], r->num);
2726 int qbman_swp_acquire(struct qbman_swp *s, uint16_t bpid, uint64_t *buffers,
2727 unsigned int num_buffers)
2730 return qbman_swp_acquire_direct(s, bpid, buffers, num_buffers);
2732 return qbman_swp_acquire_cinh_direct(s, bpid, buffers,
/* Hardware layouts of the "alter FQ state" management command and its
 * response; the command carries only the 24-bit FQID.
 * NOTE(review): some field lines were lost in extraction; layout
 * reconstructed from upstream around the visible reserved pads — this is
 * ABI with the hardware, confirm against the original file.
 */
struct qbman_alt_fq_state_desc {
	uint8_t verb;
	uint8_t reserved[3];
	uint32_t fqid;
	uint8_t reserved2[56];
};

struct qbman_alt_fq_state_rslt {
	uint8_t verb;
	uint8_t rslt;
	uint8_t reserved[62];
};

#define ALT_FQ_FQID_MASK 0x00FFFFFF
2754 static int qbman_swp_alt_fq_state(struct qbman_swp *s, uint32_t fqid,
2755 uint8_t alt_fq_verb)
2757 struct qbman_alt_fq_state_desc *p;
2758 struct qbman_alt_fq_state_rslt *r;
2760 /* Start the management command */
2761 p = qbman_swp_mc_start(s);
2765 p->fqid = fqid & ALT_FQ_FQID_MASK;
2767 /* Complete the management command */
2768 r = qbman_swp_mc_complete(s, p, alt_fq_verb);
2770 pr_err("qbman: mgmt cmd failed, no response (verb=0x%x)\n",
2775 /* Decode the outcome */
2776 QBMAN_BUG_ON((r->verb & QBMAN_RESPONSE_VERB_MASK) != alt_fq_verb);
2778 /* Determine success or failure */
2779 if (r->rslt != QBMAN_MC_RSLT_OK) {
2780 pr_err("ALT FQID %d failed: verb = 0x%08x, code = 0x%02x\n",
2781 fqid, alt_fq_verb, r->rslt);
2788 int qbman_swp_fq_schedule(struct qbman_swp *s, uint32_t fqid)
2790 return qbman_swp_alt_fq_state(s, fqid, QBMAN_FQ_SCHEDULE);
2793 int qbman_swp_fq_force(struct qbman_swp *s, uint32_t fqid)
2795 return qbman_swp_alt_fq_state(s, fqid, QBMAN_FQ_FORCE);
2798 int qbman_swp_fq_xon(struct qbman_swp *s, uint32_t fqid)
2800 return qbman_swp_alt_fq_state(s, fqid, QBMAN_FQ_XON);
2803 int qbman_swp_fq_xoff(struct qbman_swp *s, uint32_t fqid)
2805 return qbman_swp_alt_fq_state(s, fqid, QBMAN_FQ_XOFF);
/**********************/
/* Channel management */
/**********************/
/* Hardware layouts of the WQ-channel (CDAN) configuration command and its
 * response.
 * NOTE(review): most field lines were lost in extraction; layout
 * reconstructed from upstream around the visible reserved pads — this is
 * ABI with the hardware, confirm against the original file.
 */
struct qbman_cdan_ctrl_desc {
	uint8_t verb;
	uint8_t reserved;
	uint16_t ch;
	uint8_t we;
	uint8_t ctrl;
	uint16_t reserved2;
	uint64_t cdan_ctx;
	uint8_t reserved3[48];
};

struct qbman_cdan_ctrl_rslt {
	uint8_t verb;
	uint8_t rslt;
	uint16_t ch;
	uint8_t reserved[60];
};
2831 /* Hide "ICD" for now as we don't use it, don't set it, and don't test it, so it
2832 * would be irresponsible to expose it.
2834 #define CODE_CDAN_WE_EN 0x1
2835 #define CODE_CDAN_WE_CTX 0x4
2837 static int qbman_swp_CDAN_set(struct qbman_swp *s, uint16_t channelid,
2838 uint8_t we_mask, uint8_t cdan_en,
2841 struct qbman_cdan_ctrl_desc *p;
2842 struct qbman_cdan_ctrl_rslt *r;
2844 /* Start the management command */
2845 p = qbman_swp_mc_start(s);
2849 /* Encode the caller-provided attributes */
2858 /* Complete the management command */
2859 r = qbman_swp_mc_complete(s, p, QBMAN_WQCHAN_CONFIGURE);
2861 pr_err("qbman: wqchan config failed, no response\n");
2865 /* Decode the outcome */
2866 QBMAN_BUG_ON((r->verb & QBMAN_RESPONSE_VERB_MASK)
2867 != QBMAN_WQCHAN_CONFIGURE);
2869 /* Determine success or failure */
2870 if (r->rslt != QBMAN_MC_RSLT_OK) {
2871 pr_err("CDAN cQID %d failed: code = 0x%02x\n",
2872 channelid, r->rslt);
2879 int qbman_swp_CDAN_set_context(struct qbman_swp *s, uint16_t channelid,
2882 return qbman_swp_CDAN_set(s, channelid,
2887 int qbman_swp_CDAN_enable(struct qbman_swp *s, uint16_t channelid)
2889 return qbman_swp_CDAN_set(s, channelid,
2894 int qbman_swp_CDAN_disable(struct qbman_swp *s, uint16_t channelid)
2896 return qbman_swp_CDAN_set(s, channelid,
2901 int qbman_swp_CDAN_set_context_enable(struct qbman_swp *s, uint16_t channelid,
2904 return qbman_swp_CDAN_set(s, channelid,
2905 CODE_CDAN_WE_EN | CODE_CDAN_WE_CTX,
/* Recover the DQRR ring index from a DQRR entry pointer (inverse of
 * QBMAN_CENA_SWP_DQRR()).
 */
uint8_t qbman_get_dqrr_idx(const struct qbman_result *dqrr)
{
	return QBMAN_IDX_FROM_DQRR(dqrr);
}
2914 struct qbman_result *qbman_get_dqrr_from_idx(struct qbman_swp *s, uint8_t idx)
2916 struct qbman_result *dq;
2918 dq = qbman_cena_read(&s->sys, QBMAN_CENA_SWP_DQRR(idx));