1 /* SPDX-License-Identifier: BSD-3-Clause
3 * Copyright (C) 2014-2016 Freescale Semiconductor, Inc.
4 * Copyright 2018-2019 NXP
9 #include "qbman_portal.h"
11 /* QBMan portal management command codes */
12 #define QBMAN_MC_ACQUIRE 0x30
13 #define QBMAN_WQCHAN_CONFIGURE 0x46
15 /* Reverse mapping of QBMAN_CENA_SWP_DQRR() */
/* Each DQRR entry is 64 bytes; mask to the 512-byte DQRR window then
 * divide by entry size to recover the ring index from a pointer.
 */
16 #define QBMAN_IDX_FROM_DQRR(p) (((unsigned long)p & 0x1ff) >> 6)
18 /* QBMan FQ management command codes */
19 #define QBMAN_FQ_SCHEDULE 0x48
20 #define QBMAN_FQ_FORCE 0x49
21 #define QBMAN_FQ_XON 0x4d
22 #define QBMAN_FQ_XOFF 0x4e
24 /*******************************/
25 /* Pre-defined attribute codes */
26 /*******************************/
28 #define QBMAN_RESPONSE_VERB_MASK 0x7f
30 /*************************/
31 /* SDQCR attribute codes */
32 /*************************/
/* Shift/mask pairs for composing the Static Dequeue Command Register:
 * FC = frame count, DCT = dequeue command type, TOK = token, SRC = source.
 */
33 #define QB_SDQCR_FC_SHIFT 29
34 #define QB_SDQCR_FC_MASK 0x1
35 #define QB_SDQCR_DCT_SHIFT 24
36 #define QB_SDQCR_DCT_MASK 0x3
37 #define QB_SDQCR_TOK_SHIFT 16
38 #define QB_SDQCR_TOK_MASK 0xff
39 #define QB_SDQCR_SRC_SHIFT 0
40 #define QB_SDQCR_SRC_MASK 0xffff
42 /* opaque token for static dequeues */
43 #define QMAN_SDQCR_TOKEN 0xbb
/* Dequeue command type values placed in the SDQCR DCT field.
 * NOTE(review): source lines appear to have been dropped in extraction here
 * (the closing brace of this enum and the header of the following
 * qbman_sdqcr_fc enum are missing) -- verify against the upstream file.
 */
45 enum qbman_sdqcr_dct {
46 qbman_sdqcr_dct_null = 0,
47 qbman_sdqcr_dct_prio_ics,
48 qbman_sdqcr_dct_active_ics,
49 qbman_sdqcr_dct_active
/* Frame-count values for the SDQCR FC field (one frame vs. up to three). */
53 qbman_sdqcr_fc_one = 0,
54 qbman_sdqcr_fc_up_to_3 = 1
57 /* We need to keep track of which SWP triggered a pull command
58 * so keep an array of portal IDs and use the token field to
59 * be able to find the proper portal
61 #define MAX_QBMAN_PORTALS 64
/* Token-indexed lookup table: portal index -> software portal object. */
62 static struct qbman_swp *portal_idx_map[MAX_QBMAN_PORTALS];
/* Cached QMan hardware revision, captured at portal init. */
64 uint32_t qman_version;
66 /* Internal Function declaration */
/* Forward declarations for the per-access-mode implementations.  Three
 * variants exist per operation: "direct" (cache-enabled portal writes),
 * "cinh_direct" (cache-inhibited register path, used on LS1080A), and
 * "mem_back" (memory-backed portals on QMan >= 5.0).
 * NOTE(review): several "static int" storage-class lines were dropped by
 * extraction before these declarator lines -- verify against upstream.
 */
68 qbman_swp_enqueue_array_mode_direct(struct qbman_swp *s,
69 const struct qbman_eq_desc *d,
70 const struct qbman_fd *fd);
72 qbman_swp_enqueue_array_mode_mem_back(struct qbman_swp *s,
73 const struct qbman_eq_desc *d,
74 const struct qbman_fd *fd);
77 qbman_swp_enqueue_ring_mode_direct(struct qbman_swp *s,
78 const struct qbman_eq_desc *d,
79 const struct qbman_fd *fd);
81 qbman_swp_enqueue_ring_mode_cinh_direct(struct qbman_swp *s,
82 const struct qbman_eq_desc *d,
83 const struct qbman_fd *fd);
85 qbman_swp_enqueue_ring_mode_mem_back(struct qbman_swp *s,
86 const struct qbman_eq_desc *d,
87 const struct qbman_fd *fd);
90 qbman_swp_enqueue_multiple_direct(struct qbman_swp *s,
91 const struct qbman_eq_desc *d,
92 const struct qbman_fd *fd,
96 qbman_swp_enqueue_multiple_cinh_direct(struct qbman_swp *s,
97 const struct qbman_eq_desc *d,
98 const struct qbman_fd *fd,
102 qbman_swp_enqueue_multiple_mem_back(struct qbman_swp *s,
103 const struct qbman_eq_desc *d,
104 const struct qbman_fd *fd,
109 qbman_swp_enqueue_multiple_fd_direct(struct qbman_swp *s,
110 const struct qbman_eq_desc *d,
111 struct qbman_fd **fd,
115 qbman_swp_enqueue_multiple_fd_cinh_direct(struct qbman_swp *s,
116 const struct qbman_eq_desc *d,
117 struct qbman_fd **fd,
121 qbman_swp_enqueue_multiple_fd_mem_back(struct qbman_swp *s,
122 const struct qbman_eq_desc *d,
123 struct qbman_fd **fd,
128 qbman_swp_enqueue_multiple_desc_direct(struct qbman_swp *s,
129 const struct qbman_eq_desc *d,
130 const struct qbman_fd *fd,
133 qbman_swp_enqueue_multiple_desc_cinh_direct(struct qbman_swp *s,
134 const struct qbman_eq_desc *d,
135 const struct qbman_fd *fd,
138 qbman_swp_enqueue_multiple_desc_mem_back(struct qbman_swp *s,
139 const struct qbman_eq_desc *d,
140 const struct qbman_fd *fd,
144 qbman_swp_pull_direct(struct qbman_swp *s, struct qbman_pull_desc *d);
146 qbman_swp_pull_mem_back(struct qbman_swp *s, struct qbman_pull_desc *d);
148 const struct qbman_result *qbman_swp_dqrr_next_direct(struct qbman_swp *s);
149 const struct qbman_result *qbman_swp_dqrr_next_mem_back(struct qbman_swp *s);
152 qbman_swp_release_direct(struct qbman_swp *s,
153 const struct qbman_release_desc *d,
154 const uint64_t *buffers, unsigned int num_buffers);
156 qbman_swp_release_mem_back(struct qbman_swp *s,
157 const struct qbman_release_desc *d,
158 const uint64_t *buffers, unsigned int num_buffers);
160 /* Function pointers */
/* Dispatch pointers default to the "direct" variants; qbman_swp_init()
 * rebinds them to mem_back or cinh variants based on the detected
 * QMan revision and SoC family (see L173-L197 region below).
 */
161 static int (*qbman_swp_enqueue_array_mode_ptr)(struct qbman_swp *s,
162 const struct qbman_eq_desc *d,
163 const struct qbman_fd *fd)
164 = qbman_swp_enqueue_array_mode_direct;
166 static int (*qbman_swp_enqueue_ring_mode_ptr)(struct qbman_swp *s,
167 const struct qbman_eq_desc *d,
168 const struct qbman_fd *fd)
169 = qbman_swp_enqueue_ring_mode_direct;
171 static int (*qbman_swp_enqueue_multiple_ptr)(struct qbman_swp *s,
172 const struct qbman_eq_desc *d,
173 const struct qbman_fd *fd,
176 = qbman_swp_enqueue_multiple_direct;
178 static int (*qbman_swp_enqueue_multiple_fd_ptr)(struct qbman_swp *s,
179 const struct qbman_eq_desc *d,
180 struct qbman_fd **fd,
183 = qbman_swp_enqueue_multiple_fd_direct;
185 static int (*qbman_swp_enqueue_multiple_desc_ptr)(struct qbman_swp *s,
186 const struct qbman_eq_desc *d,
187 const struct qbman_fd *fd,
189 = qbman_swp_enqueue_multiple_desc_direct;
191 static int (*qbman_swp_pull_ptr)(struct qbman_swp *s,
192 struct qbman_pull_desc *d)
193 = qbman_swp_pull_direct;
195 const struct qbman_result *(*qbman_swp_dqrr_next_ptr)(struct qbman_swp *s)
196 = qbman_swp_dqrr_next_direct;
198 static int (*qbman_swp_release_ptr)(struct qbman_swp *s,
199 const struct qbman_release_desc *d,
200 const uint64_t *buffers, unsigned int num_buffers)
201 = qbman_swp_release_direct;
203 /*********************************/
204 /* Portal constructor/destructor */
205 /*********************************/
207 /* Software portals should always be in the power-on state when we initialise,
208 * due to the CCSR-based portal reset functionality that MC has.
210 * Erk! Turns out that QMan versions prior to 4.1 do not correctly reset DQRR
211 * valid-bits, so we need to support a workaround where we don't trust
212 * valid-bits when detecting new entries until any stale ring entries have been
213 * overwritten at least once. The idea is that we read PI for the first few
214 * entries, then switch to valid-bit after that. The trick is to clear the
215 * bug-work-around boolean once the PI wraps around the ring for the first time.
217 * Note: this still carries a slight additional cost once the decrementer hits
/* Allocate and initialise a software portal from descriptor 'd': sets up
 * SDQCR defaults, detects the QMan revision to size DQRR and pick the
 * enqueue/pull/release implementations, primes the EQCR producer/consumer
 * indices, and registers the portal in portal_idx_map.
 * NOTE(review): extraction dropped lines in this function (e.g. the
 * malloc-failure check after line 225 and error-path returns after lines
 * 259/267) -- verify error handling against the upstream file.
 */
220 struct qbman_swp *qbman_swp_init(const struct qbman_swp_desc *d)
225 struct qbman_swp *p = malloc(sizeof(*p));
230 memset(p, 0, sizeof(struct qbman_swp));
233 #ifdef QBMAN_CHECKING
234 p->mc.check = swp_mc_can_start;
236 p->mc.valid_bit = QB_VALID_BIT;
/* Default SDQCR: priority-precedence ICS dispatch, up to 3 frames,
 * fixed token so pull responses can be matched back to this portal.
 */
237 p->sdq |= qbman_sdqcr_dct_prio_ics << QB_SDQCR_DCT_SHIFT;
238 p->sdq |= qbman_sdqcr_fc_up_to_3 << QB_SDQCR_FC_SHIFT;
239 p->sdq |= QMAN_SDQCR_TOKEN << QB_SDQCR_TOK_SHIFT;
240 if ((d->qman_version & QMAN_REV_MASK) >= QMAN_REV_5000
241 && (d->cena_access_mode == qman_cena_fastest_access))
242 p->mr.valid_bit = QB_VALID_BIT;
244 atomic_set(&p->vdq.busy, 1);
245 p->vdq.valid_bit = QB_VALID_BIT;
246 p->dqrr.valid_bit = QB_VALID_BIT;
247 qman_version = p->desc.qman_version;
/* Pre-4.1 QMan: smaller DQRR and the valid-bit reset bug workaround. */
248 if ((qman_version & QMAN_REV_MASK) < QMAN_REV_4100) {
249 p->dqrr.dqrr_size = 4;
250 p->dqrr.reset_bug = 1;
252 p->dqrr.dqrr_size = 8;
253 p->dqrr.reset_bug = 0;
256 ret = qbman_swp_sys_init(&p->sys, d, p->dqrr.dqrr_size);
259 pr_err("qbman_swp_sys_init() failed %d\n", ret);
263 /* Verify that the DQRRPI is 0 - if it is not the portal isn't
264 * in default state which is an error
266 if (qbman_cinh_read(&p->sys, QBMAN_CINH_SWP_DQPI) & 0xF) {
267 pr_err("qbman DQRR PI is not zero, portal is not clean\n");
272 /* SDQCR needs to be initialized to 0 when no channels are
273 * being dequeued from or else the QMan HW will indicate an
274 * error. The values that were calculated above will be
275 * applied when dequeues from a specific channel are enabled.
277 qbman_cinh_write(&p->sys, QBMAN_CINH_SWP_SDQCR, 0);
/* Rebind the dispatch pointers: mem_back variants for QMan >= 5.0 with
 * fastest (memory-backed) access, cinh variants for LS1080A.
 */
279 p->eqcr.pi_ring_size = 8;
280 if ((qman_version & QMAN_REV_MASK) >= QMAN_REV_5000
281 && (d->cena_access_mode == qman_cena_fastest_access)) {
282 p->eqcr.pi_ring_size = 32;
283 qbman_swp_enqueue_array_mode_ptr =
284 qbman_swp_enqueue_array_mode_mem_back;
285 qbman_swp_enqueue_ring_mode_ptr =
286 qbman_swp_enqueue_ring_mode_mem_back;
287 qbman_swp_enqueue_multiple_ptr =
288 qbman_swp_enqueue_multiple_mem_back;
289 qbman_swp_enqueue_multiple_fd_ptr =
290 qbman_swp_enqueue_multiple_fd_mem_back;
291 qbman_swp_enqueue_multiple_desc_ptr =
292 qbman_swp_enqueue_multiple_desc_mem_back;
293 qbman_swp_pull_ptr = qbman_swp_pull_mem_back;
294 qbman_swp_dqrr_next_ptr = qbman_swp_dqrr_next_mem_back;
295 qbman_swp_release_ptr = qbman_swp_release_mem_back;
298 if (dpaa2_svr_family == SVR_LS1080A) {
299 qbman_swp_enqueue_ring_mode_ptr =
300 qbman_swp_enqueue_ring_mode_cinh_direct;
301 qbman_swp_enqueue_multiple_ptr =
302 qbman_swp_enqueue_multiple_cinh_direct;
303 qbman_swp_enqueue_multiple_fd_ptr =
304 qbman_swp_enqueue_multiple_fd_cinh_direct;
305 qbman_swp_enqueue_multiple_desc_ptr =
306 qbman_swp_enqueue_multiple_desc_cinh_direct;
/* Build pi_ci_mask as (2*ring_size - 1): one extra bit beyond the index
 * width so producer/consumer wrap parity is preserved in qm_cyc_diff().
 */
309 for (mask_size = p->eqcr.pi_ring_size; mask_size > 0; mask_size >>= 1)
310 p->eqcr.pi_ci_mask = (p->eqcr.pi_ci_mask<<1) + 1;
311 eqcr_pi = qbman_cinh_read(&p->sys, QBMAN_CINH_SWP_EQCR_PI);
312 p->eqcr.pi = eqcr_pi & p->eqcr.pi_ci_mask;
313 p->eqcr.pi_vb = eqcr_pi & QB_VALID_BIT;
/* On memory-backed portals CI is seeded from the PI register; otherwise
 * from the CI register.  Presumably intentional for mem_back mode --
 * TODO(review) confirm against upstream.
 */
314 if ((p->desc.qman_version & QMAN_REV_MASK) >= QMAN_REV_5000
315 && (d->cena_access_mode == qman_cena_fastest_access))
316 p->eqcr.ci = qbman_cinh_read(&p->sys, QBMAN_CINH_SWP_EQCR_PI)
317 & p->eqcr.pi_ci_mask;
319 p->eqcr.ci = qbman_cinh_read(&p->sys, QBMAN_CINH_SWP_EQCR_CI)
320 & p->eqcr.pi_ci_mask;
321 p->eqcr.available = p->eqcr.pi_ring_size -
322 qm_cyc_diff(p->eqcr.pi_ring_size,
323 p->eqcr.ci & (p->eqcr.pi_ci_mask<<1),
324 p->eqcr.pi & (p->eqcr.pi_ci_mask<<1));
326 portal_idx_map[p->desc.idx] = p;
/* Tear down a software portal: release the sys layer and deregister the
 * portal from portal_idx_map.  NOTE(review): the free() of 'p' expected
 * after line 336 is not visible here (extraction dropped lines) -- verify
 * against upstream to rule out a leak.
 */
330 void qbman_swp_finish(struct qbman_swp *p)
332 #ifdef QBMAN_CHECKING
333 QBMAN_BUG_ON(p->mc.check != swp_mc_can_start);
335 qbman_swp_sys_finish(&p->sys);
336 portal_idx_map[p->desc.idx] = NULL;
/* Accessor for the descriptor the portal was created from (body dropped
 * by extraction; presumably returns &p->desc).
 */
340 const struct qbman_swp_desc *qbman_swp_get_desc(struct qbman_swp *p)
/* Thin accessors over the portal's cache-inhibited interrupt registers:
 * ISDR (vanish/disable), ISR (status), DQRR_ITR (DQRR threshold),
 * ITPR (interrupt timeout), IER (enable), IIR (inhibit).
 */
349 uint32_t qbman_swp_interrupt_get_vanish(struct qbman_swp *p)
351 return qbman_cinh_read(&p->sys, QBMAN_CINH_SWP_ISDR);
354 void qbman_swp_interrupt_set_vanish(struct qbman_swp *p, uint32_t mask)
356 qbman_cinh_write(&p->sys, QBMAN_CINH_SWP_ISDR, mask);
359 uint32_t qbman_swp_interrupt_read_status(struct qbman_swp *p)
361 return qbman_cinh_read(&p->sys, QBMAN_CINH_SWP_ISR);
/* Write-1-to-clear semantics are typical for ISR -- TODO(review) confirm. */
364 void qbman_swp_interrupt_clear_status(struct qbman_swp *p, uint32_t mask)
366 qbman_cinh_write(&p->sys, QBMAN_CINH_SWP_ISR, mask);
369 uint32_t qbman_swp_dqrr_thrshld_read_status(struct qbman_swp *p)
371 return qbman_cinh_read(&p->sys, QBMAN_CINH_SWP_DQRR_ITR);
374 void qbman_swp_dqrr_thrshld_write(struct qbman_swp *p, uint32_t mask)
376 qbman_cinh_write(&p->sys, QBMAN_CINH_SWP_DQRR_ITR, mask);
379 uint32_t qbman_swp_intr_timeout_read_status(struct qbman_swp *p)
381 return qbman_cinh_read(&p->sys, QBMAN_CINH_SWP_ITPR);
384 void qbman_swp_intr_timeout_write(struct qbman_swp *p, uint32_t mask)
386 qbman_cinh_write(&p->sys, QBMAN_CINH_SWP_ITPR, mask);
389 uint32_t qbman_swp_interrupt_get_trigger(struct qbman_swp *p)
391 return qbman_cinh_read(&p->sys, QBMAN_CINH_SWP_IER);
394 void qbman_swp_interrupt_set_trigger(struct qbman_swp *p, uint32_t mask)
396 qbman_cinh_write(&p->sys, QBMAN_CINH_SWP_IER, mask);
399 int qbman_swp_interrupt_get_inhibit(struct qbman_swp *p)
401 return qbman_cinh_read(&p->sys, QBMAN_CINH_SWP_IIR);
/* Non-zero 'inhibit' masks all portal interrupts; zero unmasks them. */
404 void qbman_swp_interrupt_set_inhibit(struct qbman_swp *p, int inhibit)
406 qbman_cinh_write(&p->sys, QBMAN_CINH_SWP_IIR,
407 inhibit ? 0xffffffff : 0);
410 /***********************/
411 /* Management commands */
412 /***********************/
415 * Internal code common to all types of management commands.
/* Begin a management command: returns a pointer to the command buffer in
 * the portal's cache-enabled area (CR_MEM on memory-backed QMan >= 5.0,
 * CR otherwise).  Caller fills the buffer then calls qbman_swp_mc_submit().
 */
418 void *qbman_swp_mc_start(struct qbman_swp *p)
421 #ifdef QBMAN_CHECKING
422 QBMAN_BUG_ON(p->mc.check != swp_mc_can_start)
424 if ((p->desc.qman_version & QMAN_REV_MASK) >= QMAN_REV_5000
425 && (p->desc.cena_access_mode == qman_cena_fastest_access))
426 ret = qbman_cena_write_start(&p->sys, QBMAN_CENA_SWP_CR_MEM);
428 ret = qbman_cena_write_start(&p->sys, QBMAN_CENA_SWP_CR);
429 #ifdef QBMAN_CHECKING
431 p->mc.check = swp_mc_can_submit;
/* Submit a previously started command: writes the verb byte (with the
 * current valid bit substituted in) last so hardware only consumes a
 * fully written command, then completes the cache-enabled write.
 */
436 void qbman_swp_mc_submit(struct qbman_swp *p, void *cmd, uint8_t cmd_verb)
439 #ifdef QBMAN_CHECKING
440 QBMAN_BUG_ON(!(p->mc.check != swp_mc_can_submit));
442 /* TBD: "|=" is going to hurt performance. Need to move as many fields
443 * out of word zero, and for those that remain, the "OR" needs to occur
444 * at the caller side. This debug check helps to catch cases where the
445 * caller wants to OR but has forgotten to do so.
447 QBMAN_BUG_ON((*v & cmd_verb) != *v);
448 if ((p->desc.qman_version & QMAN_REV_MASK) >= QMAN_REV_5000
449 && (p->desc.cena_access_mode == qman_cena_fastest_access)) {
450 *v = cmd_verb | p->mr.valid_bit;
451 qbman_cena_write_complete(&p->sys, QBMAN_CENA_SWP_CR_MEM, cmd);
/* Memory-backed path additionally rings the CR "ready" doorbell. */
453 qbman_cinh_write(&p->sys, QBMAN_CINH_SWP_CR_RT, QMAN_RT_MODE);
456 *v = cmd_verb | p->mc.valid_bit;
457 qbman_cena_write_complete(&p->sys, QBMAN_CENA_SWP_CR, cmd);
460 #ifdef QBMAN_CHECKING
461 p->mc.check = swp_mc_can_poll;
/* Poll for the command response.  Returns NULL while the response is not
 * yet available (valid bit unchanged / verb still zero -- NOTE(review):
 * the NULL-return lines were dropped by extraction, verify upstream);
 * on completion toggles the expected valid bit for the next command.
 */
465 void *qbman_swp_mc_result(struct qbman_swp *p)
468 #ifdef QBMAN_CHECKING
469 QBMAN_BUG_ON(p->mc.check != swp_mc_can_poll);
471 if ((p->desc.qman_version & QMAN_REV_MASK) >= QMAN_REV_5000
472 && (p->desc.cena_access_mode == qman_cena_fastest_access)) {
473 ret = qbman_cena_read(&p->sys, QBMAN_CENA_SWP_RR_MEM);
474 /* Command completed if the valid bit is toggled */
475 if (p->mr.valid_bit != (ret[0] & QB_VALID_BIT))
477 /* Remove the valid-bit -
478 * command completed iff the rest is non-zero
480 verb = ret[0] & ~QB_VALID_BIT;
483 p->mr.valid_bit ^= QB_VALID_BIT;
485 qbman_cena_invalidate_prefetch(&p->sys,
486 QBMAN_CENA_SWP_RR(p->mc.valid_bit));
487 ret = qbman_cena_read(&p->sys,
488 QBMAN_CENA_SWP_RR(p->mc.valid_bit));
489 /* Remove the valid-bit -
490 * command completed iff the rest is non-zero
492 verb = ret[0] & ~QB_VALID_BIT;
495 p->mc.valid_bit ^= QB_VALID_BIT;
497 #ifdef QBMAN_CHECKING
498 p->mc.check = swp_mc_can_start;
/* Bit positions/masks inside the enqueue command's verb, seqnum and dca
 * fields, plus the response-option enum values placed in the low verb bits.
 */
507 #define QB_ENQUEUE_CMD_OPTIONS_SHIFT 0
508 enum qb_enqueue_commands {
510 enqueue_response_always = 1,
511 enqueue_rejects_to_fq = 2
514 #define QB_ENQUEUE_CMD_EC_OPTION_MASK 0x3
515 #define QB_ENQUEUE_CMD_ORP_ENABLE_SHIFT 2
516 #define QB_ENQUEUE_CMD_IRQ_ON_DISPATCH_SHIFT 3
517 #define QB_ENQUEUE_CMD_TARGET_TYPE_SHIFT 4
518 #define QB_ENQUEUE_CMD_DCA_PK_SHIFT 6
519 #define QB_ENQUEUE_CMD_DCA_EN_SHIFT 7
520 #define QB_ENQUEUE_CMD_NLIS_SHIFT 14
521 #define QB_ENQUEUE_CMD_IS_NESN_SHIFT 15
/* Reset an enqueue descriptor to all-zero defaults. */
523 void qbman_eq_desc_clear(struct qbman_eq_desc *d)
525 memset(d, 0, sizeof(*d));
/* Configure a non-order-restoration enqueue: clears the ORP-enable bit
 * and selects response-always vs rejects-to-FQ from respond_success.
 */
528 void qbman_eq_desc_set_no_orp(struct qbman_eq_desc *d, int respond_success)
530 d->eq.verb &= ~(1 << QB_ENQUEUE_CMD_ORP_ENABLE_SHIFT);
532 d->eq.verb |= enqueue_response_always;
534 d->eq.verb |= enqueue_rejects_to_fq;
/* Configure an order-restoration enqueue with ORP id and sequence number;
 * 'incomplete' sets/clears the NLIS (not-last-in-sequence) bit.
 */
537 void qbman_eq_desc_set_orp(struct qbman_eq_desc *d, int respond_success,
538 uint16_t opr_id, uint16_t seqnum, int incomplete)
540 d->eq.verb |= 1 << QB_ENQUEUE_CMD_ORP_ENABLE_SHIFT;
542 d->eq.verb |= enqueue_response_always;
544 d->eq.verb |= enqueue_rejects_to_fq;
546 d->eq.orpid = opr_id;
547 d->eq.seqnum = seqnum;
549 d->eq.seqnum |= 1 << QB_ENQUEUE_CMD_NLIS_SHIFT;
551 d->eq.seqnum &= ~(1 << QB_ENQUEUE_CMD_NLIS_SHIFT);
/* ORP "hole" advance: no enqueue options, NLIS and NESN both cleared. */
554 void qbman_eq_desc_set_orp_hole(struct qbman_eq_desc *d, uint16_t opr_id,
557 d->eq.verb |= 1 << QB_ENQUEUE_CMD_ORP_ENABLE_SHIFT;
558 d->eq.verb &= ~QB_ENQUEUE_CMD_EC_OPTION_MASK;
559 d->eq.orpid = opr_id;
560 d->eq.seqnum = seqnum;
561 d->eq.seqnum &= ~(1 << QB_ENQUEUE_CMD_NLIS_SHIFT);
562 d->eq.seqnum &= ~(1 << QB_ENQUEUE_CMD_IS_NESN_SHIFT);
/* ORP NESN advance: like a hole but with the IS_NESN bit set. */
565 void qbman_eq_desc_set_orp_nesn(struct qbman_eq_desc *d, uint16_t opr_id,
568 d->eq.verb |= 1 << QB_ENQUEUE_CMD_ORP_ENABLE_SHIFT;
569 d->eq.verb &= ~QB_ENQUEUE_CMD_EC_OPTION_MASK;
570 d->eq.orpid = opr_id;
571 d->eq.seqnum = seqnum;
572 d->eq.seqnum &= ~(1 << QB_ENQUEUE_CMD_NLIS_SHIFT);
573 d->eq.seqnum |= 1 << QB_ENQUEUE_CMD_IS_NESN_SHIFT;
/* Set the DMA address where the enqueue response is written. */
576 void qbman_eq_desc_set_response(struct qbman_eq_desc *d,
577 dma_addr_t storage_phys,
580 d->eq.rsp_addr = storage_phys;
/* Set the response token (body dropped by extraction). */
584 void qbman_eq_desc_set_token(struct qbman_eq_desc *d, uint8_t token)
/* Target a frame queue: clears the target-type bit (FQ, not QD). */
589 void qbman_eq_desc_set_fq(struct qbman_eq_desc *d, uint32_t fqid)
591 d->eq.verb &= ~(1 << QB_ENQUEUE_CMD_TARGET_TYPE_SHIFT);
/* Target a queuing destination with the given bin and priority. */
595 void qbman_eq_desc_set_qd(struct qbman_eq_desc *d, uint32_t qdid,
596 uint16_t qd_bin, uint8_t qd_prio)
598 d->eq.verb |= 1 << QB_ENQUEUE_CMD_TARGET_TYPE_SHIFT;
600 d->eq.qdbin = qd_bin;
601 d->eq.qpri = qd_prio;
/* Enable/disable interrupt-on-dispatch for this enqueue. */
604 void qbman_eq_desc_set_eqdi(struct qbman_eq_desc *d, int enable)
607 d->eq.verb |= 1 << QB_ENQUEUE_CMD_IRQ_ON_DISPATCH_SHIFT;
609 d->eq.verb &= ~(1 << QB_ENQUEUE_CMD_IRQ_ON_DISPATCH_SHIFT);
/* Configure DQRR consumption acknowledgement (DCA): record the DQRR index,
 * optional park bit, and the DCA enable bit.
 */
612 void qbman_eq_desc_set_dca(struct qbman_eq_desc *d, int enable,
613 uint8_t dqrr_idx, int park)
616 d->eq.dca = dqrr_idx;
618 d->eq.dca |= 1 << QB_ENQUEUE_CMD_DCA_PK_SHIFT;
620 d->eq.dca &= ~(1 << QB_ENQUEUE_CMD_DCA_PK_SHIFT);
621 d->eq.dca |= 1 << QB_ENQUEUE_CMD_DCA_EN_SHIFT;
623 d->eq.dca &= ~(1 << QB_ENQUEUE_CMD_DCA_EN_SHIFT);
/* EQAR (enqueue acknowledgement register) field extractors: ring index,
 * valid bit, and success flag of an EQCR slot allocation.
 */
627 #define EQAR_IDX(eqar) ((eqar) & 0x1f)
628 #define EQAR_VB(eqar) ((eqar) & 0x80)
629 #define EQAR_SUCCESS(eqar) ((eqar) & 0x100)
/* Ring the per-slot EQCR "available" doorbell used by memory-backed
 * portals (register window split across AM_RT/AM_RT2 by index).
 */
631 static inline void qbman_write_eqcr_am_rt_register(struct qbman_swp *p,
635 qbman_cinh_write(&p->sys, QBMAN_CINH_SWP_EQCR_AM_RT + idx * 4,
638 qbman_cinh_write(&p->sys, QBMAN_CINH_SWP_EQCR_AM_RT2 +
/* Array-mode enqueue, direct portal access: allocate a slot via EQAR,
 * copy descriptor + frame descriptor into the EQCR entry, then write the
 * verb word last (with the EQAR valid bit) to hand the entry to hardware.
 */
644 static int qbman_swp_enqueue_array_mode_direct(struct qbman_swp *s,
645 const struct qbman_eq_desc *d,
646 const struct qbman_fd *fd)
649 const uint32_t *cl = qb_cl(d);
650 uint32_t eqar = qbman_cinh_read(&s->sys, QBMAN_CINH_SWP_EQAR);
652 pr_debug("EQAR=%08x\n", eqar);
654 if (!EQAR_SUCCESS(eqar))
655 p = qbman_cena_write_start_wo_shadow(&s->sys,
656 QBMAN_CENA_SWP_EQCR(EQAR_IDX(eqar)));
657 memcpy(&p[1], &cl[1], 28);
658 memcpy(&p[8], fd, sizeof(*fd));
660 /* Set the verb byte, have to substitute in the valid-bit */
662 p[0] = cl[0] | EQAR_VB(eqar);
663 qbman_cena_write_complete_wo_shadow(&s->sys,
664 QBMAN_CENA_SWP_EQCR(EQAR_IDX(eqar)));
/* Array-mode enqueue, memory-backed variant: same fill sequence, but
 * completion is signalled through the AM_RT doorbell register instead of
 * a cache-enabled write-complete.
 */
667 static int qbman_swp_enqueue_array_mode_mem_back(struct qbman_swp *s,
668 const struct qbman_eq_desc *d,
669 const struct qbman_fd *fd)
672 const uint32_t *cl = qb_cl(d);
673 uint32_t eqar = qbman_cinh_read(&s->sys, QBMAN_CINH_SWP_EQAR);
675 pr_debug("EQAR=%08x\n", eqar);
676 if (!EQAR_SUCCESS(eqar))
678 p = qbman_cena_write_start_wo_shadow(&s->sys,
679 QBMAN_CENA_SWP_EQCR(EQAR_IDX(eqar)));
680 memcpy(&p[1], &cl[1], 28);
681 memcpy(&p[8], fd, sizeof(*fd));
683 /* Set the verb byte, have to substitute in the valid-bit */
684 p[0] = cl[0] | EQAR_VB(eqar);
686 qbman_write_eqcr_am_rt_register(s, EQAR_IDX(eqar));
/* Dispatch through the pointer selected at portal init. */
690 static inline int qbman_swp_enqueue_array_mode(struct qbman_swp *s,
691 const struct qbman_eq_desc *d,
692 const struct qbman_fd *fd)
694 return qbman_swp_enqueue_array_mode_ptr(s, d, fd);
/* Ring-mode enqueue, direct variant: if no local credit, refresh the
 * consumer index from hardware and recompute availability; then fill the
 * EQCR slot at the producer index, write the verb (with valid bit) last,
 * and advance PI, toggling the valid bit on ring wrap.
 */
697 static int qbman_swp_enqueue_ring_mode_direct(struct qbman_swp *s,
698 const struct qbman_eq_desc *d,
699 const struct qbman_fd *fd)
702 const uint32_t *cl = qb_cl(d);
703 uint32_t eqcr_ci, full_mask, half_mask;
/* half_mask indexes ring slots; full_mask keeps the extra wrap bit. */
705 half_mask = (s->eqcr.pi_ci_mask>>1);
706 full_mask = s->eqcr.pi_ci_mask;
707 if (!s->eqcr.available) {
708 eqcr_ci = s->eqcr.ci;
709 s->eqcr.ci = qbman_cena_read_reg(&s->sys,
710 QBMAN_CENA_SWP_EQCR_CI) & full_mask;
711 s->eqcr.available = qm_cyc_diff(s->eqcr.pi_ring_size,
712 eqcr_ci, s->eqcr.ci);
713 if (!s->eqcr.available)
717 p = qbman_cena_write_start_wo_shadow(&s->sys,
718 QBMAN_CENA_SWP_EQCR(s->eqcr.pi & half_mask));
719 memcpy(&p[1], &cl[1], 28);
720 memcpy(&p[8], fd, sizeof(*fd));
723 /* Set the verb byte, have to substitute in the valid-bit */
724 p[0] = cl[0] | s->eqcr.pi_vb;
725 qbman_cena_write_complete_wo_shadow(&s->sys,
726 QBMAN_CENA_SWP_EQCR(s->eqcr.pi & half_mask));
728 s->eqcr.pi &= full_mask;
730 if (!(s->eqcr.pi & half_mask))
731 s->eqcr.pi_vb ^= QB_VALID_BIT;
/* Ring-mode enqueue, cache-inhibited variant (LS1080A): identical flow but
 * the consumer index is read via the cache-inhibited register path.
 */
736 static int qbman_swp_enqueue_ring_mode_cinh_direct(
738 const struct qbman_eq_desc *d,
739 const struct qbman_fd *fd)
742 const uint32_t *cl = qb_cl(d);
743 uint32_t eqcr_ci, full_mask, half_mask;
745 half_mask = (s->eqcr.pi_ci_mask>>1);
746 full_mask = s->eqcr.pi_ci_mask;
747 if (!s->eqcr.available) {
748 eqcr_ci = s->eqcr.ci;
749 s->eqcr.ci = qbman_cinh_read(&s->sys,
750 QBMAN_CINH_SWP_EQCR_CI) & full_mask;
751 s->eqcr.available = qm_cyc_diff(s->eqcr.pi_ring_size,
752 eqcr_ci, s->eqcr.ci);
753 if (!s->eqcr.available)
757 p = qbman_cena_write_start_wo_shadow(&s->sys,
758 QBMAN_CENA_SWP_EQCR(s->eqcr.pi & half_mask));
759 memcpy(&p[1], &cl[1], 28);
760 memcpy(&p[8], fd, sizeof(*fd));
763 /* Set the verb byte, have to substitute in the valid-bit */
764 p[0] = cl[0] | s->eqcr.pi_vb;
765 qbman_cena_write_complete_wo_shadow(&s->sys,
766 QBMAN_CENA_SWP_EQCR(s->eqcr.pi & half_mask));
768 s->eqcr.pi &= full_mask;
770 if (!(s->eqcr.pi & half_mask))
771 s->eqcr.pi_vb ^= QB_VALID_BIT;
/* Ring-mode enqueue, memory-backed variant: CI comes from the mem-back
 * register; the updated PI (with RT bit and valid bit) is published via
 * a cache-inhibited doorbell write at the end.
 */
776 static int qbman_swp_enqueue_ring_mode_mem_back(struct qbman_swp *s,
777 const struct qbman_eq_desc *d,
778 const struct qbman_fd *fd)
781 const uint32_t *cl = qb_cl(d);
782 uint32_t eqcr_ci, full_mask, half_mask;
784 half_mask = (s->eqcr.pi_ci_mask>>1);
785 full_mask = s->eqcr.pi_ci_mask;
786 if (!s->eqcr.available) {
787 eqcr_ci = s->eqcr.ci;
788 s->eqcr.ci = qbman_cena_read_reg(&s->sys,
789 QBMAN_CENA_SWP_EQCR_CI_MEMBACK) & full_mask;
790 s->eqcr.available = qm_cyc_diff(s->eqcr.pi_ring_size,
791 eqcr_ci, s->eqcr.ci);
792 if (!s->eqcr.available)
796 p = qbman_cena_write_start_wo_shadow(&s->sys,
797 QBMAN_CENA_SWP_EQCR(s->eqcr.pi & half_mask));
798 memcpy(&p[1], &cl[1], 28);
799 memcpy(&p[8], fd, sizeof(*fd));
801 /* Set the verb byte, have to substitute in the valid-bit */
802 p[0] = cl[0] | s->eqcr.pi_vb;
804 s->eqcr.pi &= full_mask;
806 if (!(s->eqcr.pi & half_mask))
807 s->eqcr.pi_vb ^= QB_VALID_BIT;
809 qbman_cinh_write(&s->sys, QBMAN_CINH_SWP_EQCR_PI,
810 (QB_RT_BIT)|(s->eqcr.pi)|s->eqcr.pi_vb);
/* Dispatch through the pointer selected at portal init. */
814 static int qbman_swp_enqueue_ring_mode(struct qbman_swp *s,
815 const struct qbman_eq_desc *d,
816 const struct qbman_fd *fd)
818 return qbman_swp_enqueue_ring_mode_ptr(s, d, fd);
/* Public single-frame enqueue: array mode if the portal was configured
 * for valid-bit array EQCR, otherwise ring mode.
 */
821 int qbman_swp_enqueue(struct qbman_swp *s, const struct qbman_eq_desc *d,
822 const struct qbman_fd *fd)
824 if (s->sys.eqcr_mode == qman_eqcr_vb_array)
825 return qbman_swp_enqueue_array_mode(s, d, fd);
826 else /* Use ring mode by default */
827 return qbman_swp_enqueue_ring_mode(s, d, fd);
/* Batch enqueue, direct variant.  Two-pass fill: first copy descriptor +
 * FD payloads into up to 'available' EQCR slots, then walk the ring again
 * writing each verb byte (and optional per-frame DCA from 'flags') so
 * hardware never sees a half-written entry; finally flush the touched
 * cachelines and advance PI.  Returns the number of frames enqueued.
 */
830 static int qbman_swp_enqueue_multiple_direct(struct qbman_swp *s,
831 const struct qbman_eq_desc *d,
832 const struct qbman_fd *fd,
837 const uint32_t *cl = qb_cl(d);
838 uint32_t eqcr_ci, eqcr_pi, half_mask, full_mask;
839 int i, num_enqueued = 0;
842 half_mask = (s->eqcr.pi_ci_mask>>1);
843 full_mask = s->eqcr.pi_ci_mask;
844 if (!s->eqcr.available) {
845 eqcr_ci = s->eqcr.ci;
846 s->eqcr.ci = qbman_cena_read_reg(&s->sys,
847 QBMAN_CENA_SWP_EQCR_CI) & full_mask;
848 s->eqcr.available = qm_cyc_diff(s->eqcr.pi_ring_size,
849 eqcr_ci, s->eqcr.ci);
850 if (!s->eqcr.available)
854 eqcr_pi = s->eqcr.pi;
855 num_enqueued = (s->eqcr.available < num_frames) ?
856 s->eqcr.available : num_frames;
857 s->eqcr.available -= num_enqueued;
858 /* Fill in the EQCR ring */
859 for (i = 0; i < num_enqueued; i++) {
860 p = qbman_cena_write_start_wo_shadow(&s->sys,
861 QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
862 memcpy(&p[1], &cl[1], 28);
863 memcpy(&p[8], &fd[i], sizeof(*fd));
869 /* Set the verb byte, have to substitute in the valid-bit */
870 eqcr_pi = s->eqcr.pi;
871 for (i = 0; i < num_enqueued; i++) {
872 p = qbman_cena_write_start_wo_shadow(&s->sys,
873 QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
874 p[0] = cl[0] | s->eqcr.pi_vb;
875 if (flags && (flags[i] & QBMAN_ENQUEUE_FLAG_DCA)) {
876 struct qbman_eq_desc *d = (struct qbman_eq_desc *)p;
878 d->eq.dca = (1 << QB_ENQUEUE_CMD_DCA_EN_SHIFT) |
879 ((flags[i]) & QBMAN_EQCR_DCA_IDXMASK);
882 if (!(eqcr_pi & half_mask))
883 s->eqcr.pi_vb ^= QB_VALID_BIT;
886 /* Flush all the cacheline without load/store in between */
887 eqcr_pi = s->eqcr.pi;
888 addr_cena = (size_t)s->sys.addr_cena;
889 for (i = 0; i < num_enqueued; i++) {
890 dcbf((uintptr_t)(addr_cena +
891 QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask)));
894 s->eqcr.pi = eqcr_pi & full_mask;
/* Batch enqueue, cache-inhibited variant: same two-pass flow with CI read
 * through the cinh register path (used on LS1080A).
 */
899 static int qbman_swp_enqueue_multiple_cinh_direct(
901 const struct qbman_eq_desc *d,
902 const struct qbman_fd *fd,
907 const uint32_t *cl = qb_cl(d);
908 uint32_t eqcr_ci, eqcr_pi, half_mask, full_mask;
909 int i, num_enqueued = 0;
912 half_mask = (s->eqcr.pi_ci_mask>>1);
913 full_mask = s->eqcr.pi_ci_mask;
914 if (!s->eqcr.available) {
915 eqcr_ci = s->eqcr.ci;
916 s->eqcr.ci = qbman_cinh_read(&s->sys,
917 QBMAN_CINH_SWP_EQCR_CI) & full_mask;
918 s->eqcr.available = qm_cyc_diff(s->eqcr.pi_ring_size,
919 eqcr_ci, s->eqcr.ci);
920 if (!s->eqcr.available)
924 eqcr_pi = s->eqcr.pi;
925 num_enqueued = (s->eqcr.available < num_frames) ?
926 s->eqcr.available : num_frames;
927 s->eqcr.available -= num_enqueued;
928 /* Fill in the EQCR ring */
929 for (i = 0; i < num_enqueued; i++) {
930 p = qbman_cena_write_start_wo_shadow(&s->sys,
931 QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
932 memcpy(&p[1], &cl[1], 28);
933 memcpy(&p[8], &fd[i], sizeof(*fd));
939 /* Set the verb byte, have to substitute in the valid-bit */
940 eqcr_pi = s->eqcr.pi;
941 for (i = 0; i < num_enqueued; i++) {
942 p = qbman_cena_write_start_wo_shadow(&s->sys,
943 QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
944 p[0] = cl[0] | s->eqcr.pi_vb;
945 if (flags && (flags[i] & QBMAN_ENQUEUE_FLAG_DCA)) {
946 struct qbman_eq_desc *d = (struct qbman_eq_desc *)p;
948 d->eq.dca = (1 << QB_ENQUEUE_CMD_DCA_EN_SHIFT) |
949 ((flags[i]) & QBMAN_EQCR_DCA_IDXMASK);
952 if (!(eqcr_pi & half_mask))
953 s->eqcr.pi_vb ^= QB_VALID_BIT;
956 /* Flush all the cacheline without load/store in between */
957 eqcr_pi = s->eqcr.pi;
958 addr_cena = (size_t)s->sys.addr_cena;
959 for (i = 0; i < num_enqueued; i++) {
961 QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
964 s->eqcr.pi = eqcr_pi & full_mask;
/* Batch enqueue, memory-backed variant: single-pass fill (DCA and verb
 * written inline per entry), then one EQCR_PI doorbell write with the RT
 * bit to publish the whole batch.
 */
969 static int qbman_swp_enqueue_multiple_mem_back(struct qbman_swp *s,
970 const struct qbman_eq_desc *d,
971 const struct qbman_fd *fd,
976 const uint32_t *cl = qb_cl(d);
977 uint32_t eqcr_ci, eqcr_pi, half_mask, full_mask;
978 int i, num_enqueued = 0;
980 half_mask = (s->eqcr.pi_ci_mask>>1);
981 full_mask = s->eqcr.pi_ci_mask;
982 if (!s->eqcr.available) {
983 eqcr_ci = s->eqcr.ci;
984 s->eqcr.ci = qbman_cena_read_reg(&s->sys,
985 QBMAN_CENA_SWP_EQCR_CI_MEMBACK) & full_mask;
986 s->eqcr.available = qm_cyc_diff(s->eqcr.pi_ring_size,
987 eqcr_ci, s->eqcr.ci);
988 if (!s->eqcr.available)
992 eqcr_pi = s->eqcr.pi;
993 num_enqueued = (s->eqcr.available < num_frames) ?
994 s->eqcr.available : num_frames;
995 s->eqcr.available -= num_enqueued;
996 /* Fill in the EQCR ring */
997 for (i = 0; i < num_enqueued; i++) {
998 p = qbman_cena_write_start_wo_shadow(&s->sys,
999 QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
1000 memcpy(&p[1], &cl[1], 28);
1001 memcpy(&p[8], &fd[i], sizeof(*fd));
1002 if (flags && (flags[i] & QBMAN_ENQUEUE_FLAG_DCA)) {
1003 struct qbman_eq_desc *d = (struct qbman_eq_desc *)p;
1005 d->eq.dca = (1 << QB_ENQUEUE_CMD_DCA_EN_SHIFT) |
1006 ((flags[i]) & QBMAN_EQCR_DCA_IDXMASK);
1009 p[0] = cl[0] | s->eqcr.pi_vb;
1011 if (!(eqcr_pi & half_mask))
1012 s->eqcr.pi_vb ^= QB_VALID_BIT;
1014 s->eqcr.pi = eqcr_pi & full_mask;
1017 qbman_cinh_write(&s->sys, QBMAN_CINH_SWP_EQCR_PI,
1018 (QB_RT_BIT)|(s->eqcr.pi)|s->eqcr.pi_vb);
1019 return num_enqueued;
/* Public batch enqueue: dispatches to the variant bound at portal init. */
1022 inline int qbman_swp_enqueue_multiple(struct qbman_swp *s,
1023 const struct qbman_eq_desc *d,
1024 const struct qbman_fd *fd,
1028 return qbman_swp_enqueue_multiple_ptr(s, d, fd, flags, num_frames);
/* Batch enqueue taking an array of FD pointers (fd[i] dereferenced per
 * frame) instead of a contiguous FD array; otherwise identical two-pass
 * flow to qbman_swp_enqueue_multiple_direct.  Returns frames enqueued.
 */
1031 static int qbman_swp_enqueue_multiple_fd_direct(struct qbman_swp *s,
1032 const struct qbman_eq_desc *d,
1033 struct qbman_fd **fd,
1038 const uint32_t *cl = qb_cl(d);
1039 uint32_t eqcr_ci, eqcr_pi, half_mask, full_mask;
1040 int i, num_enqueued = 0;
1043 half_mask = (s->eqcr.pi_ci_mask>>1);
1044 full_mask = s->eqcr.pi_ci_mask;
1045 if (!s->eqcr.available) {
1046 eqcr_ci = s->eqcr.ci;
1047 s->eqcr.ci = qbman_cena_read_reg(&s->sys,
1048 QBMAN_CENA_SWP_EQCR_CI) & full_mask;
1049 s->eqcr.available = qm_cyc_diff(s->eqcr.pi_ring_size,
1050 eqcr_ci, s->eqcr.ci);
1051 if (!s->eqcr.available)
1055 eqcr_pi = s->eqcr.pi;
1056 num_enqueued = (s->eqcr.available < num_frames) ?
1057 s->eqcr.available : num_frames;
1058 s->eqcr.available -= num_enqueued;
1059 /* Fill in the EQCR ring */
1060 for (i = 0; i < num_enqueued; i++) {
1061 p = qbman_cena_write_start_wo_shadow(&s->sys,
1062 QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
1063 memcpy(&p[1], &cl[1], 28);
1064 memcpy(&p[8], fd[i], sizeof(struct qbman_fd));
1070 /* Set the verb byte, have to substitute in the valid-bit */
1071 eqcr_pi = s->eqcr.pi;
1072 for (i = 0; i < num_enqueued; i++) {
1073 p = qbman_cena_write_start_wo_shadow(&s->sys,
1074 QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
1075 p[0] = cl[0] | s->eqcr.pi_vb;
1076 if (flags && (flags[i] & QBMAN_ENQUEUE_FLAG_DCA)) {
1077 struct qbman_eq_desc *d = (struct qbman_eq_desc *)p;
1079 d->eq.dca = (1 << QB_ENQUEUE_CMD_DCA_EN_SHIFT) |
1080 ((flags[i]) & QBMAN_EQCR_DCA_IDXMASK);
1083 if (!(eqcr_pi & half_mask))
1084 s->eqcr.pi_vb ^= QB_VALID_BIT;
1087 /* Flush all the cacheline without load/store in between */
1088 eqcr_pi = s->eqcr.pi;
1089 addr_cena = (size_t)s->sys.addr_cena;
1090 for (i = 0; i < num_enqueued; i++) {
1092 QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
1095 s->eqcr.pi = eqcr_pi & full_mask;
1097 return num_enqueued;
/* FD-pointer batch enqueue, cache-inhibited CI variant (LS1080A). */
1100 static int qbman_swp_enqueue_multiple_fd_cinh_direct(
1101 struct qbman_swp *s,
1102 const struct qbman_eq_desc *d,
1103 struct qbman_fd **fd,
1108 const uint32_t *cl = qb_cl(d);
1109 uint32_t eqcr_ci, eqcr_pi, half_mask, full_mask;
1110 int i, num_enqueued = 0;
1113 half_mask = (s->eqcr.pi_ci_mask>>1);
1114 full_mask = s->eqcr.pi_ci_mask;
1115 if (!s->eqcr.available) {
1116 eqcr_ci = s->eqcr.ci;
1117 s->eqcr.ci = qbman_cinh_read(&s->sys,
1118 QBMAN_CINH_SWP_EQCR_CI) & full_mask;
1119 s->eqcr.available = qm_cyc_diff(s->eqcr.pi_ring_size,
1120 eqcr_ci, s->eqcr.ci);
1121 if (!s->eqcr.available)
1125 eqcr_pi = s->eqcr.pi;
1126 num_enqueued = (s->eqcr.available < num_frames) ?
1127 s->eqcr.available : num_frames;
1128 s->eqcr.available -= num_enqueued;
1129 /* Fill in the EQCR ring */
1130 for (i = 0; i < num_enqueued; i++) {
1131 p = qbman_cena_write_start_wo_shadow(&s->sys,
1132 QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
1133 memcpy(&p[1], &cl[1], 28);
1134 memcpy(&p[8], fd[i], sizeof(struct qbman_fd));
1140 /* Set the verb byte, have to substitute in the valid-bit */
1141 eqcr_pi = s->eqcr.pi;
1142 for (i = 0; i < num_enqueued; i++) {
1143 p = qbman_cena_write_start_wo_shadow(&s->sys,
1144 QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
1145 p[0] = cl[0] | s->eqcr.pi_vb;
1146 if (flags && (flags[i] & QBMAN_ENQUEUE_FLAG_DCA)) {
1147 struct qbman_eq_desc *d = (struct qbman_eq_desc *)p;
1149 d->eq.dca = (1 << QB_ENQUEUE_CMD_DCA_EN_SHIFT) |
1150 ((flags[i]) & QBMAN_EQCR_DCA_IDXMASK);
1153 if (!(eqcr_pi & half_mask))
1154 s->eqcr.pi_vb ^= QB_VALID_BIT;
1157 /* Flush all the cacheline without load/store in between */
1158 eqcr_pi = s->eqcr.pi;
1159 addr_cena = (size_t)s->sys.addr_cena;
1160 for (i = 0; i < num_enqueued; i++) {
1162 QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
1165 s->eqcr.pi = eqcr_pi & full_mask;
1167 return num_enqueued;
/* FD-pointer batch enqueue, memory-backed variant: single-pass fill then
 * one EQCR_PI doorbell write (RT bit + valid bit) publishes the batch.
 */
1170 static int qbman_swp_enqueue_multiple_fd_mem_back(struct qbman_swp *s,
1171 const struct qbman_eq_desc *d,
1172 struct qbman_fd **fd,
1177 const uint32_t *cl = qb_cl(d);
1178 uint32_t eqcr_ci, eqcr_pi, half_mask, full_mask;
1179 int i, num_enqueued = 0;
1181 half_mask = (s->eqcr.pi_ci_mask>>1);
1182 full_mask = s->eqcr.pi_ci_mask;
1183 if (!s->eqcr.available) {
1184 eqcr_ci = s->eqcr.ci;
1185 s->eqcr.ci = qbman_cena_read_reg(&s->sys,
1186 QBMAN_CENA_SWP_EQCR_CI_MEMBACK) & full_mask;
1187 s->eqcr.available = qm_cyc_diff(s->eqcr.pi_ring_size,
1188 eqcr_ci, s->eqcr.ci);
1189 if (!s->eqcr.available)
1193 eqcr_pi = s->eqcr.pi;
1194 num_enqueued = (s->eqcr.available < num_frames) ?
1195 s->eqcr.available : num_frames;
1196 s->eqcr.available -= num_enqueued;
1197 /* Fill in the EQCR ring */
1198 for (i = 0; i < num_enqueued; i++) {
1199 p = qbman_cena_write_start_wo_shadow(&s->sys,
1200 QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
1201 memcpy(&p[1], &cl[1], 28);
1202 memcpy(&p[8], fd[i], sizeof(struct qbman_fd));
1206 /* Set the verb byte, have to substitute in the valid-bit */
1207 eqcr_pi = s->eqcr.pi;
1208 for (i = 0; i < num_enqueued; i++) {
1209 p = qbman_cena_write_start_wo_shadow(&s->sys,
1210 QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
1211 p[0] = cl[0] | s->eqcr.pi_vb;
1212 if (flags && (flags[i] & QBMAN_ENQUEUE_FLAG_DCA)) {
1213 struct qbman_eq_desc *d = (struct qbman_eq_desc *)p;
1215 d->eq.dca = (1 << QB_ENQUEUE_CMD_DCA_EN_SHIFT) |
1216 ((flags[i]) & QBMAN_EQCR_DCA_IDXMASK);
1219 if (!(eqcr_pi & half_mask))
1220 s->eqcr.pi_vb ^= QB_VALID_BIT;
1222 s->eqcr.pi = eqcr_pi & full_mask;
1225 qbman_cinh_write(&s->sys, QBMAN_CINH_SWP_EQCR_PI,
1226 (QB_RT_BIT)|(s->eqcr.pi)|s->eqcr.pi_vb);
1227 return num_enqueued;
/* Public FD-pointer batch enqueue: dispatches via the init-time pointer. */
1230 inline int qbman_swp_enqueue_multiple_fd(struct qbman_swp *s,
1231 const struct qbman_eq_desc *d,
1232 struct qbman_fd **fd,
1236 return qbman_swp_enqueue_multiple_fd_ptr(s, d, fd, flags, num_frames);
1239 static int qbman_swp_enqueue_multiple_desc_direct(struct qbman_swp *s,
1240 const struct qbman_eq_desc *d,
1241 const struct qbman_fd *fd,
1246 uint32_t eqcr_ci, eqcr_pi, half_mask, full_mask;
1247 int i, num_enqueued = 0;
1250 half_mask = (s->eqcr.pi_ci_mask>>1);
1251 full_mask = s->eqcr.pi_ci_mask;
1252 if (!s->eqcr.available) {
1253 eqcr_ci = s->eqcr.ci;
1254 s->eqcr.ci = qbman_cena_read_reg(&s->sys,
1255 QBMAN_CENA_SWP_EQCR_CI) & full_mask;
1256 s->eqcr.available = qm_cyc_diff(s->eqcr.pi_ring_size,
1257 eqcr_ci, s->eqcr.ci);
1258 if (!s->eqcr.available)
1262 eqcr_pi = s->eqcr.pi;
1263 num_enqueued = (s->eqcr.available < num_frames) ?
1264 s->eqcr.available : num_frames;
1265 s->eqcr.available -= num_enqueued;
1266 /* Fill in the EQCR ring */
1267 for (i = 0; i < num_enqueued; i++) {
1268 p = qbman_cena_write_start_wo_shadow(&s->sys,
1269 QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
1271 memcpy(&p[1], &cl[1], 28);
1272 memcpy(&p[8], &fd[i], sizeof(*fd));
1278 /* Set the verb byte, have to substitute in the valid-bit */
1279 eqcr_pi = s->eqcr.pi;
1280 for (i = 0; i < num_enqueued; i++) {
1281 p = qbman_cena_write_start_wo_shadow(&s->sys,
1282 QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
1284 p[0] = cl[0] | s->eqcr.pi_vb;
1286 if (!(eqcr_pi & half_mask))
1287 s->eqcr.pi_vb ^= QB_VALID_BIT;
1290 /* Flush all the cacheline without load/store in between */
1291 eqcr_pi = s->eqcr.pi;
1292 addr_cena = (size_t)s->sys.addr_cena;
1293 for (i = 0; i < num_enqueued; i++) {
1294 dcbf((uintptr_t)(addr_cena +
1295 QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask)));
1298 s->eqcr.pi = eqcr_pi & full_mask;
1300 return num_enqueued;
1303 static int qbman_swp_enqueue_multiple_desc_cinh_direct(
1304 struct qbman_swp *s,
1305 const struct qbman_eq_desc *d,
1306 const struct qbman_fd *fd,
1311 uint32_t eqcr_ci, eqcr_pi, half_mask, full_mask;
1312 int i, num_enqueued = 0;
1315 half_mask = (s->eqcr.pi_ci_mask>>1);
1316 full_mask = s->eqcr.pi_ci_mask;
1317 if (!s->eqcr.available) {
1318 eqcr_ci = s->eqcr.ci;
1319 s->eqcr.ci = qbman_cinh_read(&s->sys,
1320 QBMAN_CINH_SWP_EQCR_CI) & full_mask;
1321 s->eqcr.available = qm_cyc_diff(s->eqcr.pi_ring_size,
1322 eqcr_ci, s->eqcr.ci);
1323 if (!s->eqcr.available)
1327 eqcr_pi = s->eqcr.pi;
1328 num_enqueued = (s->eqcr.available < num_frames) ?
1329 s->eqcr.available : num_frames;
1330 s->eqcr.available -= num_enqueued;
1331 /* Fill in the EQCR ring */
1332 for (i = 0; i < num_enqueued; i++) {
1333 p = qbman_cena_write_start_wo_shadow(&s->sys,
1334 QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
1336 memcpy(&p[1], &cl[1], 28);
1337 memcpy(&p[8], &fd[i], sizeof(*fd));
1343 /* Set the verb byte, have to substitute in the valid-bit */
1344 eqcr_pi = s->eqcr.pi;
1345 for (i = 0; i < num_enqueued; i++) {
1346 p = qbman_cena_write_start_wo_shadow(&s->sys,
1347 QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
1349 p[0] = cl[0] | s->eqcr.pi_vb;
1351 if (!(eqcr_pi & half_mask))
1352 s->eqcr.pi_vb ^= QB_VALID_BIT;
1355 /* Flush all the cacheline without load/store in between */
1356 eqcr_pi = s->eqcr.pi;
1357 addr_cena = (size_t)s->sys.addr_cena;
1358 for (i = 0; i < num_enqueued; i++) {
1360 QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
1363 s->eqcr.pi = eqcr_pi & full_mask;
1365 return num_enqueued;
1368 static int qbman_swp_enqueue_multiple_desc_mem_back(struct qbman_swp *s,
1369 const struct qbman_eq_desc *d,
1370 const struct qbman_fd *fd,
1375 uint32_t eqcr_ci, eqcr_pi, half_mask, full_mask;
1376 int i, num_enqueued = 0;
1378 half_mask = (s->eqcr.pi_ci_mask>>1);
1379 full_mask = s->eqcr.pi_ci_mask;
1380 if (!s->eqcr.available) {
1381 eqcr_ci = s->eqcr.ci;
1382 s->eqcr.ci = qbman_cena_read_reg(&s->sys,
1383 QBMAN_CENA_SWP_EQCR_CI_MEMBACK) & full_mask;
1384 s->eqcr.available = qm_cyc_diff(s->eqcr.pi_ring_size,
1385 eqcr_ci, s->eqcr.ci);
1386 if (!s->eqcr.available)
1390 eqcr_pi = s->eqcr.pi;
1391 num_enqueued = (s->eqcr.available < num_frames) ?
1392 s->eqcr.available : num_frames;
1393 s->eqcr.available -= num_enqueued;
1394 /* Fill in the EQCR ring */
1395 for (i = 0; i < num_enqueued; i++) {
1396 p = qbman_cena_write_start_wo_shadow(&s->sys,
1397 QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
1399 memcpy(&p[1], &cl[1], 28);
1400 memcpy(&p[8], &fd[i], sizeof(*fd));
1404 /* Set the verb byte, have to substitute in the valid-bit */
1405 eqcr_pi = s->eqcr.pi;
1406 for (i = 0; i < num_enqueued; i++) {
1407 p = qbman_cena_write_start_wo_shadow(&s->sys,
1408 QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
1410 p[0] = cl[0] | s->eqcr.pi_vb;
1412 if (!(eqcr_pi & half_mask))
1413 s->eqcr.pi_vb ^= QB_VALID_BIT;
1416 s->eqcr.pi = eqcr_pi & full_mask;
1419 qbman_cinh_write(&s->sys, QBMAN_CINH_SWP_EQCR_PI,
1420 (QB_RT_BIT)|(s->eqcr.pi)|s->eqcr.pi_vb);
1422 return num_enqueued;
/* Public per-descriptor multi-enqueue entry point; dispatches through the
 * function pointer selected at portal-init time.
 */
inline int qbman_swp_enqueue_multiple_desc(struct qbman_swp *s,
					   const struct qbman_eq_desc *d,
					   const struct qbman_fd *fd,
					   int num_frames)
{
	return qbman_swp_enqueue_multiple_desc_ptr(s, d, fd, num_frames);
}
1432 /*************************/
1433 /* Static (push) dequeue */
1434 /*************************/
1436 void qbman_swp_push_get(struct qbman_swp *s, uint8_t channel_idx, int *enabled)
1438 uint16_t src = (s->sdq >> QB_SDQCR_SRC_SHIFT) & QB_SDQCR_SRC_MASK;
1440 QBMAN_BUG_ON(channel_idx > 15);
1441 *enabled = src | (1 << channel_idx);
1444 void qbman_swp_push_set(struct qbman_swp *s, uint8_t channel_idx, int enable)
1448 QBMAN_BUG_ON(channel_idx > 15);
1450 s->sdq |= 1 << channel_idx;
1452 s->sdq &= ~(1 << channel_idx);
1454 /* Read make the complete src map. If no channels are enabled
1455 * the SDQCR must be 0 or else QMan will assert errors
1457 dqsrc = (s->sdq >> QB_SDQCR_SRC_SHIFT) & QB_SDQCR_SRC_MASK;
1459 qbman_cinh_write(&s->sys, QBMAN_CINH_SWP_SDQCR, s->sdq);
1461 qbman_cinh_write(&s->sys, QBMAN_CINH_SWP_SDQCR, 0);
1464 /***************************/
1465 /* Volatile (pull) dequeue */
1466 /***************************/
1468 /* These should be const, eventually */
1469 #define QB_VDQCR_VERB_DCT_SHIFT 0
1470 #define QB_VDQCR_VERB_DT_SHIFT 2
1471 #define QB_VDQCR_VERB_RLS_SHIFT 4
1472 #define QB_VDQCR_VERB_WAE_SHIFT 5
1473 #define QB_VDQCR_VERB_RAD_SHIFT 6
1477 qb_pull_dt_workqueue,
1478 qb_pull_dt_framequeue
1481 void qbman_pull_desc_clear(struct qbman_pull_desc *d)
1483 memset(d, 0, sizeof(*d));
1486 void qbman_pull_desc_set_storage(struct qbman_pull_desc *d,
1487 struct qbman_result *storage,
1488 dma_addr_t storage_phys,
1491 d->pull.rsp_addr_virt = (size_t)storage;
1494 d->pull.verb &= ~(1 << QB_VDQCR_VERB_RLS_SHIFT);
1497 d->pull.verb |= 1 << QB_VDQCR_VERB_RLS_SHIFT;
1499 d->pull.verb |= 1 << QB_VDQCR_VERB_WAE_SHIFT;
1501 d->pull.verb &= ~(1 << QB_VDQCR_VERB_WAE_SHIFT);
1503 d->pull.rsp_addr = storage_phys;
1506 void qbman_pull_desc_set_numframes(struct qbman_pull_desc *d,
1509 d->pull.numf = numframes - 1;
1512 void qbman_pull_desc_set_token(struct qbman_pull_desc *d, uint8_t token)
1514 d->pull.tok = token;
1517 void qbman_pull_desc_set_fq(struct qbman_pull_desc *d, uint32_t fqid)
1519 d->pull.verb |= 1 << QB_VDQCR_VERB_DCT_SHIFT;
1520 d->pull.verb |= qb_pull_dt_framequeue << QB_VDQCR_VERB_DT_SHIFT;
1521 d->pull.dq_src = fqid;
1524 void qbman_pull_desc_set_wq(struct qbman_pull_desc *d, uint32_t wqid,
1525 enum qbman_pull_type_e dct)
1527 d->pull.verb |= dct << QB_VDQCR_VERB_DCT_SHIFT;
1528 d->pull.verb |= qb_pull_dt_workqueue << QB_VDQCR_VERB_DT_SHIFT;
1529 d->pull.dq_src = wqid;
1532 void qbman_pull_desc_set_channel(struct qbman_pull_desc *d, uint32_t chid,
1533 enum qbman_pull_type_e dct)
1535 d->pull.verb |= dct << QB_VDQCR_VERB_DCT_SHIFT;
1536 d->pull.verb |= qb_pull_dt_channel << QB_VDQCR_VERB_DT_SHIFT;
1537 d->pull.dq_src = chid;
1540 void qbman_pull_desc_set_rad(struct qbman_pull_desc *d, int rad)
1542 if (d->pull.verb & (1 << QB_VDQCR_VERB_RLS_SHIFT)) {
1544 d->pull.verb |= 1 << QB_VDQCR_VERB_RAD_SHIFT;
1546 d->pull.verb &= ~(1 << QB_VDQCR_VERB_RAD_SHIFT);
1548 printf("The RAD feature is not valid when RLS = 0\n");
1552 static int qbman_swp_pull_direct(struct qbman_swp *s,
1553 struct qbman_pull_desc *d)
1556 uint32_t *cl = qb_cl(d);
1558 if (!atomic_dec_and_test(&s->vdq.busy)) {
1559 atomic_inc(&s->vdq.busy);
1563 d->pull.tok = s->sys.idx + 1;
1564 s->vdq.storage = (void *)(size_t)d->pull.rsp_addr_virt;
1565 p = qbman_cena_write_start_wo_shadow(&s->sys, QBMAN_CENA_SWP_VDQCR);
1566 memcpy(&p[1], &cl[1], 12);
1568 /* Set the verb byte, have to substitute in the valid-bit */
1570 p[0] = cl[0] | s->vdq.valid_bit;
1571 s->vdq.valid_bit ^= QB_VALID_BIT;
1572 qbman_cena_write_complete_wo_shadow(&s->sys, QBMAN_CENA_SWP_VDQCR);
1577 static int qbman_swp_pull_mem_back(struct qbman_swp *s,
1578 struct qbman_pull_desc *d)
1581 uint32_t *cl = qb_cl(d);
1583 if (!atomic_dec_and_test(&s->vdq.busy)) {
1584 atomic_inc(&s->vdq.busy);
1588 d->pull.tok = s->sys.idx + 1;
1589 s->vdq.storage = (void *)(size_t)d->pull.rsp_addr_virt;
1590 p = qbman_cena_write_start_wo_shadow(&s->sys, QBMAN_CENA_SWP_VDQCR_MEM);
1591 memcpy(&p[1], &cl[1], 12);
1593 /* Set the verb byte, have to substitute in the valid-bit */
1594 p[0] = cl[0] | s->vdq.valid_bit;
1595 s->vdq.valid_bit ^= QB_VALID_BIT;
1597 qbman_cinh_write(&s->sys, QBMAN_CINH_SWP_VDQCR_RT, QMAN_RT_MODE);
/* Public pull entry point; dispatches through the function pointer selected
 * at portal-init time (direct vs memory-backed).
 */
inline int qbman_swp_pull(struct qbman_swp *s, struct qbman_pull_desc *d)
{
	return qbman_swp_pull_ptr(s, d);
}
1611 #define QMAN_DQRR_PI_MASK 0xf
1613 #define QBMAN_RESULT_DQ 0x60
1614 #define QBMAN_RESULT_FQRN 0x21
1615 #define QBMAN_RESULT_FQRNI 0x22
1616 #define QBMAN_RESULT_FQPN 0x24
1617 #define QBMAN_RESULT_FQDAN 0x25
1618 #define QBMAN_RESULT_CDAN 0x26
1619 #define QBMAN_RESULT_CSCN_MEM 0x27
1620 #define QBMAN_RESULT_CGCU 0x28
1621 #define QBMAN_RESULT_BPSCN 0x29
1622 #define QBMAN_RESULT_CSCN_WQ 0x2a
1624 #include <rte_prefetch.h>
1626 void qbman_swp_prefetch_dqrr_next(struct qbman_swp *s)
1628 const struct qbman_result *p;
1630 p = qbman_cena_read_wo_shadow(&s->sys,
1631 QBMAN_CENA_SWP_DQRR(s->dqrr.next_idx));
1635 /* NULL return if there are no unconsumed DQRR entries. Returns a DQRR entry
1636 * only once, so repeated calls can return a sequence of DQRR entries, without
1637 * requiring they be consumed immediately or in any particular order.
/* Public DQRR-next entry point; dispatches through the function pointer
 * selected at portal-init time (direct vs memory-backed).
 */
inline const struct qbman_result *qbman_swp_dqrr_next(struct qbman_swp *s)
{
	return qbman_swp_dqrr_next_ptr(s);
}
1644 const struct qbman_result *qbman_swp_dqrr_next_direct(struct qbman_swp *s)
1647 uint32_t response_verb;
1649 const struct qbman_result *p;
1651 /* Before using valid-bit to detect if something is there, we have to
1652 * handle the case of the DQRR reset bug...
1654 if (s->dqrr.reset_bug) {
1655 /* We pick up new entries by cache-inhibited producer index,
1656 * which means that a non-coherent mapping would require us to
1657 * invalidate and read *only* once that PI has indicated that
1658 * there's an entry here. The first trip around the DQRR ring
1659 * will be much less efficient than all subsequent trips around
1662 uint8_t pi = qbman_cinh_read(&s->sys, QBMAN_CINH_SWP_DQPI) &
1665 /* there are new entries if pi != next_idx */
1666 if (pi == s->dqrr.next_idx)
1669 /* if next_idx is/was the last ring index, and 'pi' is
1670 * different, we can disable the workaround as all the ring
1671 * entries have now been DMA'd to so valid-bit checking is
1672 * repaired. Note: this logic needs to be based on next_idx
1673 * (which increments one at a time), rather than on pi (which
1674 * can burst and wrap-around between our snapshots of it).
1676 QBMAN_BUG_ON((s->dqrr.dqrr_size - 1) < 0);
1677 if (s->dqrr.next_idx == (s->dqrr.dqrr_size - 1u)) {
1678 pr_debug("DEBUG: next_idx=%d, pi=%d, clear reset bug\n",
1679 s->dqrr.next_idx, pi);
1680 s->dqrr.reset_bug = 0;
1682 qbman_cena_invalidate_prefetch(&s->sys,
1683 QBMAN_CENA_SWP_DQRR(s->dqrr.next_idx));
1685 p = qbman_cena_read_wo_shadow(&s->sys,
1686 QBMAN_CENA_SWP_DQRR(s->dqrr.next_idx));
1690 /* If the valid-bit isn't of the expected polarity, nothing there. Note,
1691 * in the DQRR reset bug workaround, we shouldn't need to skip these
1692 * check, because we've already determined that a new entry is available
1693 * and we've invalidated the cacheline before reading it, so the
1694 * valid-bit behaviour is repaired and should tell us what we already
1695 * knew from reading PI.
1697 if ((verb & QB_VALID_BIT) != s->dqrr.valid_bit)
1700 /* There's something there. Move "next_idx" attention to the next ring
1701 * entry (and prefetch it) before returning what we found.
1704 if (s->dqrr.next_idx == s->dqrr.dqrr_size) {
1705 s->dqrr.next_idx = 0;
1706 s->dqrr.valid_bit ^= QB_VALID_BIT;
1708 /* If this is the final response to a volatile dequeue command
1709 * indicate that the vdq is no longer busy
1712 response_verb = verb & QBMAN_RESPONSE_VERB_MASK;
1713 if ((response_verb == QBMAN_RESULT_DQ) &&
1714 (flags & QBMAN_DQ_STAT_VOLATILE) &&
1715 (flags & QBMAN_DQ_STAT_EXPIRED))
1716 atomic_inc(&s->vdq.busy);
1721 const struct qbman_result *qbman_swp_dqrr_next_mem_back(struct qbman_swp *s)
1724 uint32_t response_verb;
1726 const struct qbman_result *p;
1728 p = qbman_cena_read_wo_shadow(&s->sys,
1729 QBMAN_CENA_SWP_DQRR_MEM(s->dqrr.next_idx));
1733 /* If the valid-bit isn't of the expected polarity, nothing there. Note,
1734 * in the DQRR reset bug workaround, we shouldn't need to skip these
1735 * check, because we've already determined that a new entry is available
1736 * and we've invalidated the cacheline before reading it, so the
1737 * valid-bit behaviour is repaired and should tell us what we already
1738 * knew from reading PI.
1740 if ((verb & QB_VALID_BIT) != s->dqrr.valid_bit)
1743 /* There's something there. Move "next_idx" attention to the next ring
1744 * entry (and prefetch it) before returning what we found.
1747 if (s->dqrr.next_idx == s->dqrr.dqrr_size) {
1748 s->dqrr.next_idx = 0;
1749 s->dqrr.valid_bit ^= QB_VALID_BIT;
1751 /* If this is the final response to a volatile dequeue command
1752 * indicate that the vdq is no longer busy
1755 response_verb = verb & QBMAN_RESPONSE_VERB_MASK;
1756 if ((response_verb == QBMAN_RESULT_DQ)
1757 && (flags & QBMAN_DQ_STAT_VOLATILE)
1758 && (flags & QBMAN_DQ_STAT_EXPIRED))
1759 atomic_inc(&s->vdq.busy);
1763 /* Consume DQRR entries previously returned from qbman_swp_dqrr_next(). */
1764 void qbman_swp_dqrr_consume(struct qbman_swp *s,
1765 const struct qbman_result *dq)
1767 qbman_cinh_write(&s->sys,
1768 QBMAN_CINH_SWP_DCAP, QBMAN_IDX_FROM_DQRR(dq));
1771 /* Consume DQRR entries previously returned from qbman_swp_dqrr_next(). */
1772 void qbman_swp_dqrr_idx_consume(struct qbman_swp *s,
1775 qbman_cinh_write(&s->sys, QBMAN_CINH_SWP_DCAP, dqrr_index);
1778 /*********************************/
1779 /* Polling user-provided storage */
1780 /*********************************/
1782 int qbman_result_has_new_result(struct qbman_swp *s,
1783 struct qbman_result *dq)
1785 if (dq->dq.tok == 0)
1789 * Set token to be 0 so we will detect change back to 1
1790 * next time the looping is traversed. Const is cast away here
1791 * as we want users to treat the dequeue responses as read only.
1793 ((struct qbman_result *)dq)->dq.tok = 0;
1796 * VDQCR "no longer busy" hook - not quite the same as DQRR, because
1797 * the fact "VDQCR" shows busy doesn't mean that we hold the result
1798 * that makes it available. Eg. we may be looking at our 10th dequeue
1799 * result, having released VDQCR after the 1st result and it is now
1800 * busy due to some other command!
1802 if (s->vdq.storage == dq) {
1803 s->vdq.storage = NULL;
1804 atomic_inc(&s->vdq.busy);
1810 int qbman_check_new_result(struct qbman_result *dq)
1812 if (dq->dq.tok == 0)
1816 * Set token to be 0 so we will detect change back to 1
1817 * next time the looping is traversed. Const is cast away here
1818 * as we want users to treat the dequeue responses as read only.
1820 ((struct qbman_result *)dq)->dq.tok = 0;
1825 int qbman_check_command_complete(struct qbman_result *dq)
1827 struct qbman_swp *s;
1829 if (dq->dq.tok == 0)
1832 s = portal_idx_map[dq->dq.tok - 1];
1834 * VDQCR "no longer busy" hook - not quite the same as DQRR, because
1835 * the fact "VDQCR" shows busy doesn't mean that we hold the result
1836 * that makes it available. Eg. we may be looking at our 10th dequeue
1837 * result, having released VDQCR after the 1st result and it is now
1838 * busy due to some other command!
1840 if (s->vdq.storage == dq) {
1841 s->vdq.storage = NULL;
1842 atomic_inc(&s->vdq.busy);
1848 /********************************/
1849 /* Categorising qbman results */
1850 /********************************/
1852 static inline int __qbman_result_is_x(const struct qbman_result *dq,
1855 uint8_t response_verb = dq->dq.verb & QBMAN_RESPONSE_VERB_MASK;
1857 return (response_verb == x);
1860 int qbman_result_is_DQ(const struct qbman_result *dq)
1862 return __qbman_result_is_x(dq, QBMAN_RESULT_DQ);
1865 int qbman_result_is_FQDAN(const struct qbman_result *dq)
1867 return __qbman_result_is_x(dq, QBMAN_RESULT_FQDAN);
1870 int qbman_result_is_CDAN(const struct qbman_result *dq)
1872 return __qbman_result_is_x(dq, QBMAN_RESULT_CDAN);
1875 int qbman_result_is_CSCN(const struct qbman_result *dq)
1877 return __qbman_result_is_x(dq, QBMAN_RESULT_CSCN_MEM) ||
1878 __qbman_result_is_x(dq, QBMAN_RESULT_CSCN_WQ);
1881 int qbman_result_is_BPSCN(const struct qbman_result *dq)
1883 return __qbman_result_is_x(dq, QBMAN_RESULT_BPSCN);
1886 int qbman_result_is_CGCU(const struct qbman_result *dq)
1888 return __qbman_result_is_x(dq, QBMAN_RESULT_CGCU);
1891 int qbman_result_is_FQRN(const struct qbman_result *dq)
1893 return __qbman_result_is_x(dq, QBMAN_RESULT_FQRN);
1896 int qbman_result_is_FQRNI(const struct qbman_result *dq)
1898 return __qbman_result_is_x(dq, QBMAN_RESULT_FQRNI);
1901 int qbman_result_is_FQPN(const struct qbman_result *dq)
1903 return __qbman_result_is_x(dq, QBMAN_RESULT_FQPN);
1906 /*********************************/
1907 /* Parsing frame dequeue results */
1908 /*********************************/
1910 /* These APIs assume qbman_result_is_DQ() is TRUE */
1912 uint8_t qbman_result_DQ_flags(const struct qbman_result *dq)
1917 uint16_t qbman_result_DQ_seqnum(const struct qbman_result *dq)
1919 return dq->dq.seqnum;
1922 uint16_t qbman_result_DQ_odpid(const struct qbman_result *dq)
1924 return dq->dq.oprid;
1927 uint32_t qbman_result_DQ_fqid(const struct qbman_result *dq)
1932 uint32_t qbman_result_DQ_byte_count(const struct qbman_result *dq)
1934 return dq->dq.fq_byte_cnt;
1937 uint32_t qbman_result_DQ_frame_count(const struct qbman_result *dq)
1939 return dq->dq.fq_frm_cnt;
1942 uint64_t qbman_result_DQ_fqd_ctx(const struct qbman_result *dq)
1944 return dq->dq.fqd_ctx;
1947 const struct qbman_fd *qbman_result_DQ_fd(const struct qbman_result *dq)
1949 return (const struct qbman_fd *)&dq->dq.fd[0];
1952 /**************************************/
1953 /* Parsing state-change notifications */
1954 /**************************************/
1955 uint8_t qbman_result_SCN_state(const struct qbman_result *scn)
1957 return scn->scn.state;
1960 uint32_t qbman_result_SCN_rid(const struct qbman_result *scn)
1962 return scn->scn.rid_tok;
1965 uint64_t qbman_result_SCN_ctx(const struct qbman_result *scn)
1967 return scn->scn.ctx;
1973 uint16_t qbman_result_bpscn_bpid(const struct qbman_result *scn)
1975 return (uint16_t)qbman_result_SCN_rid(scn) & 0x3FFF;
1978 int qbman_result_bpscn_has_free_bufs(const struct qbman_result *scn)
1980 return !(int)(qbman_result_SCN_state(scn) & 0x1);
1983 int qbman_result_bpscn_is_depleted(const struct qbman_result *scn)
1985 return (int)(qbman_result_SCN_state(scn) & 0x2);
1988 int qbman_result_bpscn_is_surplus(const struct qbman_result *scn)
1990 return (int)(qbman_result_SCN_state(scn) & 0x4);
1993 uint64_t qbman_result_bpscn_ctx(const struct qbman_result *scn)
1995 return qbman_result_SCN_ctx(scn);
2001 uint16_t qbman_result_cgcu_cgid(const struct qbman_result *scn)
2003 return (uint16_t)qbman_result_SCN_rid(scn) & 0xFFFF;
2006 uint64_t qbman_result_cgcu_icnt(const struct qbman_result *scn)
2008 return qbman_result_SCN_ctx(scn);
2011 /********************/
2012 /* Parsing EQ RESP */
2013 /********************/
2014 struct qbman_fd *qbman_result_eqresp_fd(struct qbman_result *eqresp)
2016 return (struct qbman_fd *)&eqresp->eq_resp.fd[0];
2019 void qbman_result_eqresp_set_rspid(struct qbman_result *eqresp, uint8_t val)
2021 eqresp->eq_resp.rspid = val;
2024 uint8_t qbman_result_eqresp_rspid(struct qbman_result *eqresp)
2026 return eqresp->eq_resp.rspid;
2029 uint8_t qbman_result_eqresp_rc(struct qbman_result *eqresp)
2031 if (eqresp->eq_resp.rc == 0xE)
2037 /******************/
2038 /* Buffer release */
2039 /******************/
2040 #define QB_BR_RC_VALID_SHIFT 5
2041 #define QB_BR_RCDI_SHIFT 6
2043 void qbman_release_desc_clear(struct qbman_release_desc *d)
2045 memset(d, 0, sizeof(*d));
2046 d->br.verb = 1 << QB_BR_RC_VALID_SHIFT;
2049 void qbman_release_desc_set_bpid(struct qbman_release_desc *d, uint16_t bpid)
2054 void qbman_release_desc_set_rcdi(struct qbman_release_desc *d, int enable)
2057 d->br.verb |= 1 << QB_BR_RCDI_SHIFT;
2059 d->br.verb &= ~(1 << QB_BR_RCDI_SHIFT);
2062 #define RAR_IDX(rar) ((rar) & 0x7)
2063 #define RAR_VB(rar) ((rar) & 0x80)
2064 #define RAR_SUCCESS(rar) ((rar) & 0x100)
2066 static int qbman_swp_release_direct(struct qbman_swp *s,
2067 const struct qbman_release_desc *d,
2068 const uint64_t *buffers,
2069 unsigned int num_buffers)
2072 const uint32_t *cl = qb_cl(d);
2073 uint32_t rar = qbman_cinh_read(&s->sys, QBMAN_CINH_SWP_RAR);
2075 pr_debug("RAR=%08x\n", rar);
2076 if (!RAR_SUCCESS(rar))
2079 QBMAN_BUG_ON(!num_buffers || (num_buffers > 7));
2081 /* Start the release command */
2082 p = qbman_cena_write_start_wo_shadow(&s->sys,
2083 QBMAN_CENA_SWP_RCR(RAR_IDX(rar)));
2085 /* Copy the caller's buffer pointers to the command */
2086 u64_to_le32_copy(&p[2], buffers, num_buffers);
2088 /* Set the verb byte, have to substitute in the valid-bit and the
2089 * number of buffers.
2092 p[0] = cl[0] | RAR_VB(rar) | num_buffers;
2093 qbman_cena_write_complete_wo_shadow(&s->sys,
2094 QBMAN_CENA_SWP_RCR(RAR_IDX(rar)));
2099 static int qbman_swp_release_mem_back(struct qbman_swp *s,
2100 const struct qbman_release_desc *d,
2101 const uint64_t *buffers,
2102 unsigned int num_buffers)
2105 const uint32_t *cl = qb_cl(d);
2106 uint32_t rar = qbman_cinh_read(&s->sys, QBMAN_CINH_SWP_RAR);
2108 pr_debug("RAR=%08x\n", rar);
2109 if (!RAR_SUCCESS(rar))
2112 QBMAN_BUG_ON(!num_buffers || (num_buffers > 7));
2114 /* Start the release command */
2115 p = qbman_cena_write_start_wo_shadow(&s->sys,
2116 QBMAN_CENA_SWP_RCR_MEM(RAR_IDX(rar)));
2118 /* Copy the caller's buffer pointers to the command */
2119 u64_to_le32_copy(&p[2], buffers, num_buffers);
2121 /* Set the verb byte, have to substitute in the valid-bit and the
2122 * number of buffers.
2124 p[0] = cl[0] | RAR_VB(rar) | num_buffers;
2126 qbman_cinh_write(&s->sys, QBMAN_CINH_SWP_RCR_AM_RT +
2127 RAR_IDX(rar) * 4, QMAN_RT_MODE);
2132 inline int qbman_swp_release(struct qbman_swp *s,
2133 const struct qbman_release_desc *d,
2134 const uint64_t *buffers,
2135 unsigned int num_buffers)
2137 return qbman_swp_release_ptr(s, d, buffers, num_buffers);
2140 /*******************/
2141 /* Buffer acquires */
2142 /*******************/
/* Management-command layouts for buffer acquire (64-byte commands).
 * NOTE(review): field layout reconstructed from the visible reserved-array
 * sizes — verify offsets against the QBMan block guide before relying on it.
 */
struct qbman_acquire_desc {
	uint8_t verb;
	uint8_t reserved;
	uint16_t bpid;
	uint8_t num;
	uint8_t reserved2[59];
};

struct qbman_acquire_rslt {
	uint8_t verb;
	uint8_t rslt;
	uint16_t reserved;
	uint8_t num;
	uint8_t reserved2[3];
	uint64_t buf[7];
};
2160 int qbman_swp_acquire(struct qbman_swp *s, uint16_t bpid, uint64_t *buffers,
2161 unsigned int num_buffers)
2163 struct qbman_acquire_desc *p;
2164 struct qbman_acquire_rslt *r;
2166 if (!num_buffers || (num_buffers > 7))
2169 /* Start the management command */
2170 p = qbman_swp_mc_start(s);
2175 /* Encode the caller-provided attributes */
2177 p->num = num_buffers;
2179 /* Complete the management command */
2180 r = qbman_swp_mc_complete(s, p, QBMAN_MC_ACQUIRE);
2182 pr_err("qbman: acquire from BPID %d failed, no response\n",
2187 /* Decode the outcome */
2188 QBMAN_BUG_ON((r->verb & QBMAN_RESPONSE_VERB_MASK) != QBMAN_MC_ACQUIRE);
2190 /* Determine success or failure */
2191 if (r->rslt != QBMAN_MC_RSLT_OK) {
2192 pr_err("Acquire buffers from BPID 0x%x failed, code=0x%02x\n",
2197 QBMAN_BUG_ON(r->num > num_buffers);
2199 /* Copy the acquired buffers to the caller's array */
2200 u64_from_le32_copy(buffers, &r->buf[0], r->num);
/* Management-command layouts for FQ state alteration (64-byte commands).
 * NOTE(review): layout reconstructed from the visible reserved-array sizes —
 * verify against the QBMan block guide.
 */
struct qbman_alt_fq_state_desc {
	uint8_t verb;
	uint8_t reserved[3];
	uint32_t fqid;
	uint8_t reserved2[56];
};

struct qbman_alt_fq_state_rslt {
	uint8_t verb;
	uint8_t rslt;
	uint8_t reserved[62];
};
2221 #define ALT_FQ_FQID_MASK 0x00FFFFFF
2223 static int qbman_swp_alt_fq_state(struct qbman_swp *s, uint32_t fqid,
2224 uint8_t alt_fq_verb)
2226 struct qbman_alt_fq_state_desc *p;
2227 struct qbman_alt_fq_state_rslt *r;
2229 /* Start the management command */
2230 p = qbman_swp_mc_start(s);
2234 p->fqid = fqid & ALT_FQ_FQID_MASK;
2236 /* Complete the management command */
2237 r = qbman_swp_mc_complete(s, p, alt_fq_verb);
2239 pr_err("qbman: mgmt cmd failed, no response (verb=0x%x)\n",
2244 /* Decode the outcome */
2245 QBMAN_BUG_ON((r->verb & QBMAN_RESPONSE_VERB_MASK) != alt_fq_verb);
2247 /* Determine success or failure */
2248 if (r->rslt != QBMAN_MC_RSLT_OK) {
2249 pr_err("ALT FQID %d failed: verb = 0x%08x, code = 0x%02x\n",
2250 fqid, alt_fq_verb, r->rslt);
2257 int qbman_swp_fq_schedule(struct qbman_swp *s, uint32_t fqid)
2259 return qbman_swp_alt_fq_state(s, fqid, QBMAN_FQ_SCHEDULE);
2262 int qbman_swp_fq_force(struct qbman_swp *s, uint32_t fqid)
2264 return qbman_swp_alt_fq_state(s, fqid, QBMAN_FQ_FORCE);
2267 int qbman_swp_fq_xon(struct qbman_swp *s, uint32_t fqid)
2269 return qbman_swp_alt_fq_state(s, fqid, QBMAN_FQ_XON);
2272 int qbman_swp_fq_xoff(struct qbman_swp *s, uint32_t fqid)
2274 return qbman_swp_alt_fq_state(s, fqid, QBMAN_FQ_XOFF);
2277 /**********************/
2278 /* Channel management */
2279 /**********************/
/* Management-command layouts for CDAN (channel data-availability
 * notification) configuration.
 * NOTE(review): layout reconstructed from the visible reserved-array sizes —
 * verify against the QBMan block guide.
 */
struct qbman_cdan_ctrl_desc {
	uint8_t verb;
	uint8_t reserved;
	uint16_t ch;
	uint8_t we;
	uint8_t ctrl;
	uint16_t reserved2;
	uint64_t cdan_ctx;
	uint8_t reserved3[48];
};

struct qbman_cdan_ctrl_rslt {
	uint8_t verb;
	uint8_t rslt;
	uint16_t ch;
	uint8_t reserved[60];
};
2300 /* Hide "ICD" for now as we don't use it, don't set it, and don't test it, so it
2301 * would be irresponsible to expose it.
2303 #define CODE_CDAN_WE_EN 0x1
2304 #define CODE_CDAN_WE_CTX 0x4
2306 static int qbman_swp_CDAN_set(struct qbman_swp *s, uint16_t channelid,
2307 uint8_t we_mask, uint8_t cdan_en,
2310 struct qbman_cdan_ctrl_desc *p;
2311 struct qbman_cdan_ctrl_rslt *r;
2313 /* Start the management command */
2314 p = qbman_swp_mc_start(s);
2318 /* Encode the caller-provided attributes */
2327 /* Complete the management command */
2328 r = qbman_swp_mc_complete(s, p, QBMAN_WQCHAN_CONFIGURE);
2330 pr_err("qbman: wqchan config failed, no response\n");
2334 /* Decode the outcome */
2335 QBMAN_BUG_ON((r->verb & QBMAN_RESPONSE_VERB_MASK)
2336 != QBMAN_WQCHAN_CONFIGURE);
2338 /* Determine success or failure */
2339 if (r->rslt != QBMAN_MC_RSLT_OK) {
2340 pr_err("CDAN cQID %d failed: code = 0x%02x\n",
2341 channelid, r->rslt);
2348 int qbman_swp_CDAN_set_context(struct qbman_swp *s, uint16_t channelid,
2351 return qbman_swp_CDAN_set(s, channelid,
2356 int qbman_swp_CDAN_enable(struct qbman_swp *s, uint16_t channelid)
2358 return qbman_swp_CDAN_set(s, channelid,
2363 int qbman_swp_CDAN_disable(struct qbman_swp *s, uint16_t channelid)
2365 return qbman_swp_CDAN_set(s, channelid,
2370 int qbman_swp_CDAN_set_context_enable(struct qbman_swp *s, uint16_t channelid,
2373 return qbman_swp_CDAN_set(s, channelid,
2374 CODE_CDAN_WE_EN | CODE_CDAN_WE_CTX,
2378 uint8_t qbman_get_dqrr_idx(const struct qbman_result *dqrr)
2380 return QBMAN_IDX_FROM_DQRR(dqrr);
2383 struct qbman_result *qbman_get_dqrr_from_idx(struct qbman_swp *s, uint8_t idx)
2385 struct qbman_result *dq;
2387 dq = qbman_cena_read(&s->sys, QBMAN_CENA_SWP_DQRR(idx));