/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2021 NXP
 */

#include <rte_dpaa_bus.h>
#include <rte_dmadev_pmd.h>

#include "dpaa_qdma.h"
#include "dpaa_qdma_logs.h"

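/*
 * The helpers below manipulate the qDMA frame descriptors defined in
 * dpaa_qdma.h: a compound command descriptor (CCDF) that points at a
 * compound scatter/gather table (CSGF). Addresses are split into hi/lo
 * halves and stored little-endian, as the engine expects.
 */
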
static inline void
qdma_desc_addr_set64(struct fsl_qdma_format *ccdf, u64 addr)
{
	ccdf->addr_hi = upper_32_bits(addr);
	ccdf->addr_lo = rte_cpu_to_le_32(lower_32_bits(addr));
}

static inline u8
qdma_ccdf_get_queue(const struct fsl_qdma_format *ccdf)
{
	return ccdf->cfg8b_w1 & 0xff;
}

static inline int
qdma_ccdf_get_offset(const struct fsl_qdma_format *ccdf)
{
	return (rte_le_to_cpu_32(ccdf->cfg) & QDMA_CCDF_MASK)
		>> QDMA_CCDF_OFFSET;
}

static inline void
qdma_ccdf_set_format(struct fsl_qdma_format *ccdf, int offset)
{
	ccdf->cfg = rte_cpu_to_le_32(QDMA_CCDF_FOTMAT | offset);
}

static inline int
qdma_ccdf_get_status(const struct fsl_qdma_format *ccdf)
{
	return (rte_le_to_cpu_32(ccdf->status) & QDMA_CCDF_MASK)
		>> QDMA_CCDF_STATUS;
}

static inline void
qdma_ccdf_set_ser(struct fsl_qdma_format *ccdf, int status)
{
	ccdf->status = rte_cpu_to_le_32(QDMA_CCDF_SER | status);
}

static inline void
qdma_csgf_set_len(struct fsl_qdma_format *csgf, int len)
{
	csgf->cfg = rte_cpu_to_le_32(len & QDMA_SG_LEN_MASK);
}

static inline void
qdma_csgf_set_f(struct fsl_qdma_format *csgf, int len)
{
	csgf->cfg = rte_cpu_to_le_32(QDMA_SG_FIN | (len & QDMA_SG_LEN_MASK));
}

static inline int
ilog2(int x)
{
	int log = 0;

	x >>= 1;

	while (x) {
		log++;
		x >>= 1;
	}
	return log;
}

static u32
qdma_readl(void *addr)
{
	return QDMA_IN(addr);
}

static void
qdma_writel(u32 val, void *addr)
{
	QDMA_OUT(addr, val);
}

static u32
qdma_readl_be(void *addr)
{
	return QDMA_IN_BE(addr);
}

static void
qdma_writel_be(u32 val, void *addr)
{
	QDMA_OUT_BE(addr, val);
}

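/*
 * dma_pool_alloc()/dma_pool_free() wrap rte_malloc() so that every
 * buffer handed to the engine also has its IOVA resolved up front via
 * rte_mem_virt2iova(); the hardware only ever sees bus addresses.
 */
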
static void
*dma_pool_alloc(int size, int aligned, dma_addr_t *phy_addr)
{
	void *virt_addr;

	virt_addr = rte_malloc("dma pool alloc", size, aligned);
	if (!virt_addr)
		return NULL;

	*phy_addr = rte_mem_virt2iova(virt_addr);

	return virt_addr;
}

static void
dma_pool_free(void *addr)
{
	rte_free(addr);
}

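/*
 * Tear down a channel: once the last user drops its reference, wait for
 * any in-flight per-block task to finish, then release every completion
 * descriptor still sitting on the used and free lists.
 */
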
static void
fsl_qdma_free_chan_resources(struct fsl_qdma_chan *fsl_chan)
{
	struct fsl_qdma_queue *fsl_queue = fsl_chan->queue;
	struct fsl_qdma_engine *fsl_qdma = fsl_chan->qdma;
	struct fsl_qdma_comp *comp_temp, *_comp_temp;
	int id;

	if (--fsl_queue->count)
		goto finally;

	id = (fsl_qdma->block_base - fsl_queue->block_base) /
	      fsl_qdma->block_offset;

	while (rte_atomic32_read(&wait_task[id]) == 1)
		rte_delay_us(QDMA_DELAY);

	list_for_each_entry_safe(comp_temp, _comp_temp,
				 &fsl_queue->comp_used, list) {
		list_del(&comp_temp->list);
		dma_pool_free(comp_temp->virt_addr);
		dma_pool_free(comp_temp->desc_virt_addr);
		rte_free(comp_temp);
	}

	list_for_each_entry_safe(comp_temp, _comp_temp,
				 &fsl_queue->comp_free, list) {
		list_del(&comp_temp->list);
		dma_pool_free(comp_temp->virt_addr);
		dma_pool_free(comp_temp->desc_virt_addr);
		rte_free(comp_temp);
	}

finally:
	fsl_qdma->desc_allocated--;
}

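/*
 * Per the indexing used below, a completion entry's virt_addr holds
 * four fsl_qdma_format entries (command descriptor, frame list, source
 * S/G, destination S/G), while desc_virt_addr holds the source and
 * destination descriptors (sdf/ddf).
 */
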
static void
fsl_qdma_comp_fill_memcpy(struct fsl_qdma_comp *fsl_comp,
			  dma_addr_t dst, dma_addr_t src, u32 len)
{
	struct fsl_qdma_format *csgf_src, *csgf_dest;

	/* Note: the command table (fsl_comp->virt_addr) is written
	 * directly into the queue's command descriptors at enqueue time;
	 * see fsl_qdma_enqueue_desc(). The frame list table
	 * (virt_addr + 1) and the source/destination descriptor tables
	 * (fsl_comp->desc_virt_addr and fsl_comp->desc_virt_addr + 1) are
	 * set up on the control path in
	 * fsl_qdma_pre_request_enqueue_comp_sd_desc().
	 */
	csgf_src = (struct fsl_qdma_format *)fsl_comp->virt_addr + 2;
	csgf_dest = (struct fsl_qdma_format *)fsl_comp->virt_addr + 3;

	/* Status notification is enqueued to the status queue. */
	qdma_desc_addr_set64(csgf_src, src);
	qdma_csgf_set_len(csgf_src, len);
	qdma_desc_addr_set64(csgf_dest, dst);
	qdma_csgf_set_len(csgf_dest, len);
	/* This entry is the last entry. */
	qdma_csgf_set_f(csgf_dest, len);
}

/*
 * Pre-request a command descriptor and compound S/G table for each
 * enqueue slot, plus some overflow headroom.
 */
static int
fsl_qdma_pre_request_enqueue_comp_sd_desc(
					struct fsl_qdma_queue *queue,
					int size, int aligned)
{
	struct fsl_qdma_comp *comp_temp, *_comp_temp;
	struct fsl_qdma_sdf *sdf;
	struct fsl_qdma_ddf *ddf;
	struct fsl_qdma_format *csgf_desc;
	int i;

	for (i = 0; i < (int)(queue->n_cq + COMMAND_QUEUE_OVERFLOW); i++) {
		comp_temp = rte_zmalloc("qdma: comp temp",
					sizeof(*comp_temp), 0);
		if (!comp_temp)
			goto fail;

		comp_temp->virt_addr =
			dma_pool_alloc(size, aligned, &comp_temp->bus_addr);
		if (!comp_temp->virt_addr) {
			rte_free(comp_temp);
			goto fail;
		}

		comp_temp->desc_virt_addr =
			dma_pool_alloc(size, aligned, &comp_temp->desc_bus_addr);
		if (!comp_temp->desc_virt_addr) {
			rte_free(comp_temp->virt_addr);
			rte_free(comp_temp);
			goto fail;
		}

		memset(comp_temp->virt_addr, 0, FSL_QDMA_COMMAND_BUFFER_SIZE);
		memset(comp_temp->desc_virt_addr, 0,
		       FSL_QDMA_DESCRIPTOR_BUFFER_SIZE);

		csgf_desc = (struct fsl_qdma_format *)comp_temp->virt_addr + 1;
		sdf = (struct fsl_qdma_sdf *)comp_temp->desc_virt_addr;
		ddf = (struct fsl_qdma_ddf *)comp_temp->desc_virt_addr + 1;
		/* Compound Command Descriptor (Frame List Table) */
		qdma_desc_addr_set64(csgf_desc, comp_temp->desc_bus_addr);
		/* It must be 32 for the Compound S/G Descriptor */
		qdma_csgf_set_len(csgf_desc, 32);
		/* Descriptor Buffer */
		sdf->cmd = rte_cpu_to_le_32(FSL_QDMA_CMD_RWTTYPE <<
					    FSL_QDMA_CMD_RWTTYPE_OFFSET);
		ddf->cmd = rte_cpu_to_le_32(FSL_QDMA_CMD_RWTTYPE <<
					    FSL_QDMA_CMD_RWTTYPE_OFFSET);
		ddf->cmd |= rte_cpu_to_le_32(FSL_QDMA_CMD_LWC <<
					     FSL_QDMA_CMD_LWC_OFFSET);

		list_add_tail(&comp_temp->list, &queue->comp_free);
	}

	return 0;

fail:
	list_for_each_entry_safe(comp_temp, _comp_temp,
				 &queue->comp_free, list) {
		list_del(&comp_temp->list);
		rte_free(comp_temp->virt_addr);
		rte_free(comp_temp->desc_virt_addr);
		rte_free(comp_temp);
	}

	return -ENOMEM;
}

/*
 * Request a command descriptor for enqueue.
 */
static struct fsl_qdma_comp *
fsl_qdma_request_enqueue_desc(struct fsl_qdma_chan *fsl_chan)
{
	struct fsl_qdma_queue *queue = fsl_chan->queue;
	struct fsl_qdma_comp *comp_temp;

	if (!list_empty(&queue->comp_free)) {
		comp_temp = list_first_entry(&queue->comp_free,
					     struct fsl_qdma_comp,
					     list);
		list_del(&comp_temp->list);
		return comp_temp;
	}

	return NULL;
}

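/*
 * Allocate one circular command queue (n_cq descriptors, contiguous and
 * self-aligned in DMA memory) per queue per block, and record which
 * block's register window each queue belongs to.
 */
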
static struct fsl_qdma_queue
*fsl_qdma_alloc_queue_resources(struct fsl_qdma_engine *fsl_qdma)
{
	struct fsl_qdma_queue *queue_head, *queue_temp;
	int len, i, j;
	int queue_num;
	int blocks;
	unsigned int queue_size[FSL_QDMA_QUEUE_MAX];

	queue_num = fsl_qdma->n_queues;
	blocks = fsl_qdma->num_blocks;

	len = sizeof(*queue_head) * queue_num * blocks;
	queue_head = rte_zmalloc("qdma: queue head", len, 0);
	if (!queue_head)
		return NULL;

	for (i = 0; i < FSL_QDMA_QUEUE_MAX; i++)
		queue_size[i] = QDMA_QUEUE_SIZE;

	for (j = 0; j < blocks; j++) {
		for (i = 0; i < queue_num; i++) {
			if (queue_size[i] > FSL_QDMA_CIRCULAR_DESC_SIZE_MAX ||
			    queue_size[i] < FSL_QDMA_CIRCULAR_DESC_SIZE_MIN) {
				DPAA_QDMA_ERR("Invalid queue size\n");
				goto fail;
			}
			queue_temp = queue_head + i + (j * queue_num);

			queue_temp->cq =
				dma_pool_alloc(sizeof(struct fsl_qdma_format) *
					       queue_size[i],
					       sizeof(struct fsl_qdma_format) *
					       queue_size[i],
					       &queue_temp->bus_addr);
			if (!queue_temp->cq)
				goto fail;

			memset(queue_temp->cq, 0x0, queue_size[i] *
			       sizeof(struct fsl_qdma_format));

			queue_temp->block_base = fsl_qdma->block_base +
				FSL_QDMA_BLOCK_BASE_OFFSET(fsl_qdma, j);
			queue_temp->n_cq = queue_size[i];
			queue_temp->id = i;
			queue_temp->count = 0;
			queue_temp->pending = 0;
			queue_temp->virt_head = queue_temp->cq;
			queue_temp->stats = (struct rte_dma_stats){0};
		}
	}

	return queue_head;

fail:
	for (j = 0; j < blocks; j++) {
		for (i = 0; i < queue_num; i++) {
			queue_temp = queue_head + i + (j * queue_num);
			dma_pool_free(queue_temp->cq);
		}
	}
	rte_free(queue_head);

	return NULL;
}

static struct
fsl_qdma_queue *fsl_qdma_prep_status_queue(void)
{
	struct fsl_qdma_queue *status_head;
	unsigned int status_size;

	status_size = QDMA_STATUS_SIZE;
	if (status_size > FSL_QDMA_CIRCULAR_DESC_SIZE_MAX ||
	    status_size < FSL_QDMA_CIRCULAR_DESC_SIZE_MIN) {
		DPAA_QDMA_ERR("Invalid status queue size\n");
		return NULL;
	}

	status_head = rte_zmalloc("qdma: status head", sizeof(*status_head), 0);
	if (!status_head)
		return NULL;

	/*
	 * Buffer for queue command
	 */
	status_head->cq = dma_pool_alloc(sizeof(struct fsl_qdma_format) *
					 status_size,
					 sizeof(struct fsl_qdma_format) *
					 status_size,
					 &status_head->bus_addr);
	if (!status_head->cq) {
		rte_free(status_head);
		return NULL;
	}

	memset(status_head->cq, 0x0, status_size *
	       sizeof(struct fsl_qdma_format));
	status_head->n_cq = status_size;
	status_head->virt_head = status_head->cq;

	return status_head;
}

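/*
 * Quiesce the engine: set DMR[DQD] to stop dequeueing, zero every
 * command queue mode register, then poll DSR until the busy bit clears
 * (bounded by RETRIES) before disabling the status queues.
 */
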
static int
fsl_qdma_halt(struct fsl_qdma_engine *fsl_qdma)
{
	void *ctrl = fsl_qdma->ctrl_base;
	void *block;
	int i, count = RETRIES;
	unsigned int j;
	u32 reg;

	/* Disable the command queue and wait for idle state. */
	reg = qdma_readl(ctrl + FSL_QDMA_DMR);
	reg |= FSL_QDMA_DMR_DQD;
	qdma_writel(reg, ctrl + FSL_QDMA_DMR);
	for (j = 0; j < fsl_qdma->num_blocks; j++) {
		block = fsl_qdma->block_base +
			FSL_QDMA_BLOCK_BASE_OFFSET(fsl_qdma, j);
		for (i = 0; i < FSL_QDMA_QUEUE_NUM_MAX; i++)
			qdma_writel(0, block + FSL_QDMA_BCQMR(i));
	}
	while (true) {
		reg = qdma_readl(ctrl + FSL_QDMA_DSR);
		if (!(reg & FSL_QDMA_DSR_DB))
			break;
		if (count-- < 0)
			return -EBUSY;
		rte_delay_us(100);
	}

	for (j = 0; j < fsl_qdma->num_blocks; j++) {
		block = fsl_qdma->block_base +
			FSL_QDMA_BLOCK_BASE_OFFSET(fsl_qdma, j);

		/* Disable status queue. */
		qdma_writel(0, block + FSL_QDMA_BSQMR);

		/*
		 * Clear the command queue interrupt detect register for
		 * all queues.
		 */
		qdma_writel(0xffffffff, block + FSL_QDMA_BCQIDR(0));
	}

	return 0;
}

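/*
 * Drain up to nb_cpls entries from the block's status queue. Each entry
 * names the command queue it came from; the matching completion is
 * popped from that queue's used-list, its index reported through
 * last_idx, and the status ring slot is recycled with BSQMR[DI] set to
 * tell the hardware the entry was consumed.
 */
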
static int
fsl_qdma_queue_transfer_complete(struct fsl_qdma_engine *fsl_qdma,
				 void *block, int id, const uint16_t nb_cpls,
				 uint16_t *last_idx,
				 enum rte_dma_status_code *status)
{
	struct fsl_qdma_queue *fsl_queue = fsl_qdma->queue;
	struct fsl_qdma_queue *fsl_status = fsl_qdma->status[id];
	struct fsl_qdma_queue *temp_queue;
	struct fsl_qdma_format *status_addr;
	struct fsl_qdma_comp *fsl_comp = NULL;
	u32 reg, i;
	int count = 0;

	while (count < nb_cpls) {
		reg = qdma_readl_be(block + FSL_QDMA_BSQSR);
		if (reg & FSL_QDMA_BSQSR_QE_BE)
			return count;

		status_addr = fsl_status->virt_head;

		i = qdma_ccdf_get_queue(status_addr) +
			id * fsl_qdma->n_queues;
		temp_queue = fsl_queue + i;
		fsl_comp = list_first_entry(&temp_queue->comp_used,
					    struct fsl_qdma_comp,
					    list);
		list_del(&fsl_comp->list);

		reg = qdma_readl_be(block + FSL_QDMA_BSQMR);
		reg |= FSL_QDMA_BSQMR_DI_BE;

		qdma_desc_addr_set64(status_addr, 0x0);
		fsl_status->virt_head++;
		if (fsl_status->virt_head == fsl_status->cq + fsl_status->n_cq)
			fsl_status->virt_head = fsl_status->cq;
		qdma_writel_be(reg, block + FSL_QDMA_BSQMR);
		*last_idx = fsl_comp->index;
		if (status != NULL)
			status[count] = RTE_DMA_STATUS_SUCCESSFUL;

		list_add_tail(&fsl_comp->list, &temp_queue->comp_free);
		count++;
	}

	return count;
}

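/*
 * Program the per-queue enqueue/dequeue pointer registers, the queue
 * modes and the per-block status queues, then clear DMR[DQD] to let the
 * engine run. The ERR010812 workaround below is kept from the original
 * code: XOFF must be enabled via SQCCMR ENTER_WM to avoid spurious
 * enqueue rejections.
 */
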
static int
fsl_qdma_reg_init(struct fsl_qdma_engine *fsl_qdma)
{
	struct fsl_qdma_queue *fsl_queue = fsl_qdma->queue;
	struct fsl_qdma_queue *temp;
	void *ctrl = fsl_qdma->ctrl_base;
	void *block;
	u32 i, j;
	u32 reg;
	int ret, val;

	/* Try to halt the qDMA engine first. */
	ret = fsl_qdma_halt(fsl_qdma);
	if (ret) {
		DPAA_QDMA_ERR("DMA halt failed!");
		return ret;
	}

	for (j = 0; j < fsl_qdma->num_blocks; j++) {
		block = fsl_qdma->block_base +
			FSL_QDMA_BLOCK_BASE_OFFSET(fsl_qdma, j);
		for (i = 0; i < fsl_qdma->n_queues; i++) {
			temp = fsl_queue + i + (j * fsl_qdma->n_queues);
			/*
			 * Initialize the Command Queue registers to point
			 * to the first command descriptor in memory:
			 * Dequeue Pointer Address Registers and
			 * Enqueue Pointer Address Registers.
			 */
			qdma_writel(lower_32_bits(temp->bus_addr),
				    block + FSL_QDMA_BCQDPA_SADDR(i));
			qdma_writel(upper_32_bits(temp->bus_addr),
				    block + FSL_QDMA_BCQEDPA_SADDR(i));
			qdma_writel(lower_32_bits(temp->bus_addr),
				    block + FSL_QDMA_BCQEPA_SADDR(i));
			qdma_writel(upper_32_bits(temp->bus_addr),
				    block + FSL_QDMA_BCQEEPA_SADDR(i));

			/* Initialize the queue mode. */
			reg = FSL_QDMA_BCQMR_EN;
			reg |= FSL_QDMA_BCQMR_CD_THLD(ilog2(temp->n_cq) - 4);
			reg |= FSL_QDMA_BCQMR_CQ_SIZE(ilog2(temp->n_cq) - 6);
			qdma_writel(reg, block + FSL_QDMA_BCQMR(i));
		}

		/*
		 * Workaround for erratum ERR010812: XOFF must be enabled
		 * to avoid enqueue rejections. Set SQCCMR ENTER_WM to 0x20.
		 */
		qdma_writel(FSL_QDMA_SQCCMR_ENTER_WM,
			    block + FSL_QDMA_SQCCMR);

		/*
		 * Initialize the status queue registers to point to the
		 * first command descriptor in memory:
		 * Dequeue Pointer Address Registers and
		 * Enqueue Pointer Address Registers.
		 */
		qdma_writel(
			    upper_32_bits(fsl_qdma->status[j]->bus_addr),
			    block + FSL_QDMA_SQEEPAR);
		qdma_writel(
			    lower_32_bits(fsl_qdma->status[j]->bus_addr),
			    block + FSL_QDMA_SQEPAR);
		qdma_writel(
			    upper_32_bits(fsl_qdma->status[j]->bus_addr),
			    block + FSL_QDMA_SQEDPAR);
		qdma_writel(
			    lower_32_bits(fsl_qdma->status[j]->bus_addr),
			    block + FSL_QDMA_SQDPAR);
		/* Disable status queue interrupts. */
		qdma_writel(0x0, block + FSL_QDMA_BCQIER(0));
		qdma_writel(0x0, block + FSL_QDMA_BSQICR);
		qdma_writel(0x0, block + FSL_QDMA_CQIER);

		/* Initialize the status queue mode. */
		reg = FSL_QDMA_BSQMR_EN;
		val = ilog2(fsl_qdma->status[j]->n_cq) - 6;
		reg |= FSL_QDMA_BSQMR_CQ_SIZE(val);
		qdma_writel(reg, block + FSL_QDMA_BSQMR);
	}

	reg = qdma_readl(ctrl + FSL_QDMA_DMR);
	reg &= ~FSL_QDMA_DMR_DQD;
	qdma_writel(reg, ctrl + FSL_QDMA_DMR);

	return 0;
}

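/*
 * Pop a free completion entry for the channel and fill in its
 * source/destination scatter/gather entries for a plain memcpy.
 */
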
static void *
fsl_qdma_prep_memcpy(void *fsl_chan, dma_addr_t dst,
		     dma_addr_t src, size_t len,
		     void *call_back,
		     void *param)
{
	struct fsl_qdma_comp *fsl_comp;

	fsl_comp =
		fsl_qdma_request_enqueue_desc((struct fsl_qdma_chan *)fsl_chan);
	if (!fsl_comp)
		return NULL;

	fsl_comp->qchan = fsl_chan;
	fsl_comp->call_back_func = call_back;
	fsl_comp->params = param;

	fsl_qdma_comp_fill_memcpy(fsl_comp, dst, src, len);
	return (void *)fsl_comp;
}

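/*
 * Write one command descriptor into the channel's circular command
 * queue. With RTE_DMA_OP_FLAG_SUBMIT the doorbell (BCQMR[EI]) rings
 * immediately; otherwise the entry stays pending until
 * dpaa_qdma_submit() is called.
 */
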
static int
fsl_qdma_enqueue_desc(struct fsl_qdma_chan *fsl_chan,
		      struct fsl_qdma_comp *fsl_comp,
		      uint64_t flags)
{
	struct fsl_qdma_queue *fsl_queue = fsl_chan->queue;
	void *block = fsl_queue->block_base;
	struct fsl_qdma_format *ccdf;
	u32 reg;

	/* Retrieve and store the register value in big endian
	 * to avoid bit swaps.
	 */
	reg = qdma_readl_be(block +
			    FSL_QDMA_BCQSR(fsl_queue->id));
	if (reg & (FSL_QDMA_BCQSR_QF_XOFF_BE))
		return -1;

	/* Fill the descriptor command table. */
	ccdf = (struct fsl_qdma_format *)fsl_queue->virt_head;
	qdma_desc_addr_set64(ccdf, fsl_comp->bus_addr + 16);
	qdma_ccdf_set_format(ccdf, qdma_ccdf_get_offset(fsl_comp->virt_addr));
	qdma_ccdf_set_ser(ccdf, qdma_ccdf_get_status(fsl_comp->virt_addr));
	fsl_comp->index = fsl_queue->virt_head - fsl_queue->cq;
	fsl_queue->virt_head++;

	if (fsl_queue->virt_head == fsl_queue->cq + fsl_queue->n_cq)
		fsl_queue->virt_head = fsl_queue->cq;

	list_add_tail(&fsl_comp->list, &fsl_queue->comp_used);

	if (flags == RTE_DMA_OP_FLAG_SUBMIT) {
		reg = qdma_readl_be(block + FSL_QDMA_BCQMR(fsl_queue->id));
		reg |= FSL_QDMA_BCQMR_EI_BE;
		qdma_writel_be(reg, block + FSL_QDMA_BCQMR(fsl_queue->id));
		fsl_queue->stats.submitted++;
	} else {
		fsl_queue->pending++;
	}

	return fsl_comp->index;
}

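/*
 * The first user of a queue triggers allocation of its completion pool;
 * later users just bump the reference count.
 */
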
static int
fsl_qdma_alloc_chan_resources(struct fsl_qdma_chan *fsl_chan)
{
	struct fsl_qdma_queue *fsl_queue = fsl_chan->queue;
	struct fsl_qdma_engine *fsl_qdma = fsl_chan->qdma;
	int ret;

	if (fsl_queue->count++)
		goto finally;

	INIT_LIST_HEAD(&fsl_queue->comp_free);
	INIT_LIST_HEAD(&fsl_queue->comp_used);

	ret = fsl_qdma_pre_request_enqueue_comp_sd_desc(fsl_queue,
				FSL_QDMA_COMMAND_BUFFER_SIZE, 64);
	if (ret) {
		DPAA_QDMA_ERR(
			"failed to alloc dma buffer for comp descriptor\n");
		goto exit;
	}

finally:
	return fsl_qdma->desc_allocated++;

exit:
	return -ENOMEM;
}

static int
dpaa_info_get(const struct rte_dma_dev *dev, struct rte_dma_info *dev_info,
	      uint32_t info_sz)
{
#define DPAADMA_MAX_DESC	64
#define DPAADMA_MIN_DESC	64

	RTE_SET_USED(dev);
	RTE_SET_USED(info_sz);

	dev_info->dev_capa = RTE_DMA_CAPA_MEM_TO_MEM |
			     RTE_DMA_CAPA_MEM_TO_DEV |
			     RTE_DMA_CAPA_DEV_TO_DEV |
			     RTE_DMA_CAPA_DEV_TO_MEM |
			     RTE_DMA_CAPA_SILENT |
			     RTE_DMA_CAPA_OPS_COPY;
	dev_info->max_vchans = 1;
	dev_info->max_desc = DPAADMA_MAX_DESC;
	dev_info->min_desc = DPAADMA_MIN_DESC;

	return 0;
}

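/*
 * Map a vchan to the first free hardware channel of the next free block
 * and allocate that channel's queue resources.
 */
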
static int
dpaa_get_channel(struct fsl_qdma_engine *fsl_qdma, uint16_t vchan)
{
	u32 i, start, end;
	int ret;

	start = fsl_qdma->free_block_id * QDMA_QUEUES;
	fsl_qdma->free_block_id++;

	end = start + 1;
	for (i = start; i < end; i++) {
		struct fsl_qdma_chan *fsl_chan = &fsl_qdma->chans[i];

		if (fsl_chan->free) {
			fsl_chan->free = false;
			ret = fsl_qdma_alloc_chan_resources(fsl_chan);
			if (ret)
				return ret;

			fsl_qdma->vchan_map[vchan] = i;
			return 0;
		}
	}

	return -1;
}

static void
dma_release(void *fsl_chan)
{
	((struct fsl_qdma_chan *)fsl_chan)->free = true;
	fsl_qdma_free_chan_resources((struct fsl_qdma_chan *)fsl_chan);
}

static int
dpaa_qdma_configure(__rte_unused struct rte_dma_dev *dmadev,
		    __rte_unused const struct rte_dma_conf *dev_conf,
		    __rte_unused uint32_t conf_sz)
{
	return 0;
}

static int
dpaa_qdma_start(__rte_unused struct rte_dma_dev *dev)
{
	return 0;
}

static int
dpaa_qdma_close(__rte_unused struct rte_dma_dev *dev)
{
	return 0;
}

static int
dpaa_qdma_queue_setup(struct rte_dma_dev *dmadev,
		      uint16_t vchan,
		      __rte_unused const struct rte_dma_vchan_conf *conf,
		      __rte_unused uint32_t conf_sz)
{
	struct fsl_qdma_engine *fsl_qdma = dmadev->data->dev_private;

	return dpaa_get_channel(fsl_qdma, vchan);
}

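/*
 * Flush everything queued with "no submit": ring the doorbell once per
 * pending descriptor so the hardware picks them all up.
 */
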
static int
dpaa_qdma_submit(void *dev_private, uint16_t vchan)
{
	struct fsl_qdma_engine *fsl_qdma = (struct fsl_qdma_engine *)dev_private;
	struct fsl_qdma_chan *fsl_chan =
		&fsl_qdma->chans[fsl_qdma->vchan_map[vchan]];
	struct fsl_qdma_queue *fsl_queue = fsl_chan->queue;
	void *block = fsl_queue->block_base;
	u32 reg;

	while (fsl_queue->pending) {
		reg = qdma_readl_be(block + FSL_QDMA_BCQMR(fsl_queue->id));
		reg |= FSL_QDMA_BCQMR_EI_BE;
		qdma_writel_be(reg, block + FSL_QDMA_BCQMR(fsl_queue->id));
		fsl_queue->pending--;
		fsl_queue->stats.submitted++;
	}

	return 0;
}

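/*
 * rte_dma "copy" fast-path entry point. For reference, a typical
 * caller-side sequence through the public dmadev API (illustrative
 * sketch only, not part of this driver; dev_id/vchan values are
 * hypothetical):
 *
 *	rte_dma_copy(dev_id, vchan, src_iova, dst_iova, len,
 *		     RTE_DMA_OP_FLAG_SUBMIT);
 *	rte_dma_completed(dev_id, vchan, 1, &last_idx, &has_error);
 *
 * rte_dma_copy() dispatches to dpaa_qdma_enqueue() below and
 * rte_dma_completed() to dpaa_qdma_dequeue().
 */
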
static int
dpaa_qdma_enqueue(void *dev_private, uint16_t vchan,
		  rte_iova_t src, rte_iova_t dst,
		  uint32_t length, uint64_t flags)
{
	struct fsl_qdma_engine *fsl_qdma = (struct fsl_qdma_engine *)dev_private;
	struct fsl_qdma_chan *fsl_chan =
		&fsl_qdma->chans[fsl_qdma->vchan_map[vchan]];
	int ret;

	void *fsl_comp = NULL;

	fsl_comp = fsl_qdma_prep_memcpy(fsl_chan,
			(dma_addr_t)dst, (dma_addr_t)src,
			length, NULL, NULL);
	if (!fsl_comp) {
		DPAA_QDMA_DP_DEBUG("fsl_comp is NULL\n");
		return -1;
	}
	ret = fsl_qdma_enqueue_desc(fsl_chan, fsl_comp, flags);

	return ret;
}

static uint16_t
dpaa_qdma_dequeue_status(void *dev_private, uint16_t vchan,
			 const uint16_t nb_cpls, uint16_t *last_idx,
			 enum rte_dma_status_code *st)
{
	struct fsl_qdma_engine *fsl_qdma = (struct fsl_qdma_engine *)dev_private;
	int id = (int)((fsl_qdma->vchan_map[vchan]) / QDMA_QUEUES);
	void *block;
	int intr;
	void *status = fsl_qdma->status_base;
	struct fsl_qdma_chan *fsl_chan =
		&fsl_qdma->chans[fsl_qdma->vchan_map[vchan]];
	struct fsl_qdma_queue *fsl_queue = fsl_chan->queue;

	intr = qdma_readl_be(status + FSL_QDMA_DEDR);
	if (intr) {
		DPAA_QDMA_ERR("DMA transaction error! %x\n", intr);
		intr = qdma_readl(status + FSL_QDMA_DECFDW0R);
		DPAA_QDMA_INFO("reg FSL_QDMA_DECFDW0R %x\n", intr);
		intr = qdma_readl(status + FSL_QDMA_DECFDW1R);
		DPAA_QDMA_INFO("reg FSL_QDMA_DECFDW1R %x\n", intr);
		intr = qdma_readl(status + FSL_QDMA_DECFDW2R);
		DPAA_QDMA_INFO("reg FSL_QDMA_DECFDW2R %x\n", intr);
		intr = qdma_readl(status + FSL_QDMA_DECFDW3R);
		DPAA_QDMA_INFO("reg FSL_QDMA_DECFDW3R %x\n", intr);
		intr = qdma_readl(status + FSL_QDMA_DECFQIDR);
		DPAA_QDMA_INFO("reg FSL_QDMA_DECFQIDR %x\n", intr);
		intr = qdma_readl(status + FSL_QDMA_DECBR);
		DPAA_QDMA_INFO("reg FSL_QDMA_DECBR %x\n", intr);
		/* Clear all detected errors. */
		qdma_writel(0xffffffff,
			    status + FSL_QDMA_DEDR);
		intr = qdma_readl(status + FSL_QDMA_DEDR);
		fsl_queue->stats.errors++;
	}

	block = fsl_qdma->block_base +
		FSL_QDMA_BLOCK_BASE_OFFSET(fsl_qdma, id);

	intr = fsl_qdma_queue_transfer_complete(fsl_qdma, block, id, nb_cpls,
						last_idx, st);
	fsl_queue->stats.completed += intr;

	return intr;
}

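/*
 * Same completion path as above, but reporting errors through the
 * has_error flag instead of per-operation status codes.
 */
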
static uint16_t
dpaa_qdma_dequeue(void *dev_private,
		  uint16_t vchan, const uint16_t nb_cpls,
		  uint16_t *last_idx, bool *has_error)
{
	struct fsl_qdma_engine *fsl_qdma = (struct fsl_qdma_engine *)dev_private;
	int id = (int)((fsl_qdma->vchan_map[vchan]) / QDMA_QUEUES);
	void *block;
	int intr;
	void *status = fsl_qdma->status_base;
	struct fsl_qdma_chan *fsl_chan =
		&fsl_qdma->chans[fsl_qdma->vchan_map[vchan]];
	struct fsl_qdma_queue *fsl_queue = fsl_chan->queue;

	intr = qdma_readl_be(status + FSL_QDMA_DEDR);
	if (intr) {
		DPAA_QDMA_ERR("DMA transaction error! %x\n", intr);
		intr = qdma_readl(status + FSL_QDMA_DECFDW0R);
		DPAA_QDMA_INFO("reg FSL_QDMA_DECFDW0R %x\n", intr);
		intr = qdma_readl(status + FSL_QDMA_DECFDW1R);
		DPAA_QDMA_INFO("reg FSL_QDMA_DECFDW1R %x\n", intr);
		intr = qdma_readl(status + FSL_QDMA_DECFDW2R);
		DPAA_QDMA_INFO("reg FSL_QDMA_DECFDW2R %x\n", intr);
		intr = qdma_readl(status + FSL_QDMA_DECFDW3R);
		DPAA_QDMA_INFO("reg FSL_QDMA_DECFDW3R %x\n", intr);
		intr = qdma_readl(status + FSL_QDMA_DECFQIDR);
		DPAA_QDMA_INFO("reg FSL_QDMA_DECFQIDR %x\n", intr);
		intr = qdma_readl(status + FSL_QDMA_DECBR);
		DPAA_QDMA_INFO("reg FSL_QDMA_DECBR %x\n", intr);
		/* Clear all detected errors. */
		qdma_writel(0xffffffff,
			    status + FSL_QDMA_DEDR);
		intr = qdma_readl(status + FSL_QDMA_DEDR);
		*has_error = true;
		fsl_queue->stats.errors++;
	}

	block = fsl_qdma->block_base +
		FSL_QDMA_BLOCK_BASE_OFFSET(fsl_qdma, id);

	intr = fsl_qdma_queue_transfer_complete(fsl_qdma, block, id, nb_cpls,
						last_idx, NULL);
	fsl_queue->stats.completed += intr;

	return intr;
}

static int
dpaa_qdma_stats_get(const struct rte_dma_dev *dmadev, uint16_t vchan,
		    struct rte_dma_stats *rte_stats, uint32_t size)
{
	struct fsl_qdma_engine *fsl_qdma = dmadev->data->dev_private;
	struct fsl_qdma_chan *fsl_chan =
		&fsl_qdma->chans[fsl_qdma->vchan_map[vchan]];
	struct fsl_qdma_queue *fsl_queue = fsl_chan->queue;
	struct rte_dma_stats *stats = &fsl_queue->stats;

	/* Compare against the struct size, not the pointer size. */
	if (size < sizeof(struct rte_dma_stats))
		return -EINVAL;
	if (rte_stats == NULL)
		return -EINVAL;

	*rte_stats = *stats;

	return 0;
}

static int
dpaa_qdma_stats_reset(struct rte_dma_dev *dmadev, uint16_t vchan)
{
	struct fsl_qdma_engine *fsl_qdma = dmadev->data->dev_private;
	struct fsl_qdma_chan *fsl_chan =
		&fsl_qdma->chans[fsl_qdma->vchan_map[vchan]];
	struct fsl_qdma_queue *fsl_queue = fsl_chan->queue;

	fsl_queue->stats = (struct rte_dma_stats){0};

	return 0;
}

static struct rte_dma_dev_ops dpaa_qdma_ops = {
	.dev_info_get		  = dpaa_info_get,
	.dev_configure            = dpaa_qdma_configure,
	.dev_start                = dpaa_qdma_start,
	.dev_close                = dpaa_qdma_close,
	.vchan_setup		  = dpaa_qdma_queue_setup,
	.stats_get		  = dpaa_qdma_stats_get,
	.stats_reset		  = dpaa_qdma_stats_reset,
};

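/*
 * One-time engine setup: mmap the qDMA CCSR register window from
 * /dev/mem, carve it into control/status/block windows, allocate the
 * status and command queues, bind channels to queues and program the
 * registers.
 */
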
static int
dpaa_qdma_init(struct rte_dma_dev *dmadev)
{
	struct fsl_qdma_engine *fsl_qdma = dmadev->data->dev_private;
	struct fsl_qdma_chan *fsl_chan;
	uint64_t phys_addr;
	unsigned int len;
	int ccsr_qdma_fd;
	int regs_size;
	int ret;
	u32 i;

	fsl_qdma->desc_allocated = 0;
	fsl_qdma->n_chans = VIRT_CHANNELS;
	fsl_qdma->n_queues = QDMA_QUEUES;
	fsl_qdma->num_blocks = QDMA_BLOCKS;
	fsl_qdma->block_offset = QDMA_BLOCK_OFFSET;

	len = sizeof(*fsl_chan) * fsl_qdma->n_chans;
	fsl_qdma->chans = rte_zmalloc("qdma: fsl chans", len, 0);
	if (!fsl_qdma->chans)
		return -1;

	len = sizeof(struct fsl_qdma_queue *) * fsl_qdma->num_blocks;
	fsl_qdma->status = rte_zmalloc("qdma: fsl status", len, 0);
	if (!fsl_qdma->status) {
		rte_free(fsl_qdma->chans);
		return -1;
	}

	for (i = 0; i < fsl_qdma->num_blocks; i++) {
		rte_atomic32_init(&wait_task[i]);
		fsl_qdma->status[i] = fsl_qdma_prep_status_queue();
		if (!fsl_qdma->status[i])
			goto err;
	}

	ccsr_qdma_fd = open("/dev/mem", O_RDWR);
	if (unlikely(ccsr_qdma_fd < 0)) {
		DPAA_QDMA_ERR("Can not open /dev/mem for qdma CCSR map");
		goto err;
	}

	regs_size = fsl_qdma->block_offset * (fsl_qdma->num_blocks + 2);
	phys_addr = QDMA_CCSR_BASE;
	fsl_qdma->ctrl_base = mmap(NULL, regs_size, PROT_READ |
				   PROT_WRITE, MAP_SHARED,
				   ccsr_qdma_fd, phys_addr);

	close(ccsr_qdma_fd);
	if (fsl_qdma->ctrl_base == MAP_FAILED) {
		DPAA_QDMA_ERR("Can not map CCSR base qdma: Phys: %08" PRIx64
			      " size %d\n", phys_addr, regs_size);
		goto err;
	}

	fsl_qdma->status_base = fsl_qdma->ctrl_base + QDMA_BLOCK_OFFSET;
	fsl_qdma->block_base = fsl_qdma->status_base + QDMA_BLOCK_OFFSET;

	fsl_qdma->queue = fsl_qdma_alloc_queue_resources(fsl_qdma);
	if (!fsl_qdma->queue) {
		munmap(fsl_qdma->ctrl_base, regs_size);
		goto err;
	}

	for (i = 0; i < fsl_qdma->n_chans; i++) {
		struct fsl_qdma_chan *fsl_chan = &fsl_qdma->chans[i];

		fsl_chan->qdma = fsl_qdma;
		fsl_chan->queue = fsl_qdma->queue + i % (fsl_qdma->n_queues *
							 fsl_qdma->num_blocks);
		fsl_chan->free = true;
	}

	ret = fsl_qdma_reg_init(fsl_qdma);
	if (ret) {
		DPAA_QDMA_ERR("Can't Initialize the qDMA engine.\n");
		munmap(fsl_qdma->ctrl_base, regs_size);
		goto err;
	}

	return 0;

err:
	rte_free(fsl_qdma->chans);
	rte_free(fsl_qdma->status);

	return -1;
}

static int
dpaa_qdma_probe(__rte_unused struct rte_dpaa_driver *dpaa_drv,
		struct rte_dpaa_device *dpaa_dev)
{
	struct rte_dma_dev *dmadev;
	int ret;

	dmadev = rte_dma_pmd_allocate(dpaa_dev->device.name,
				      rte_socket_id(),
				      sizeof(struct fsl_qdma_engine));
	if (!dmadev) {
		DPAA_QDMA_ERR("Unable to allocate dmadevice");
		return -EINVAL;
	}

	dpaa_dev->dmadev = dmadev;
	dmadev->dev_ops = &dpaa_qdma_ops;
	dmadev->device = &dpaa_dev->device;
	dmadev->fp_obj->dev_private = dmadev->data->dev_private;
	dmadev->fp_obj->copy = dpaa_qdma_enqueue;
	dmadev->fp_obj->submit = dpaa_qdma_submit;
	dmadev->fp_obj->completed = dpaa_qdma_dequeue;
	dmadev->fp_obj->completed_status = dpaa_qdma_dequeue_status;

	/* Invoke PMD device initialization function */
	ret = dpaa_qdma_init(dmadev);
	if (ret) {
		(void)rte_dma_pmd_release(dpaa_dev->device.name);
		return ret;
	}

	dmadev->state = RTE_DMA_DEV_READY;
	return 0;
}

static int
dpaa_qdma_remove(struct rte_dpaa_device *dpaa_dev)
{
	struct rte_dma_dev *dmadev = dpaa_dev->dmadev;
	struct fsl_qdma_engine *fsl_qdma = dmadev->data->dev_private;
	int i = 0, max = QDMA_QUEUES * QDMA_BLOCKS;

	for (i = 0; i < max; i++) {
		struct fsl_qdma_chan *fsl_chan = &fsl_qdma->chans[i];

		if (fsl_chan->free == false)
			dma_release(fsl_chan);
	}

	rte_free(fsl_qdma->status);
	rte_free(fsl_qdma->chans);

	(void)rte_dma_pmd_release(dpaa_dev->device.name);

	return 0;
}

static struct rte_dpaa_driver rte_dpaa_qdma_pmd = {
	.drv_type = FSL_DPAA_QDMA,
	.probe = dpaa_qdma_probe,
	.remove = dpaa_qdma_remove,
};

RTE_PMD_REGISTER_DPAA(dpaa_qdma, rte_dpaa_qdma_pmd);
RTE_LOG_REGISTER_DEFAULT(dpaa_qdma_logtype, INFO);