dma/dpaa: support basic operations
drivers/dma/dpaa/dpaa_qdma.c
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2021 NXP
 */

#include <fcntl.h>              /* open() */
#include <sys/mman.h>           /* mmap(), munmap() */
#include <unistd.h>             /* close() */

#include <rte_dpaa_bus.h>
#include <rte_dmadev_pmd.h>
#include <rte_malloc.h>         /* rte_malloc(), rte_zmalloc(), rte_free() */

#include "dpaa_qdma.h"
#include "dpaa_qdma_logs.h"

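/*
 * Split a 64-bit bus address into the descriptor's high/low address fields;
 * the low word is stored little-endian as the hardware expects.
 */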
static inline void
qdma_desc_addr_set64(struct fsl_qdma_format *ccdf, u64 addr)
{
        ccdf->addr_hi = upper_32_bits(addr);
        ccdf->addr_lo = rte_cpu_to_le_32(lower_32_bits(addr));
}

static inline void
qdma_csgf_set_len(struct fsl_qdma_format *csgf, int len)
{
        csgf->cfg = rte_cpu_to_le_32(len & QDMA_SG_LEN_MASK);
}

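/* Floor of log2(x); queue sizes are programmed as power-of-two exponents. */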
static inline int
ilog2(int x)
{
        int log = 0;

        x >>= 1;

        while (x) {
                log++;
                x >>= 1;
        }
        return log;
}

static u32
qdma_readl(void *addr)
{
        return QDMA_IN(addr);
}

static void
qdma_writel(u32 val, void *addr)
{
        QDMA_OUT(addr, val);
}

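/*
 * Allocate a DMA-able buffer with the requested alignment and return its
 * virtual address; the IOVA (bus address) is stored through phy_addr.
 */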
static void *
dma_pool_alloc(int size, int aligned, dma_addr_t *phy_addr)
{
        void *virt_addr;

        virt_addr = rte_malloc("dma pool alloc", size, aligned);
        if (!virt_addr)
                return NULL;

        *phy_addr = rte_mem_virt2iova(virt_addr);

        return virt_addr;
}

static void
dma_pool_free(void *addr)
{
        rte_free(addr);
}

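/*
 * Drop one reference on the channel's queue.  When the last user is gone,
 * wait for outstanding work on the queue's block to finish, then release
 * every completion descriptor on both the in-use and free lists.
 */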
static void
fsl_qdma_free_chan_resources(struct fsl_qdma_chan *fsl_chan)
{
        struct fsl_qdma_queue *fsl_queue = fsl_chan->queue;
        struct fsl_qdma_engine *fsl_qdma = fsl_chan->qdma;
        struct fsl_qdma_comp *comp_temp, *_comp_temp;
        int id;

        if (--fsl_queue->count)
                goto finally;

        id = (fsl_queue->block_base - fsl_qdma->block_base) /
              fsl_qdma->block_offset;

        while (rte_atomic32_read(&wait_task[id]) == 1)
                rte_delay_us(QDMA_DELAY);

        list_for_each_entry_safe(comp_temp, _comp_temp,
                                 &fsl_queue->comp_used, list) {
                list_del(&comp_temp->list);
                dma_pool_free(comp_temp->virt_addr);
                dma_pool_free(comp_temp->desc_virt_addr);
                rte_free(comp_temp);
        }

        list_for_each_entry_safe(comp_temp, _comp_temp,
                                 &fsl_queue->comp_free, list) {
                list_del(&comp_temp->list);
                dma_pool_free(comp_temp->virt_addr);
                dma_pool_free(comp_temp->desc_virt_addr);
                rte_free(comp_temp);
        }

finally:
        fsl_qdma->desc_allocated--;
}

/*
 * Pre-allocate command descriptors and compound S/G tables for enqueue.
 */
static int
fsl_qdma_pre_request_enqueue_comp_sd_desc(
                                        struct fsl_qdma_queue *queue,
                                        int size, int aligned)
{
        struct fsl_qdma_comp *comp_temp, *_comp_temp;
        struct fsl_qdma_sdf *sdf;
        struct fsl_qdma_ddf *ddf;
        struct fsl_qdma_format *csgf_desc;
        int i;

        for (i = 0; i < (int)(queue->n_cq + COMMAND_QUEUE_OVERFLOW); i++) {
                comp_temp = rte_zmalloc("qdma: comp temp",
                                        sizeof(*comp_temp), 0);
                if (!comp_temp)
                        return -ENOMEM;

                comp_temp->virt_addr =
                dma_pool_alloc(size, aligned, &comp_temp->bus_addr);
                if (!comp_temp->virt_addr) {
                        rte_free(comp_temp);
                        goto fail;
                }

                comp_temp->desc_virt_addr =
                dma_pool_alloc(size, aligned, &comp_temp->desc_bus_addr);
                if (!comp_temp->desc_virt_addr) {
                        dma_pool_free(comp_temp->virt_addr);
                        rte_free(comp_temp);
                        goto fail;
                }

                memset(comp_temp->virt_addr, 0, FSL_QDMA_COMMAND_BUFFER_SIZE);
                memset(comp_temp->desc_virt_addr, 0,
                       FSL_QDMA_DESCRIPTOR_BUFFER_SIZE);

                csgf_desc = (struct fsl_qdma_format *)comp_temp->virt_addr + 1;
                sdf = (struct fsl_qdma_sdf *)comp_temp->desc_virt_addr;
                ddf = (struct fsl_qdma_ddf *)comp_temp->desc_virt_addr + 1;
                /* Compound command descriptor (frame list table) */
                qdma_desc_addr_set64(csgf_desc, comp_temp->desc_bus_addr);
                /* The compound S/G descriptor length must be 32. */
                qdma_csgf_set_len(csgf_desc, 32);
                /* Descriptor buffer */
                sdf->cmd = rte_cpu_to_le_32(FSL_QDMA_CMD_RWTTYPE <<
                               FSL_QDMA_CMD_RWTTYPE_OFFSET);
                ddf->cmd = rte_cpu_to_le_32(FSL_QDMA_CMD_RWTTYPE <<
                               FSL_QDMA_CMD_RWTTYPE_OFFSET);
                ddf->cmd |= rte_cpu_to_le_32(FSL_QDMA_CMD_LWC <<
                                FSL_QDMA_CMD_LWC_OFFSET);

                list_add_tail(&comp_temp->list, &queue->comp_free);
        }

        return 0;

fail:
        list_for_each_entry_safe(comp_temp, _comp_temp,
                                 &queue->comp_free, list) {
                list_del(&comp_temp->list);
                dma_pool_free(comp_temp->virt_addr);
                dma_pool_free(comp_temp->desc_virt_addr);
                rte_free(comp_temp);
        }

        return -ENOMEM;
}

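/*
 * Allocate the command queue rings for every (block, queue) pair and
 * initialize the software state that tracks them.
 */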
static struct fsl_qdma_queue *
fsl_qdma_alloc_queue_resources(struct fsl_qdma_engine *fsl_qdma)
{
        struct fsl_qdma_queue *queue_head, *queue_temp;
        int len, i, j;
        int queue_num;
        int blocks;
        unsigned int queue_size[FSL_QDMA_QUEUE_MAX];

        queue_num = fsl_qdma->n_queues;
        blocks = fsl_qdma->num_blocks;

        len = sizeof(*queue_head) * queue_num * blocks;
        queue_head = rte_zmalloc("qdma: queue head", len, 0);
        if (!queue_head)
                return NULL;

        for (i = 0; i < FSL_QDMA_QUEUE_MAX; i++)
                queue_size[i] = QDMA_QUEUE_SIZE;

        for (j = 0; j < blocks; j++) {
                for (i = 0; i < queue_num; i++) {
                        if (queue_size[i] > FSL_QDMA_CIRCULAR_DESC_SIZE_MAX ||
                            queue_size[i] < FSL_QDMA_CIRCULAR_DESC_SIZE_MIN) {
                                DPAA_QDMA_ERR("Invalid queue size.\n");
                                goto fail;
                        }
                        queue_temp = queue_head + i + (j * queue_num);

                        queue_temp->cq =
                        dma_pool_alloc(sizeof(struct fsl_qdma_format) *
                                       queue_size[i],
                                       sizeof(struct fsl_qdma_format) *
                                       queue_size[i], &queue_temp->bus_addr);

                        if (!queue_temp->cq)
                                goto fail;

                        memset(queue_temp->cq, 0x0, queue_size[i] *
                               sizeof(struct fsl_qdma_format));

                        queue_temp->block_base = fsl_qdma->block_base +
                                FSL_QDMA_BLOCK_BASE_OFFSET(fsl_qdma, j);
                        queue_temp->n_cq = queue_size[i];
                        queue_temp->id = i;
                        queue_temp->count = 0;
                        queue_temp->pending = 0;
                        queue_temp->virt_head = queue_temp->cq;
                }
        }
        return queue_head;

fail:
        for (j = 0; j < blocks; j++) {
                for (i = 0; i < queue_num; i++) {
                        queue_temp = queue_head + i + (j * queue_num);
                        dma_pool_free(queue_temp->cq);
                }
        }
        rte_free(queue_head);

        return NULL;
}

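/* Allocate and initialize one per-block status (completion) queue. */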
static struct fsl_qdma_queue *
fsl_qdma_prep_status_queue(void)
{
        struct fsl_qdma_queue *status_head;
        unsigned int status_size;

        status_size = QDMA_STATUS_SIZE;
        if (status_size > FSL_QDMA_CIRCULAR_DESC_SIZE_MAX ||
            status_size < FSL_QDMA_CIRCULAR_DESC_SIZE_MIN) {
                DPAA_QDMA_ERR("Invalid status queue size.\n");
                return NULL;
        }

        status_head = rte_zmalloc("qdma: status head", sizeof(*status_head), 0);
        if (!status_head)
                return NULL;

        /* Buffer for the queue commands. */
        status_head->cq = dma_pool_alloc(sizeof(struct fsl_qdma_format) *
                                         status_size,
                                         sizeof(struct fsl_qdma_format) *
                                         status_size,
                                         &status_head->bus_addr);

        if (!status_head->cq) {
                rte_free(status_head);
                return NULL;
        }

        memset(status_head->cq, 0x0, status_size *
               sizeof(struct fsl_qdma_format));
        status_head->n_cq = status_size;
        status_head->virt_head = status_head->cq;

        return status_head;
}

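/*
 * Halt the engine: disable dequeue, stop all command queues, wait for the
 * engine to drain, then disable the status queues and clear any pending
 * command queue interrupt-detect bits.
 */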
static int
fsl_qdma_halt(struct fsl_qdma_engine *fsl_qdma)
{
        void *ctrl = fsl_qdma->ctrl_base;
        void *block;
        int i, count = RETRIES;
        unsigned int j;
        u32 reg;

        /* Disable the command queue and wait for idle state. */
        reg = qdma_readl(ctrl + FSL_QDMA_DMR);
        reg |= FSL_QDMA_DMR_DQD;
        qdma_writel(reg, ctrl + FSL_QDMA_DMR);
        for (j = 0; j < fsl_qdma->num_blocks; j++) {
                block = fsl_qdma->block_base +
                        FSL_QDMA_BLOCK_BASE_OFFSET(fsl_qdma, j);
                for (i = 0; i < FSL_QDMA_QUEUE_NUM_MAX; i++)
                        qdma_writel(0, block + FSL_QDMA_BCQMR(i));
        }
        while (true) {
                reg = qdma_readl(ctrl + FSL_QDMA_DSR);
                if (!(reg & FSL_QDMA_DSR_DB))
                        break;
                if (count-- < 0)
                        return -EBUSY;
                rte_delay_us(100);
        }

        for (j = 0; j < fsl_qdma->num_blocks; j++) {
                block = fsl_qdma->block_base +
                        FSL_QDMA_BLOCK_BASE_OFFSET(fsl_qdma, j);

                /* Disable status queue. */
                qdma_writel(0, block + FSL_QDMA_BSQMR);

                /*
                 * Clear the command queue interrupt detect register for
                 * all queues.
                 */
                qdma_writel(0xffffffff, block + FSL_QDMA_BCQIDR(0));
        }

        return 0;
}

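/*
 * Program the hardware for all blocks: halt the engine, point each command
 * queue and status queue at its ring, apply the ERR010812 workaround, mask
 * the queue interrupts, and finally re-enable dequeue.
 */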
static int
fsl_qdma_reg_init(struct fsl_qdma_engine *fsl_qdma)
{
        struct fsl_qdma_queue *fsl_queue = fsl_qdma->queue;
        struct fsl_qdma_queue *temp;
        void *ctrl = fsl_qdma->ctrl_base;
        void *block;
        u32 i, j;
        u32 reg;
        int ret, val;

        /* Try to halt the qDMA engine first. */
        ret = fsl_qdma_halt(fsl_qdma);
        if (ret) {
                DPAA_QDMA_ERR("DMA halt failed!");
                return ret;
        }

        for (j = 0; j < fsl_qdma->num_blocks; j++) {
                block = fsl_qdma->block_base +
                        FSL_QDMA_BLOCK_BASE_OFFSET(fsl_qdma, j);
                for (i = 0; i < fsl_qdma->n_queues; i++) {
                        temp = fsl_queue + i + (j * fsl_qdma->n_queues);
                        /*
                         * Initialize the command queue enqueue and dequeue
                         * pointer address registers to point to the first
                         * command descriptor in memory.
                         */
                        qdma_writel(lower_32_bits(temp->bus_addr),
                                    block + FSL_QDMA_BCQDPA_SADDR(i));
                        qdma_writel(upper_32_bits(temp->bus_addr),
                                    block + FSL_QDMA_BCQEDPA_SADDR(i));
                        qdma_writel(lower_32_bits(temp->bus_addr),
                                    block + FSL_QDMA_BCQEPA_SADDR(i));
                        qdma_writel(upper_32_bits(temp->bus_addr),
                                    block + FSL_QDMA_BCQEEPA_SADDR(i));

                        /* Initialize the queue mode. */
                        reg = FSL_QDMA_BCQMR_EN;
                        reg |= FSL_QDMA_BCQMR_CD_THLD(ilog2(temp->n_cq) - 4);
                        reg |= FSL_QDMA_BCQMR_CQ_SIZE(ilog2(temp->n_cq) - 6);
                        qdma_writel(reg, block + FSL_QDMA_BCQMR(i));
                }

                /*
                 * Workaround for erratum ERR010812: XOFF must be enabled
                 * to avoid enqueue rejections, so set SQCCMR ENTER_WM to
                 * 0x20.
                 */
                qdma_writel(FSL_QDMA_SQCCMR_ENTER_WM,
                            block + FSL_QDMA_SQCCMR);

                /*
                 * Initialize the status queue enqueue and dequeue pointer
                 * address registers to point to the first command
                 * descriptor in memory.
                 */
                qdma_writel(upper_32_bits(fsl_qdma->status[j]->bus_addr),
                            block + FSL_QDMA_SQEEPAR);
                qdma_writel(lower_32_bits(fsl_qdma->status[j]->bus_addr),
                            block + FSL_QDMA_SQEPAR);
                qdma_writel(upper_32_bits(fsl_qdma->status[j]->bus_addr),
                            block + FSL_QDMA_SQEDPAR);
                qdma_writel(lower_32_bits(fsl_qdma->status[j]->bus_addr),
                            block + FSL_QDMA_SQDPAR);

                /* Disable the command and status queue interrupts. */
                qdma_writel(0x0, block + FSL_QDMA_BCQIER(0));
                qdma_writel(0x0, block + FSL_QDMA_BSQICR);
                qdma_writel(0x0, block + FSL_QDMA_CQIER);

                /* Initialize the status queue mode. */
                reg = FSL_QDMA_BSQMR_EN;
                val = ilog2(fsl_qdma->status[j]->n_cq) - 6;
                reg |= FSL_QDMA_BSQMR_CQ_SIZE(val);
                qdma_writel(reg, block + FSL_QDMA_BSQMR);
        }

        reg = qdma_readl(ctrl + FSL_QDMA_DMR);
        reg &= ~FSL_QDMA_DMR_DQD;
        qdma_writel(reg, ctrl + FSL_QDMA_DMR);

        return 0;
}

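/*
 * Take a reference on the channel's queue; the first user pre-allocates the
 * completion and descriptor pools.  Returns the previous value of the
 * engine-wide descriptor-allocation count (0 on the first allocation) or
 * -ENOMEM on failure.
 */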
static int
fsl_qdma_alloc_chan_resources(struct fsl_qdma_chan *fsl_chan)
{
        struct fsl_qdma_queue *fsl_queue = fsl_chan->queue;
        struct fsl_qdma_engine *fsl_qdma = fsl_chan->qdma;
        int ret;

        if (fsl_queue->count++)
                goto finally;

        INIT_LIST_HEAD(&fsl_queue->comp_free);
        INIT_LIST_HEAD(&fsl_queue->comp_used);

        ret = fsl_qdma_pre_request_enqueue_comp_sd_desc(fsl_queue,
                                FSL_QDMA_COMMAND_BUFFER_SIZE, 64);
        if (ret) {
                DPAA_QDMA_ERR(
                        "failed to alloc DMA buffers for comp descriptors\n");
                goto exit;
        }

finally:
        return fsl_qdma->desc_allocated++;

exit:
        return -ENOMEM;
}

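/*
 * dmadev op: report device capabilities.  One virtual channel is exposed
 * and the descriptor ring depth is fixed at 64.
 */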
static int
dpaa_info_get(const struct rte_dma_dev *dev, struct rte_dma_info *dev_info,
              uint32_t info_sz)
{
#define DPAADMA_MAX_DESC        64
#define DPAADMA_MIN_DESC        64

        RTE_SET_USED(dev);
        RTE_SET_USED(info_sz);

        dev_info->dev_capa = RTE_DMA_CAPA_MEM_TO_MEM |
                             RTE_DMA_CAPA_MEM_TO_DEV |
                             RTE_DMA_CAPA_DEV_TO_DEV |
                             RTE_DMA_CAPA_DEV_TO_MEM |
                             RTE_DMA_CAPA_SILENT |
                             RTE_DMA_CAPA_OPS_COPY;
        dev_info->max_vchans = 1;
        dev_info->max_desc = DPAADMA_MAX_DESC;
        dev_info->min_desc = DPAADMA_MIN_DESC;

        return 0;
}

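/*
 * Bind a virtual channel to the first free hardware channel of the next
 * unused block and allocate that channel's queue resources.
 */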
static int
dpaa_get_channel(struct fsl_qdma_engine *fsl_qdma, uint16_t vchan)
{
        u32 i, start, end;
        int ret;

        start = fsl_qdma->free_block_id * QDMA_QUEUES;
        fsl_qdma->free_block_id++;

        end = start + 1;
        for (i = start; i < end; i++) {
                struct fsl_qdma_chan *fsl_chan = &fsl_qdma->chans[i];

                if (fsl_chan->free) {
                        fsl_chan->free = false;
                        ret = fsl_qdma_alloc_chan_resources(fsl_chan);
                        if (ret < 0)
                                return ret;

                        fsl_qdma->vchan_map[vchan] = i;
                        return 0;
                }
        }

        return -1;
}

static void
dma_release(void *fsl_chan)
{
        ((struct fsl_qdma_chan *)fsl_chan)->free = true;
        fsl_qdma_free_chan_resources((struct fsl_qdma_chan *)fsl_chan);
}

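/*
 * The configure, start and close dmadev ops need no device-specific work;
 * vchan setup below maps the vchan onto a hardware channel.
 */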
static int
dpaa_qdma_configure(__rte_unused struct rte_dma_dev *dmadev,
                    __rte_unused const struct rte_dma_conf *dev_conf,
                    __rte_unused uint32_t conf_sz)
{
        return 0;
}

static int
dpaa_qdma_start(__rte_unused struct rte_dma_dev *dev)
{
        return 0;
}

static int
dpaa_qdma_close(__rte_unused struct rte_dma_dev *dev)
{
        return 0;
}

static int
dpaa_qdma_queue_setup(struct rte_dma_dev *dmadev,
                      uint16_t vchan,
                      __rte_unused const struct rte_dma_vchan_conf *conf,
                      __rte_unused uint32_t conf_sz)
{
        struct fsl_qdma_engine *fsl_qdma = dmadev->data->dev_private;

        return dpaa_get_channel(fsl_qdma, vchan);
}

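/*
 * These callbacks are reached through the generic rte_dma API.  A minimal
 * application-side sketch (assuming this device probed as dmadev id 0 --
 * the id is hypothetical -- and a single mem-to-mem vchan):
 *
 *      struct rte_dma_conf dconf = { .nb_vchans = 1 };
 *      struct rte_dma_vchan_conf qconf = {
 *              .direction = RTE_DMA_DIR_MEM_TO_MEM,
 *              .nb_desc = 64,
 *      };
 *      int dev_id = 0;              (hypothetical device id)
 *      rte_dma_configure(dev_id, &dconf);
 *      rte_dma_vchan_setup(dev_id, 0, &qconf);
 *      rte_dma_start(dev_id);
 */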
static struct rte_dma_dev_ops dpaa_qdma_ops = {
        .dev_info_get             = dpaa_info_get,
        .dev_configure            = dpaa_qdma_configure,
        .dev_start                = dpaa_qdma_start,
        .dev_close                = dpaa_qdma_close,
        .vchan_setup              = dpaa_qdma_queue_setup,
};

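/*
 * Map the qDMA CCSR register space from /dev/mem, carve out the status and
 * block register bases, allocate the status and command queues, bind every
 * channel to a queue and program the hardware.
 */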
static int
dpaa_qdma_init(struct rte_dma_dev *dmadev)
{
        struct fsl_qdma_engine *fsl_qdma = dmadev->data->dev_private;
        struct fsl_qdma_chan *fsl_chan;
        uint64_t phys_addr;
        unsigned int len;
        int ccsr_qdma_fd;
        int regs_size;
        int ret;
        u32 i;

        fsl_qdma->desc_allocated = 0;
        fsl_qdma->n_chans = VIRT_CHANNELS;
        fsl_qdma->n_queues = QDMA_QUEUES;
        fsl_qdma->num_blocks = QDMA_BLOCKS;
        fsl_qdma->block_offset = QDMA_BLOCK_OFFSET;

        len = sizeof(*fsl_chan) * fsl_qdma->n_chans;
        fsl_qdma->chans = rte_zmalloc("qdma: fsl chans", len, 0);
        if (!fsl_qdma->chans)
                return -1;

        len = sizeof(struct fsl_qdma_queue *) * fsl_qdma->num_blocks;
        fsl_qdma->status = rte_zmalloc("qdma: fsl status", len, 0);
        if (!fsl_qdma->status) {
                rte_free(fsl_qdma->chans);
                return -1;
        }

        for (i = 0; i < fsl_qdma->num_blocks; i++) {
                rte_atomic32_init(&wait_task[i]);
                fsl_qdma->status[i] = fsl_qdma_prep_status_queue();
                if (!fsl_qdma->status[i])
                        goto err;
        }

        ccsr_qdma_fd = open("/dev/mem", O_RDWR);
        if (unlikely(ccsr_qdma_fd < 0)) {
                DPAA_QDMA_ERR("Cannot open /dev/mem for qDMA CCSR map");
                goto err;
        }

        regs_size = fsl_qdma->block_offset * (fsl_qdma->num_blocks + 2);
        phys_addr = QDMA_CCSR_BASE;
        fsl_qdma->ctrl_base = mmap(NULL, regs_size, PROT_READ |
                                         PROT_WRITE, MAP_SHARED,
                                         ccsr_qdma_fd, phys_addr);

        close(ccsr_qdma_fd);
        if (fsl_qdma->ctrl_base == MAP_FAILED) {
                DPAA_QDMA_ERR("Cannot map qDMA CCSR base: phys: %08" PRIx64
                       " size: %d\n", phys_addr, regs_size);
                goto err;
        }

        fsl_qdma->status_base = fsl_qdma->ctrl_base + QDMA_BLOCK_OFFSET;
        fsl_qdma->block_base = fsl_qdma->status_base + QDMA_BLOCK_OFFSET;

        fsl_qdma->queue = fsl_qdma_alloc_queue_resources(fsl_qdma);
        if (!fsl_qdma->queue) {
                munmap(fsl_qdma->ctrl_base, regs_size);
                goto err;
        }

        for (i = 0; i < fsl_qdma->n_chans; i++) {
                struct fsl_qdma_chan *fsl_chan = &fsl_qdma->chans[i];

                fsl_chan->qdma = fsl_qdma;
                fsl_chan->queue = fsl_qdma->queue + i % (fsl_qdma->n_queues *
                                                        fsl_qdma->num_blocks);
                fsl_chan->free = true;
        }

        ret = fsl_qdma_reg_init(fsl_qdma);
        if (ret) {
                DPAA_QDMA_ERR("Cannot initialize the qDMA engine.\n");
                munmap(fsl_qdma->ctrl_base, regs_size);
                goto err;
        }

        return 0;

err:
        rte_free(fsl_qdma->chans);
        rte_free(fsl_qdma->status);

        return -1;
}

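/*
 * DPAA bus probe: allocate a dmadev for this device, hook up the ops and
 * bring up the qDMA engine.
 */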
static int
dpaa_qdma_probe(__rte_unused struct rte_dpaa_driver *dpaa_drv,
                struct rte_dpaa_device *dpaa_dev)
{
        struct rte_dma_dev *dmadev;
        int ret;

        dmadev = rte_dma_pmd_allocate(dpaa_dev->device.name,
                                      rte_socket_id(),
                                      sizeof(struct fsl_qdma_engine));
        if (!dmadev) {
                DPAA_QDMA_ERR("Unable to allocate dmadevice");
                return -EINVAL;
        }

        dpaa_dev->dmadev = dmadev;
        dmadev->dev_ops = &dpaa_qdma_ops;
        dmadev->device = &dpaa_dev->device;
        dmadev->fp_obj->dev_private = dmadev->data->dev_private;

        /* Invoke PMD device initialization function */
        ret = dpaa_qdma_init(dmadev);
        if (ret) {
                (void)rte_dma_pmd_release(dpaa_dev->device.name);
                return ret;
        }

        dmadev->state = RTE_DMA_DEV_READY;
        return 0;
}

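/*
 * DPAA bus remove: release every channel still in use, free the per-engine
 * state and unregister the dmadev.
 */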
static int
dpaa_qdma_remove(struct rte_dpaa_device *dpaa_dev)
{
        struct rte_dma_dev *dmadev = dpaa_dev->dmadev;
        struct fsl_qdma_engine *fsl_qdma = dmadev->data->dev_private;
        int i = 0, max = QDMA_QUEUES * QDMA_BLOCKS;

        for (i = 0; i < max; i++) {
                struct fsl_qdma_chan *fsl_chan = &fsl_qdma->chans[i];

                if (!fsl_chan->free)
                        dma_release(fsl_chan);
        }

        rte_free(fsl_qdma->status);
        rte_free(fsl_qdma->chans);

        (void)rte_dma_pmd_release(dpaa_dev->device.name);

        return 0;
}

static struct rte_dpaa_driver rte_dpaa_qdma_pmd = {
        .drv_type = FSL_DPAA_QDMA,
        .probe = dpaa_qdma_probe,
        .remove = dpaa_qdma_remove,
};

RTE_PMD_REGISTER_DPAA(dpaa_qdma, rte_dpaa_qdma_pmd);
RTE_LOG_REGISTER_DEFAULT(dpaa_qdma_logtype, INFO);