/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2021 NXP
 */

#include <rte_dpaa_bus.h>
#include <rte_dmadev_pmd.h>

#include "dpaa_qdma.h"
#include "dpaa_qdma_logs.h"

static inline void
qdma_desc_addr_set64(struct fsl_qdma_format *ccdf, u64 addr)
{
        ccdf->addr_hi = upper_32_bits(addr);
        ccdf->addr_lo = rte_cpu_to_le_32(lower_32_bits(addr));
}

static inline u64
qdma_ccdf_get_queue(const struct fsl_qdma_format *ccdf)
{
        return ccdf->cfg8b_w1 & 0xff;
}

static inline int
qdma_ccdf_get_offset(const struct fsl_qdma_format *ccdf)
{
        return (rte_le_to_cpu_32(ccdf->cfg) & QDMA_CCDF_MASK)
                >> QDMA_CCDF_OFFSET;
}

static inline void
qdma_ccdf_set_format(struct fsl_qdma_format *ccdf, int offset)
{
        ccdf->cfg = rte_cpu_to_le_32(QDMA_CCDF_FOTMAT | offset);
}

static inline int
qdma_ccdf_get_status(const struct fsl_qdma_format *ccdf)
{
        return (rte_le_to_cpu_32(ccdf->status) & QDMA_CCDF_MASK)
                >> QDMA_CCDF_STATUS;
}

static inline void
qdma_ccdf_set_ser(struct fsl_qdma_format *ccdf, int status)
{
        ccdf->status = rte_cpu_to_le_32(QDMA_CCDF_SER | status);
}

static inline void
qdma_csgf_set_len(struct fsl_qdma_format *csgf, int len)
{
        csgf->cfg = rte_cpu_to_le_32(len & QDMA_SG_LEN_MASK);
}

static inline void
qdma_csgf_set_f(struct fsl_qdma_format *csgf, int len)
{
        csgf->cfg = rte_cpu_to_le_32(QDMA_SG_FIN | (len & QDMA_SG_LEN_MASK));
}

static inline int
ilog2(int x)
{
        int log = 0;

        x >>= 1;

        while (x) {
                log++;
                x >>= 1;
        }
        return log;
}

static u32
qdma_readl(void *addr)
{
        return QDMA_IN(addr);
}

static void
qdma_writel(u32 val, void *addr)
{
        QDMA_OUT(addr, val);
}

static u32
qdma_readl_be(void *addr)
{
        return QDMA_IN_BE(addr);
}

static void
qdma_writel_be(u32 val, void *addr)
{
        QDMA_OUT_BE(addr, val);
}

static void
*dma_pool_alloc(int size, int aligned, dma_addr_t *phy_addr)
{
        void *virt_addr;

        virt_addr = rte_malloc("dma pool alloc", size, aligned);
        if (!virt_addr)
                return NULL;

        *phy_addr = rte_mem_virt2iova(virt_addr);

        return virt_addr;
}

static void
dma_pool_free(void *addr)
{
        rte_free(addr);
}

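/*
 * Release the queue's completion descriptor pools once the last channel
 * using the queue is freed, then drop the engine's allocation count.
 */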
static void
fsl_qdma_free_chan_resources(struct fsl_qdma_chan *fsl_chan)
{
        struct fsl_qdma_queue *fsl_queue = fsl_chan->queue;
        struct fsl_qdma_engine *fsl_qdma = fsl_chan->qdma;
        struct fsl_qdma_comp *comp_temp, *_comp_temp;
        int id;

        if (--fsl_queue->count)
                goto finally;

        id = (fsl_qdma->block_base - fsl_queue->block_base) /
              fsl_qdma->block_offset;

        while (rte_atomic32_read(&wait_task[id]) == 1)
                rte_delay_us(QDMA_DELAY);

        list_for_each_entry_safe(comp_temp, _comp_temp,
                                 &fsl_queue->comp_used, list) {
                list_del(&comp_temp->list);
                dma_pool_free(comp_temp->virt_addr);
                dma_pool_free(comp_temp->desc_virt_addr);
                rte_free(comp_temp);
        }

        list_for_each_entry_safe(comp_temp, _comp_temp,
                                 &fsl_queue->comp_free, list) {
                list_del(&comp_temp->list);
                dma_pool_free(comp_temp->virt_addr);
                dma_pool_free(comp_temp->desc_virt_addr);
                rte_free(comp_temp);
        }

finally:
        fsl_qdma->desc_allocated--;
}

static void
fsl_qdma_comp_fill_memcpy(struct fsl_qdma_comp *fsl_comp,
                                      dma_addr_t dst, dma_addr_t src, u32 len)
{
        struct fsl_qdma_format *csgf_src, *csgf_dest;

        /* Note: the command table (fsl_comp->virt_addr) is filled directly
         * into the queue's command descriptors while enqueuing the
         * descriptor; see fsl_qdma_enqueue_desc.
         * The frame list table (virt_addr + 1) and the source/destination
         * descriptor table (fsl_comp->desc_virt_addr and
         * fsl_comp->desc_virt_addr + 1) are set up on the control path in
         * fsl_qdma_pre_request_enqueue_comp_sd_desc.
         */
        csgf_src = (struct fsl_qdma_format *)fsl_comp->virt_addr + 2;
        csgf_dest = (struct fsl_qdma_format *)fsl_comp->virt_addr + 3;

        /* Status notification is enqueued to the status queue. */
        qdma_desc_addr_set64(csgf_src, src);
        qdma_csgf_set_len(csgf_src, len);
        qdma_desc_addr_set64(csgf_dest, dst);
        qdma_csgf_set_len(csgf_dest, len);
        /* This entry is the last entry. */
        qdma_csgf_set_f(csgf_dest, len);
}

/*
 * Pre-request command descriptor and compound S/G for enqueue.
 */
static int
fsl_qdma_pre_request_enqueue_comp_sd_desc(
                                        struct fsl_qdma_queue *queue,
                                        int size, int aligned)
{
        struct fsl_qdma_comp *comp_temp, *_comp_temp;
        struct fsl_qdma_sdf *sdf;
        struct fsl_qdma_ddf *ddf;
        struct fsl_qdma_format *csgf_desc;
        int i;

        for (i = 0; i < (int)(queue->n_cq + COMMAND_QUEUE_OVERFLOW); i++) {
                comp_temp = rte_zmalloc("qdma: comp temp",
                                        sizeof(*comp_temp), 0);
                if (!comp_temp)
                        return -ENOMEM;

                comp_temp->virt_addr =
                dma_pool_alloc(size, aligned, &comp_temp->bus_addr);
                if (!comp_temp->virt_addr) {
                        rte_free(comp_temp);
                        goto fail;
                }

                comp_temp->desc_virt_addr =
                dma_pool_alloc(size, aligned, &comp_temp->desc_bus_addr);
                if (!comp_temp->desc_virt_addr) {
                        rte_free(comp_temp->virt_addr);
                        rte_free(comp_temp);
                        goto fail;
                }

                memset(comp_temp->virt_addr, 0, FSL_QDMA_COMMAND_BUFFER_SIZE);
                memset(comp_temp->desc_virt_addr, 0,
                       FSL_QDMA_DESCRIPTOR_BUFFER_SIZE);

                csgf_desc = (struct fsl_qdma_format *)comp_temp->virt_addr + 1;
                sdf = (struct fsl_qdma_sdf *)comp_temp->desc_virt_addr;
                ddf = (struct fsl_qdma_ddf *)comp_temp->desc_virt_addr + 1;
                /* Compound command descriptor (frame list table) */
                qdma_desc_addr_set64(csgf_desc, comp_temp->desc_bus_addr);
                /* The length must be 32, the size of a compound S/G descriptor. */
                qdma_csgf_set_len(csgf_desc, 32);
                /* Descriptor buffer */
                sdf->cmd = rte_cpu_to_le_32(FSL_QDMA_CMD_RWTTYPE <<
                               FSL_QDMA_CMD_RWTTYPE_OFFSET);
                ddf->cmd = rte_cpu_to_le_32(FSL_QDMA_CMD_RWTTYPE <<
                               FSL_QDMA_CMD_RWTTYPE_OFFSET);
                ddf->cmd |= rte_cpu_to_le_32(FSL_QDMA_CMD_LWC <<
                                FSL_QDMA_CMD_LWC_OFFSET);

                list_add_tail(&comp_temp->list, &queue->comp_free);
        }

        return 0;

fail:
        list_for_each_entry_safe(comp_temp, _comp_temp,
                                 &queue->comp_free, list) {
                list_del(&comp_temp->list);
                rte_free(comp_temp->virt_addr);
                rte_free(comp_temp->desc_virt_addr);
                rte_free(comp_temp);
        }

        return -ENOMEM;
}

/*
 * Request a command descriptor for enqueue.
 */
static struct fsl_qdma_comp *
fsl_qdma_request_enqueue_desc(struct fsl_qdma_chan *fsl_chan)
{
        struct fsl_qdma_queue *queue = fsl_chan->queue;
        struct fsl_qdma_comp *comp_temp;

        if (!list_empty(&queue->comp_free)) {
                comp_temp = list_first_entry(&queue->comp_free,
                                             struct fsl_qdma_comp,
                                             list);
                list_del(&comp_temp->list);
                return comp_temp;
        }

        return NULL;
}

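/*
 * Allocate the per-block command queues (circular descriptor rings) and
 * initialize their bookkeeping fields. Returns the queue array, or NULL
 * on failure.
 */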
static struct fsl_qdma_queue
*fsl_qdma_alloc_queue_resources(struct fsl_qdma_engine *fsl_qdma)
{
        struct fsl_qdma_queue *queue_head, *queue_temp;
        int len, i, j;
        int queue_num;
        int blocks;
        unsigned int queue_size[FSL_QDMA_QUEUE_MAX];

        queue_num = fsl_qdma->n_queues;
        blocks = fsl_qdma->num_blocks;

        len = sizeof(*queue_head) * queue_num * blocks;
        queue_head = rte_zmalloc("qdma: queue head", len, 0);
        if (!queue_head)
                return NULL;

        for (i = 0; i < FSL_QDMA_QUEUE_MAX; i++)
                queue_size[i] = QDMA_QUEUE_SIZE;

        for (j = 0; j < blocks; j++) {
                for (i = 0; i < queue_num; i++) {
                        if (queue_size[i] > FSL_QDMA_CIRCULAR_DESC_SIZE_MAX ||
                            queue_size[i] < FSL_QDMA_CIRCULAR_DESC_SIZE_MIN) {
                                DPAA_QDMA_ERR("Invalid queue size.\n");
                                goto fail;
                        }
                        queue_temp = queue_head + i + (j * queue_num);

                        queue_temp->cq =
                        dma_pool_alloc(sizeof(struct fsl_qdma_format) *
                                       queue_size[i],
                                       sizeof(struct fsl_qdma_format) *
                                       queue_size[i], &queue_temp->bus_addr);

                        if (!queue_temp->cq)
                                goto fail;

                        memset(queue_temp->cq, 0x0, queue_size[i] *
                               sizeof(struct fsl_qdma_format));

                        queue_temp->block_base = fsl_qdma->block_base +
                                FSL_QDMA_BLOCK_BASE_OFFSET(fsl_qdma, j);
                        queue_temp->n_cq = queue_size[i];
                        queue_temp->id = i;
                        queue_temp->count = 0;
                        queue_temp->pending = 0;
                        queue_temp->virt_head = queue_temp->cq;
                        queue_temp->stats = (struct rte_dma_stats){0};
                }
        }
        return queue_head;

fail:
        for (j = 0; j < blocks; j++) {
                for (i = 0; i < queue_num; i++) {
                        queue_temp = queue_head + i + (j * queue_num);
                        dma_pool_free(queue_temp->cq);
                }
        }
        rte_free(queue_head);

        return NULL;
}

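/*
 * Allocate and initialize a per-block status (completion) queue.
 */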
static struct
fsl_qdma_queue *fsl_qdma_prep_status_queue(void)
{
        struct fsl_qdma_queue *status_head;
        unsigned int status_size;

        status_size = QDMA_STATUS_SIZE;
        if (status_size > FSL_QDMA_CIRCULAR_DESC_SIZE_MAX ||
            status_size < FSL_QDMA_CIRCULAR_DESC_SIZE_MIN) {
                DPAA_QDMA_ERR("Invalid status queue size.\n");
                return NULL;
        }

        status_head = rte_zmalloc("qdma: status head", sizeof(*status_head), 0);
        if (!status_head)
                return NULL;

        /* Buffer for queue commands */
        status_head->cq = dma_pool_alloc(sizeof(struct fsl_qdma_format) *
                                         status_size,
                                         sizeof(struct fsl_qdma_format) *
                                         status_size,
                                         &status_head->bus_addr);

        if (!status_head->cq) {
                rte_free(status_head);
                return NULL;
        }

        memset(status_head->cq, 0x0, status_size *
               sizeof(struct fsl_qdma_format));
        status_head->n_cq = status_size;
        status_head->virt_head = status_head->cq;

        return status_head;
}

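/*
 * Halt the qDMA engine: disable all command queues, wait for the engine
 * to drain (up to RETRIES polls), then disable the status queues and
 * clear any pending command queue interrupts.
 */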
static int
fsl_qdma_halt(struct fsl_qdma_engine *fsl_qdma)
{
        void *ctrl = fsl_qdma->ctrl_base;
        void *block;
        int i, count = RETRIES;
        unsigned int j;
        u32 reg;

        /* Disable the command queue and wait for idle state. */
        reg = qdma_readl(ctrl + FSL_QDMA_DMR);
        reg |= FSL_QDMA_DMR_DQD;
        qdma_writel(reg, ctrl + FSL_QDMA_DMR);
        for (j = 0; j < fsl_qdma->num_blocks; j++) {
                block = fsl_qdma->block_base +
                        FSL_QDMA_BLOCK_BASE_OFFSET(fsl_qdma, j);
                for (i = 0; i < FSL_QDMA_QUEUE_NUM_MAX; i++)
                        qdma_writel(0, block + FSL_QDMA_BCQMR(i));
        }
        while (true) {
                reg = qdma_readl(ctrl + FSL_QDMA_DSR);
                if (!(reg & FSL_QDMA_DSR_DB))
                        break;
                if (count-- < 0)
                        return -EBUSY;
                rte_delay_us(100);
        }

        for (j = 0; j < fsl_qdma->num_blocks; j++) {
                block = fsl_qdma->block_base +
                        FSL_QDMA_BLOCK_BASE_OFFSET(fsl_qdma, j);

                /* Disable status queue. */
                qdma_writel(0, block + FSL_QDMA_BSQMR);

                /*
                 * Clear the command queue interrupt detect register for
                 * all queues.
                 */
                qdma_writel(0xffffffff, block + FSL_QDMA_BCQIDR(0));
        }

        return 0;
}

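/*
 * Harvest up to nb_cpls completions from the block's status queue, recycle
 * the matching completion descriptors, and report the last completed index
 * (and, when requested, per-entry status codes). Returns the number of
 * completions harvested.
 */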
static int
fsl_qdma_queue_transfer_complete(struct fsl_qdma_engine *fsl_qdma,
                                 void *block, int id, const uint16_t nb_cpls,
                                 uint16_t *last_idx,
                                 enum rte_dma_status_code *status)
{
        struct fsl_qdma_queue *fsl_queue = fsl_qdma->queue;
        struct fsl_qdma_queue *fsl_status = fsl_qdma->status[id];
        struct fsl_qdma_queue *temp_queue;
        struct fsl_qdma_format *status_addr;
        struct fsl_qdma_comp *fsl_comp = NULL;
        u32 reg, i;
        int count = 0;

        while (count < nb_cpls) {
                reg = qdma_readl_be(block + FSL_QDMA_BSQSR);
                if (reg & FSL_QDMA_BSQSR_QE_BE)
                        return count;

                status_addr = fsl_status->virt_head;

                i = qdma_ccdf_get_queue(status_addr) +
                        id * fsl_qdma->n_queues;
                temp_queue = fsl_queue + i;
                fsl_comp = list_first_entry(&temp_queue->comp_used,
                                            struct fsl_qdma_comp,
                                            list);
                list_del(&fsl_comp->list);

                reg = qdma_readl_be(block + FSL_QDMA_BSQMR);
                reg |= FSL_QDMA_BSQMR_DI_BE;

                qdma_desc_addr_set64(status_addr, 0x0);
                fsl_status->virt_head++;
                if (fsl_status->virt_head == fsl_status->cq + fsl_status->n_cq)
                        fsl_status->virt_head = fsl_status->cq;
                qdma_writel_be(reg, block + FSL_QDMA_BSQMR);
                *last_idx = fsl_comp->index;
                if (status != NULL)
                        status[count] = RTE_DMA_STATUS_SUCCESSFUL;

                list_add_tail(&fsl_comp->list, &temp_queue->comp_free);
                count++;
        }
        return count;
}

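/*
 * Program the qDMA registers: halt the engine, point every command and
 * status queue at its descriptor ring, set queue sizes and thresholds,
 * apply the ERR010812 XOFF workaround, then re-enable the engine.
 */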
static int
fsl_qdma_reg_init(struct fsl_qdma_engine *fsl_qdma)
{
        struct fsl_qdma_queue *fsl_queue = fsl_qdma->queue;
        struct fsl_qdma_queue *temp;
        void *ctrl = fsl_qdma->ctrl_base;
        void *block;
        u32 i, j;
        u32 reg;
        int ret, val;

        /* Try to halt the qDMA engine first. */
        ret = fsl_qdma_halt(fsl_qdma);
        if (ret) {
                DPAA_QDMA_ERR("DMA halt failed!");
                return ret;
        }

        for (j = 0; j < fsl_qdma->num_blocks; j++) {
                block = fsl_qdma->block_base +
                        FSL_QDMA_BLOCK_BASE_OFFSET(fsl_qdma, j);
                for (i = 0; i < fsl_qdma->n_queues; i++) {
                        temp = fsl_queue + i + (j * fsl_qdma->n_queues);
                        /*
                         * Initialize the command queue registers to point
                         * to the first command descriptor in memory:
                         * Dequeue Pointer Address Registers and
                         * Enqueue Pointer Address Registers.
                         */

                        qdma_writel(lower_32_bits(temp->bus_addr),
                                    block + FSL_QDMA_BCQDPA_SADDR(i));
                        qdma_writel(upper_32_bits(temp->bus_addr),
                                    block + FSL_QDMA_BCQEDPA_SADDR(i));
                        qdma_writel(lower_32_bits(temp->bus_addr),
                                    block + FSL_QDMA_BCQEPA_SADDR(i));
                        qdma_writel(upper_32_bits(temp->bus_addr),
                                    block + FSL_QDMA_BCQEEPA_SADDR(i));

                        /* Initialize the queue mode. */
                        reg = FSL_QDMA_BCQMR_EN;
                        reg |= FSL_QDMA_BCQMR_CD_THLD(ilog2(temp->n_cq) - 4);
                        reg |= FSL_QDMA_BCQMR_CQ_SIZE(ilog2(temp->n_cq) - 6);
                        qdma_writel(reg, block + FSL_QDMA_BCQMR(i));
                }

                /*
                 * Workaround for erratum ERR010812: XOFF must be enabled
                 * to avoid enqueue rejections, so set SQCCMR ENTER_WM to
                 * 0x20.
                 */

                qdma_writel(FSL_QDMA_SQCCMR_ENTER_WM,
                            block + FSL_QDMA_SQCCMR);

                /*
                 * Initialize the status queue registers to point to the
                 * first command descriptor in memory:
                 * Dequeue Pointer Address Registers and
                 * Enqueue Pointer Address Registers.
                 */

                qdma_writel(
                            upper_32_bits(fsl_qdma->status[j]->bus_addr),
                            block + FSL_QDMA_SQEEPAR);
                qdma_writel(
                            lower_32_bits(fsl_qdma->status[j]->bus_addr),
                            block + FSL_QDMA_SQEPAR);
                qdma_writel(
                            upper_32_bits(fsl_qdma->status[j]->bus_addr),
                            block + FSL_QDMA_SQEDPAR);
                qdma_writel(
                            lower_32_bits(fsl_qdma->status[j]->bus_addr),
                            block + FSL_QDMA_SQDPAR);
                /* Disable status queue interrupts. */

                qdma_writel(0x0, block + FSL_QDMA_BCQIER(0));
                qdma_writel(0x0, block + FSL_QDMA_BSQICR);
                qdma_writel(0x0, block + FSL_QDMA_CQIER);

                /* Initialize the status queue mode. */
                reg = FSL_QDMA_BSQMR_EN;
                val = ilog2(fsl_qdma->status[j]->n_cq) - 6;
                reg |= FSL_QDMA_BSQMR_CQ_SIZE(val);
                qdma_writel(reg, block + FSL_QDMA_BSQMR);
        }

        reg = qdma_readl(ctrl + FSL_QDMA_DMR);
        reg &= ~FSL_QDMA_DMR_DQD;
        qdma_writel(reg, ctrl + FSL_QDMA_DMR);

        return 0;
}

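/*
 * Take a free completion descriptor from the channel's queue and fill its
 * compound S/G entries for a single memcpy transfer. Returns NULL when no
 * descriptor is available.
 */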
static void *
fsl_qdma_prep_memcpy(void *fsl_chan, dma_addr_t dst,
                           dma_addr_t src, size_t len,
                           void *call_back,
                           void *param)
{
        struct fsl_qdma_comp *fsl_comp;

        fsl_comp =
        fsl_qdma_request_enqueue_desc((struct fsl_qdma_chan *)fsl_chan);
        if (!fsl_comp)
                return NULL;

        fsl_comp->qchan = fsl_chan;
        fsl_comp->call_back_func = call_back;
        fsl_comp->params = param;

        fsl_qdma_comp_fill_memcpy(fsl_comp, dst, src, len);
        return (void *)fsl_comp;
}

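/*
 * Write the prepared completion descriptor into the hardware command queue.
 * With RTE_DMA_OP_FLAG_SUBMIT the doorbell is rung immediately; otherwise
 * the descriptor stays pending until dpaa_qdma_submit(). Returns the ring
 * index of the descriptor, or -1 if the queue is full or XOFF is asserted.
 */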
static int
fsl_qdma_enqueue_desc(struct fsl_qdma_chan *fsl_chan,
                                  struct fsl_qdma_comp *fsl_comp,
                                  uint64_t flags)
{
        struct fsl_qdma_queue *fsl_queue = fsl_chan->queue;
        void *block = fsl_queue->block_base;
        struct fsl_qdma_format *ccdf;
        u32 reg;

        /* Retrieve and store the register value in big-endian
         * to avoid swapping the bits.
         */
        reg = qdma_readl_be(block +
                         FSL_QDMA_BCQSR(fsl_queue->id));
        if (reg & (FSL_QDMA_BCQSR_QF_XOFF_BE))
                return -1;

        /* Fill the descriptor command table. */
        ccdf = (struct fsl_qdma_format *)fsl_queue->virt_head;
        qdma_desc_addr_set64(ccdf, fsl_comp->bus_addr + 16);
        qdma_ccdf_set_format(ccdf, qdma_ccdf_get_offset(fsl_comp->virt_addr));
        qdma_ccdf_set_ser(ccdf, qdma_ccdf_get_status(fsl_comp->virt_addr));
        fsl_comp->index = fsl_queue->virt_head - fsl_queue->cq;
        fsl_queue->virt_head++;

        if (fsl_queue->virt_head == fsl_queue->cq + fsl_queue->n_cq)
                fsl_queue->virt_head = fsl_queue->cq;

        list_add_tail(&fsl_comp->list, &fsl_queue->comp_used);

        if (flags == RTE_DMA_OP_FLAG_SUBMIT) {
                reg = qdma_readl_be(block + FSL_QDMA_BCQMR(fsl_queue->id));
                reg |= FSL_QDMA_BCQMR_EI_BE;
                qdma_writel_be(reg, block + FSL_QDMA_BCQMR(fsl_queue->id));
                fsl_queue->stats.submitted++;
        } else {
                fsl_queue->pending++;
        }
        return fsl_comp->index;
}

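/*
 * The first user of a queue pre-allocates its pool of completion and
 * compound S/G descriptors; later users only bump the reference count.
 */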
static int
fsl_qdma_alloc_chan_resources(struct fsl_qdma_chan *fsl_chan)
{
        struct fsl_qdma_queue *fsl_queue = fsl_chan->queue;
        struct fsl_qdma_engine *fsl_qdma = fsl_chan->qdma;
        int ret;

        if (fsl_queue->count++)
                goto finally;

        INIT_LIST_HEAD(&fsl_queue->comp_free);
        INIT_LIST_HEAD(&fsl_queue->comp_used);

        ret = fsl_qdma_pre_request_enqueue_comp_sd_desc(fsl_queue,
                                FSL_QDMA_COMMAND_BUFFER_SIZE, 64);
        if (ret) {
                DPAA_QDMA_ERR(
                        "failed to alloc dma buffer for comp descriptor\n");
                goto exit;
        }

finally:
        return fsl_qdma->desc_allocated++;

exit:
        return -ENOMEM;
}

static int
dpaa_info_get(const struct rte_dma_dev *dev, struct rte_dma_info *dev_info,
              uint32_t info_sz)
{
#define DPAADMA_MAX_DESC        64
#define DPAADMA_MIN_DESC        64

        RTE_SET_USED(dev);
        RTE_SET_USED(info_sz);

        dev_info->dev_capa = RTE_DMA_CAPA_MEM_TO_MEM |
                             RTE_DMA_CAPA_MEM_TO_DEV |
                             RTE_DMA_CAPA_DEV_TO_DEV |
                             RTE_DMA_CAPA_DEV_TO_MEM |
                             RTE_DMA_CAPA_SILENT |
                             RTE_DMA_CAPA_OPS_COPY;
        dev_info->max_vchans = 1;
        dev_info->max_desc = DPAADMA_MAX_DESC;
        dev_info->min_desc = DPAADMA_MIN_DESC;

        return 0;
}

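/*
 * Map a virtual channel onto the next block's first hardware channel and
 * allocate its queue resources; returns a negative value if that channel
 * is already in use or allocation fails.
 */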
static int
dpaa_get_channel(struct fsl_qdma_engine *fsl_qdma, uint16_t vchan)
{
        u32 i, start, end;
        int ret;

        start = fsl_qdma->free_block_id * QDMA_QUEUES;
        fsl_qdma->free_block_id++;

        end = start + 1;
        for (i = start; i < end; i++) {
                struct fsl_qdma_chan *fsl_chan = &fsl_qdma->chans[i];

                if (fsl_chan->free) {
                        fsl_chan->free = false;
                        ret = fsl_qdma_alloc_chan_resources(fsl_chan);
                        if (ret)
                                return ret;

                        fsl_qdma->vchan_map[vchan] = i;
                        return 0;
                }
        }

        return -1;
}

static void
dma_release(void *fsl_chan)
{
        ((struct fsl_qdma_chan *)fsl_chan)->free = true;
        fsl_qdma_free_chan_resources((struct fsl_qdma_chan *)fsl_chan);
}

static int
dpaa_qdma_configure(__rte_unused struct rte_dma_dev *dmadev,
                    __rte_unused const struct rte_dma_conf *dev_conf,
                    __rte_unused uint32_t conf_sz)
{
        return 0;
}

static int
dpaa_qdma_start(__rte_unused struct rte_dma_dev *dev)
{
        return 0;
}

static int
dpaa_qdma_close(__rte_unused struct rte_dma_dev *dev)
{
        return 0;
}

static int
dpaa_qdma_queue_setup(struct rte_dma_dev *dmadev,
                      uint16_t vchan,
                      __rte_unused const struct rte_dma_vchan_conf *conf,
                      __rte_unused uint32_t conf_sz)
{
        struct fsl_qdma_engine *fsl_qdma = dmadev->data->dev_private;

        return dpaa_get_channel(fsl_qdma, vchan);
}

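/* Ring the doorbell for every descriptor still pending on the vchan's queue. */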
static int
dpaa_qdma_submit(void *dev_private, uint16_t vchan)
{
        struct fsl_qdma_engine *fsl_qdma = (struct fsl_qdma_engine *)dev_private;
        struct fsl_qdma_chan *fsl_chan =
                &fsl_qdma->chans[fsl_qdma->vchan_map[vchan]];
        struct fsl_qdma_queue *fsl_queue = fsl_chan->queue;
        void *block = fsl_queue->block_base;
        u32 reg;

        while (fsl_queue->pending) {
                reg = qdma_readl_be(block + FSL_QDMA_BCQMR(fsl_queue->id));
                reg |= FSL_QDMA_BCQMR_EI_BE;
                qdma_writel_be(reg, block + FSL_QDMA_BCQMR(fsl_queue->id));
                fsl_queue->pending--;
                fsl_queue->stats.submitted++;
        }

        return 0;
}

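/*
 * dmadev copy callback: prepare a memcpy descriptor for the vchan and
 * enqueue it, submitting immediately when RTE_DMA_OP_FLAG_SUBMIT is set.
 */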
static int
dpaa_qdma_enqueue(void *dev_private, uint16_t vchan,
                  rte_iova_t src, rte_iova_t dst,
                  uint32_t length, uint64_t flags)
{
        struct fsl_qdma_engine *fsl_qdma = (struct fsl_qdma_engine *)dev_private;
        struct fsl_qdma_chan *fsl_chan =
                &fsl_qdma->chans[fsl_qdma->vchan_map[vchan]];
        int ret;

        void *fsl_comp = NULL;

        fsl_comp = fsl_qdma_prep_memcpy(fsl_chan,
                        (dma_addr_t)dst, (dma_addr_t)src,
                        length, NULL, NULL);
        if (!fsl_comp) {
                DPAA_QDMA_DP_DEBUG("fsl_comp is NULL\n");
                return -1;
        }
        ret = fsl_qdma_enqueue_desc(fsl_chan, fsl_comp, flags);

        return ret;
}

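/*
 * dmadev completed_status callback: report and clear any engine error
 * registers, then harvest completions for the vchan's block.
 */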
static uint16_t
dpaa_qdma_dequeue_status(void *dev_private, uint16_t vchan,
                         const uint16_t nb_cpls, uint16_t *last_idx,
                         enum rte_dma_status_code *st)
{
        struct fsl_qdma_engine *fsl_qdma = (struct fsl_qdma_engine *)dev_private;
        int id = (int)((fsl_qdma->vchan_map[vchan]) / QDMA_QUEUES);
        void *block;
        int intr;
        void *status = fsl_qdma->status_base;
        struct fsl_qdma_chan *fsl_chan =
                &fsl_qdma->chans[fsl_qdma->vchan_map[vchan]];
        struct fsl_qdma_queue *fsl_queue = fsl_chan->queue;

        intr = qdma_readl_be(status + FSL_QDMA_DEDR);
        if (intr) {
                DPAA_QDMA_ERR("DMA transaction error! %x\n", intr);
                intr = qdma_readl(status + FSL_QDMA_DECFDW0R);
                DPAA_QDMA_INFO("reg FSL_QDMA_DECFDW0R %x\n", intr);
                intr = qdma_readl(status + FSL_QDMA_DECFDW1R);
                DPAA_QDMA_INFO("reg FSL_QDMA_DECFDW1R %x\n", intr);
                intr = qdma_readl(status + FSL_QDMA_DECFDW2R);
                DPAA_QDMA_INFO("reg FSL_QDMA_DECFDW2R %x\n", intr);
                intr = qdma_readl(status + FSL_QDMA_DECFDW3R);
                DPAA_QDMA_INFO("reg FSL_QDMA_DECFDW3R %x\n", intr);
                intr = qdma_readl(status + FSL_QDMA_DECFQIDR);
                DPAA_QDMA_INFO("reg FSL_QDMA_DECFQIDR %x\n", intr);
                intr = qdma_readl(status + FSL_QDMA_DECBR);
                DPAA_QDMA_INFO("reg FSL_QDMA_DECBR %x\n", intr);
                qdma_writel(0xffffffff,
                            status + FSL_QDMA_DEDR);
                intr = qdma_readl(status + FSL_QDMA_DEDR);
                fsl_queue->stats.errors++;
        }

        block = fsl_qdma->block_base +
                FSL_QDMA_BLOCK_BASE_OFFSET(fsl_qdma, id);

        intr = fsl_qdma_queue_transfer_complete(fsl_qdma, block, id, nb_cpls,
                                                last_idx, st);
        fsl_queue->stats.completed += intr;

        return intr;
}

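/*
 * dmadev completed callback: same as dpaa_qdma_dequeue_status() but only
 * flags errors through has_error instead of returning status codes.
 */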
static uint16_t
dpaa_qdma_dequeue(void *dev_private,
                  uint16_t vchan, const uint16_t nb_cpls,
                  uint16_t *last_idx, bool *has_error)
{
        struct fsl_qdma_engine *fsl_qdma = (struct fsl_qdma_engine *)dev_private;
        int id = (int)((fsl_qdma->vchan_map[vchan]) / QDMA_QUEUES);
        void *block;
        int intr;
        void *status = fsl_qdma->status_base;
        struct fsl_qdma_chan *fsl_chan =
                &fsl_qdma->chans[fsl_qdma->vchan_map[vchan]];
        struct fsl_qdma_queue *fsl_queue = fsl_chan->queue;

        intr = qdma_readl_be(status + FSL_QDMA_DEDR);
        if (intr) {
                DPAA_QDMA_ERR("DMA transaction error! %x\n", intr);
                intr = qdma_readl(status + FSL_QDMA_DECFDW0R);
                DPAA_QDMA_INFO("reg FSL_QDMA_DECFDW0R %x\n", intr);
                intr = qdma_readl(status + FSL_QDMA_DECFDW1R);
                DPAA_QDMA_INFO("reg FSL_QDMA_DECFDW1R %x\n", intr);
                intr = qdma_readl(status + FSL_QDMA_DECFDW2R);
                DPAA_QDMA_INFO("reg FSL_QDMA_DECFDW2R %x\n", intr);
                intr = qdma_readl(status + FSL_QDMA_DECFDW3R);
                DPAA_QDMA_INFO("reg FSL_QDMA_DECFDW3R %x\n", intr);
                intr = qdma_readl(status + FSL_QDMA_DECFQIDR);
                DPAA_QDMA_INFO("reg FSL_QDMA_DECFQIDR %x\n", intr);
                intr = qdma_readl(status + FSL_QDMA_DECBR);
                DPAA_QDMA_INFO("reg FSL_QDMA_DECBR %x\n", intr);
                qdma_writel(0xffffffff,
                            status + FSL_QDMA_DEDR);
                intr = qdma_readl(status + FSL_QDMA_DEDR);
                *has_error = true;
                fsl_queue->stats.errors++;
        }

        block = fsl_qdma->block_base +
                FSL_QDMA_BLOCK_BASE_OFFSET(fsl_qdma, id);

        intr = fsl_qdma_queue_transfer_complete(fsl_qdma, block, id, nb_cpls,
                                                last_idx, NULL);
        fsl_queue->stats.completed += intr;

        return intr;
}

static int
dpaa_qdma_stats_get(const struct rte_dma_dev *dmadev, uint16_t vchan,
                    struct rte_dma_stats *rte_stats, uint32_t size)
{
        struct fsl_qdma_engine *fsl_qdma = dmadev->data->dev_private;
        struct fsl_qdma_chan *fsl_chan =
                &fsl_qdma->chans[fsl_qdma->vchan_map[vchan]];
        struct fsl_qdma_queue *fsl_queue = fsl_chan->queue;
        struct rte_dma_stats *stats = &fsl_queue->stats;

        /* The caller's buffer must hold a full rte_dma_stats structure. */
        if (size < sizeof(struct rte_dma_stats))
                return -EINVAL;
        if (rte_stats == NULL)
                return -EINVAL;

        *rte_stats = *stats;

        return 0;
}

static int
dpaa_qdma_stats_reset(struct rte_dma_dev *dmadev, uint16_t vchan)
{
        struct fsl_qdma_engine *fsl_qdma = dmadev->data->dev_private;
        struct fsl_qdma_chan *fsl_chan =
                &fsl_qdma->chans[fsl_qdma->vchan_map[vchan]];
        struct fsl_qdma_queue *fsl_queue = fsl_chan->queue;

        fsl_queue->stats = (struct rte_dma_stats){0};

        return 0;
}

static struct rte_dma_dev_ops dpaa_qdma_ops = {
        .dev_info_get             = dpaa_info_get,
        .dev_configure            = dpaa_qdma_configure,
        .dev_start                = dpaa_qdma_start,
        .dev_close                = dpaa_qdma_close,
        .vchan_setup              = dpaa_qdma_queue_setup,
        .stats_get                = dpaa_qdma_stats_get,
        .stats_reset              = dpaa_qdma_stats_reset,
};

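/*
 * Map the qDMA CCSR register space via /dev/mem, allocate the channel,
 * status and command queue structures, and program the engine registers.
 */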
static int
dpaa_qdma_init(struct rte_dma_dev *dmadev)
{
        struct fsl_qdma_engine *fsl_qdma = dmadev->data->dev_private;
        struct fsl_qdma_chan *fsl_chan;
        uint64_t phys_addr;
        unsigned int len;
        int ccsr_qdma_fd;
        int regs_size;
        int ret;
        u32 i;

        fsl_qdma->desc_allocated = 0;
        fsl_qdma->n_chans = VIRT_CHANNELS;
        fsl_qdma->n_queues = QDMA_QUEUES;
        fsl_qdma->num_blocks = QDMA_BLOCKS;
        fsl_qdma->block_offset = QDMA_BLOCK_OFFSET;

        len = sizeof(*fsl_chan) * fsl_qdma->n_chans;
        fsl_qdma->chans = rte_zmalloc("qdma: fsl chans", len, 0);
        if (!fsl_qdma->chans)
                return -1;

        len = sizeof(struct fsl_qdma_queue *) * fsl_qdma->num_blocks;
        fsl_qdma->status = rte_zmalloc("qdma: fsl status", len, 0);
        if (!fsl_qdma->status) {
                rte_free(fsl_qdma->chans);
                return -1;
        }

        for (i = 0; i < fsl_qdma->num_blocks; i++) {
                rte_atomic32_init(&wait_task[i]);
                fsl_qdma->status[i] = fsl_qdma_prep_status_queue();
                if (!fsl_qdma->status[i])
                        goto err;
        }

        ccsr_qdma_fd = open("/dev/mem", O_RDWR);
        if (unlikely(ccsr_qdma_fd < 0)) {
                DPAA_QDMA_ERR("Cannot open /dev/mem for qdma CCSR map");
                goto err;
        }

        regs_size = fsl_qdma->block_offset * (fsl_qdma->num_blocks + 2);
        phys_addr = QDMA_CCSR_BASE;
        fsl_qdma->ctrl_base = mmap(NULL, regs_size, PROT_READ |
                                         PROT_WRITE, MAP_SHARED,
                                         ccsr_qdma_fd, phys_addr);

        close(ccsr_qdma_fd);
        if (fsl_qdma->ctrl_base == MAP_FAILED) {
                DPAA_QDMA_ERR("Cannot map qdma CCSR base: phys: %08" PRIx64
                       " size %d\n", phys_addr, regs_size);
                goto err;
        }

        fsl_qdma->status_base = fsl_qdma->ctrl_base + QDMA_BLOCK_OFFSET;
        fsl_qdma->block_base = fsl_qdma->status_base + QDMA_BLOCK_OFFSET;

        fsl_qdma->queue = fsl_qdma_alloc_queue_resources(fsl_qdma);
        if (!fsl_qdma->queue) {
                munmap(fsl_qdma->ctrl_base, regs_size);
                goto err;
        }

        for (i = 0; i < fsl_qdma->n_chans; i++) {
                struct fsl_qdma_chan *fsl_chan = &fsl_qdma->chans[i];

                fsl_chan->qdma = fsl_qdma;
                fsl_chan->queue = fsl_qdma->queue + i % (fsl_qdma->n_queues *
                                                        fsl_qdma->num_blocks);
                fsl_chan->free = true;
        }

        ret = fsl_qdma_reg_init(fsl_qdma);
        if (ret) {
                DPAA_QDMA_ERR("Can't initialize the qDMA engine.\n");
                munmap(fsl_qdma->ctrl_base, regs_size);
                goto err;
        }

        return 0;

err:
        rte_free(fsl_qdma->chans);
        rte_free(fsl_qdma->status);

        return -1;
}

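/*
 * DPAA bus probe: allocate the dmadev, wire up the operation and fast-path
 * callbacks, and initialize the qDMA engine.
 */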
static int
dpaa_qdma_probe(__rte_unused struct rte_dpaa_driver *dpaa_drv,
                struct rte_dpaa_device *dpaa_dev)
{
        struct rte_dma_dev *dmadev;
        int ret;

        dmadev = rte_dma_pmd_allocate(dpaa_dev->device.name,
                                      rte_socket_id(),
                                      sizeof(struct fsl_qdma_engine));
        if (!dmadev) {
                DPAA_QDMA_ERR("Unable to allocate dmadevice");
                return -EINVAL;
        }

        dpaa_dev->dmadev = dmadev;
        dmadev->dev_ops = &dpaa_qdma_ops;
        dmadev->device = &dpaa_dev->device;
        dmadev->fp_obj->dev_private = dmadev->data->dev_private;
        dmadev->fp_obj->copy = dpaa_qdma_enqueue;
        dmadev->fp_obj->submit = dpaa_qdma_submit;
        dmadev->fp_obj->completed = dpaa_qdma_dequeue;
        dmadev->fp_obj->completed_status = dpaa_qdma_dequeue_status;

        /* Invoke PMD device initialization function */
        ret = dpaa_qdma_init(dmadev);
        if (ret) {
                (void)rte_dma_pmd_release(dpaa_dev->device.name);
                return ret;
        }

        dmadev->state = RTE_DMA_DEV_READY;
        return 0;
}

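/*
 * DPAA bus remove: release every channel still in use, free the engine's
 * bookkeeping and unregister the dmadev.
 */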
static int
dpaa_qdma_remove(struct rte_dpaa_device *dpaa_dev)
{
        struct rte_dma_dev *dmadev = dpaa_dev->dmadev;
        struct fsl_qdma_engine *fsl_qdma = dmadev->data->dev_private;
        int i = 0, max = QDMA_QUEUES * QDMA_BLOCKS;

        for (i = 0; i < max; i++) {
                struct fsl_qdma_chan *fsl_chan = &fsl_qdma->chans[i];

                if (fsl_chan->free == false)
                        dma_release(fsl_chan);
        }

        rte_free(fsl_qdma->status);
        rte_free(fsl_qdma->chans);

        (void)rte_dma_pmd_release(dpaa_dev->device.name);

        return 0;
}

static struct rte_dpaa_driver rte_dpaa_qdma_pmd;

static struct rte_dpaa_driver rte_dpaa_qdma_pmd = {
        .drv_type = FSL_DPAA_QDMA,
        .probe = dpaa_qdma_probe,
        .remove = dpaa_qdma_remove,
};

RTE_PMD_REGISTER_DPAA(dpaa_qdma, rte_dpaa_qdma_pmd);
RTE_LOG_REGISTER_DEFAULT(dpaa_qdma_logtype, INFO);