dma/idxd: add operation statistic tracking
[dpdk.git] / drivers / dma / idxd / idxd_common.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright 2021 Intel Corporation
3  */
4
5 #include <x86intrin.h>
6
7 #include <rte_malloc.h>
8 #include <rte_common.h>
9 #include <rte_log.h>
10 #include <rte_prefetch.h>
11
12 #include "idxd_internal.h"
13
14 #define IDXD_PMD_NAME_STR "dmadev_idxd"
15
16 static __rte_always_inline rte_iova_t
17 __desc_idx_to_iova(struct idxd_dmadev *idxd, uint16_t n)
18 {
19         return idxd->desc_iova + (n * sizeof(struct idxd_hw_desc));
20 }
21
/* Copy the 64-byte descriptor at 'src' to the device portal at 'dst' using
 * a MOVDIR64B instruction, emitted as raw opcode bytes so the code builds
 * with assemblers that lack the mnemonic. The "a"/"d" constraints pin
 * dst/src to the registers the fixed ModRM byte (0x02) encodes; "memory"
 * prevents the compiler reordering stores around the device write.
 */
static __rte_always_inline void
__idxd_movdir64b(volatile void *dst, const struct idxd_hw_desc *src)
{
        asm volatile (".byte 0x66, 0x0f, 0x38, 0xf8, 0x02"
                        :
                        : "a" (dst), "d" (src)
                        : "memory");
}
30
/* Flush the currently accumulated batch of descriptors to the hardware.
 *
 * A batch of exactly one descriptor is submitted directly rather than via a
 * batch descriptor; larger batches are submitted with a single batch
 * descriptor pointing into the descriptor ring. In both cases a completion
 * write-back is requested into the current slot of the batch completion
 * ring. Afterwards the write index is advanced (the batch-index ring has
 * max_batches + 1 slots), stats are updated, and state is reset so the next
 * enqueue starts a fresh batch.
 */
static __rte_always_inline void
__submit(struct idxd_dmadev *idxd)
{
        rte_prefetch1(&idxd->batch_comp_ring[idxd->batch_idx_read]);

        /* nothing enqueued since the last submit */
        if (idxd->batch_size == 0)
                return;

        /* write completion to batch comp ring */
        rte_iova_t comp_addr = idxd->batch_iova +
                        (idxd->batch_idx_write * sizeof(struct idxd_completion));

        if (idxd->batch_size == 1) {
                /* submit batch directly */
                struct idxd_hw_desc desc =
                                idxd->desc_ring[idxd->batch_start & idxd->desc_ring_mask];
                desc.completion = comp_addr;
                desc.op_flags |= IDXD_FLAG_REQUEST_COMPLETION;
                _mm_sfence(); /* fence before writing desc to device */
                __idxd_movdir64b(idxd->portal, &desc);
        } else {
                const struct idxd_hw_desc batch_desc = {
                                .op_flags = (idxd_op_batch << IDXD_CMD_OP_SHIFT) |
                                IDXD_FLAG_COMPLETION_ADDR_VALID |
                                IDXD_FLAG_REQUEST_COMPLETION,
                                .desc_addr = __desc_idx_to_iova(idxd,
                                                idxd->batch_start & idxd->desc_ring_mask),
                                .completion = comp_addr,
                                .size = idxd->batch_size,
                };
                _mm_sfence(); /* fence before writing desc to device */
                __idxd_movdir64b(idxd->portal, &batch_desc);
        }

        /* advance write index with wrap-around over max_batches + 1 slots */
        if (++idxd->batch_idx_write > idxd->max_batches)
                idxd->batch_idx_write = 0;

        idxd->stats.submitted += idxd->batch_size;

        /* start a fresh batch: record its start id in the index ring and
         * zero the completion record the hardware will write back into
         */
        idxd->batch_start += idxd->batch_size;
        idxd->batch_size = 0;
        idxd->batch_idx_ring[idxd->batch_idx_write] = idxd->batch_start;
        _mm256_store_si256((void *)&idxd->batch_comp_ring[idxd->batch_idx_write],
                        _mm256_setzero_si256());
}
76
/* Write one descriptor into the descriptor ring and count it in the current
 * batch, submitting the batch if RTE_DMA_OP_FLAG_SUBMIT is set.
 *
 * @param op_flags  operation code and hardware flags for the descriptor
 *                  (IDXD_FLAG_COMPLETION_ADDR_VALID is OR'd in here)
 * @param src       source IOVA (carries the fill pattern for fill ops)
 * @param dst       destination IOVA
 * @param size      transfer length in bytes
 * @param flags     dmadev op flags from the caller
 * @return the job id of the enqueued operation, or -ENOSPC when either the
 *         batch-index ring or the descriptor ring is full
 */
static __rte_always_inline int
__idxd_write_desc(struct idxd_dmadev *idxd,
                const uint32_t op_flags,
                const rte_iova_t src,
                const rte_iova_t dst,
                const uint32_t size,
                const uint32_t flags)
{
        uint16_t mask = idxd->desc_ring_mask;
        uint16_t job_id = idxd->batch_start + idxd->batch_size;
        /* we never wrap batches, so we only mask the start and allow start+size to overflow */
        uint16_t write_idx = (idxd->batch_start & mask) + idxd->batch_size;

        /* first check batch ring space then desc ring space */
        if ((idxd->batch_idx_read == 0 && idxd->batch_idx_write == idxd->max_batches) ||
                        idxd->batch_idx_write + 1 == idxd->batch_idx_read)
                return -ENOSPC;
        if (((write_idx + 1) & mask) == (idxd->ids_returned & mask))
                return -ENOSPC;

        /* write desc. Note: descriptors don't wrap, but the completion address does */
        const uint64_t op_flags64 = (uint64_t)(op_flags | IDXD_FLAG_COMPLETION_ADDR_VALID) << 32;
        const uint64_t comp_addr = __desc_idx_to_iova(idxd, write_idx & mask);
        /* two 32-byte stores cover the full 64-byte hardware descriptor */
        _mm256_store_si256((void *)&idxd->desc_ring[write_idx],
                        _mm256_set_epi64x(dst, src, comp_addr, op_flags64));
        _mm256_store_si256((void *)&idxd->desc_ring[write_idx].size,
                        _mm256_set_epi64x(0, 0, 0, size));

        idxd->batch_size++;

        /* warm the cache line for the next descriptor write */
        rte_prefetch0_write(&idxd->desc_ring[write_idx + 1]);

        if (flags & RTE_DMA_OP_FLAG_SUBMIT)
                __submit(idxd);

        return job_id;
}
114
115 int
116 idxd_enqueue_copy(void *dev_private, uint16_t qid __rte_unused, rte_iova_t src,
117                 rte_iova_t dst, unsigned int length, uint64_t flags)
118 {
119         /* we can take advantage of the fact that the fence flag in dmadev and DSA are the same,
120          * but check it at compile time to be sure.
121          */
122         RTE_BUILD_BUG_ON(RTE_DMA_OP_FLAG_FENCE != IDXD_FLAG_FENCE);
123         uint32_t memmove = (idxd_op_memmove << IDXD_CMD_OP_SHIFT) |
124                         IDXD_FLAG_CACHE_CONTROL | (flags & IDXD_FLAG_FENCE);
125         return __idxd_write_desc(dev_private, memmove, src, dst, length,
126                         flags);
127 }
128
129 int
130 idxd_enqueue_fill(void *dev_private, uint16_t qid __rte_unused, uint64_t pattern,
131                 rte_iova_t dst, unsigned int length, uint64_t flags)
132 {
133         uint32_t fill = (idxd_op_fill << IDXD_CMD_OP_SHIFT) |
134                         IDXD_FLAG_CACHE_CONTROL | (flags & IDXD_FLAG_FENCE);
135         return __idxd_write_desc(dev_private, fill, pattern, dst, length,
136                         flags);
137 }
138
139 int
140 idxd_submit(void *dev_private, uint16_t qid __rte_unused)
141 {
142         __submit(dev_private);
143         return 0;
144 }
145
146 static enum rte_dma_status_code
147 get_comp_status(struct idxd_completion *c)
148 {
149         uint8_t st = c->status;
150         switch (st) {
151         /* successful descriptors are not written back normally */
152         case IDXD_COMP_STATUS_INCOMPLETE:
153         case IDXD_COMP_STATUS_SUCCESS:
154                 return RTE_DMA_STATUS_SUCCESSFUL;
155         case IDXD_COMP_STATUS_INVALID_OPCODE:
156                 return RTE_DMA_STATUS_INVALID_OPCODE;
157         case IDXD_COMP_STATUS_INVALID_SIZE:
158                 return RTE_DMA_STATUS_INVALID_LENGTH;
159         case IDXD_COMP_STATUS_SKIPPED:
160                 return RTE_DMA_STATUS_NOT_ATTEMPTED;
161         default:
162                 return RTE_DMA_STATUS_ERROR_UNKNOWN;
163         }
164 }
165
/* Fast-path completion check.
 *
 * Returns up to max_ops job handles known to be successfully completed:
 * first any handles left over from a previous call, then - if the batch at
 * the read index completed with SUCCESS - the jobs of that batch. Returns 0
 * when nothing new is complete, and -1 when the current batch finished with
 * an error and needs per-descriptor handling by the caller.
 * If 'status' is non-NULL, one RTE_DMA_STATUS_SUCCESSFUL entry is written
 * per returned handle.
 */
static __rte_always_inline int
batch_ok(struct idxd_dmadev *idxd, uint16_t max_ops, enum rte_dma_status_code *status)
{
        uint16_t ret;
        uint8_t bstatus;

        if (max_ops == 0)
                return 0;

        /* first check if there are any unreturned handles from last time */
        if (idxd->ids_avail != idxd->ids_returned) {
                ret = RTE_MIN((uint16_t)(idxd->ids_avail - idxd->ids_returned), max_ops);
                idxd->ids_returned += ret;
                if (status)
                        memset(status, RTE_DMA_STATUS_SUCCESSFUL, ret * sizeof(*status));
                return ret;
        }

        /* read == write means the batch ring is empty */
        if (idxd->batch_idx_read == idxd->batch_idx_write)
                return 0;

        bstatus = idxd->batch_comp_ring[idxd->batch_idx_read].status;
        /* now check if next batch is complete and successful */
        if (bstatus == IDXD_COMP_STATUS_SUCCESS) {
                /* since the batch idx ring stores the start of each batch, pre-increment to lookup
                 * start of next batch.
                 */
                if (++idxd->batch_idx_read > idxd->max_batches)
                        idxd->batch_idx_read = 0;
                idxd->ids_avail = idxd->batch_idx_ring[idxd->batch_idx_read];

                ret = RTE_MIN((uint16_t)(idxd->ids_avail - idxd->ids_returned), max_ops);
                idxd->ids_returned += ret;
                if (status)
                        memset(status, RTE_DMA_STATUS_SUCCESSFUL, ret * sizeof(*status));
                return ret;
        }
        /* check if batch is incomplete */
        else if (bstatus == IDXD_COMP_STATUS_INCOMPLETE)
                return 0;

        return -1; /* error case */
}
209
/* Return the number of completed jobs for idxd_completed(), stopping at the
 * first failed job. On error, *has_error is set and the batch bookkeeping is
 * rewritten so that further calls return 0 until the error is drained via
 * the completed_status() path.
 */
static inline uint16_t
batch_completed(struct idxd_dmadev *idxd, uint16_t max_ops, bool *has_error)
{
        uint16_t i;
        uint16_t b_start, b_end, next_batch;

        int ret = batch_ok(idxd, max_ops, NULL);
        if (ret >= 0)
                return ret;

        /* ERROR case, not successful, not incomplete */
        /* Get the batch size, and special case size 1.
         * once we identify the actual failure job, return other jobs, then update
         * the batch ring indexes to make it look like the first job of the batch has failed.
         * Subsequent calls here will always return zero packets, and the error must be cleared by
         * calling the completed_status() function.
         */
        next_batch = (idxd->batch_idx_read + 1);
        if (next_batch > idxd->max_batches)
                next_batch = 0;
        b_start = idxd->batch_idx_ring[idxd->batch_idx_read];
        b_end = idxd->batch_idx_ring[next_batch];

        if (b_end - b_start == 1) { /* not a batch */
                *has_error = true;
                return 0;
        }

        /* walk descriptors until the first one with an error status */
        for (i = b_start; i < b_end; i++) {
                struct idxd_completion *c = (void *)&idxd->desc_ring[i & idxd->desc_ring_mask];
                if (c->status > IDXD_COMP_STATUS_SUCCESS) /* ignore incomplete(0) and success(1) */
                        break;
        }
        ret = RTE_MIN((uint16_t)(i - idxd->ids_returned), max_ops);
        if (ret < max_ops)
                *has_error = true; /* we got up to the point of error */
        idxd->ids_avail = idxd->ids_returned += ret;

        /* to ensure we can call twice and just return 0, set start of batch to where we finished */
        idxd->batch_comp_ring[idxd->batch_idx_read].completed_size -= ret;
        idxd->batch_idx_ring[idxd->batch_idx_read] += ret;
        if (idxd->batch_idx_ring[next_batch] - idxd->batch_idx_ring[idxd->batch_idx_read] == 1) {
                /* copy over the descriptor status to the batch ring as if no batch */
                uint16_t d_idx = idxd->batch_idx_ring[idxd->batch_idx_read] & idxd->desc_ring_mask;
                struct idxd_completion *desc_comp = (void *)&idxd->desc_ring[d_idx];
                idxd->batch_comp_ring[idxd->batch_idx_read].status = desc_comp->status;
        }

        return ret;
}
260
261 static uint16_t
262 batch_completed_status(struct idxd_dmadev *idxd, uint16_t max_ops, enum rte_dma_status_code *status)
263 {
264         uint16_t next_batch;
265
266         int ret = batch_ok(idxd, max_ops, status);
267         if (ret >= 0)
268                 return ret;
269
270         /* ERROR case, not successful, not incomplete */
271         /* Get the batch size, and special case size 1.
272          */
273         next_batch = (idxd->batch_idx_read + 1);
274         if (next_batch > idxd->max_batches)
275                 next_batch = 0;
276         const uint16_t b_start = idxd->batch_idx_ring[idxd->batch_idx_read];
277         const uint16_t b_end = idxd->batch_idx_ring[next_batch];
278         const uint16_t b_len = b_end - b_start;
279         if (b_len == 1) {/* not a batch */
280                 *status = get_comp_status(&idxd->batch_comp_ring[idxd->batch_idx_read]);
281                 if (status != RTE_DMA_STATUS_SUCCESSFUL)
282                         idxd->stats.errors++;
283                 idxd->ids_avail++;
284                 idxd->ids_returned++;
285                 idxd->batch_idx_read = next_batch;
286                 return 1;
287         }
288
289         /* not a single-element batch, need to process more.
290          * Scenarios:
291          * 1. max_ops >= batch_size - can fit everything, simple case
292          *   - loop through completed ops and then add on any not-attempted ones
293          * 2. max_ops < batch_size - can't fit everything, more complex case
294          *   - loop through completed/incomplete and stop when hit max_ops
295          *   - adjust the batch descriptor to update where we stopped, with appropriate bcount
296          *   - if bcount is to be exactly 1, update the batch descriptor as it will be treated as
297          *     non-batch next time.
298          */
299         const uint16_t bcount = idxd->batch_comp_ring[idxd->batch_idx_read].completed_size;
300         for (ret = 0; ret < b_len && ret < max_ops; ret++) {
301                 struct idxd_completion *c = (void *)
302                                 &idxd->desc_ring[(b_start + ret) & idxd->desc_ring_mask];
303                 status[ret] = (ret < bcount) ? get_comp_status(c) : RTE_DMA_STATUS_NOT_ATTEMPTED;
304                 if (status[ret] != RTE_DMA_STATUS_SUCCESSFUL)
305                         idxd->stats.errors++;
306         }
307         idxd->ids_avail = idxd->ids_returned += ret;
308
309         /* everything fit */
310         if (ret == b_len) {
311                 idxd->batch_idx_read = next_batch;
312                 return ret;
313         }
314
315         /* set up for next time, update existing batch descriptor & start idx at batch_idx_read */
316         idxd->batch_idx_ring[idxd->batch_idx_read] += ret;
317         if (ret > bcount) {
318                 /* we have only incomplete ones - set batch completed size to 0 */
319                 struct idxd_completion *comp = &idxd->batch_comp_ring[idxd->batch_idx_read];
320                 comp->completed_size = 0;
321                 /* if there is only one descriptor left, job skipped so set flag appropriately */
322                 if (b_len - ret == 1)
323                         comp->status = IDXD_COMP_STATUS_SKIPPED;
324         } else {
325                 struct idxd_completion *comp = &idxd->batch_comp_ring[idxd->batch_idx_read];
326                 comp->completed_size -= ret;
327                 /* if there is only one descriptor left, copy status info straight to desc */
328                 if (comp->completed_size == 1) {
329                         struct idxd_completion *c = (void *)
330                                         &idxd->desc_ring[(b_start + ret) & idxd->desc_ring_mask];
331                         comp->status = c->status;
332                         /* individual descs can be ok without writeback, but not batches */
333                         if (comp->status == IDXD_COMP_STATUS_INCOMPLETE)
334                                 comp->status = IDXD_COMP_STATUS_SUCCESS;
335                 } else if (bcount == b_len) {
336                         /* check if we still have an error, and clear flag if not */
337                         uint16_t i;
338                         for (i = b_start + ret; i < b_end; i++) {
339                                 struct idxd_completion *c = (void *)
340                                                 &idxd->desc_ring[i & idxd->desc_ring_mask];
341                                 if (c->status > IDXD_COMP_STATUS_SUCCESS)
342                                         break;
343                         }
344                         if (i == b_end) /* no errors */
345                                 comp->status = IDXD_COMP_STATUS_SUCCESS;
346                 }
347         }
348
349         return ret;
350 }
351
352 uint16_t
353 idxd_completed(void *dev_private, uint16_t qid __rte_unused, uint16_t max_ops,
354                 uint16_t *last_idx, bool *has_error)
355 {
356         struct idxd_dmadev *idxd = dev_private;
357         uint16_t batch, ret = 0;
358
359         do {
360                 batch = batch_completed(idxd, max_ops - ret, has_error);
361                 ret += batch;
362         } while (batch > 0 && *has_error == false);
363
364         idxd->stats.completed += ret;
365         *last_idx = idxd->ids_returned - 1;
366         return ret;
367 }
368
369 uint16_t
370 idxd_completed_status(void *dev_private, uint16_t qid __rte_unused, uint16_t max_ops,
371                 uint16_t *last_idx, enum rte_dma_status_code *status)
372 {
373         struct idxd_dmadev *idxd = dev_private;
374         uint16_t batch, ret = 0;
375
376         do {
377                 batch = batch_completed_status(idxd, max_ops - ret, &status[ret]);
378                 ret += batch;
379         } while (batch > 0);
380
381         idxd->stats.completed += ret;
382         *last_idx = idxd->ids_returned - 1;
383         return ret;
384 }
385
386 int
387 idxd_dump(const struct rte_dma_dev *dev, FILE *f)
388 {
389         struct idxd_dmadev *idxd = dev->fp_obj->dev_private;
390         unsigned int i;
391
392         fprintf(f, "== IDXD Private Data ==\n");
393         fprintf(f, "  Portal: %p\n", idxd->portal);
394         fprintf(f, "  Config: { ring_size: %u }\n",
395                         idxd->qcfg.nb_desc);
396         fprintf(f, "  Batch ring (sz = %u, max_batches = %u):\n\t",
397                         idxd->max_batches + 1, idxd->max_batches);
398         for (i = 0; i <= idxd->max_batches; i++) {
399                 fprintf(f, " %u ", idxd->batch_idx_ring[i]);
400                 if (i == idxd->batch_idx_read && i == idxd->batch_idx_write)
401                         fprintf(f, "[rd ptr, wr ptr] ");
402                 else if (i == idxd->batch_idx_read)
403                         fprintf(f, "[rd ptr] ");
404                 else if (i == idxd->batch_idx_write)
405                         fprintf(f, "[wr ptr] ");
406                 if (i == idxd->max_batches)
407                         fprintf(f, "\n");
408         }
409
410         fprintf(f, "  Curr batch: start = %u, size = %u\n", idxd->batch_start, idxd->batch_size);
411         fprintf(f, "  IDS: avail = %u, returned: %u\n", idxd->ids_avail, idxd->ids_returned);
412         return 0;
413 }
414
415 int
416 idxd_stats_get(const struct rte_dma_dev *dev, uint16_t vchan __rte_unused,
417                 struct rte_dma_stats *stats, uint32_t stats_sz)
418 {
419         struct idxd_dmadev *idxd = dev->fp_obj->dev_private;
420         if (stats_sz < sizeof(*stats))
421                 return -EINVAL;
422         *stats = idxd->stats;
423         return 0;
424 }
425
426 int
427 idxd_stats_reset(struct rte_dma_dev *dev, uint16_t vchan __rte_unused)
428 {
429         struct idxd_dmadev *idxd = dev->fp_obj->dev_private;
430         idxd->stats = (struct rte_dma_stats){0};
431         return 0;
432 }
433
434 int
435 idxd_info_get(const struct rte_dma_dev *dev, struct rte_dma_info *info, uint32_t size)
436 {
437         struct idxd_dmadev *idxd = dev->fp_obj->dev_private;
438
439         if (size < sizeof(*info))
440                 return -EINVAL;
441
442         *info = (struct rte_dma_info) {
443                         .dev_capa = RTE_DMA_CAPA_MEM_TO_MEM | RTE_DMA_CAPA_HANDLES_ERRORS |
444                                 RTE_DMA_CAPA_OPS_COPY | RTE_DMA_CAPA_OPS_FILL,
445                         .max_vchans = 1,
446                         .max_desc = 4096,
447                         .min_desc = 64,
448         };
449         if (idxd->sva_support)
450                 info->dev_capa |= RTE_DMA_CAPA_SVA;
451         return 0;
452 }
453
454 int
455 idxd_configure(struct rte_dma_dev *dev __rte_unused, const struct rte_dma_conf *dev_conf,
456                 uint32_t conf_sz)
457 {
458         if (sizeof(struct rte_dma_conf) != conf_sz)
459                 return -EINVAL;
460
461         if (dev_conf->nb_vchans != 1)
462                 return -EINVAL;
463         return 0;
464 }
465
/* Set up the (single) virtual channel: round the requested descriptor count
 * up to a power of two, (re)allocate the descriptor ring, and reset all
 * batch/index bookkeeping.
 *
 * @return 0 on success, -EINVAL on conf-struct size mismatch,
 *         -ENOMEM if the descriptor ring cannot be allocated
 */
int
idxd_vchan_setup(struct rte_dma_dev *dev, uint16_t vchan __rte_unused,
                const struct rte_dma_vchan_conf *qconf, uint32_t qconf_sz)
{
        struct idxd_dmadev *idxd = dev->fp_obj->dev_private;
        uint16_t max_desc = qconf->nb_desc;

        /* ABI guard: reject callers built against a different struct layout */
        if (sizeof(struct rte_dma_vchan_conf) != qconf_sz)
                return -EINVAL;

        idxd->qcfg = *qconf;

        /* ring size must be a power of two so index masking works */
        if (!rte_is_power_of_2(max_desc))
                max_desc = rte_align32pow2(max_desc);
        IDXD_PMD_DEBUG("DMA dev %u using %u descriptors", dev->data->dev_id, max_desc);
        idxd->desc_ring_mask = max_desc - 1;
        idxd->qcfg.nb_desc = max_desc;

        /* in case we are reconfiguring a device, free any existing memory */
        rte_free(idxd->desc_ring);

        /* allocate the descriptor ring at 2x size as batches can't wrap */
        idxd->desc_ring = rte_zmalloc(NULL, sizeof(*idxd->desc_ring) * max_desc * 2, 0);
        if (idxd->desc_ring == NULL)
                return -ENOMEM;
        idxd->desc_iova = rte_mem_virt2iova(idxd->desc_ring);

        /* reset all batch and job-id tracking state for the fresh ring */
        idxd->batch_idx_read = 0;
        idxd->batch_idx_write = 0;
        idxd->batch_start = 0;
        idxd->batch_size = 0;
        idxd->ids_returned = 0;
        idxd->ids_avail = 0;

        memset(idxd->batch_comp_ring, 0, sizeof(*idxd->batch_comp_ring) *
                        (idxd->max_batches + 1));
        return 0;
}
504
505 int
506 idxd_dmadev_create(const char *name, struct rte_device *dev,
507                    const struct idxd_dmadev *base_idxd,
508                    const struct rte_dma_dev_ops *ops)
509 {
510         struct idxd_dmadev *idxd = NULL;
511         struct rte_dma_dev *dmadev = NULL;
512         int ret = 0;
513
514         RTE_BUILD_BUG_ON(sizeof(struct idxd_hw_desc) != 64);
515         RTE_BUILD_BUG_ON(offsetof(struct idxd_hw_desc, size) != 32);
516         RTE_BUILD_BUG_ON(sizeof(struct idxd_completion) != 32);
517
518         if (!name) {
519                 IDXD_PMD_ERR("Invalid name of the device!");
520                 ret = -EINVAL;
521                 goto cleanup;
522         }
523
524         /* Allocate device structure */
525         dmadev = rte_dma_pmd_allocate(name, dev->numa_node, sizeof(struct idxd_dmadev));
526         if (dmadev == NULL) {
527                 IDXD_PMD_ERR("Unable to allocate dma device");
528                 ret = -ENOMEM;
529                 goto cleanup;
530         }
531         dmadev->dev_ops = ops;
532         dmadev->device = dev;
533
534         dmadev->fp_obj->copy = idxd_enqueue_copy;
535         dmadev->fp_obj->fill = idxd_enqueue_fill;
536         dmadev->fp_obj->submit = idxd_submit;
537         dmadev->fp_obj->completed = idxd_completed;
538         dmadev->fp_obj->completed_status = idxd_completed_status;
539
540         idxd = dmadev->data->dev_private;
541         *idxd = *base_idxd; /* copy over the main fields already passed in */
542         idxd->dmadev = dmadev;
543
544         /* allocate batch index ring and completion ring.
545          * The +1 is because we can never fully use
546          * the ring, otherwise read == write means both full and empty.
547          */
548         idxd->batch_comp_ring = rte_zmalloc_socket(NULL, (sizeof(idxd->batch_idx_ring[0]) +
549                         sizeof(idxd->batch_comp_ring[0]))       * (idxd->max_batches + 1),
550                         sizeof(idxd->batch_comp_ring[0]), dev->numa_node);
551         if (idxd->batch_comp_ring == NULL) {
552                 IDXD_PMD_ERR("Unable to reserve memory for batch data\n");
553                 ret = -ENOMEM;
554                 goto cleanup;
555         }
556         idxd->batch_idx_ring = (void *)&idxd->batch_comp_ring[idxd->max_batches+1];
557         idxd->batch_iova = rte_mem_virt2iova(idxd->batch_comp_ring);
558
559         dmadev->fp_obj->dev_private = idxd;
560
561         idxd->dmadev->state = RTE_DMA_DEV_READY;
562
563         return 0;
564
565 cleanup:
566         if (dmadev)
567                 rte_dma_pmd_release(name);
568
569         return ret;
570 }
571
/* Log type used by the driver's logging macros; registered below with a
 * default level of WARNING.
 */
int idxd_pmd_logtype;

RTE_LOG_REGISTER_DEFAULT(idxd_pmd_logtype, WARNING);