drivers/dma/idxd/idxd_common.c
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2021 Intel Corporation
 */

#include <x86intrin.h>

#include <rte_malloc.h>
#include <rte_common.h>
#include <rte_log.h>
#include <rte_prefetch.h>

#include "idxd_internal.h"

#define IDXD_PMD_NAME_STR "dmadev_idxd"

/* systems with DSA all support AVX2 so allow our data-path functions to
 * always use at least that instruction set
 */
#ifndef __AVX2__
#define __use_avx2 __attribute__((target("avx2")))
#else
#define __use_avx2
#endif

__use_avx2
static __rte_always_inline rte_iova_t
__desc_idx_to_iova(struct idxd_dmadev *idxd, uint16_t n)
{
	return idxd->desc_iova + (n * sizeof(struct idxd_hw_desc));
}

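/* MOVDIR64B is emitted as raw opcode bytes (66 0F 38 F8 /r, ModRM 0x02) so the
 * build does not depend on assembler or intrinsic support for the mnemonic.
 * With the destination address in rax ("a" constraint) and the source in rdx
 * ("d" constraint), this performs a single 64-byte store of the descriptor to
 * the device portal.
 */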
__use_avx2
static __rte_always_inline void
__idxd_movdir64b(volatile void *dst, const struct idxd_hw_desc *src)
{
	asm volatile (".byte 0x66, 0x0f, 0x38, 0xf8, 0x02"
			:
			: "a" (dst), "d" (src)
			: "memory");
}

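/* Flush the descriptors accumulated since the last submit as one unit of work:
 * a single descriptor is pushed directly (with completion write-back
 * requested), while two or more descriptors are wrapped in a batch descriptor
 * pointing at their contiguous run in the descriptor ring. The sfence orders
 * the descriptor writes before the MOVDIR64B to the portal, after which the
 * batch write index advances, the start of the next batch is recorded in the
 * index ring, and that slot's completion record is zeroed.
 */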
__use_avx2
static __rte_always_inline void
__submit(struct idxd_dmadev *idxd)
{
	rte_prefetch1(&idxd->batch_comp_ring[idxd->batch_idx_read]);

	if (idxd->batch_size == 0)
		return;

	/* write completion to batch comp ring */
	rte_iova_t comp_addr = idxd->batch_iova +
			(idxd->batch_idx_write * sizeof(struct idxd_completion));

	if (idxd->batch_size == 1) {
		/* single-descriptor batch: submit the descriptor directly, no batch descriptor */
		struct idxd_hw_desc desc =
				idxd->desc_ring[idxd->batch_start & idxd->desc_ring_mask];
		desc.completion = comp_addr;
		desc.op_flags |= IDXD_FLAG_REQUEST_COMPLETION;
		_mm_sfence(); /* fence before writing desc to device */
		__idxd_movdir64b(idxd->portal, &desc);
	} else {
		const struct idxd_hw_desc batch_desc = {
				.op_flags = (idxd_op_batch << IDXD_CMD_OP_SHIFT) |
				IDXD_FLAG_COMPLETION_ADDR_VALID |
				IDXD_FLAG_REQUEST_COMPLETION,
				.desc_addr = __desc_idx_to_iova(idxd,
						idxd->batch_start & idxd->desc_ring_mask),
				.completion = comp_addr,
				.size = idxd->batch_size,
		};
		_mm_sfence(); /* fence before writing desc to device */
		__idxd_movdir64b(idxd->portal, &batch_desc);
	}

	if (++idxd->batch_idx_write > idxd->max_batches)
		idxd->batch_idx_write = 0;

	idxd->stats.submitted += idxd->batch_size;

	idxd->batch_start += idxd->batch_size;
	idxd->batch_size = 0;
	idxd->batch_idx_ring[idxd->batch_idx_write] = idxd->batch_start;
	_mm256_store_si256((void *)&idxd->batch_comp_ring[idxd->batch_idx_write],
			_mm256_setzero_si256());
}

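/* Common enqueue path for copy and fill. Space is checked first in the batch
 * index ring (full when the write index sits one slot behind the read index,
 * modulo max_batches + 1, with one slot always left empty) and then in the
 * descriptor ring. The 64-byte descriptor is written with two 32-byte AVX
 * stores; the completion address points back at the descriptor's own slot, so
 * per-descriptor write-back lands in the descriptor ring itself.
 * Returns the job id on success, or -ENOSPC when either ring is full.
 */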
__use_avx2
static __rte_always_inline int
__idxd_write_desc(struct idxd_dmadev *idxd,
		const uint32_t op_flags,
		const rte_iova_t src,
		const rte_iova_t dst,
		const uint32_t size,
		const uint32_t flags)
{
	uint16_t mask = idxd->desc_ring_mask;
	uint16_t job_id = idxd->batch_start + idxd->batch_size;
	/* we never wrap batches, so we only mask the start and allow start+size to overflow */
	uint16_t write_idx = (idxd->batch_start & mask) + idxd->batch_size;

	/* first check batch ring space then desc ring space */
	if ((idxd->batch_idx_read == 0 && idxd->batch_idx_write == idxd->max_batches) ||
			idxd->batch_idx_write + 1 == idxd->batch_idx_read)
		return -ENOSPC;
	if (((write_idx + 1) & mask) == (idxd->ids_returned & mask))
		return -ENOSPC;

	/* write desc. Note: descriptors don't wrap, but the completion address does */
	const uint64_t op_flags64 = (uint64_t)(op_flags | IDXD_FLAG_COMPLETION_ADDR_VALID) << 32;
	const uint64_t comp_addr = __desc_idx_to_iova(idxd, write_idx & mask);
	_mm256_store_si256((void *)&idxd->desc_ring[write_idx],
			_mm256_set_epi64x(dst, src, comp_addr, op_flags64));
	_mm256_store_si256((void *)&idxd->desc_ring[write_idx].size,
			_mm256_set_epi64x(0, 0, 0, size));

	idxd->batch_size++;

	rte_prefetch0_write(&idxd->desc_ring[write_idx + 1]);

	if (flags & RTE_DMA_OP_FLAG_SUBMIT)
		__submit(idxd);

	return job_id;
}

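/* The fast-path entry points below are wired into dmadev's fp_obj at create
 * time, so applications reach them through the generic rte_dma_* calls.
 * A rough application-level flow (illustrative sketch only; dev_id, vchan and
 * buffer names are placeholders, and the calls shown are the generic dmadev
 * API rather than anything IDXD-specific) looks like:
 *
 *	int id = rte_dma_copy(dev_id, 0, src_iova, dst_iova, len,
 *			RTE_DMA_OP_FLAG_SUBMIT);	// -> idxd_enqueue_copy + __submit
 *	...
 *	bool err;
 *	uint16_t last;
 *	uint16_t n = rte_dma_completed(dev_id, 0, burst, &last, &err);
 *						// -> idxd_completed
 */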
__use_avx2
int
idxd_enqueue_copy(void *dev_private, uint16_t qid __rte_unused, rte_iova_t src,
		rte_iova_t dst, unsigned int length, uint64_t flags)
{
	/* we can take advantage of the fact that the fence flag in dmadev and DSA are the same,
	 * but check it at compile time to be sure.
	 */
	RTE_BUILD_BUG_ON(RTE_DMA_OP_FLAG_FENCE != IDXD_FLAG_FENCE);
	uint32_t memmove = (idxd_op_memmove << IDXD_CMD_OP_SHIFT) |
			IDXD_FLAG_CACHE_CONTROL | (flags & IDXD_FLAG_FENCE);
	return __idxd_write_desc(dev_private, memmove, src, dst, length,
			flags);
}

__use_avx2
int
idxd_enqueue_fill(void *dev_private, uint16_t qid __rte_unused, uint64_t pattern,
		rte_iova_t dst, unsigned int length, uint64_t flags)
{
	uint32_t fill = (idxd_op_fill << IDXD_CMD_OP_SHIFT) |
			IDXD_FLAG_CACHE_CONTROL | (flags & IDXD_FLAG_FENCE);
	return __idxd_write_desc(dev_private, fill, pattern, dst, length,
			flags);
}

__use_avx2
int
idxd_submit(void *dev_private, uint16_t qid __rte_unused)
{
	__submit(dev_private);
	return 0;
}

__use_avx2
static enum rte_dma_status_code
get_comp_status(struct idxd_completion *c)
{
	uint8_t st = c->status;
	switch (st) {
	/* successful descriptors are not written back normally */
	case IDXD_COMP_STATUS_INCOMPLETE:
	case IDXD_COMP_STATUS_SUCCESS:
		return RTE_DMA_STATUS_SUCCESSFUL;
	case IDXD_COMP_STATUS_INVALID_OPCODE:
		return RTE_DMA_STATUS_INVALID_OPCODE;
	case IDXD_COMP_STATUS_INVALID_SIZE:
		return RTE_DMA_STATUS_INVALID_LENGTH;
	case IDXD_COMP_STATUS_SKIPPED:
		return RTE_DMA_STATUS_NOT_ATTEMPTED;
	default:
		return RTE_DMA_STATUS_ERROR_UNKNOWN;
	}
}

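/* Report whether the hardware is still working on the most recently submitted
 * batch: a non-zero status in that batch's completion record means everything
 * handed to the device has completed, so the vchan is reported as idle.
 */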
__use_avx2
int
idxd_vchan_status(const struct rte_dma_dev *dev, uint16_t vchan __rte_unused,
		enum rte_dma_vchan_status *status)
{
	struct idxd_dmadev *idxd = dev->fp_obj->dev_private;
	uint16_t last_batch_write = idxd->batch_idx_write == 0 ? idxd->max_batches :
			idxd->batch_idx_write - 1;
	uint8_t bstatus = (idxd->batch_comp_ring[last_batch_write].status != 0);

	/* An IDXD device will always be either active or idle.
	 * RTE_DMA_VCHAN_HALTED_ERROR is therefore not supported by IDXD.
	 */
	*status = bstatus ? RTE_DMA_VCHAN_IDLE : RTE_DMA_VCHAN_ACTIVE;

	return 0;
}

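/* Helper for the two completion-gathering paths. It first hands back any ids
 * already accounted for but not yet returned, then checks the completion
 * record of the oldest outstanding batch: a fully successful batch releases
 * its ids (capped at max_ops, the remainder picked up on later calls), an
 * incomplete batch returns 0, and anything else returns -1 so the caller can
 * walk the individual descriptor completions for the failure point.
 */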
__use_avx2
static __rte_always_inline int
batch_ok(struct idxd_dmadev *idxd, uint16_t max_ops, enum rte_dma_status_code *status)
{
	uint16_t ret;
	uint8_t bstatus;

	if (max_ops == 0)
		return 0;

	/* first check if there are any unreturned handles from last time */
	if (idxd->ids_avail != idxd->ids_returned) {
		ret = RTE_MIN((uint16_t)(idxd->ids_avail - idxd->ids_returned), max_ops);
		idxd->ids_returned += ret;
		if (status)
			memset(status, RTE_DMA_STATUS_SUCCESSFUL, ret * sizeof(*status));
		return ret;
	}

	if (idxd->batch_idx_read == idxd->batch_idx_write)
		return 0;

	bstatus = idxd->batch_comp_ring[idxd->batch_idx_read].status;
	/* now check if next batch is complete and successful */
	if (bstatus == IDXD_COMP_STATUS_SUCCESS) {
		/* since the batch idx ring stores the start of each batch, pre-increment to lookup
		 * start of next batch.
		 */
		if (++idxd->batch_idx_read > idxd->max_batches)
			idxd->batch_idx_read = 0;
		idxd->ids_avail = idxd->batch_idx_ring[idxd->batch_idx_read];

		ret = RTE_MIN((uint16_t)(idxd->ids_avail - idxd->ids_returned), max_ops);
		idxd->ids_returned += ret;
		if (status)
			memset(status, RTE_DMA_STATUS_SUCCESSFUL, ret * sizeof(*status));
		return ret;
	}
	/* check if batch is incomplete */
	else if (bstatus == IDXD_COMP_STATUS_INCOMPLETE)
		return 0;

	return -1; /* error case */
}

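/* Gather completions for idxd_completed(). On a batch error, return the jobs
 * ahead of the failure point and then shrink the batch bookkeeping so that
 * the failed job appears as the first element of the remaining batch; further
 * calls return zero until idxd_completed_status() consumes and reports the
 * error.
 */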
__use_avx2
static inline uint16_t
batch_completed(struct idxd_dmadev *idxd, uint16_t max_ops, bool *has_error)
{
	uint16_t i;
	uint16_t b_start, b_end, next_batch;

	int ret = batch_ok(idxd, max_ops, NULL);
	if (ret >= 0)
		return ret;

	/* ERROR case, not successful, not incomplete */
	/* Get the batch size, and special-case size 1.
	 * Once we identify the actual failure job, return other jobs, then update
	 * the batch ring indexes to make it look like the first job of the batch has failed.
	 * Subsequent calls here will always return zero packets, and the error must be cleared by
	 * calling the completed_status() function.
	 */
	next_batch = (idxd->batch_idx_read + 1);
	if (next_batch > idxd->max_batches)
		next_batch = 0;
	b_start = idxd->batch_idx_ring[idxd->batch_idx_read];
	b_end = idxd->batch_idx_ring[next_batch];

	if (b_end - b_start == 1) { /* not a batch */
		*has_error = true;
		return 0;
	}

	for (i = b_start; i < b_end; i++) {
		struct idxd_completion *c = (void *)&idxd->desc_ring[i & idxd->desc_ring_mask];
		if (c->status > IDXD_COMP_STATUS_SUCCESS) /* ignore incomplete(0) and success(1) */
			break;
	}
	ret = RTE_MIN((uint16_t)(i - idxd->ids_returned), max_ops);
	if (ret < max_ops)
		*has_error = true; /* we got up to the point of error */
	idxd->ids_avail = idxd->ids_returned += ret;

	/* to ensure we can call twice and just return 0, set start of batch to where we finished */
	idxd->batch_comp_ring[idxd->batch_idx_read].completed_size -= ret;
	idxd->batch_idx_ring[idxd->batch_idx_read] += ret;
	if (idxd->batch_idx_ring[next_batch] - idxd->batch_idx_ring[idxd->batch_idx_read] == 1) {
		/* copy over the descriptor status to the batch ring as if no batch */
		uint16_t d_idx = idxd->batch_idx_ring[idxd->batch_idx_read] & idxd->desc_ring_mask;
		struct idxd_completion *desc_comp = (void *)&idxd->desc_ring[d_idx];
		idxd->batch_comp_ring[idxd->batch_idx_read].status = desc_comp->status;
	}

	return ret;
}

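/* Gather completions for idxd_completed_status(), reporting a per-job status
 * code instead of stopping at the first error. Single-descriptor batches are
 * resolved directly from the batch completion record; larger batches walk the
 * per-descriptor completions and, when max_ops is too small to drain the
 * whole batch, rewrite the batch record so the remainder is picked up on the
 * next call.
 */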
__use_avx2
static uint16_t
batch_completed_status(struct idxd_dmadev *idxd, uint16_t max_ops, enum rte_dma_status_code *status)
{
	uint16_t next_batch;

	int ret = batch_ok(idxd, max_ops, status);
	if (ret >= 0)
		return ret;

	/* ERROR case, not successful, not incomplete */
	/* Get the batch size, and special-case size 1.
	 */
	next_batch = (idxd->batch_idx_read + 1);
	if (next_batch > idxd->max_batches)
		next_batch = 0;
	const uint16_t b_start = idxd->batch_idx_ring[idxd->batch_idx_read];
	const uint16_t b_end = idxd->batch_idx_ring[next_batch];
	const uint16_t b_len = b_end - b_start;
	if (b_len == 1) { /* not a batch */
		*status = get_comp_status(&idxd->batch_comp_ring[idxd->batch_idx_read]);
		if (*status != RTE_DMA_STATUS_SUCCESSFUL)
			idxd->stats.errors++;
		idxd->ids_avail++;
		idxd->ids_returned++;
		idxd->batch_idx_read = next_batch;
		return 1;
	}

	/* not a single-element batch, need to process more.
	 * Scenarios:
	 * 1. max_ops >= batch_size - can fit everything, simple case
	 *   - loop through completed ops and then add on any not-attempted ones
	 * 2. max_ops < batch_size - can't fit everything, more complex case
	 *   - loop through completed/incomplete and stop when hit max_ops
	 *   - adjust the batch descriptor to update where we stopped, with appropriate bcount
	 *   - if bcount is to be exactly 1, update the batch descriptor as it will be treated as
	 *     non-batch next time.
	 */
	const uint16_t bcount = idxd->batch_comp_ring[idxd->batch_idx_read].completed_size;
	for (ret = 0; ret < b_len && ret < max_ops; ret++) {
		struct idxd_completion *c = (void *)
				&idxd->desc_ring[(b_start + ret) & idxd->desc_ring_mask];
		status[ret] = (ret < bcount) ? get_comp_status(c) : RTE_DMA_STATUS_NOT_ATTEMPTED;
		if (status[ret] != RTE_DMA_STATUS_SUCCESSFUL)
			idxd->stats.errors++;
	}
	idxd->ids_avail = idxd->ids_returned += ret;

	/* everything fit */
	if (ret == b_len) {
		idxd->batch_idx_read = next_batch;
		return ret;
	}

	/* set up for next time, update existing batch descriptor & start idx at batch_idx_read */
	idxd->batch_idx_ring[idxd->batch_idx_read] += ret;
	if (ret > bcount) {
		/* we have only incomplete ones - set batch completed size to 0 */
		struct idxd_completion *comp = &idxd->batch_comp_ring[idxd->batch_idx_read];
		comp->completed_size = 0;
		/* if there is only one descriptor left, job skipped so set flag appropriately */
		if (b_len - ret == 1)
			comp->status = IDXD_COMP_STATUS_SKIPPED;
	} else {
		struct idxd_completion *comp = &idxd->batch_comp_ring[idxd->batch_idx_read];
		comp->completed_size -= ret;
		/* if there is only one descriptor left, copy status info straight to desc */
		if (comp->completed_size == 1) {
			struct idxd_completion *c = (void *)
					&idxd->desc_ring[(b_start + ret) & idxd->desc_ring_mask];
			comp->status = c->status;
			/* individual descs can be ok without writeback, but not batches */
			if (comp->status == IDXD_COMP_STATUS_INCOMPLETE)
				comp->status = IDXD_COMP_STATUS_SUCCESS;
		} else if (bcount == b_len) {
			/* check if we still have an error, and clear flag if not */
			uint16_t i;
			for (i = b_start + ret; i < b_end; i++) {
				struct idxd_completion *c = (void *)
						&idxd->desc_ring[i & idxd->desc_ring_mask];
				if (c->status > IDXD_COMP_STATUS_SUCCESS)
					break;
			}
			if (i == b_end) /* no errors */
				comp->status = IDXD_COMP_STATUS_SUCCESS;
		}
	}

	return ret;
}

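/* dmadev completed() callback: drain successful jobs batch by batch until an
 * error is hit or no more completed batches remain. last_idx reports the id
 * of the most recently returned job.
 */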
__use_avx2
uint16_t
idxd_completed(void *dev_private, uint16_t qid __rte_unused, uint16_t max_ops,
		uint16_t *last_idx, bool *has_error)
{
	struct idxd_dmadev *idxd = dev_private;
	uint16_t batch, ret = 0;

	do {
		batch = batch_completed(idxd, max_ops - ret, has_error);
		ret += batch;
	} while (batch > 0 && *has_error == false);

	idxd->stats.completed += ret;
	*last_idx = idxd->ids_returned - 1;
	return ret;
}

__use_avx2
uint16_t
idxd_completed_status(void *dev_private, uint16_t qid __rte_unused, uint16_t max_ops,
		uint16_t *last_idx, enum rte_dma_status_code *status)
{
	struct idxd_dmadev *idxd = dev_private;
	uint16_t batch, ret = 0;

	do {
		batch = batch_completed_status(idxd, max_ops - ret, &status[ret]);
		ret += batch;
	} while (batch > 0);

	idxd->stats.completed += ret;
	*last_idx = idxd->ids_returned - 1;
	return ret;
}

int
idxd_dump(const struct rte_dma_dev *dev, FILE *f)
{
	struct idxd_dmadev *idxd = dev->fp_obj->dev_private;
	unsigned int i;

	fprintf(f, "== IDXD Private Data ==\n");
	fprintf(f, "  Portal: %p\n", idxd->portal);
	fprintf(f, "  Config: { ring_size: %u }\n",
			idxd->qcfg.nb_desc);
	fprintf(f, "  Batch ring (sz = %u, max_batches = %u):\n\t",
			idxd->max_batches + 1, idxd->max_batches);
	for (i = 0; i <= idxd->max_batches; i++) {
		fprintf(f, " %u ", idxd->batch_idx_ring[i]);
		if (i == idxd->batch_idx_read && i == idxd->batch_idx_write)
			fprintf(f, "[rd ptr, wr ptr] ");
		else if (i == idxd->batch_idx_read)
			fprintf(f, "[rd ptr] ");
		else if (i == idxd->batch_idx_write)
			fprintf(f, "[wr ptr] ");
		if (i == idxd->max_batches)
			fprintf(f, "\n");
	}

	fprintf(f, "  Curr batch: start = %u, size = %u\n", idxd->batch_start, idxd->batch_size);
	fprintf(f, "  IDS: avail = %u, returned: %u\n", idxd->ids_avail, idxd->ids_returned);
	return 0;
}

int
idxd_stats_get(const struct rte_dma_dev *dev, uint16_t vchan __rte_unused,
		struct rte_dma_stats *stats, uint32_t stats_sz)
{
	struct idxd_dmadev *idxd = dev->fp_obj->dev_private;
	if (stats_sz < sizeof(*stats))
		return -EINVAL;
	*stats = idxd->stats;
	return 0;
}

int
idxd_stats_reset(struct rte_dma_dev *dev, uint16_t vchan __rte_unused)
{
	struct idxd_dmadev *idxd = dev->fp_obj->dev_private;
	idxd->stats = (struct rte_dma_stats){0};
	return 0;
}

int
idxd_info_get(const struct rte_dma_dev *dev, struct rte_dma_info *info, uint32_t size)
{
	struct idxd_dmadev *idxd = dev->fp_obj->dev_private;

	if (size < sizeof(*info))
		return -EINVAL;

	*info = (struct rte_dma_info) {
			.dev_capa = RTE_DMA_CAPA_MEM_TO_MEM | RTE_DMA_CAPA_HANDLES_ERRORS |
				RTE_DMA_CAPA_OPS_COPY | RTE_DMA_CAPA_OPS_FILL,
			.max_vchans = 1,
			.max_desc = 4096,
			.min_desc = 64,
	};
	if (idxd->sva_support)
		info->dev_capa |= RTE_DMA_CAPA_SVA;
	return 0;
}

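/* dmadev burst_capacity() callback: the enqueue headroom is the smaller of
 * the free space left in the descriptor ring and the room remaining in the
 * current batch, and drops straight to zero when no batch slot is free.
 */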
uint16_t
idxd_burst_capacity(const void *dev_private, uint16_t vchan __rte_unused)
{
	const struct idxd_dmadev *idxd = dev_private;
	uint16_t write_idx = idxd->batch_start + idxd->batch_size;
	uint16_t used_space;

	/* Check for space in the batch ring */
	if ((idxd->batch_idx_read == 0 && idxd->batch_idx_write == idxd->max_batches) ||
			idxd->batch_idx_write + 1 == idxd->batch_idx_read)
		return 0;

	/* Subtract and mask to get in correct range */
	used_space = (write_idx - idxd->ids_returned) & idxd->desc_ring_mask;

	const int ret = RTE_MIN((idxd->desc_ring_mask - used_space),
			(idxd->max_batch_size - idxd->batch_size));
	return ret < 0 ? 0 : (uint16_t)ret;
}

int
idxd_configure(struct rte_dma_dev *dev __rte_unused, const struct rte_dma_conf *dev_conf,
		uint32_t conf_sz)
{
	if (sizeof(struct rte_dma_conf) != conf_sz)
		return -EINVAL;

	if (dev_conf->nb_vchans != 1)
		return -EINVAL;
	return 0;
}

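/* The descriptor count is rounded up to a power of two so a simple mask can
 * be used for ring indexing, and the ring itself is allocated at twice that
 * size: batches are submitted as one contiguous run of descriptors, so a
 * batch starting near the end of the ring must be able to spill past it
 * rather than wrap.
 */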
int
idxd_vchan_setup(struct rte_dma_dev *dev, uint16_t vchan __rte_unused,
		const struct rte_dma_vchan_conf *qconf, uint32_t qconf_sz)
{
	struct idxd_dmadev *idxd = dev->fp_obj->dev_private;
	uint16_t max_desc = qconf->nb_desc;

	if (sizeof(struct rte_dma_vchan_conf) != qconf_sz)
		return -EINVAL;

	idxd->qcfg = *qconf;

	if (!rte_is_power_of_2(max_desc))
		max_desc = rte_align32pow2(max_desc);
	IDXD_PMD_DEBUG("DMA dev %u using %u descriptors", dev->data->dev_id, max_desc);
	idxd->desc_ring_mask = max_desc - 1;
	idxd->qcfg.nb_desc = max_desc;

	/* in case we are reconfiguring a device, free any existing memory */
	rte_free(idxd->desc_ring);

	/* allocate the descriptor ring at 2x size as batches can't wrap */
	idxd->desc_ring = rte_zmalloc(NULL, sizeof(*idxd->desc_ring) * max_desc * 2, 0);
	if (idxd->desc_ring == NULL)
		return -ENOMEM;
	idxd->desc_iova = rte_mem_virt2iova(idxd->desc_ring);

	idxd->batch_idx_read = 0;
	idxd->batch_idx_write = 0;
	idxd->batch_start = 0;
	idxd->batch_size = 0;
	idxd->ids_returned = 0;
	idxd->ids_avail = 0;

	memset(idxd->batch_comp_ring, 0, sizeof(*idxd->batch_comp_ring) *
			(idxd->max_batches + 1));
	return 0;
}

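/* Shared create step used by the driver's probe code: allocate the rte_dma_dev,
 * hook up the fast-path ops, and carve the batch completion ring and batch
 * index ring out of a single allocation sized max_batches + 1, since one slot
 * is always left empty to distinguish a full ring from an empty one.
 */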
int
idxd_dmadev_create(const char *name, struct rte_device *dev,
		const struct idxd_dmadev *base_idxd,
		const struct rte_dma_dev_ops *ops)
{
	struct idxd_dmadev *idxd = NULL;
	struct rte_dma_dev *dmadev = NULL;
	int ret = 0;

	RTE_BUILD_BUG_ON(sizeof(struct idxd_hw_desc) != 64);
	RTE_BUILD_BUG_ON(offsetof(struct idxd_hw_desc, size) != 32);
	RTE_BUILD_BUG_ON(sizeof(struct idxd_completion) != 32);

	if (!name) {
		IDXD_PMD_ERR("Invalid name of the device!");
		ret = -EINVAL;
		goto cleanup;
	}

	/* Allocate device structure */
	dmadev = rte_dma_pmd_allocate(name, dev->numa_node, sizeof(struct idxd_dmadev));
	if (dmadev == NULL) {
		IDXD_PMD_ERR("Unable to allocate dma device");
		ret = -ENOMEM;
		goto cleanup;
	}
	dmadev->dev_ops = ops;
	dmadev->device = dev;

	dmadev->fp_obj->copy = idxd_enqueue_copy;
	dmadev->fp_obj->fill = idxd_enqueue_fill;
	dmadev->fp_obj->submit = idxd_submit;
	dmadev->fp_obj->completed = idxd_completed;
	dmadev->fp_obj->completed_status = idxd_completed_status;
	dmadev->fp_obj->burst_capacity = idxd_burst_capacity;

	idxd = dmadev->data->dev_private;
	*idxd = *base_idxd; /* copy over the main fields already passed in */
	idxd->dmadev = dmadev;

	/* allocate batch index ring and completion ring.
	 * The +1 is because we can never fully use
	 * the ring, otherwise read == write means both full and empty.
	 */
	idxd->batch_comp_ring = rte_zmalloc_socket(NULL, (sizeof(idxd->batch_idx_ring[0]) +
			sizeof(idxd->batch_comp_ring[0])) * (idxd->max_batches + 1),
			sizeof(idxd->batch_comp_ring[0]), dev->numa_node);
	if (idxd->batch_comp_ring == NULL) {
		IDXD_PMD_ERR("Unable to reserve memory for batch data");
		ret = -ENOMEM;
		goto cleanup;
	}
	idxd->batch_idx_ring = (void *)&idxd->batch_comp_ring[idxd->max_batches+1];
	idxd->batch_iova = rte_mem_virt2iova(idxd->batch_comp_ring);

	dmadev->fp_obj->dev_private = idxd;

	idxd->dmadev->state = RTE_DMA_DEV_READY;

	return 0;

cleanup:
	if (dmadev)
		rte_dma_pmd_release(name);

	return ret;
}

int idxd_pmd_logtype;

RTE_LOG_REGISTER_DEFAULT(idxd_pmd_logtype, WARNING);