dma/idxd: add datapath structures
[dpdk.git] / drivers / dma / idxd / idxd_common.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright 2021 Intel Corporation
3  */
4
5 #include <rte_malloc.h>
6 #include <rte_common.h>
7 #include <rte_log.h>
8
9 #include "idxd_internal.h"
10
11 #define IDXD_PMD_NAME_STR "dmadev_idxd"
12
13 int
14 idxd_dump(const struct rte_dma_dev *dev, FILE *f)
15 {
16         struct idxd_dmadev *idxd = dev->fp_obj->dev_private;
17         unsigned int i;
18
19         fprintf(f, "== IDXD Private Data ==\n");
20         fprintf(f, "  Portal: %p\n", idxd->portal);
21         fprintf(f, "  Config: { ring_size: %u }\n",
22                         idxd->qcfg.nb_desc);
23         fprintf(f, "  Batch ring (sz = %u, max_batches = %u):\n\t",
24                         idxd->max_batches + 1, idxd->max_batches);
25         for (i = 0; i <= idxd->max_batches; i++) {
26                 fprintf(f, " %u ", idxd->batch_idx_ring[i]);
27                 if (i == idxd->batch_idx_read && i == idxd->batch_idx_write)
28                         fprintf(f, "[rd ptr, wr ptr] ");
29                 else if (i == idxd->batch_idx_read)
30                         fprintf(f, "[rd ptr] ");
31                 else if (i == idxd->batch_idx_write)
32                         fprintf(f, "[wr ptr] ");
33                 if (i == idxd->max_batches)
34                         fprintf(f, "\n");
35         }
36
37         fprintf(f, "  Curr batch: start = %u, size = %u\n", idxd->batch_start, idxd->batch_size);
38         fprintf(f, "  IDS: avail = %u, returned: %u\n", idxd->ids_avail, idxd->ids_returned);
39         return 0;
40 }
41
42 int
43 idxd_dmadev_create(const char *name, struct rte_device *dev,
44                    const struct idxd_dmadev *base_idxd,
45                    const struct rte_dma_dev_ops *ops)
46 {
47         struct idxd_dmadev *idxd = NULL;
48         struct rte_dma_dev *dmadev = NULL;
49         int ret = 0;
50
51         RTE_BUILD_BUG_ON(sizeof(struct idxd_hw_desc) != 64);
52         RTE_BUILD_BUG_ON(offsetof(struct idxd_hw_desc, size) != 32);
53         RTE_BUILD_BUG_ON(sizeof(struct idxd_completion) != 32);
54
55         if (!name) {
56                 IDXD_PMD_ERR("Invalid name of the device!");
57                 ret = -EINVAL;
58                 goto cleanup;
59         }
60
61         /* Allocate device structure */
62         dmadev = rte_dma_pmd_allocate(name, dev->numa_node, sizeof(struct idxd_dmadev));
63         if (dmadev == NULL) {
64                 IDXD_PMD_ERR("Unable to allocate dma device");
65                 ret = -ENOMEM;
66                 goto cleanup;
67         }
68         dmadev->dev_ops = ops;
69         dmadev->device = dev;
70
71         idxd = dmadev->data->dev_private;
72         *idxd = *base_idxd; /* copy over the main fields already passed in */
73         idxd->dmadev = dmadev;
74
75         /* allocate batch index ring and completion ring.
76          * The +1 is because we can never fully use
77          * the ring, otherwise read == write means both full and empty.
78          */
79         idxd->batch_comp_ring = rte_zmalloc_socket(NULL, (sizeof(idxd->batch_idx_ring[0]) +
80                         sizeof(idxd->batch_comp_ring[0]))       * (idxd->max_batches + 1),
81                         sizeof(idxd->batch_comp_ring[0]), dev->numa_node);
82         if (idxd->batch_comp_ring == NULL) {
83                 IDXD_PMD_ERR("Unable to reserve memory for batch data\n");
84                 ret = -ENOMEM;
85                 goto cleanup;
86         }
87         idxd->batch_idx_ring = (void *)&idxd->batch_comp_ring[idxd->max_batches+1];
88         idxd->batch_iova = rte_mem_virt2iova(idxd->batch_comp_ring);
89
90         dmadev->fp_obj->dev_private = idxd;
91
92         idxd->dmadev->state = RTE_DMA_DEV_READY;
93
94         return 0;
95
96 cleanup:
97         if (dmadev)
98                 rte_dma_pmd_release(name);
99
100         return ret;
101 }
102
/* Log type id for this driver; presumably consumed by the IDXD_PMD_*
 * logging macros defined in idxd_internal.h - TODO confirm.
 * NOTE(review): RTE_LOG_REGISTER_DEFAULT below may itself define this
 * symbol, which would make this tentative definition redundant - verify
 * against the macro's expansion before removing.
 */
int idxd_pmd_logtype;

/* Register the driver's log type with a default level of WARNING. */
RTE_LOG_REGISTER_DEFAULT(idxd_pmd_logtype, WARNING);