drivers/raw/ioat/ioat_common.c
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2020 Intel Corporation
 */

#include <rte_rawdev_pmd.h>
#include <rte_memzone.h>
#include <rte_common.h>
#include <rte_string_fns.h>

#include "ioat_private.h"

static const char * const xstat_names[] = {
                "failed_enqueues", "successful_enqueues",
                "copies_started", "copies_completed"
};

int
ioat_xstats_get(const struct rte_rawdev *dev, const unsigned int ids[],
                uint64_t values[], unsigned int n)
{
        const struct rte_ioat_rawdev *ioat = dev->dev_private;
        const uint64_t *stats = (const void *)&ioat->xstats;
        unsigned int i;

        for (i = 0; i < n; i++) {
                if (ids[i] >= sizeof(ioat->xstats)/sizeof(*stats))
                        values[i] = 0;
                else
                        values[i] = stats[ids[i]];
        }
        return n;
}
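
/*
 * Illustrative note (not part of the driver): the xstats struct is read as
 * a flat array of uint64_t counters, so an id selects the field at that
 * position, assuming the field order matches xstat_names above:
 *
 *     unsigned int ids[] = {0, 1, 2, 3};
 *     uint64_t vals[RTE_DIM(ids)];
 *     rte_rawdev_xstats_get(dev_id, ids, vals, RTE_DIM(ids));
 *     // vals[0] == failed_enqueues, vals[3] == copies_completed
 */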

int
ioat_xstats_get_names(const struct rte_rawdev *dev,
                struct rte_rawdev_xstats_name *names,
                unsigned int size)
{
        unsigned int i;

        RTE_SET_USED(dev);
        if (size < RTE_DIM(xstat_names))
                return RTE_DIM(xstat_names);

        for (i = 0; i < RTE_DIM(xstat_names); i++)
                strlcpy(names[i].name, xstat_names[i], sizeof(names[i].name));

        return RTE_DIM(xstat_names);
}
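
/*
 * Usage sketch (hypothetical caller, not part of this file): a size smaller
 * than the stat count only reports the required count without touching the
 * names array, so the conventional pattern is to query first, then allocate:
 *
 *     int n = rte_rawdev_xstats_names_get(dev_id, NULL, 0);
 *     struct rte_rawdev_xstats_name *names = malloc(n * sizeof(*names));
 *     rte_rawdev_xstats_names_get(dev_id, names, n);
 */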

int
ioat_xstats_reset(struct rte_rawdev *dev, const uint32_t *ids, uint32_t nb_ids)
{
        struct rte_ioat_rawdev *ioat = dev->dev_private;
        uint64_t *stats = (void *)&ioat->xstats;
        unsigned int i;

        if (!ids) {
                memset(&ioat->xstats, 0, sizeof(ioat->xstats));
                return 0;
        }

        for (i = 0; i < nb_ids; i++)
                if (ids[i] < sizeof(ioat->xstats)/sizeof(*stats))
                        stats[ids[i]] = 0;

        return 0;
}
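
/*
 * Usage sketch (hypothetical caller, not part of this file): a NULL id
 * list clears every counter; an explicit list clears only those ids:
 *
 *     rte_rawdev_xstats_reset(dev_id, NULL, 0);   // reset all
 *     uint32_t id = 0;                            // "failed_enqueues"
 *     rte_rawdev_xstats_reset(dev_id, &id, 1);    // reset one counter
 */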

int
idxd_rawdev_close(struct rte_rawdev *dev __rte_unused)
{
        return 0;
}

int
idxd_dev_dump(struct rte_rawdev *dev, FILE *f)
{
        struct idxd_rawdev *idxd = dev->dev_private;
        struct rte_idxd_rawdev *rte_idxd = &idxd->public;
        int i;

        fprintf(f, "Raw Device #%d\n", dev->dev_id);
        fprintf(f, "Driver: %s\n\n", dev->driver_name);

        fprintf(f, "Portal: %p\n", rte_idxd->portal);
        fprintf(f, "Config: {ring_size: %u, hdls_disable: %u}\n\n",
                        rte_idxd->cfg.ring_size, rte_idxd->cfg.hdls_disable);

        fprintf(f, "max batches: %u\n", rte_idxd->max_batches);
        fprintf(f, "batch idx read: %u\n", rte_idxd->batch_idx_read);
        fprintf(f, "batch idx write: %u\n", rte_idxd->batch_idx_write);
        fprintf(f, "batch idxes:");
        for (i = 0; i < rte_idxd->max_batches + 1; i++)
                fprintf(f, "%u ", rte_idxd->batch_idx_ring[i]);
        fprintf(f, "\n\n");

        fprintf(f, "hdls read: %u\n", rte_idxd->hdls_read);
        fprintf(f, "hdls avail: %u\n", rte_idxd->hdls_avail);
        fprintf(f, "batch start: %u\n", rte_idxd->batch_start);
        fprintf(f, "batch size: %u\n", rte_idxd->batch_size);

        return 0;
}

int
idxd_dev_info_get(struct rte_rawdev *dev, rte_rawdev_obj_t dev_info,
                size_t info_size)
{
        struct rte_ioat_rawdev_config *cfg = dev_info;
        struct idxd_rawdev *idxd = dev->dev_private;
        struct rte_idxd_rawdev *rte_idxd = &idxd->public;

        if (info_size != sizeof(*cfg))
                return -EINVAL;

        if (cfg != NULL)
                *cfg = rte_idxd->cfg;
        return 0;
}

int
idxd_dev_configure(const struct rte_rawdev *dev,
                rte_rawdev_obj_t config, size_t config_size)
{
        struct idxd_rawdev *idxd = dev->dev_private;
        struct rte_idxd_rawdev *rte_idxd = &idxd->public;
        struct rte_ioat_rawdev_config *cfg = config;
        uint16_t max_desc;

        if (config_size != sizeof(*cfg))
                return -EINVAL;

        if (dev->started) {
                IOAT_PMD_ERR("%s: Error, device is started.", __func__);
                return -EAGAIN;
        }

        rte_idxd->cfg = *cfg;
        max_desc = cfg->ring_size;

        if (!rte_is_power_of_2(max_desc))
                max_desc = rte_align32pow2(max_desc);
        IOAT_PMD_DEBUG("Rawdev %u using %u descriptors",
                        dev->dev_id, max_desc);
        rte_idxd->desc_ring_mask = max_desc - 1;

        /* in case we are reconfiguring a device, free any existing memory */
        rte_free(rte_idxd->desc_ring);
        rte_free(rte_idxd->hdl_ring);
        rte_free(rte_idxd->hdl_ring_flags);

        /* allocate the descriptor ring at 2x size as batches can't wrap */
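        /* (a batch must occupy contiguous slots, so with twice the slots a
         * batch that begins near the mask boundary can run past it rather
         * than wrap mid-batch)
         */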
        rte_idxd->desc_ring = rte_zmalloc(NULL,
                        sizeof(*rte_idxd->desc_ring) * max_desc * 2, 0);
        if (rte_idxd->desc_ring == NULL)
                return -ENOMEM;
        rte_idxd->desc_iova = rte_mem_virt2iova(rte_idxd->desc_ring);

        rte_idxd->hdl_ring = rte_zmalloc(NULL,
                        sizeof(*rte_idxd->hdl_ring) * max_desc, 0);
        if (rte_idxd->hdl_ring == NULL) {
                rte_free(rte_idxd->desc_ring);
                rte_idxd->desc_ring = NULL;
                return -ENOMEM;
        }
        rte_idxd->hdl_ring_flags = rte_zmalloc(NULL,
                        sizeof(*rte_idxd->hdl_ring_flags) * max_desc, 0);
        if (rte_idxd->hdl_ring_flags == NULL) {
                rte_free(rte_idxd->desc_ring);
                rte_free(rte_idxd->hdl_ring);
                rte_idxd->desc_ring = NULL;
                rte_idxd->hdl_ring = NULL;
                return -ENOMEM;
        }
        rte_idxd->hdls_read = rte_idxd->batch_start = 0;
        rte_idxd->batch_size = 0;
        rte_idxd->hdls_avail = 0;

        return 0;
}
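
/*
 * Worked example (hypothetical values, not part of the driver): a caller
 * requesting cfg->ring_size = 1000 gets rte_align32pow2(1000) == 1024
 * descriptors, so desc_ring_mask == 1023 and ring slots can be selected
 * with a cheap "idx & desc_ring_mask" instead of a modulo:
 *
 *     struct rte_ioat_rawdev_config cfg = { .ring_size = 1000 };
 *     struct rte_rawdev_info info = { .dev_private = &cfg };
 *     rte_rawdev_configure(dev_id, &info, sizeof(cfg));
 */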

int
idxd_rawdev_create(const char *name, struct rte_device *dev,
                   const struct idxd_rawdev *base_idxd,
                   const struct rte_rawdev_ops *ops)
{
        struct idxd_rawdev *idxd;
        struct rte_idxd_rawdev *public;
        struct rte_rawdev *rawdev = NULL;
        const struct rte_memzone *mz = NULL;
        char mz_name[RTE_MEMZONE_NAMESIZE];
        int ret = 0;

        RTE_BUILD_BUG_ON(sizeof(struct rte_idxd_hw_desc) != 64);
        RTE_BUILD_BUG_ON(offsetof(struct rte_idxd_hw_desc, size) != 32);
        RTE_BUILD_BUG_ON(sizeof(struct rte_idxd_completion) != 32);

        if (!name) {
                IOAT_PMD_ERR("Invalid name of the device!");
                ret = -EINVAL;
                goto cleanup;
        }

        /* Allocate device structure */
        rawdev = rte_rawdev_pmd_allocate(name, sizeof(struct idxd_rawdev),
                                         dev->numa_node);
        if (rawdev == NULL) {
                IOAT_PMD_ERR("Unable to allocate raw device");
                ret = -ENOMEM;
                goto cleanup;
        }

        /* Allocate memory for the primary process, or look up the primary
         * process's memzone when running as a secondary process.
         */
        snprintf(mz_name, sizeof(mz_name), "rawdev%u_private", rawdev->dev_id);
        if (rte_eal_process_type() == RTE_PROC_SECONDARY) {
                mz = rte_memzone_lookup(mz_name);
                if (mz == NULL) {
                        IOAT_PMD_ERR("Unable to look up memzone for private data\n");
                        ret = -ENOMEM;
                        goto cleanup;
                }
                rawdev->dev_private = mz->addr;
                rawdev->dev_ops = ops;
                rawdev->device = dev;
                return 0;
        }
        mz = rte_memzone_reserve(mz_name, sizeof(struct idxd_rawdev),
                        dev->numa_node, RTE_MEMZONE_IOVA_CONTIG);
        if (mz == NULL) {
                IOAT_PMD_ERR("Unable to reserve memzone for private data\n");
                ret = -ENOMEM;
                goto cleanup;
        }
        rawdev->dev_private = mz->addr;
        rawdev->dev_ops = ops;
        rawdev->device = dev;
        rawdev->driver_name = IOAT_PMD_RAWDEV_NAME_STR;

        idxd = rawdev->dev_private;
        *idxd = *base_idxd; /* copy over the main fields already passed in */
        idxd->rawdev = rawdev;
        idxd->mz = mz;

        public = &idxd->public;
        public->type = RTE_IDXD_DEV;
        public->max_batches = idxd->max_batches;
        public->batch_idx_read = 0;
        public->batch_idx_write = 0;
        /* allocate batch index ring. The +1 is because we can never fully use
         * the ring, otherwise read == write means both full and empty.
         */
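        /* Illustrative (hypothetical max_batches == 4): the ring then has 5
         * slots; it is empty when read == write and full when
         * (write + 1) % 5 == read, so at most 4 batches are in flight.
         */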
        public->batch_idx_ring = rte_zmalloc(NULL,
                        sizeof(uint16_t) * (idxd->max_batches + 1), 0);
        if (public->batch_idx_ring == NULL) {
                IOAT_PMD_ERR("Unable to allocate memory for batch data\n");
                ret = -ENOMEM;
                goto cleanup;
        }

        return 0;

cleanup:
        if (mz)
                rte_memzone_free(mz);
        if (rawdev)
                rte_rawdev_pmd_release(rawdev);

        return ret;
}
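
/*
 * Usage sketch (hypothetical probe path, not part of this file): a bus or
 * PCI probe callback fills in the hardware-specific fields of a template
 * idxd_rawdev and hands it to idxd_rawdev_create():
 *
 *     static const struct rte_rawdev_ops my_ops = {
 *             .dev_configure = idxd_dev_configure,
 *             .dev_info_get = idxd_dev_info_get,
 *             .dev_close = idxd_rawdev_close,
 *             .dump = idxd_dev_dump,
 *     };
 *
 *     struct idxd_rawdev base = { .max_batches = 32 };
 *     int ret = idxd_rawdev_create(dev->name, dev, &base, &my_ops);
 */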