1 /* SPDX-License-Identifier: BSD-3-Clause
13 #include <sys/types.h>
14 #include <sys/queue.h>
16 #include <rte_byteorder.h>
18 #include <rte_debug.h>
20 #include <rte_memory.h>
21 #include <rte_memcpy.h>
22 #include <rte_memzone.h>
24 #include <rte_per_lcore.h>
25 #include <rte_lcore.h>
26 #include <rte_atomic.h>
27 #include <rte_branch_prediction.h>
28 #include <rte_common.h>
29 #include <rte_malloc.h>
30 #include <rte_errno.h>
32 #include "rte_rawdev.h"
33 #include "rte_rawdev_pmd.h"
35 /* dynamic log identifier */
36 int librawdev_logtype;
38 struct rte_rawdev rte_rawdevices[RTE_RAWDEV_MAX_DEVS];
40 struct rte_rawdev *rte_rawdevs = &rte_rawdevices[0];
42 static struct rte_rawdev_global rawdev_globals = {
46 struct rte_rawdev_global *rte_rawdev_globals = &rawdev_globals;
48 /* Raw device, northbound API implementation */
49 uint8_t __rte_experimental
50 rte_rawdev_count(void)
52 return rte_rawdev_globals->nb_devs;
/*
 * Look up the device id for a device with the given name.
 * Scans the global device table for an entry whose name matches AND
 * which is in the attached state.
 * NOTE(review): this view is missing lines (the pasted numbering jumps):
 * the NULL-name guard, the match comparison tail, the success return and
 * the not-found return are not visible here — verify against the full file.
 */
55 uint16_t __rte_experimental
56 rte_rawdev_get_dev_id(const char *name)
63 for (i = 0; i < rte_rawdev_globals->nb_devs; i++)
64 if ((strcmp(rte_rawdevices[i].name, name)
66 (rte_rawdevices[i].attached ==
72 int __rte_experimental
73 rte_rawdev_socket_id(uint16_t dev_id)
75 struct rte_rawdev *dev;
77 RTE_RAWDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
78 dev = &rte_rawdevs[dev_id];
80 return dev->socket_id;
/*
 * Retrieve device information: delegates private info to the PMD's
 * dev_info_get op, then fills in the generic driver_name/device fields.
 * Returns -EINVAL on bad device id or NULL dev_info, -ENOTSUP if the
 * PMD does not implement dev_info_get.
 * NOTE(review): RTE_FUNC_PTR_OR_ERR_RET is used here to NULL-check a
 * data pointer (dev_info), not a function pointer — works, but unusual.
 * NOTE(review): lines are missing from this view (the final return is
 * not visible) — verify the tail against the full file.
 */
83 int __rte_experimental
84 rte_rawdev_info_get(uint16_t dev_id, struct rte_rawdev_info *dev_info)
86 struct rte_rawdev *rawdev;
88 RTE_RAWDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
89 RTE_FUNC_PTR_OR_ERR_RET(dev_info, -EINVAL);
91 rawdev = &rte_rawdevs[dev_id];
93 RTE_FUNC_PTR_OR_ERR_RET(*rawdev->dev_ops->dev_info_get, -ENOTSUP);
94 (*rawdev->dev_ops->dev_info_get)(rawdev, dev_info->dev_private);
/* Generic (non-PMD) fields are filled from the library's own record. */
98 dev_info->driver_name = rawdev->driver_name;
99 dev_info->device = rawdev->device;
/*
 * Configure a raw device via the PMD's dev_configure op.
 * A started device must be stopped before it can be reconfigured
 * (the error log at original line 120 belongs to that guard).
 * Returns -EINVAL for bad id / NULL conf, -ENOTSUP if the op is absent,
 * otherwise the PMD's return code (logged on failure).
 * NOTE(review): lines are missing from this view — the started-device
 * guard's condition and the function's return statement are not visible.
 */
105 int __rte_experimental
106 rte_rawdev_configure(uint16_t dev_id, struct rte_rawdev_info *dev_conf)
108 struct rte_rawdev *dev;
111 RTE_RAWDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
112 RTE_FUNC_PTR_OR_ERR_RET(dev_conf, -EINVAL);
114 dev = &rte_rawdevs[dev_id];
116 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_configure, -ENOTSUP);
120 "device %d must be stopped to allow configuration", dev_id);
124 /* Configure the device */
125 diag = (*dev->dev_ops->dev_configure)(dev, dev_conf->dev_private);
127 RTE_RDEV_ERR("dev%d dev_configure = %d", dev_id, diag);
/*
 * Fetch the default configuration of one device queue via the PMD's
 * queue_def_conf op. queue_conf is an opaque, PMD-defined object.
 * NOTE(review): the queue_id parameter line and the final return are
 * missing from this view — verify against the full file.
 */
134 int __rte_experimental
135 rte_rawdev_queue_conf_get(uint16_t dev_id,
137 rte_rawdev_obj_t queue_conf)
139 struct rte_rawdev *dev;
141 RTE_RAWDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
142 dev = &rte_rawdevs[dev_id];
144 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_def_conf, -ENOTSUP);
145 (*dev->dev_ops->queue_def_conf)(dev, queue_id, queue_conf);
/*
 * Set up one device queue with a PMD-defined, opaque configuration.
 * Returns -EINVAL for a bad device id, -ENOTSUP when the PMD lacks
 * queue_setup, otherwise the PMD's return code.
 * NOTE(review): the queue_id parameter line is missing from this view
 * (the body clearly uses it) — verify against the full file.
 */
149 int __rte_experimental
150 rte_rawdev_queue_setup(uint16_t dev_id,
152 rte_rawdev_obj_t queue_conf)
154 struct rte_rawdev *dev;
156 RTE_RAWDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
157 dev = &rte_rawdevs[dev_id];
159 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_setup, -ENOTSUP);
160 return (*dev->dev_ops->queue_setup)(dev, queue_id, queue_conf);
163 int __rte_experimental
164 rte_rawdev_queue_release(uint16_t dev_id, uint16_t queue_id)
166 struct rte_rawdev *dev;
168 RTE_RAWDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
169 dev = &rte_rawdevs[dev_id];
171 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_release, -ENOTSUP);
172 return (*dev->dev_ops->queue_release)(dev, queue_id);
175 int __rte_experimental
176 rte_rawdev_get_attr(uint16_t dev_id,
177 const char *attr_name,
178 uint64_t *attr_value)
180 struct rte_rawdev *dev;
182 RTE_RAWDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
183 dev = &rte_rawdevs[dev_id];
185 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->attr_get, -ENOTSUP);
186 return (*dev->dev_ops->attr_get)(dev, attr_name, attr_value);
189 int __rte_experimental
190 rte_rawdev_set_attr(uint16_t dev_id,
191 const char *attr_name,
192 const uint64_t attr_value)
194 struct rte_rawdev *dev;
196 RTE_RAWDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
197 dev = &rte_rawdevs[dev_id];
199 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->attr_set, -ENOTSUP);
200 return (*dev->dev_ops->attr_set)(dev, attr_name, attr_value);
/*
 * Enqueue a batch of buffers to a raw device (PMD enqueue_bufs op).
 * context is an opaque, PMD-defined object passed straight through.
 * Returns -EINVAL for a bad device id, -ENOTSUP when the op is missing,
 * otherwise the PMD's return code.
 * NOTE(review): the count parameter line is missing from this view
 * (the body clearly passes count through) — verify the full prototype.
 */
203 int __rte_experimental
204 rte_rawdev_enqueue_buffers(uint16_t dev_id,
205 struct rte_rawdev_buf **buffers,
207 rte_rawdev_obj_t context)
209 struct rte_rawdev *dev;
211 RTE_RAWDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
212 dev = &rte_rawdevs[dev_id];
214 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->enqueue_bufs, -ENOTSUP);
215 return (*dev->dev_ops->enqueue_bufs)(dev, buffers, count, context);
/*
 * Dequeue a batch of buffers from a raw device (PMD dequeue_bufs op).
 * Mirrors rte_rawdev_enqueue_buffers: same validation, same opaque
 * context pass-through.
 * NOTE(review): the count parameter line is missing from this view —
 * verify the full prototype.
 */
218 int __rte_experimental
219 rte_rawdev_dequeue_buffers(uint16_t dev_id,
220 struct rte_rawdev_buf **buffers,
222 rte_rawdev_obj_t context)
224 struct rte_rawdev *dev;
226 RTE_RAWDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
227 dev = &rte_rawdevs[dev_id];
229 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dequeue_bufs, -ENOTSUP);
230 return (*dev->dev_ops->dequeue_bufs)(dev, buffers, count, context);
233 int __rte_experimental
234 rte_rawdev_dump(uint16_t dev_id, FILE *f)
236 struct rte_rawdev *dev;
238 RTE_RAWDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
239 dev = &rte_rawdevs[dev_id];
241 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dump, -ENOTSUP);
242 return (*dev->dev_ops->dump)(dev, f);
/*
 * Ask the PMD how many xstats it exposes by calling xstats_get_names
 * with a NULL array and size 0 (the conventional "count only" query).
 * NOTE(review): the return-type line (presumably static int) is missing
 * from this view; note also there is no dev_id validation here — callers
 * must validate first (rte_rawdev_xstats_names_get does).
 */
246 xstats_get_count(uint16_t dev_id)
248 struct rte_rawdev *dev = &rte_rawdevs[dev_id];
250 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->xstats_get_names, -ENOTSUP);
251 return (*dev->dev_ops->xstats_get_names)(dev, NULL, 0);
/*
 * Retrieve xstat names. Follows the standard DPDK xstats contract:
 * if the caller's array is NULL or too small, return the required
 * entry count (or the negative error from the count query) without
 * writing anything; otherwise forward to the PMD to fill the array.
 * NOTE(review): the size parameter line is missing from this view —
 * verify the full prototype (size is compared as both int and unsigned).
 */
254 int __rte_experimental
255 rte_rawdev_xstats_names_get(uint16_t dev_id,
256 struct rte_rawdev_xstats_name *xstats_names,
259 const struct rte_rawdev *dev;
260 int cnt_expected_entries;
262 RTE_RAWDEV_VALID_DEVID_OR_ERR_RET(dev_id, -ENODEV);
264 cnt_expected_entries = xstats_get_count(dev_id);
/* Array absent/too small: report how many entries would be needed. */
266 if (xstats_names == NULL || cnt_expected_entries < 0 ||
267 (int)size < cnt_expected_entries || size <= 0)
268 return cnt_expected_entries;
/* Device already validated above; the op was probed by xstats_get_count,
 * but re-check here since that call may have short-circuited. */
270 dev = &rte_rawdevs[dev_id];
272 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->xstats_get_names, -ENOTSUP);
273 return (*dev->dev_ops->xstats_get_names)(dev, xstats_names, size);
276 /* retrieve rawdev extended statistics */
/*
 * Fetch xstat values for the given stat ids (PMD xstats_get op).
 * Returns -ENODEV for a bad device id, -ENOTSUP when the op is missing,
 * otherwise the PMD's return code.
 * NOTE(review): the values[]/n parameter lines are missing from this
 * view (the body forwards both) — verify the full prototype.
 */
277 int __rte_experimental
278 rte_rawdev_xstats_get(uint16_t dev_id,
279 const unsigned int ids[],
283 RTE_RAWDEV_VALID_DEVID_OR_ERR_RET(dev_id, -ENODEV);
284 const struct rte_rawdev *dev = &rte_rawdevs[dev_id];
286 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->xstats_get, -ENOTSUP);
287 return (*dev->dev_ops->xstats_get)(dev, ids, values, n);
/*
 * Fetch a single xstat value by name (PMD xstats_get_by_name op).
 * The id out-parameter is normalised so the driver never sees NULL:
 * a caller-supplied id is pre-set to (unsigned)-1, otherwise a local
 * temp is substituted.
 * NOTE(review): the name/id parameter lines and the if/else around the
 * id handling are missing from this view — verify against the full file.
 * NOTE(review): return type is uint64_t yet the op-missing path returns
 * -ENOTSUP, which wraps to a huge unsigned value — callers cannot
 * distinguish that from a real counter; flag upstream.
 */
290 uint64_t __rte_experimental
291 rte_rawdev_xstats_by_name_get(uint16_t dev_id,
295 RTE_RAWDEV_VALID_DEVID_OR_ERR_RET(dev_id, 0);
296 const struct rte_rawdev *dev = &rte_rawdevs[dev_id];
297 unsigned int temp = -1;
300 *id = (unsigned int)-1;
302 id = &temp; /* driver never gets a NULL value */
304 /* implemented by driver */
305 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->xstats_get_by_name, -ENOTSUP);
306 return (*dev->dev_ops->xstats_get_by_name)(dev, name, id);
309 int __rte_experimental
310 rte_rawdev_xstats_reset(uint16_t dev_id,
311 const uint32_t ids[], uint32_t nb_ids)
313 RTE_RAWDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
314 struct rte_rawdev *dev = &rte_rawdevs[dev_id];
316 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->xstats_reset, -ENOTSUP);
317 return (*dev->dev_ops->xstats_reset)(dev, ids, nb_ids);
320 int __rte_experimental
321 rte_rawdev_firmware_status_get(uint16_t dev_id, rte_rawdev_obj_t status_info)
323 RTE_RAWDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
324 struct rte_rawdev *dev = &rte_rawdevs[dev_id];
326 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->firmware_status_get, -ENOTSUP);
327 return (*dev->dev_ops->firmware_status_get)(dev, status_info);
330 int __rte_experimental
331 rte_rawdev_firmware_version_get(uint16_t dev_id, rte_rawdev_obj_t version_info)
333 RTE_RAWDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
334 struct rte_rawdev *dev = &rte_rawdevs[dev_id];
336 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->firmware_version_get, -ENOTSUP);
337 return (*dev->dev_ops->firmware_version_get)(dev, version_info);
340 int __rte_experimental
341 rte_rawdev_firmware_load(uint16_t dev_id, rte_rawdev_obj_t firmware_image)
343 RTE_RAWDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
344 struct rte_rawdev *dev = &rte_rawdevs[dev_id];
349 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->firmware_load, -ENOTSUP);
350 return (*dev->dev_ops->firmware_load)(dev, firmware_image);
353 int __rte_experimental
354 rte_rawdev_firmware_unload(uint16_t dev_id)
356 RTE_RAWDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
357 struct rte_rawdev *dev = &rte_rawdevs[dev_id];
359 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->firmware_load, -ENOTSUP);
360 return (*dev->dev_ops->firmware_unload)(dev);
/*
 * Trigger the PMD's self-test hook.
 * Returns -EINVAL for a bad device id, -ENOTSUP when the PMD lacks
 * dev_selftest, otherwise the op's return code.
 * NOTE(review): the op is invoked with no arguments even though dev
 * was fetched above — confirm the rawdev_selftest_t prototype; upstream
 * variants pass dev_id to this op.
 */
363 int __rte_experimental
364 rte_rawdev_selftest(uint16_t dev_id)
366 RTE_RAWDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
367 struct rte_rawdev *dev = &rte_rawdevs[dev_id];
369 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_selftest, -ENOTSUP);
370 return (*dev->dev_ops->dev_selftest)();
373 int __rte_experimental
374 rte_rawdev_start(uint16_t dev_id)
376 struct rte_rawdev *dev;
379 RTE_RDEV_DEBUG("Start dev_id=%" PRIu8, dev_id);
381 RTE_RAWDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
382 dev = &rte_rawdevs[dev_id];
383 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_start, -ENOTSUP);
385 if (dev->started != 0) {
386 RTE_RDEV_ERR("Device with dev_id=%" PRIu8 "already started",
391 diag = (*dev->dev_ops->dev_start)(dev);
400 void __rte_experimental
401 rte_rawdev_stop(uint16_t dev_id)
403 struct rte_rawdev *dev;
405 RTE_RDEV_DEBUG("Stop dev_id=%" PRIu8, dev_id);
407 RTE_RAWDEV_VALID_DEVID_OR_RET(dev_id);
408 dev = &rte_rawdevs[dev_id];
410 RTE_FUNC_PTR_OR_RET(*dev->dev_ops->dev_stop);
412 if (dev->started == 0) {
413 RTE_RDEV_ERR("Device with dev_id=%" PRIu8 "already stopped",
418 (*dev->dev_ops->dev_stop)(dev);
/*
 * Close a raw device (PMD dev_close op). A started device is rejected —
 * it must be stopped first.
 * Returns -EINVAL for a bad device id, -ENOTSUP when the op is missing,
 * otherwise the PMD's return code.
 * NOTE(review): lines are missing from this view — the error return for
 * the still-started case is not visible; verify it returns an error
 * (upstream uses -EBUSY) rather than falling through.
 */
422 int __rte_experimental
423 rte_rawdev_close(uint16_t dev_id)
425 struct rte_rawdev *dev;
427 RTE_RAWDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
428 dev = &rte_rawdevs[dev_id];
430 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_close, -ENOTSUP);
431 /* Device must be stopped before it can be closed */
432 if (dev->started == 1) {
433 RTE_RDEV_ERR("Device %u must be stopped before closing",
438 return (*dev->dev_ops->dev_close)(dev);
441 int __rte_experimental
442 rte_rawdev_reset(uint16_t dev_id)
444 struct rte_rawdev *dev;
446 RTE_RAWDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
447 dev = &rte_rawdevs[dev_id];
449 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_reset, -ENOTSUP);
450 /* Reset is not dependent on state of the device */
451 return (*dev->dev_ops->dev_reset)(dev);
/*
 * Find the first detached slot in the global device table, or
 * RTE_RAWDEV_MAX_DEVS when the table is full.
 * NOTE(review): lines are missing from this view — the dev_id
 * declaration, the matched-state constant and the in-loop return are
 * not visible. Also note the uint8_t return type caps usable slots at
 * 255; fine only while RTE_RAWDEV_MAX_DEVS <= 255 — confirm.
 */
454 static inline uint8_t
455 rte_rawdev_find_free_device_index(void)
459 for (dev_id = 0; dev_id < RTE_RAWDEV_MAX_DEVS; dev_id++) {
460 if (rte_rawdevs[dev_id].attached ==
465 return RTE_RAWDEV_MAX_DEVS;
/*
 * PMD-side allocator: reserve a device slot, allocate the PMD's private
 * data area on the requested NUMA socket, record the name/id/socket,
 * mark the slot attached and bump the global device count.
 * NOTE(review): lines are missing from this view — the early-return
 * bodies, the zmalloc size/socket arguments and the final return are
 * not visible; verify error paths free/return correctly in the full file.
 * NOTE(review): the log strings say "Event device" and "Skeleton dev" —
 * clearly copy-pasted from eventdev/skeleton code; the messages should
 * name raw devices (string change deferred; comments only here).
 */
468 struct rte_rawdev * __rte_experimental
469 rte_rawdev_pmd_allocate(const char *name, size_t dev_priv_size, int socket_id)
471 struct rte_rawdev *rawdev;
/* Reject duplicate names up front. */
474 if (rte_rawdev_pmd_get_named_dev(name) != NULL) {
475 RTE_RDEV_ERR("Event device with name %s already allocated!",
480 dev_id = rte_rawdev_find_free_device_index();
481 if (dev_id == RTE_RAWDEV_MAX_DEVS) {
482 RTE_RDEV_ERR("Reached maximum number of raw devices");
486 rawdev = &rte_rawdevs[dev_id];
/* Private area is zeroed and placed on the caller's NUMA socket. */
488 rawdev->dev_private = rte_zmalloc_socket("rawdev private",
492 if (!rawdev->dev_private) {
493 RTE_RDEV_ERR("Unable to allocate memory to Skeleton dev");
498 rawdev->dev_id = dev_id;
499 rawdev->socket_id = socket_id;
501 snprintf(rawdev->name, RTE_RAWDEV_NAME_MAX_LEN, "%s", name);
503 rawdev->attached = RTE_RAWDEV_ATTACHED;
504 rawdev_globals.nb_devs++;
/*
 * PMD-side release: close the device, detach its slot, decrement the
 * global count and free the private data area.
 * NOTE(review): lines are missing from this view — the parameter
 * validation, handling of the close() return value, and the function's
 * own return are not visible; verify against the full file.
 * NOTE(review): the `if (rawdev->dev_private)` guard before rte_free is
 * redundant (rte_free(NULL) is a no-op) — harmless, left as-is.
 */
509 int __rte_experimental
510 rte_rawdev_pmd_release(struct rte_rawdev *rawdev)
517 ret = rte_rawdev_close(rawdev->dev_id);
521 rawdev->attached = RTE_RAWDEV_DETACHED;
522 rawdev_globals.nb_devs--;
525 rawdev->socket_id = 0;
526 rawdev->dev_ops = NULL;
527 if (rawdev->dev_private) {
528 rte_free(rawdev->dev_private);
529 rawdev->dev_private = NULL;
535 RTE_INIT(librawdev_init_log)
537 librawdev_logtype = rte_log_register("lib.rawdev");
538 if (librawdev_logtype >= 0)
539 rte_log_set_level(librawdev_logtype, RTE_LOG_INFO);