/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2017 NXP
 */
13 #include <sys/types.h>
14 #include <sys/queue.h>
16 #include <rte_byteorder.h>
18 #include <rte_debug.h>
20 #include <rte_memory.h>
21 #include <rte_memcpy.h>
22 #include <rte_memzone.h>
24 #include <rte_per_lcore.h>
25 #include <rte_lcore.h>
26 #include <rte_atomic.h>
27 #include <rte_branch_prediction.h>
28 #include <rte_common.h>
29 #include <rte_malloc.h>
30 #include <rte_errno.h>
32 #include "rte_rawdev.h"
33 #include "rte_rawdev_pmd.h"
35 /* dynamic log identifier */
36 int librawdev_logtype;
38 struct rte_rawdev rte_rawdevices[RTE_RAWDEV_MAX_DEVS];
40 struct rte_rawdev *rte_rawdevs = &rte_rawdevices[0];
42 static struct rte_rawdev_global rawdev_globals = {
46 struct rte_rawdev_global *rte_rawdev_globals = &rawdev_globals;
48 /* Raw device, northbound API implementation */
50 rte_rawdev_count(void)
52 return rte_rawdev_globals->nb_devs;
56 rte_rawdev_get_dev_id(const char *name)
63 for (i = 0; i < rte_rawdev_globals->nb_devs; i++)
64 if ((strcmp(rte_rawdevices[i].name, name)
66 (rte_rawdevices[i].attached ==
73 rte_rawdev_socket_id(uint16_t dev_id)
75 struct rte_rawdev *dev;
77 RTE_RAWDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
78 dev = &rte_rawdevs[dev_id];
80 return dev->socket_id;
84 rte_rawdev_info_get(uint16_t dev_id, struct rte_rawdev_info *dev_info)
86 struct rte_rawdev *rawdev;
88 RTE_RAWDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
89 RTE_FUNC_PTR_OR_ERR_RET(dev_info, -EINVAL);
91 rawdev = &rte_rawdevs[dev_id];
93 RTE_FUNC_PTR_OR_ERR_RET(*rawdev->dev_ops->dev_info_get, -ENOTSUP);
94 (*rawdev->dev_ops->dev_info_get)(rawdev, dev_info->dev_private);
98 dev_info->driver_name = rawdev->driver_name;
99 dev_info->device = rawdev->device;
106 rte_rawdev_configure(uint16_t dev_id, struct rte_rawdev_info *dev_conf)
108 struct rte_rawdev *dev;
111 RTE_RAWDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
112 RTE_FUNC_PTR_OR_ERR_RET(dev_conf, -EINVAL);
114 dev = &rte_rawdevs[dev_id];
116 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_configure, -ENOTSUP);
120 "device %d must be stopped to allow configuration", dev_id);
124 /* Configure the device */
125 diag = (*dev->dev_ops->dev_configure)(dev, dev_conf->dev_private);
127 RTE_RDEV_ERR("dev%d dev_configure = %d", dev_id, diag);
135 rte_rawdev_queue_conf_get(uint16_t dev_id,
137 rte_rawdev_obj_t queue_conf)
139 struct rte_rawdev *dev;
141 RTE_RAWDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
142 dev = &rte_rawdevs[dev_id];
144 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_def_conf, -ENOTSUP);
145 (*dev->dev_ops->queue_def_conf)(dev, queue_id, queue_conf);
150 rte_rawdev_queue_setup(uint16_t dev_id,
152 rte_rawdev_obj_t queue_conf)
154 struct rte_rawdev *dev;
156 RTE_RAWDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
157 dev = &rte_rawdevs[dev_id];
159 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_setup, -ENOTSUP);
160 return (*dev->dev_ops->queue_setup)(dev, queue_id, queue_conf);
164 rte_rawdev_queue_release(uint16_t dev_id, uint16_t queue_id)
166 struct rte_rawdev *dev;
168 RTE_RAWDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
169 dev = &rte_rawdevs[dev_id];
171 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_release, -ENOTSUP);
172 return (*dev->dev_ops->queue_release)(dev, queue_id);
176 rte_rawdev_queue_count(uint16_t dev_id)
178 struct rte_rawdev *dev;
180 RTE_RAWDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
181 dev = &rte_rawdevs[dev_id];
183 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_count, -ENOTSUP);
184 return (*dev->dev_ops->queue_count)(dev);
188 rte_rawdev_get_attr(uint16_t dev_id,
189 const char *attr_name,
190 uint64_t *attr_value)
192 struct rte_rawdev *dev;
194 RTE_RAWDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
195 dev = &rte_rawdevs[dev_id];
197 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->attr_get, -ENOTSUP);
198 return (*dev->dev_ops->attr_get)(dev, attr_name, attr_value);
202 rte_rawdev_set_attr(uint16_t dev_id,
203 const char *attr_name,
204 const uint64_t attr_value)
206 struct rte_rawdev *dev;
208 RTE_RAWDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
209 dev = &rte_rawdevs[dev_id];
211 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->attr_set, -ENOTSUP);
212 return (*dev->dev_ops->attr_set)(dev, attr_name, attr_value);
216 rte_rawdev_enqueue_buffers(uint16_t dev_id,
217 struct rte_rawdev_buf **buffers,
219 rte_rawdev_obj_t context)
221 struct rte_rawdev *dev;
223 RTE_RAWDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
224 dev = &rte_rawdevs[dev_id];
226 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->enqueue_bufs, -ENOTSUP);
227 return (*dev->dev_ops->enqueue_bufs)(dev, buffers, count, context);
231 rte_rawdev_dequeue_buffers(uint16_t dev_id,
232 struct rte_rawdev_buf **buffers,
234 rte_rawdev_obj_t context)
236 struct rte_rawdev *dev;
238 RTE_RAWDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
239 dev = &rte_rawdevs[dev_id];
241 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dequeue_bufs, -ENOTSUP);
242 return (*dev->dev_ops->dequeue_bufs)(dev, buffers, count, context);
246 rte_rawdev_dump(uint16_t dev_id, FILE *f)
248 struct rte_rawdev *dev;
250 RTE_RAWDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
251 dev = &rte_rawdevs[dev_id];
253 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dump, -ENOTSUP);
254 return (*dev->dev_ops->dump)(dev, f);
258 xstats_get_count(uint16_t dev_id)
260 struct rte_rawdev *dev = &rte_rawdevs[dev_id];
262 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->xstats_get_names, -ENOTSUP);
263 return (*dev->dev_ops->xstats_get_names)(dev, NULL, 0);
267 rte_rawdev_xstats_names_get(uint16_t dev_id,
268 struct rte_rawdev_xstats_name *xstats_names,
271 const struct rte_rawdev *dev;
272 int cnt_expected_entries;
274 RTE_RAWDEV_VALID_DEVID_OR_ERR_RET(dev_id, -ENODEV);
276 cnt_expected_entries = xstats_get_count(dev_id);
278 if (xstats_names == NULL || cnt_expected_entries < 0 ||
279 (int)size < cnt_expected_entries || size <= 0)
280 return cnt_expected_entries;
282 dev = &rte_rawdevs[dev_id];
284 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->xstats_get_names, -ENOTSUP);
285 return (*dev->dev_ops->xstats_get_names)(dev, xstats_names, size);
288 /* retrieve rawdev extended statistics */
290 rte_rawdev_xstats_get(uint16_t dev_id,
291 const unsigned int ids[],
295 RTE_RAWDEV_VALID_DEVID_OR_ERR_RET(dev_id, -ENODEV);
296 const struct rte_rawdev *dev = &rte_rawdevs[dev_id];
298 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->xstats_get, -ENOTSUP);
299 return (*dev->dev_ops->xstats_get)(dev, ids, values, n);
303 rte_rawdev_xstats_by_name_get(uint16_t dev_id,
307 RTE_RAWDEV_VALID_DEVID_OR_ERR_RET(dev_id, 0);
308 const struct rte_rawdev *dev = &rte_rawdevs[dev_id];
309 unsigned int temp = -1;
312 *id = (unsigned int)-1;
314 id = &temp; /* driver never gets a NULL value */
316 /* implemented by driver */
317 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->xstats_get_by_name, -ENOTSUP);
318 return (*dev->dev_ops->xstats_get_by_name)(dev, name, id);
322 rte_rawdev_xstats_reset(uint16_t dev_id,
323 const uint32_t ids[], uint32_t nb_ids)
325 RTE_RAWDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
326 struct rte_rawdev *dev = &rte_rawdevs[dev_id];
328 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->xstats_reset, -ENOTSUP);
329 return (*dev->dev_ops->xstats_reset)(dev, ids, nb_ids);
333 rte_rawdev_firmware_status_get(uint16_t dev_id, rte_rawdev_obj_t status_info)
335 RTE_RAWDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
336 struct rte_rawdev *dev = &rte_rawdevs[dev_id];
338 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->firmware_status_get, -ENOTSUP);
339 return (*dev->dev_ops->firmware_status_get)(dev, status_info);
343 rte_rawdev_firmware_version_get(uint16_t dev_id, rte_rawdev_obj_t version_info)
345 RTE_RAWDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
346 struct rte_rawdev *dev = &rte_rawdevs[dev_id];
348 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->firmware_version_get, -ENOTSUP);
349 return (*dev->dev_ops->firmware_version_get)(dev, version_info);
353 rte_rawdev_firmware_load(uint16_t dev_id, rte_rawdev_obj_t firmware_image)
355 RTE_RAWDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
356 struct rte_rawdev *dev = &rte_rawdevs[dev_id];
361 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->firmware_load, -ENOTSUP);
362 return (*dev->dev_ops->firmware_load)(dev, firmware_image);
366 rte_rawdev_firmware_unload(uint16_t dev_id)
368 RTE_RAWDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
369 struct rte_rawdev *dev = &rte_rawdevs[dev_id];
371 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->firmware_load, -ENOTSUP);
372 return (*dev->dev_ops->firmware_unload)(dev);
376 rte_rawdev_selftest(uint16_t dev_id)
378 RTE_RAWDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
379 struct rte_rawdev *dev = &rte_rawdevs[dev_id];
381 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_selftest, -ENOTSUP);
382 return (*dev->dev_ops->dev_selftest)();
386 rte_rawdev_start(uint16_t dev_id)
388 struct rte_rawdev *dev;
391 RTE_RDEV_DEBUG("Start dev_id=%" PRIu8, dev_id);
393 RTE_RAWDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
394 dev = &rte_rawdevs[dev_id];
395 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_start, -ENOTSUP);
397 if (dev->started != 0) {
398 RTE_RDEV_ERR("Device with dev_id=%" PRIu8 "already started",
403 diag = (*dev->dev_ops->dev_start)(dev);
413 rte_rawdev_stop(uint16_t dev_id)
415 struct rte_rawdev *dev;
417 RTE_RDEV_DEBUG("Stop dev_id=%" PRIu8, dev_id);
419 RTE_RAWDEV_VALID_DEVID_OR_RET(dev_id);
420 dev = &rte_rawdevs[dev_id];
422 RTE_FUNC_PTR_OR_RET(*dev->dev_ops->dev_stop);
424 if (dev->started == 0) {
425 RTE_RDEV_ERR("Device with dev_id=%" PRIu8 "already stopped",
430 (*dev->dev_ops->dev_stop)(dev);
435 rte_rawdev_close(uint16_t dev_id)
437 struct rte_rawdev *dev;
439 RTE_RAWDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
440 dev = &rte_rawdevs[dev_id];
442 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_close, -ENOTSUP);
443 /* Device must be stopped before it can be closed */
444 if (dev->started == 1) {
445 RTE_RDEV_ERR("Device %u must be stopped before closing",
450 return (*dev->dev_ops->dev_close)(dev);
454 rte_rawdev_reset(uint16_t dev_id)
456 struct rte_rawdev *dev;
458 RTE_RAWDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
459 dev = &rte_rawdevs[dev_id];
461 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_reset, -ENOTSUP);
462 /* Reset is not dependent on state of the device */
463 return (*dev->dev_ops->dev_reset)(dev);
466 static inline uint8_t
467 rte_rawdev_find_free_device_index(void)
471 for (dev_id = 0; dev_id < RTE_RAWDEV_MAX_DEVS; dev_id++) {
472 if (rte_rawdevs[dev_id].attached ==
477 return RTE_RAWDEV_MAX_DEVS;
481 rte_rawdev_pmd_allocate(const char *name, size_t dev_priv_size, int socket_id)
483 struct rte_rawdev *rawdev;
486 if (rte_rawdev_pmd_get_named_dev(name) != NULL) {
487 RTE_RDEV_ERR("Event device with name %s already allocated!",
492 dev_id = rte_rawdev_find_free_device_index();
493 if (dev_id == RTE_RAWDEV_MAX_DEVS) {
494 RTE_RDEV_ERR("Reached maximum number of raw devices");
498 rawdev = &rte_rawdevs[dev_id];
500 rawdev->dev_private = rte_zmalloc_socket("rawdev private",
504 if (!rawdev->dev_private) {
505 RTE_RDEV_ERR("Unable to allocate memory to Skeleton dev");
510 rawdev->dev_id = dev_id;
511 rawdev->socket_id = socket_id;
513 snprintf(rawdev->name, RTE_RAWDEV_NAME_MAX_LEN, "%s", name);
515 rawdev->attached = RTE_RAWDEV_ATTACHED;
516 rawdev_globals.nb_devs++;
522 rte_rawdev_pmd_release(struct rte_rawdev *rawdev)
529 ret = rte_rawdev_close(rawdev->dev_id);
533 rawdev->attached = RTE_RAWDEV_DETACHED;
534 rawdev_globals.nb_devs--;
537 rawdev->socket_id = 0;
538 rawdev->dev_ops = NULL;
539 if (rawdev->dev_private) {
540 rte_free(rawdev->dev_private);
541 rawdev->dev_private = NULL;
547 RTE_INIT(librawdev_init_log)
549 librawdev_logtype = rte_log_register("lib.rawdev");
550 if (librawdev_logtype >= 0)
551 rte_log_set_level(librawdev_logtype, RTE_LOG_INFO);