1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2017 Intel Corporation
11 * Wireless base band device abstraction APIs.
13 * This API allows an application to discover, configure and use a device to
14 * process operations. An asynchronous API (enqueue, followed by later dequeue)
15 * is used for processing operations.
17 * The functions in this API are not thread-safe when called on the same
18 * target object (a device, or a queue on a device), with the exception that
19 * one thread can enqueue operations to a queue while another thread dequeues
20 * from the same queue.
31 #include <rte_compat.h>
33 #include <rte_cpuflags.h>
34 #include <rte_memory.h>
36 #include "rte_bbdev_op.h"
38 #ifndef RTE_BBDEV_MAX_DEVS
39 #define RTE_BBDEV_MAX_DEVS 128 /**< Max number of devices */
42 /** Flags indicate current state of BBDEV device */
43 enum rte_bbdev_state {
49 * Get the total number of devices that have been successfully initialised.
52 * The total number of usable devices.
55 rte_bbdev_count(void);
58 * Check if a device is valid.
61 * The identifier of the device.
64 * true if device ID is valid and device is attached, false otherwise.
67 rte_bbdev_is_valid(uint16_t dev_id);
70 * Get the next enabled device.
76 * - The next device, or
77 * - RTE_BBDEV_MAX_DEVS if none found
80 rte_bbdev_find_next(uint16_t dev_id);
82 /** Iterate through all enabled bbdev devices, visiting each valid device ID in turn */
83 #define RTE_BBDEV_FOREACH(i) for (i = rte_bbdev_find_next(-1); \
84 i < RTE_BBDEV_MAX_DEVS; \
85 i = rte_bbdev_find_next(i))
88 * Set up device queues.
89 * This function must be called on a device before setting up the queues and
90 * starting the device. It can also be called when a device is in the stopped
91 * state. If any device queues have been configured their configuration will be
92 * cleared by a call to this function.
95 * The identifier of the device.
97 * Number of queues to configure on device.
99 * ID of a socket which will be used to allocate memory.
103 * - -ENODEV if dev_id is invalid or the device is corrupted
104 * - -EINVAL if num_queues is invalid, 0 or greater than maximum
105 * - -EBUSY if the identified device has already started
106 * - -ENOMEM if unable to allocate memory
109 rte_bbdev_setup_queues(uint16_t dev_id, uint16_t num_queues, int socket_id);
113 * This function may be called before starting the device to enable the
114 * interrupts if they are available.
117 * The identifier of the device.
121 * - -ENODEV if dev_id is invalid or the device is corrupted
122 * - -EBUSY if the identified device has already started
123 * - -ENOTSUP if the interrupts are not supported by the device
126 rte_bbdev_intr_enable(uint16_t dev_id);
128 /** Device queue configuration structure */
129 struct rte_bbdev_queue_conf {
130 int socket; /**< NUMA socket used for memory allocation */
131 uint32_t queue_size; /**< Size of queue */
132 uint8_t priority; /**< Queue priority */
133 bool deferred_start; /**< Do not start queue when device is started. */
134 enum rte_bbdev_op_type op_type; /**< Operation type */
138 * Configure a queue on a device.
139 * This function can be called after device configuration, and before starting.
140 * It can also be called when the device or the queue is in the stopped state.
143 * The identifier of the device.
145 * The index of the queue.
147 * The queue configuration. If NULL, a default configuration will be used.
151 * - EINVAL if the identified queue size or priority are invalid
152 * - EBUSY if the identified queue or its device have already started
155 rte_bbdev_queue_configure(uint16_t dev_id, uint16_t queue_id,
156 const struct rte_bbdev_queue_conf *conf);
160 * This is the last step needed before enqueueing operations is possible.
163 * The identifier of the device.
167 * - negative value on failure - as returned from PMD
170 rte_bbdev_start(uint16_t dev_id);
174 * The device can be reconfigured, and restarted after being stopped.
177 * The identifier of the device.
183 rte_bbdev_stop(uint16_t dev_id);
187 * The device cannot be restarted without reconfiguration!
190 * The identifier of the device.
196 rte_bbdev_close(uint16_t dev_id);
199 * Start a specified queue on a device.
200 * This is only needed if the queue has been stopped, or if the deferred_start
201 * flag has been set when configuring the queue.
204 * The identifier of the device.
206 * The index of the queue.
210 * - negative value on failure - as returned from PMD
213 rte_bbdev_queue_start(uint16_t dev_id, uint16_t queue_id);
216 * Stop a specified queue on a device, to allow reconfiguration.
219 * The identifier of the device.
221 * The index of the queue.
225 * - negative value on failure - as returned from PMD
228 rte_bbdev_queue_stop(uint16_t dev_id, uint16_t queue_id);
230 /** Device statistics. */
231 struct rte_bbdev_stats {
232 uint64_t enqueued_count; /**< Count of all operations enqueued */
233 uint64_t dequeued_count; /**< Count of all operations dequeued */
234 /** Total error count on operations enqueued */
235 uint64_t enqueue_err_count;
236 /** Total error count on operations dequeued */
237 uint64_t dequeue_err_count;
238 /** CPU cycles consumed by the (HW/SW) accelerator device to offload
239 * the enqueue request to its internal queues.
240 * - For a HW device this is the cycles consumed in MMIO write
241 * - For a SW (vdev) device, this is the processing time of the
244 uint64_t acc_offload_cycles;
248 * Retrieve the general I/O statistics of a device.
251 * The identifier of the device.
253 * Pointer to structure to where statistics will be copied. On error, this
254 * location may or may not have been modified.
258 * - EINVAL if invalid parameter pointer is provided
261 rte_bbdev_stats_get(uint16_t dev_id, struct rte_bbdev_stats *stats);
264 * Reset the statistics of a device.
267 * The identifier of the device.
272 rte_bbdev_stats_reset(uint16_t dev_id);
274 /** Device information supplied by the device's driver */
275 struct rte_bbdev_driver_info {
277 const char *driver_name;
279 /** Maximum number of queues supported by the device */
280 unsigned int max_num_queues;
281 /** Queue size limit (queue size must also be power of 2) */
282 uint32_t queue_size_lim;
283 /** Set if device off-loads operation to hardware */
284 bool hardware_accelerated;
285 /** Max value supported by queue priority for DL */
286 uint8_t max_dl_queue_priority;
287 /** Max value supported by queue priority for UL */
288 uint8_t max_ul_queue_priority;
289 /** Set if device supports per-queue interrupts */
290 bool queue_intr_supported;
291 /** Minimum alignment of buffers, in bytes */
292 uint16_t min_alignment;
293 /** HARQ memory available in kB */
294 uint32_t harq_buffer_size;
295 /** Byte endianness (RTE_BIG_ENDIAN/RTE_LITTLE_ENDIAN) supported
296 * for input/output data
298 uint8_t data_endianness;
299 /** Default queue configuration used if none is supplied */
300 struct rte_bbdev_queue_conf default_queue_conf;
301 /** Device operation capabilities */
302 const struct rte_bbdev_op_cap *capabilities;
303 /** Device cpu_flag requirements */
304 const enum rte_cpu_flag_t *cpu_flag_reqs;
307 /** Macro used at end of bbdev PMD capabilities list: sentinel entry of type RTE_BBDEV_OP_NONE */
308 #define RTE_BBDEV_END_OF_CAPABILITIES_LIST() \
309 { RTE_BBDEV_OP_NONE }
312 * Device information structure used by an application to discover a device's
313 * capabilities and current configuration
315 struct rte_bbdev_info {
316 int socket_id; /**< NUMA socket that device is on */
317 const char *dev_name; /**< Unique device name */
318 const struct rte_device *device; /**< Device Information */
319 uint16_t num_queues; /**< Number of queues currently configured */
320 bool started; /**< Set if device is currently started */
321 struct rte_bbdev_driver_info drv; /**< Info from device driver */
325 * Retrieve information about a device.
328 * The identifier of the device.
330 * Pointer to structure to where information will be copied. On error, this
331 * location may or may not have been modified.
335 * - EINVAL if invalid parameter pointer is provided
338 rte_bbdev_info_get(uint16_t dev_id, struct rte_bbdev_info *dev_info);
340 /** Queue information */
341 struct rte_bbdev_queue_info {
342 /** Current device configuration */
343 struct rte_bbdev_queue_conf conf;
344 /** Set if queue is currently started */
349 * Retrieve information about a specific queue on a device.
352 * The identifier of the device.
354 * The index of the queue.
356 * Pointer to structure to where information will be copied. On error, this
357 * location may or may not have been modified.
361 * - EINVAL if invalid parameter pointer is provided
364 rte_bbdev_queue_info_get(uint16_t dev_id, uint16_t queue_id,
365 struct rte_bbdev_queue_info *queue_info);
367 /** @internal The data structure associated with each queue of a device. */
368 struct rte_bbdev_queue_data {
369 void *queue_private; /**< Driver-specific per-queue data */
370 struct rte_bbdev_queue_conf conf; /**< Current configuration */
371 struct rte_bbdev_stats queue_stats; /**< Queue statistics */
372 bool started; /**< Queue state */
375 /** @internal Enqueue encode operations for processing on queue of a device. */
376 typedef uint16_t (*rte_bbdev_enqueue_enc_ops_t)(
377 struct rte_bbdev_queue_data *q_data,
378 struct rte_bbdev_enc_op **ops,
381 /** @internal Enqueue decode operations for processing on queue of a device. */
382 typedef uint16_t (*rte_bbdev_enqueue_dec_ops_t)(
383 struct rte_bbdev_queue_data *q_data,
384 struct rte_bbdev_dec_op **ops,
387 /** @internal Dequeue encode operations from a queue of a device; returns the number dequeued. */
388 typedef uint16_t (*rte_bbdev_dequeue_enc_ops_t)(
389 struct rte_bbdev_queue_data *q_data,
390 struct rte_bbdev_enc_op **ops, uint16_t num);
392 /** @internal Dequeue decode operations from a queue of a device; returns the number dequeued. */
393 typedef uint16_t (*rte_bbdev_dequeue_dec_ops_t)(
394 struct rte_bbdev_queue_data *q_data,
395 struct rte_bbdev_dec_op **ops, uint16_t num);
397 #define RTE_BBDEV_NAME_MAX_LEN 64 /**< Max length of device name */
400 * @internal The data associated with a device, with no function pointers.
401 * This structure is safe to place in shared memory to be common among
402 * different processes in a multi-process configuration. Drivers can access
403 * these fields, but should never write to them!
405 struct rte_bbdev_data {
406 char name[RTE_BBDEV_NAME_MAX_LEN]; /**< Unique identifier name */
407 void *dev_private; /**< Driver-specific private data */
408 uint16_t num_queues; /**< Number of currently configured queues */
409 struct rte_bbdev_queue_data *queues; /**< Queue structures */
410 uint16_t dev_id; /**< Device ID */
411 int socket_id; /**< NUMA socket that device is on */
412 bool started; /**< Device run-time state */
413 uint16_t process_cnt; /**< Counter of processes using the device */
416 /* Forward declarations */
417 struct rte_bbdev_ops;
418 struct rte_bbdev_callback;
419 struct rte_intr_handle;
421 /** Structure to keep track of registered callbacks */
422 RTE_TAILQ_HEAD(rte_bbdev_cb_list, rte_bbdev_callback);
425 * @internal The data structure associated with a device. Drivers can access
426 * these fields, but should only write to the *_ops fields.
428 struct __rte_cache_aligned rte_bbdev {
429 /** Enqueue encode function */
430 rte_bbdev_enqueue_enc_ops_t enqueue_enc_ops;
431 /** Enqueue decode function */
432 rte_bbdev_enqueue_dec_ops_t enqueue_dec_ops;
433 /** Dequeue encode function */
434 rte_bbdev_dequeue_enc_ops_t dequeue_enc_ops;
435 /** Dequeue decode function */
436 rte_bbdev_dequeue_dec_ops_t dequeue_dec_ops;
437 /** Enqueue encode function */
438 rte_bbdev_enqueue_enc_ops_t enqueue_ldpc_enc_ops;
439 /** Enqueue decode function */
440 rte_bbdev_enqueue_dec_ops_t enqueue_ldpc_dec_ops;
441 /** Dequeue encode function */
442 rte_bbdev_dequeue_enc_ops_t dequeue_ldpc_enc_ops;
443 /** Dequeue decode function */
444 rte_bbdev_dequeue_dec_ops_t dequeue_ldpc_dec_ops;
445 const struct rte_bbdev_ops *dev_ops; /**< Functions exported by PMD */
446 struct rte_bbdev_data *data; /**< Pointer to device data */
447 enum rte_bbdev_state state; /**< If device is currently used or not */
448 struct rte_device *device; /**< Backing device */
449 /** User application callback for interrupts if present */
450 struct rte_bbdev_cb_list list_cbs;
451 struct rte_intr_handle *intr_handle; /**< Device interrupt handle */
454 /** @internal array of all devices */
455 extern struct rte_bbdev rte_bbdev_devices[];
458 * Enqueue a burst of processed encode operations to a queue of the device.
459 * This function only enqueues as many operations as currently possible and
460 * does not block until @p num_ops entries in the queue are available.
461 * This function does not provide any error notification to avoid the
462 * corresponding overhead.
465 * The identifier of the device.
467 * The index of the queue.
469 * Pointer array containing operations to be enqueued. Must have at least
472 * The maximum number of operations to enqueue.
475 * The number of operations actually enqueued (this is the number of processed
476 * entries in the @p ops array).
478 static inline uint16_t
479 rte_bbdev_enqueue_enc_ops(uint16_t dev_id, uint16_t queue_id,
480 struct rte_bbdev_enc_op **ops, uint16_t num_ops)
482 struct rte_bbdev *dev = &rte_bbdev_devices[dev_id];
483 struct rte_bbdev_queue_data *q_data = &dev->data->queues[queue_id];
484 return dev->enqueue_enc_ops(q_data, ops, num_ops);
488 * Enqueue a burst of processed decode operations to a queue of the device.
489 * This function only enqueues as many operations as currently possible and
490 * does not block until @p num_ops entries in the queue are available.
491 * This function does not provide any error notification to avoid the
492 * corresponding overhead.
495 * The identifier of the device.
497 * The index of the queue.
499 * Pointer array containing operations to be enqueued. Must have at least
502 * The maximum number of operations to enqueue.
505 * The number of operations actually enqueued (this is the number of processed
506 * entries in the @p ops array).
508 static inline uint16_t
509 rte_bbdev_enqueue_dec_ops(uint16_t dev_id, uint16_t queue_id,
510 struct rte_bbdev_dec_op **ops, uint16_t num_ops)
512 struct rte_bbdev *dev = &rte_bbdev_devices[dev_id];
513 struct rte_bbdev_queue_data *q_data = &dev->data->queues[queue_id];
514 return dev->enqueue_dec_ops(q_data, ops, num_ops);
518 * Enqueue a burst of processed encode operations to a queue of the device.
519 * This function only enqueues as many operations as currently possible and
520 * does not block until @p num_ops entries in the queue are available.
521 * This function does not provide any error notification to avoid the
522 * corresponding overhead.
525 * The identifier of the device.
527 * The index of the queue.
529 * Pointer array containing operations to be enqueued. Must have at least
532 * The maximum number of operations to enqueue.
535 * The number of operations actually enqueued (this is the number of processed
536 * entries in the @p ops array).
538 static inline uint16_t
539 rte_bbdev_enqueue_ldpc_enc_ops(uint16_t dev_id, uint16_t queue_id,
540 struct rte_bbdev_enc_op **ops, uint16_t num_ops)
542 struct rte_bbdev *dev = &rte_bbdev_devices[dev_id];
543 struct rte_bbdev_queue_data *q_data = &dev->data->queues[queue_id];
544 return dev->enqueue_ldpc_enc_ops(q_data, ops, num_ops);
548 * Enqueue a burst of processed decode operations to a queue of the device.
549 * This function only enqueues as many operations as currently possible and
550 * does not block until @p num_ops entries in the queue are available.
551 * This function does not provide any error notification to avoid the
552 * corresponding overhead.
555 * The identifier of the device.
557 * The index of the queue.
559 * Pointer array containing operations to be enqueued. Must have at least
562 * The maximum number of operations to enqueue.
565 * The number of operations actually enqueued (this is the number of processed
566 * entries in the @p ops array).
568 static inline uint16_t
569 rte_bbdev_enqueue_ldpc_dec_ops(uint16_t dev_id, uint16_t queue_id,
570 struct rte_bbdev_dec_op **ops, uint16_t num_ops)
572 struct rte_bbdev *dev = &rte_bbdev_devices[dev_id];
573 struct rte_bbdev_queue_data *q_data = &dev->data->queues[queue_id];
574 return dev->enqueue_ldpc_dec_ops(q_data, ops, num_ops);
579 * Dequeue a burst of processed encode operations from a queue of the device.
580 * This function returns only the current contents of the queue, and does not
581 * block until @p num_ops is available.
582 * This function does not provide any error notification to avoid the
583 * corresponding overhead.
586 * The identifier of the device.
588 * The index of the queue.
590 * Pointer array where operations will be dequeued to. Must have at least
592 * ie. A pointer to a table of void * pointers (ops) that will be filled.
594 * The maximum number of operations to dequeue.
597 * The number of operations actually dequeued (this is the number of entries
598 * copied into the @p ops array).
600 static inline uint16_t
601 rte_bbdev_dequeue_enc_ops(uint16_t dev_id, uint16_t queue_id,
602 struct rte_bbdev_enc_op **ops, uint16_t num_ops)
604 struct rte_bbdev *dev = &rte_bbdev_devices[dev_id];
605 struct rte_bbdev_queue_data *q_data = &dev->data->queues[queue_id];
606 return dev->dequeue_enc_ops(q_data, ops, num_ops);
610 * Dequeue a burst of processed decode operations from a queue of the device.
611 * This function returns only the current contents of the queue, and does not
612 * block until @p num_ops is available.
613 * This function does not provide any error notification to avoid the
614 * corresponding overhead.
617 * The identifier of the device.
619 * The index of the queue.
621 * Pointer array where operations will be dequeued to. Must have at least
623 * ie. A pointer to a table of void * pointers (ops) that will be filled.
625 * The maximum number of operations to dequeue.
628 * The number of operations actually dequeued (this is the number of entries
629 * copied into the @p ops array).
632 static inline uint16_t
633 rte_bbdev_dequeue_dec_ops(uint16_t dev_id, uint16_t queue_id,
634 struct rte_bbdev_dec_op **ops, uint16_t num_ops)
636 struct rte_bbdev *dev = &rte_bbdev_devices[dev_id];
637 struct rte_bbdev_queue_data *q_data = &dev->data->queues[queue_id];
638 return dev->dequeue_dec_ops(q_data, ops, num_ops);
643 * Dequeue a burst of processed encode operations from a queue of the device.
644 * This function returns only the current contents of the queue, and does not
645 * block until @p num_ops is available.
646 * This function does not provide any error notification to avoid the
647 * corresponding overhead.
650 * The identifier of the device.
652 * The index of the queue.
654 * Pointer array where operations will be dequeued to. Must have at least
657 * The maximum number of operations to dequeue.
660 * The number of operations actually dequeued (this is the number of entries
661 * copied into the @p ops array).
663 static inline uint16_t
664 rte_bbdev_dequeue_ldpc_enc_ops(uint16_t dev_id, uint16_t queue_id,
665 struct rte_bbdev_enc_op **ops, uint16_t num_ops)
667 struct rte_bbdev *dev = &rte_bbdev_devices[dev_id];
668 struct rte_bbdev_queue_data *q_data = &dev->data->queues[queue_id];
669 return dev->dequeue_ldpc_enc_ops(q_data, ops, num_ops);
673 * Dequeue a burst of processed decode operations from a queue of the device.
674 * This function returns only the current contents of the queue, and does not
675 * block until @p num_ops is available.
676 * This function does not provide any error notification to avoid the
677 * corresponding overhead.
680 * The identifier of the device.
682 * The index of the queue.
684 * Pointer array where operations will be dequeued to. Must have at least
687 * The maximum number of operations to dequeue.
690 * The number of operations actually dequeued (this is the number of entries
691 * copied into the @p ops array).
693 static inline uint16_t
694 rte_bbdev_dequeue_ldpc_dec_ops(uint16_t dev_id, uint16_t queue_id,
695 struct rte_bbdev_dec_op **ops, uint16_t num_ops)
697 struct rte_bbdev *dev = &rte_bbdev_devices[dev_id];
698 struct rte_bbdev_queue_data *q_data = &dev->data->queues[queue_id];
699 return dev->dequeue_ldpc_dec_ops(q_data, ops, num_ops);
702 /** Definitions of device event types */
703 enum rte_bbdev_event_type {
704 RTE_BBDEV_EVENT_UNKNOWN, /**< unknown event type */
705 RTE_BBDEV_EVENT_ERROR, /**< error interrupt event */
706 RTE_BBDEV_EVENT_DEQUEUE, /**< dequeue event */
707 RTE_BBDEV_EVENT_MAX /**< max value of this enum */
711 * Typedef for application callback function registered by application
712 * software for notification of device events
717 * Device event to register for notification of.
719 * User specified parameter to be passed to user's callback function.
721 * To pass data back to user application.
723 typedef void (*rte_bbdev_cb_fn)(uint16_t dev_id,
724 enum rte_bbdev_event_type event, void *cb_arg,
728 * Register a callback function for specific device id. Multiple callbacks can
729 * be added and will be called in the order they are added when an event is
730 * triggered. Callbacks are called in a separate thread created by the DPDK EAL.
735 * The event that the callback will be registered for.
737 * User supplied callback function to be called.
739 * Pointer to parameter that will be passed to the callback.
742 * Zero on success, negative value on failure.
745 rte_bbdev_callback_register(uint16_t dev_id, enum rte_bbdev_event_type event,
746 rte_bbdev_cb_fn cb_fn, void *cb_arg);
749 * Unregister a callback function for specific device id.
752 * The device identifier.
754 * The event that the callback will be unregistered for.
756 * User supplied callback function to be unregistered.
758 * Pointer to the parameter supplied when registering the callback.
759 * (void *)-1 means to remove all registered callbacks with the specified
764 * - EINVAL if invalid parameter pointer is provided
765 * - EAGAIN if the provided callback pointer does not exist
768 rte_bbdev_callback_unregister(uint16_t dev_id, enum rte_bbdev_event_type event,
769 rte_bbdev_cb_fn cb_fn, void *cb_arg);
772 * Enable a one-shot interrupt on the next operation enqueued to a particular
773 * queue. The interrupt will be triggered when the operation is ready to be
774 * dequeued. To handle the interrupt, an epoll file descriptor must be
775 * registered using rte_bbdev_queue_intr_ctl(), and then an application
776 * thread/lcore can wait for the interrupt using rte_epoll_wait().
779 * The device identifier.
781 * The index of the queue.
785 * - negative value on failure - as returned from PMD
788 rte_bbdev_queue_intr_enable(uint16_t dev_id, uint16_t queue_id);
791 * Disable a one-shot interrupt on the next operation enqueued to a particular
792 * queue (if it has been enabled).
795 * The device identifier.
797 * The index of the queue.
801 * - negative value on failure - as returned from PMD
804 rte_bbdev_queue_intr_disable(uint16_t dev_id, uint16_t queue_id);
807 * Control interface for per-queue interrupts.
810 * The device identifier.
812 * The index of the queue.
814 * Epoll file descriptor that will be associated with the interrupt source.
815 * If the special value RTE_EPOLL_PER_THREAD is provided, a per thread epoll
816 * file descriptor created by the EAL is used (RTE_EPOLL_PER_THREAD can also
817 * be used when calling rte_epoll_wait()).
819 * The operation to be performed for the vector: RTE_INTR_EVENT_ADD or
820 * RTE_INTR_EVENT_DEL.
822 * User context, that will be returned in the epdata.data field of the
823 * rte_epoll_event structure filled in by rte_epoll_wait().
827 * - ENOTSUP if interrupts are not supported by the identified device
828 * - negative value on failure - as returned from PMD
831 rte_bbdev_queue_intr_ctl(uint16_t dev_id, uint16_t queue_id, int epfd, int op,
838 #endif /* _RTE_BBDEV_H_ */