1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2017 Intel Corporation
11 * Wireless base band device abstraction APIs.
15 * All functions in this file may be changed or removed without prior notice.
17 * This API allows an application to discover, configure and use a device to
18 * process operations. An asynchronous API (enqueue, followed by later dequeue)
19 * is used for processing operations.
21 * The functions in this API are not thread-safe when called on the same
22 * target object (a device, or a queue on a device), with the exception that
23 * one thread can enqueue operations to a queue while another thread dequeues
24 * from the same queue.
35 #include <rte_compat.h>
37 #include <rte_cpuflags.h>
38 #include <rte_memory.h>
40 #include "rte_bbdev_op.h"
42 #ifndef RTE_BBDEV_MAX_DEVS
43 #define RTE_BBDEV_MAX_DEVS 128 /**< Max number of devices */
46 /** Flags indicate current state of BBDEV device */
47 enum rte_bbdev_state {
53 * Get the total number of devices that have been successfully initialised.
56 * The total number of usable devices.
60 rte_bbdev_count(void);
63 * Check if a device is valid.
66 * The identifier of the device.
69 * true if device ID is valid and device is attached, false otherwise.
73 rte_bbdev_is_valid(uint16_t dev_id);
76 * Get the next enabled device.
82 * - The next device, or
83 * - RTE_BBDEV_MAX_DEVS if none found
87 rte_bbdev_find_next(uint16_t dev_id);
/** Iterate through all enabled devices, assigning each valid device ID to @p i in turn */
#define RTE_BBDEV_FOREACH(i) for (i = rte_bbdev_find_next(-1); \
		i < RTE_BBDEV_MAX_DEVS; \
		i = rte_bbdev_find_next(i))
95 * Setup up device queues.
96 * This function must be called on a device before setting up the queues and
97 * starting the device. It can also be called when a device is in the stopped
98 * state. If any device queues have been configured their configuration will be
99 * cleared by a call to this function.
102 * The identifier of the device.
104 * Number of queues to configure on device.
106 * ID of a socket which will be used to allocate memory.
110 * - -ENODEV if dev_id is invalid or the device is corrupted
111 * - -EINVAL if num_queues is invalid, 0 or greater than maximum
112 * - -EBUSY if the identified device has already started
113 * - -ENOMEM if unable to allocate memory
117 rte_bbdev_setup_queues(uint16_t dev_id, uint16_t num_queues, int socket_id);
121 * This function may be called before starting the device to enable the
122 * interrupts if they are available.
125 * The identifier of the device.
129 * - -ENODEV if dev_id is invalid or the device is corrupted
130 * - -EBUSY if the identified device has already started
131 * - -ENOTSUP if the interrupts are not supported by the device
135 rte_bbdev_intr_enable(uint16_t dev_id);
/** Device queue configuration structure, passed to rte_bbdev_queue_configure(). */
struct rte_bbdev_queue_conf {
	int socket; /**< NUMA socket used for memory allocation */
	uint32_t queue_size; /**< Size of queue */
	uint8_t priority; /**< Queue priority */
	bool deferred_start; /**< Do not start queue when device is started. */
	enum rte_bbdev_op_type op_type; /**< Operation type */
147 * Configure a queue on a device.
148 * This function can be called after device configuration, and before starting.
149 * It can also be called when the device or the queue is in the stopped state.
152 * The identifier of the device.
154 * The index of the queue.
156 * The queue configuration. If NULL, a default configuration will be used.
160 * - EINVAL if the identified queue size or priority are invalid
161 * - EBUSY if the identified queue or its device have already started
165 rte_bbdev_queue_configure(uint16_t dev_id, uint16_t queue_id,
166 const struct rte_bbdev_queue_conf *conf);
170 * This is the last step needed before enqueueing operations is possible.
173 * The identifier of the device.
177 * - negative value on failure - as returned from PMD driver
181 rte_bbdev_start(uint16_t dev_id);
185 * The device can be reconfigured, and restarted after being stopped.
188 * The identifier of the device.
195 rte_bbdev_stop(uint16_t dev_id);
199 * The device cannot be restarted without reconfiguration!
202 * The identifier of the device.
209 rte_bbdev_close(uint16_t dev_id);
212 * Start a specified queue on a device.
213 * This is only needed if the queue has been stopped, or if the deferred_start
214 * flag has been set when configuring the queue.
217 * The identifier of the device.
219 * The index of the queue.
223 * - negative value on failure - as returned from PMD driver
227 rte_bbdev_queue_start(uint16_t dev_id, uint16_t queue_id);
 * Stop a specified queue on a device, to allow re-configuration.
233 * The identifier of the device.
235 * The index of the queue.
239 * - negative value on failure - as returned from PMD driver
243 rte_bbdev_queue_stop(uint16_t dev_id, uint16_t queue_id);
/** Device statistics. */
struct rte_bbdev_stats {
	uint64_t enqueued_count; /**< Count of all operations enqueued */
	uint64_t dequeued_count; /**< Count of all operations dequeued */
	/** Total error count on operations enqueued */
	uint64_t enqueue_err_count;
	/** Total error count on operations dequeued */
	uint64_t dequeue_err_count;
	/** CPU cycles consumed by the (HW/SW) accelerator device to offload
	 * the enqueue request to its internal queues.
	 * - For a HW device this is the cycles consumed in MMIO write
	 * - For a SW (vdev) device, this is the processing time of the
	 *   software enqueue — NOTE(review): tail of original sentence lost in
	 *   extraction; confirm exact wording against upstream header.
	 */
	uint64_t acc_offload_cycles;
263 * Retrieve the general I/O statistics of a device.
266 * The identifier of the device.
268 * Pointer to structure to where statistics will be copied. On error, this
269 * location may or may not have been modified.
273 * - EINVAL if invalid parameter pointer is provided
277 rte_bbdev_stats_get(uint16_t dev_id, struct rte_bbdev_stats *stats);
280 * Reset the statistics of a device.
283 * The identifier of the device.
289 rte_bbdev_stats_reset(uint16_t dev_id);
/** Device information supplied by the device's driver */
struct rte_bbdev_driver_info {
	/** Driver name */
	const char *driver_name;

	/** Maximum number of queues supported by the device */
	unsigned int max_num_queues;
	/** Queue size limit (queue size must also be power of 2) */
	uint32_t queue_size_lim;
	/** Set if device off-loads operation to hardware */
	bool hardware_accelerated;
	/** Max value supported by queue priority for DL */
	uint8_t max_dl_queue_priority;
	/** Max value supported by queue priority for UL */
	uint8_t max_ul_queue_priority;
	/** Set if device supports per-queue interrupts */
	bool queue_intr_supported;
	/** Minimum alignment of buffers, in bytes */
	uint16_t min_alignment;
	/** HARQ memory available in kB */
	uint32_t harq_buffer_size;
	/** Byte endianness (RTE_BIG_ENDIAN/RTE_LITTLE_ENDIAN) supported
	 * for input/output data
	 */
	uint8_t data_endianness;
	/** Default queue configuration used if none is supplied */
	struct rte_bbdev_queue_conf default_queue_conf;
	/** Device operation capabilities */
	const struct rte_bbdev_op_cap *capabilities;
	/** Device cpu_flag requirements */
	const enum rte_cpu_flag_t *cpu_flag_reqs;
324 /** Macro used at end of bbdev PMD list */
325 #define RTE_BBDEV_END_OF_CAPABILITIES_LIST() \
326 { RTE_BBDEV_OP_NONE }
/**
 * Device information structure used by an application to discover a device's
 * capabilities and current configuration
 */
struct rte_bbdev_info {
	int socket_id; /**< NUMA socket that device is on */
	const char *dev_name; /**< Unique device name */
	const struct rte_device *device; /**< Device Information */
	uint16_t num_queues; /**< Number of queues currently configured */
	bool started; /**< Set if device is currently started */
	struct rte_bbdev_driver_info drv; /**< Info from device driver */
342 * Retrieve information about a device.
345 * The identifier of the device.
347 * Pointer to structure to where information will be copied. On error, this
348 * location may or may not have been modified.
352 * - EINVAL if invalid parameter pointer is provided
356 rte_bbdev_info_get(uint16_t dev_id, struct rte_bbdev_info *dev_info);
/** Queue information */
struct rte_bbdev_queue_info {
	/** Current queue configuration */
	struct rte_bbdev_queue_conf conf;
	/** Set if queue is currently started */
367 * Retrieve information about a specific queue on a device.
370 * The identifier of the device.
372 * The index of the queue.
374 * Pointer to structure to where information will be copied. On error, this
375 * location may or may not have been modified.
379 * - EINVAL if invalid parameter pointer is provided
383 rte_bbdev_queue_info_get(uint16_t dev_id, uint16_t queue_id,
384 struct rte_bbdev_queue_info *queue_info);
/** @internal The data structure associated with each queue of a device. */
struct rte_bbdev_queue_data {
	void *queue_private; /**< Driver-specific per-queue data */
	struct rte_bbdev_queue_conf conf; /**< Current configuration */
	struct rte_bbdev_stats queue_stats; /**< Queue statistics */
	bool started; /**< Queue state */
/** @internal Enqueue encode operations for processing on a queue of a device. */
typedef uint16_t (*rte_bbdev_enqueue_enc_ops_t)(
		struct rte_bbdev_queue_data *q_data,
		struct rte_bbdev_enc_op **ops,

/** @internal Enqueue decode operations for processing on a queue of a device. */
typedef uint16_t (*rte_bbdev_enqueue_dec_ops_t)(
		struct rte_bbdev_queue_data *q_data,
		struct rte_bbdev_dec_op **ops,

/** @internal Dequeue encode operations from a queue of a device. */
typedef uint16_t (*rte_bbdev_dequeue_enc_ops_t)(
		struct rte_bbdev_queue_data *q_data,
		struct rte_bbdev_enc_op **ops, uint16_t num);

/** @internal Dequeue decode operations from a queue of a device. */
typedef uint16_t (*rte_bbdev_dequeue_dec_ops_t)(
		struct rte_bbdev_queue_data *q_data,
		struct rte_bbdev_dec_op **ops, uint16_t num);
416 #define RTE_BBDEV_NAME_MAX_LEN 64 /**< Max length of device name */
/**
 * @internal The data associated with a device, with no function pointers.
 * This structure is safe to place in shared memory to be common among
 * different processes in a multi-process configuration. Drivers can access
 * these fields, but should never write to them!
 */
struct rte_bbdev_data {
	char name[RTE_BBDEV_NAME_MAX_LEN]; /**< Unique identifier name */
	void *dev_private; /**< Driver-specific private data */
	uint16_t num_queues; /**< Number of currently configured queues */
	struct rte_bbdev_queue_data *queues; /**< Queue structures */
	uint16_t dev_id; /**< Device ID */
	int socket_id; /**< NUMA socket that device is on */
	bool started; /**< Device run-time state */
	uint16_t process_cnt; /**< Counter of processes using the device */
435 /* Forward declarations */
436 struct rte_bbdev_ops;
437 struct rte_bbdev_callback;
438 struct rte_intr_handle;
440 /** Structure to keep track of registered callbacks */
441 RTE_TAILQ_HEAD(rte_bbdev_cb_list, rte_bbdev_callback);
/**
 * @internal The data structure associated with a device. Drivers can access
 * these fields, but should only write to the *_ops fields.
 */
struct __rte_cache_aligned rte_bbdev {
	/** Enqueue encode function */
	rte_bbdev_enqueue_enc_ops_t enqueue_enc_ops;
	/** Enqueue decode function */
	rte_bbdev_enqueue_dec_ops_t enqueue_dec_ops;
	/** Dequeue encode function */
	rte_bbdev_dequeue_enc_ops_t dequeue_enc_ops;
	/** Dequeue decode function */
	rte_bbdev_dequeue_dec_ops_t dequeue_dec_ops;
	/** Enqueue LDPC encode function */
	rte_bbdev_enqueue_enc_ops_t enqueue_ldpc_enc_ops;
	/** Enqueue LDPC decode function */
	rte_bbdev_enqueue_dec_ops_t enqueue_ldpc_dec_ops;
	/** Dequeue LDPC encode function */
	rte_bbdev_dequeue_enc_ops_t dequeue_ldpc_enc_ops;
	/** Dequeue LDPC decode function */
	rte_bbdev_dequeue_dec_ops_t dequeue_ldpc_dec_ops;
	const struct rte_bbdev_ops *dev_ops; /**< Functions exported by PMD */
	struct rte_bbdev_data *data; /**< Pointer to device data */
	enum rte_bbdev_state state; /**< If device is currently used or not */
	struct rte_device *device; /**< Backing device */
	/** User application callback for interrupts if present */
	struct rte_bbdev_cb_list list_cbs;
	struct rte_intr_handle *intr_handle; /**< Device interrupt handle */
473 /** @internal array of all devices */
474 extern struct rte_bbdev rte_bbdev_devices[];
477 * Enqueue a burst of processed encode operations to a queue of the device.
 * This function only enqueues as many operations as currently possible and
479 * does not block until @p num_ops entries in the queue are available.
480 * This function does not provide any error notification to avoid the
481 * corresponding overhead.
484 * The identifier of the device.
486 * The index of the queue.
 * Pointer array containing operations to be enqueued. Must have at least
491 * The maximum number of operations to enqueue.
494 * The number of operations actually enqueued (this is the number of processed
495 * entries in the @p ops array).
498 static inline uint16_t
499 rte_bbdev_enqueue_enc_ops(uint16_t dev_id, uint16_t queue_id,
500 struct rte_bbdev_enc_op **ops, uint16_t num_ops)
502 struct rte_bbdev *dev = &rte_bbdev_devices[dev_id];
503 struct rte_bbdev_queue_data *q_data = &dev->data->queues[queue_id];
504 return dev->enqueue_enc_ops(q_data, ops, num_ops);
508 * Enqueue a burst of processed decode operations to a queue of the device.
 * This function only enqueues as many operations as currently possible and
510 * does not block until @p num_ops entries in the queue are available.
511 * This function does not provide any error notification to avoid the
512 * corresponding overhead.
515 * The identifier of the device.
517 * The index of the queue.
 * Pointer array containing operations to be enqueued. Must have at least
522 * The maximum number of operations to enqueue.
525 * The number of operations actually enqueued (this is the number of processed
526 * entries in the @p ops array).
529 static inline uint16_t
530 rte_bbdev_enqueue_dec_ops(uint16_t dev_id, uint16_t queue_id,
531 struct rte_bbdev_dec_op **ops, uint16_t num_ops)
533 struct rte_bbdev *dev = &rte_bbdev_devices[dev_id];
534 struct rte_bbdev_queue_data *q_data = &dev->data->queues[queue_id];
535 return dev->enqueue_dec_ops(q_data, ops, num_ops);
539 * Enqueue a burst of processed encode operations to a queue of the device.
 * This function only enqueues as many operations as currently possible and
541 * does not block until @p num_ops entries in the queue are available.
542 * This function does not provide any error notification to avoid the
543 * corresponding overhead.
546 * The identifier of the device.
548 * The index of the queue.
 * Pointer array containing operations to be enqueued. Must have at least
553 * The maximum number of operations to enqueue.
556 * The number of operations actually enqueued (this is the number of processed
557 * entries in the @p ops array).
560 static inline uint16_t
561 rte_bbdev_enqueue_ldpc_enc_ops(uint16_t dev_id, uint16_t queue_id,
562 struct rte_bbdev_enc_op **ops, uint16_t num_ops)
564 struct rte_bbdev *dev = &rte_bbdev_devices[dev_id];
565 struct rte_bbdev_queue_data *q_data = &dev->data->queues[queue_id];
566 return dev->enqueue_ldpc_enc_ops(q_data, ops, num_ops);
570 * Enqueue a burst of processed decode operations to a queue of the device.
 * This function only enqueues as many operations as currently possible and
572 * does not block until @p num_ops entries in the queue are available.
573 * This function does not provide any error notification to avoid the
574 * corresponding overhead.
577 * The identifier of the device.
579 * The index of the queue.
 * Pointer array containing operations to be enqueued. Must have at least
584 * The maximum number of operations to enqueue.
587 * The number of operations actually enqueued (this is the number of processed
588 * entries in the @p ops array).
591 static inline uint16_t
592 rte_bbdev_enqueue_ldpc_dec_ops(uint16_t dev_id, uint16_t queue_id,
593 struct rte_bbdev_dec_op **ops, uint16_t num_ops)
595 struct rte_bbdev *dev = &rte_bbdev_devices[dev_id];
596 struct rte_bbdev_queue_data *q_data = &dev->data->queues[queue_id];
597 return dev->enqueue_ldpc_dec_ops(q_data, ops, num_ops);
602 * Dequeue a burst of processed encode operations from a queue of the device.
 * This function returns only the current contents of the queue, and does not
 * block until @p num_ops is available.
605 * This function does not provide any error notification to avoid the
606 * corresponding overhead.
609 * The identifier of the device.
611 * The index of the queue.
613 * Pointer array where operations will be dequeued to. Must have at least
615 * ie. A pointer to a table of void * pointers (ops) that will be filled.
617 * The maximum number of operations to dequeue.
620 * The number of operations actually dequeued (this is the number of entries
621 * copied into the @p ops array).
624 static inline uint16_t
625 rte_bbdev_dequeue_enc_ops(uint16_t dev_id, uint16_t queue_id,
626 struct rte_bbdev_enc_op **ops, uint16_t num_ops)
628 struct rte_bbdev *dev = &rte_bbdev_devices[dev_id];
629 struct rte_bbdev_queue_data *q_data = &dev->data->queues[queue_id];
630 return dev->dequeue_enc_ops(q_data, ops, num_ops);
634 * Dequeue a burst of processed decode operations from a queue of the device.
 * This function returns only the current contents of the queue, and does not
 * block until @p num_ops is available.
637 * This function does not provide any error notification to avoid the
638 * corresponding overhead.
641 * The identifier of the device.
643 * The index of the queue.
645 * Pointer array where operations will be dequeued to. Must have at least
647 * ie. A pointer to a table of void * pointers (ops) that will be filled.
649 * The maximum number of operations to dequeue.
652 * The number of operations actually dequeued (this is the number of entries
653 * copied into the @p ops array).
657 static inline uint16_t
658 rte_bbdev_dequeue_dec_ops(uint16_t dev_id, uint16_t queue_id,
659 struct rte_bbdev_dec_op **ops, uint16_t num_ops)
661 struct rte_bbdev *dev = &rte_bbdev_devices[dev_id];
662 struct rte_bbdev_queue_data *q_data = &dev->data->queues[queue_id];
663 return dev->dequeue_dec_ops(q_data, ops, num_ops);
668 * Dequeue a burst of processed encode operations from a queue of the device.
 * This function returns only the current contents of the queue, and does not
 * block until @p num_ops is available.
671 * This function does not provide any error notification to avoid the
672 * corresponding overhead.
675 * The identifier of the device.
677 * The index of the queue.
679 * Pointer array where operations will be dequeued to. Must have at least
682 * The maximum number of operations to dequeue.
685 * The number of operations actually dequeued (this is the number of entries
686 * copied into the @p ops array).
689 static inline uint16_t
690 rte_bbdev_dequeue_ldpc_enc_ops(uint16_t dev_id, uint16_t queue_id,
691 struct rte_bbdev_enc_op **ops, uint16_t num_ops)
693 struct rte_bbdev *dev = &rte_bbdev_devices[dev_id];
694 struct rte_bbdev_queue_data *q_data = &dev->data->queues[queue_id];
695 return dev->dequeue_ldpc_enc_ops(q_data, ops, num_ops);
699 * Dequeue a burst of processed decode operations from a queue of the device.
 * This function returns only the current contents of the queue, and does not
 * block until @p num_ops is available.
702 * This function does not provide any error notification to avoid the
703 * corresponding overhead.
706 * The identifier of the device.
708 * The index of the queue.
710 * Pointer array where operations will be dequeued to. Must have at least
713 * The maximum number of operations to dequeue.
716 * The number of operations actually dequeued (this is the number of entries
717 * copied into the @p ops array).
720 static inline uint16_t
721 rte_bbdev_dequeue_ldpc_dec_ops(uint16_t dev_id, uint16_t queue_id,
722 struct rte_bbdev_dec_op **ops, uint16_t num_ops)
724 struct rte_bbdev *dev = &rte_bbdev_devices[dev_id];
725 struct rte_bbdev_queue_data *q_data = &dev->data->queues[queue_id];
726 return dev->dequeue_ldpc_dec_ops(q_data, ops, num_ops);
/** Definitions of device event types */
enum rte_bbdev_event_type {
	RTE_BBDEV_EVENT_UNKNOWN, /**< Unknown event type */
	RTE_BBDEV_EVENT_ERROR, /**< Error interrupt event */
	RTE_BBDEV_EVENT_DEQUEUE, /**< Dequeue event */
	RTE_BBDEV_EVENT_MAX /**< Max value of this enum */
738 * Typedef for application callback function registered by application
739 * software for notification of device events
744 * Device event to register for notification of.
746 * User specified parameter to be passed to user's callback function.
748 * To pass data back to user application.
750 typedef void (*rte_bbdev_cb_fn)(uint16_t dev_id,
751 enum rte_bbdev_event_type event, void *cb_arg,
755 * Register a callback function for specific device id. Multiple callbacks can
756 * be added and will be called in the order they are added when an event is
757 * triggered. Callbacks are called in a separate thread created by the DPDK EAL.
762 * The event that the callback will be registered for.
764 * User supplied callback function to be called.
766 * Pointer to parameter that will be passed to the callback.
769 * Zero on success, negative value on failure.
773 rte_bbdev_callback_register(uint16_t dev_id, enum rte_bbdev_event_type event,
774 rte_bbdev_cb_fn cb_fn, void *cb_arg);
777 * Unregister a callback function for specific device id.
780 * The device identifier.
782 * The event that the callback will be unregistered for.
784 * User supplied callback function to be unregistered.
786 * Pointer to the parameter supplied when registering the callback.
787 * (void *)-1 means to remove all registered callbacks with the specified
792 * - EINVAL if invalid parameter pointer is provided
793 * - EAGAIN if the provided callback pointer does not exist
797 rte_bbdev_callback_unregister(uint16_t dev_id, enum rte_bbdev_event_type event,
798 rte_bbdev_cb_fn cb_fn, void *cb_arg);
801 * Enable a one-shot interrupt on the next operation enqueued to a particular
802 * queue. The interrupt will be triggered when the operation is ready to be
803 * dequeued. To handle the interrupt, an epoll file descriptor must be
804 * registered using rte_bbdev_queue_intr_ctl(), and then an application
805 * thread/lcore can wait for the interrupt using rte_epoll_wait().
808 * The device identifier.
810 * The index of the queue.
814 * - negative value on failure - as returned from PMD driver
818 rte_bbdev_queue_intr_enable(uint16_t dev_id, uint16_t queue_id);
821 * Disable a one-shot interrupt on the next operation enqueued to a particular
822 * queue (if it has been enabled).
825 * The device identifier.
827 * The index of the queue.
831 * - negative value on failure - as returned from PMD driver
835 rte_bbdev_queue_intr_disable(uint16_t dev_id, uint16_t queue_id);
838 * Control interface for per-queue interrupts.
841 * The device identifier.
843 * The index of the queue.
845 * Epoll file descriptor that will be associated with the interrupt source.
846 * If the special value RTE_EPOLL_PER_THREAD is provided, a per thread epoll
847 * file descriptor created by the EAL is used (RTE_EPOLL_PER_THREAD can also
848 * be used when calling rte_epoll_wait()).
 * The operation to be performed for the vector: RTE_INTR_EVENT_ADD or
851 * RTE_INTR_EVENT_DEL.
853 * User context, that will be returned in the epdata.data field of the
854 * rte_epoll_event structure filled in by rte_epoll_wait().
858 * - ENOTSUP if interrupts are not supported by the identified device
859 * - negative value on failure - as returned from PMD driver
863 rte_bbdev_queue_intr_ctl(uint16_t dev_id, uint16_t queue_id, int epfd, int op,
870 #endif /* _RTE_BBDEV_H_ */