1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2017 Intel Corporation
11 * Wireless base band device abstraction APIs.
14 * @b EXPERIMENTAL: this API may change without prior notice
16 * This API allows an application to discover, configure and use a device to
17 * process operations. An asynchronous API (enqueue, followed by later dequeue)
18 * is used for processing operations.
20 * The functions in this API are not thread-safe when called on the same
21 * target object (a device, or a queue on a device), with the exception that
22 * one thread can enqueue operations to a queue while another thread dequeues
23 * from the same queue.
34 #include <rte_compat.h>
35 #include <rte_atomic.h>
37 #include <rte_cpuflags.h>
38 #include <rte_memory.h>
40 #include "rte_bbdev_op.h"
42 #ifndef RTE_BBDEV_MAX_DEVS
43 #define RTE_BBDEV_MAX_DEVS 128 /**< Max number of devices */
46 /** Flags indicating the current state of a bbdev device */
47 enum rte_bbdev_state {
53 * Get the total number of devices that have been successfully initialised.
56 * The total number of usable devices.
60 rte_bbdev_count(void);
63 * Check if a device is valid.
66 * The identifier of the device.
69 * true if device ID is valid and device is attached, false otherwise.
73 rte_bbdev_is_valid(uint16_t dev_id);
76 * Get the next enabled device.
82 * - The next device, or
83 * - RTE_BBDEV_MAX_DEVS if none found
87 rte_bbdev_find_next(uint16_t dev_id);
89 /** Iterate through all enabled devices */
90 #define RTE_BBDEV_FOREACH(i) for (i = rte_bbdev_find_next(-1); \
91 i < RTE_BBDEV_MAX_DEVS; \
92 i = rte_bbdev_find_next(i))
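/*
 * Example (editor's sketch): discovering all usable bbdev devices with
 * RTE_BBDEV_FOREACH(). The printf()-based logging is illustrative only.
 *
 *	uint16_t dev_id;
 *
 *	printf("%u bbdev device(s) found\n", rte_bbdev_count());
 *	RTE_BBDEV_FOREACH(dev_id) {
 *		if (rte_bbdev_is_valid(dev_id))
 *			printf("device %u is usable\n", dev_id);
 *	}
 */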
95 * Set up device queues.
96 * This function must be called on a device before configuring its individual
97 * queues and before starting the device. It can also be called when a device
98 * is in the stopped state. If any device queues have been configured, their
99 * configuration will be cleared by a call to this function.
102 * The identifier of the device.
104 * Number of queues to configure on device.
106 * ID of a socket which will be used to allocate memory.
110 * - -ENODEV if dev_id is invalid or the device is corrupted
111 * - -EINVAL if num_queues is invalid, 0 or greater than maximum
112 * - -EBUSY if the identified device has already started
113 * - -ENOMEM if unable to allocate memory
117 rte_bbdev_setup_queues(uint16_t dev_id, uint16_t num_queues, int socket_id);
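/*
 * Example (editor's sketch): setting up queues on a stopped device. The queue
 * count and socket are assumptions; an application would normally derive them
 * from rte_bbdev_info_get() and rte_socket_id().
 *
 *	if (rte_bbdev_setup_queues(dev_id, 4, rte_socket_id()) < 0)
 *		rte_exit(EXIT_FAILURE, "Cannot set up queues on device %u\n",
 *				dev_id);
 */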
121 * This function may be called before starting the device to enable the
122 * interrupts if they are available.
125 * The identifier of the device.
129 * - -ENODEV if dev_id is invalid or the device is corrupted
130 * - -EBUSY if the identified device has already started
131 * - -ENOTSUP if the interrupts are not supported by the device
135 rte_bbdev_intr_enable(uint16_t dev_id);
137 /** Device queue configuration structure */
138 struct rte_bbdev_queue_conf {
139 int socket; /**< NUMA socket used for memory allocation */
140 uint32_t queue_size; /**< Size of queue */
141 uint8_t priority; /**< Queue priority */
142 bool deferred_start; /**< Do not start queue when device is started. */
143 enum rte_bbdev_op_type op_type; /**< Operation type */
147 * Configure a queue on a device.
148 * This function can be called after device configuration, and before starting.
149 * It can also be called when the device or the queue is in the stopped state.
152 * The identifier of the device.
154 * The index of the queue.
156 * The queue configuration. If NULL, a default configuration will be used.
160 * - EINVAL if the identified queue size or priority are invalid
161 * - EBUSY if the identified queue or its device have already started
165 rte_bbdev_queue_configure(uint16_t dev_id, uint16_t queue_id,
166 const struct rte_bbdev_queue_conf *conf);
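/*
 * Example (editor's sketch): configuring every queue of a device for Turbo
 * decode operations. The values shown (and the dev_id/num_queues variables)
 * are assumptions; drivers report safe defaults in
 * rte_bbdev_info.drv.default_queue_conf.
 *
 *	struct rte_bbdev_queue_conf qconf = {
 *		.socket = rte_socket_id(),
 *		.queue_size = 32,
 *		.priority = 0,
 *		.deferred_start = false,
 *		.op_type = RTE_BBDEV_OP_TURBO_DEC,
 *	};
 *	uint16_t q_id;
 *
 *	for (q_id = 0; q_id < num_queues; q_id++)
 *		if (rte_bbdev_queue_configure(dev_id, q_id, &qconf) != 0)
 *			rte_exit(EXIT_FAILURE, "Queue %u setup failed\n", q_id);
 */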
170 * This is the last step needed before enqueueing operations is possible.
173 * The identifier of the device.
177 * - negative value on failure - as returned from PMD driver
181 rte_bbdev_start(uint16_t dev_id);
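/*
 * Example (editor's sketch): starting a device once its queues have been
 * configured; exiting on failure is illustrative only.
 *
 *	if (rte_bbdev_start(dev_id) != 0)
 *		rte_exit(EXIT_FAILURE, "Failed to start bbdev %u\n", dev_id);
 */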
185 * The device can be reconfigured and restarted after being stopped.
188 * The identifier of the device.
195 rte_bbdev_stop(uint16_t dev_id);
199 * The device cannot be restarted without reconfiguration!
202 * The identifier of the device.
209 rte_bbdev_close(uint16_t dev_id);
212 * Start a specified queue on a device.
213 * This is only needed if the queue has been stopped, or if the deferred_start
214 * flag has been set when configuring the queue.
217 * The identifier of the device.
219 * The index of the queue.
223 * - negative value on failure - as returned from PMD driver
227 rte_bbdev_queue_start(uint16_t dev_id, uint16_t queue_id);
230 * Stop a specified queue on a device, to allow reconfiguration.
233 * The identifier of the device.
235 * The index of the queue.
239 * - negative value on failure - as returned from PMD driver
243 rte_bbdev_queue_stop(uint16_t dev_id, uint16_t queue_id);
245 /** Device statistics. */
246 struct rte_bbdev_stats {
247 uint64_t enqueued_count; /**< Count of all operations enqueued */
248 uint64_t dequeued_count; /**< Count of all operations dequeued */
249 /** Total error count on operations enqueued */
250 uint64_t enqueue_err_count;
251 /** Total error count on operations dequeued */
252 uint64_t dequeue_err_count;
253 /** CPU cycles consumed by the (HW/SW) accelerator device to offload
254 * the enqueue request to its internal queues.
255 * - For a HW device this is the cycles consumed in MMIO write
256 * - For a SW (vdev) device, this is the processing time of the
259 uint64_t acc_offload_cycles;
263 * Retrieve the general I/O statistics of a device.
266 * The identifier of the device.
268 * Pointer to the structure where statistics will be copied. On error, this
269 * location may or may not have been modified.
273 * - EINVAL if invalid parameter pointer is provided
277 rte_bbdev_stats_get(uint16_t dev_id, struct rte_bbdev_stats *stats);
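/*
 * Example (editor's sketch): reading and printing the accumulated counters of
 * a device. PRIu64 from <inttypes.h> is assumed to be available.
 *
 *	struct rte_bbdev_stats stats;
 *
 *	if (rte_bbdev_stats_get(dev_id, &stats) == 0)
 *		printf("dev %u: enq %" PRIu64 " deq %" PRIu64 " enq_err %" PRIu64 "\n",
 *				dev_id, stats.enqueued_count,
 *				stats.dequeued_count, stats.enqueue_err_count);
 */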
280 * Reset the statistics of a device.
283 * The identifier of the device.
289 rte_bbdev_stats_reset(uint16_t dev_id);
291 /** Device information supplied by the device's driver */
292 struct rte_bbdev_driver_info {
294 const char *driver_name;
296 /** Maximum number of queues supported by the device */
297 unsigned int max_num_queues;
298 /** Queue size limit (queue size must also be a power of 2) */
299 uint32_t queue_size_lim;
300 /** Set if device off-loads operation to hardware */
301 bool hardware_accelerated;
302 /** Maximum queue priority value supported for DL queues */
303 uint8_t max_dl_queue_priority;
304 /** Maximum queue priority value supported for UL queues */
305 uint8_t max_ul_queue_priority;
306 /** Set if device supports per-queue interrupts */
307 bool queue_intr_supported;
308 /** Minimum alignment of buffers, in bytes */
309 uint16_t min_alignment;
310 /** HARQ memory available in kB */
311 uint32_t harq_buffer_size;
312 /** Default queue configuration used if none is supplied */
313 struct rte_bbdev_queue_conf default_queue_conf;
314 /** Device operation capabilities */
315 const struct rte_bbdev_op_cap *capabilities;
316 /** Device cpu_flag requirements */
317 const enum rte_cpu_flag_t *cpu_flag_reqs;
320 /** Macro used at end of bbdev PMD list */
321 #define RTE_BBDEV_END_OF_CAPABILITIES_LIST() \
322 { RTE_BBDEV_OP_NONE }
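/*
 * Example (editor's sketch): how a PMD might terminate its capability array
 * with this macro. The single Turbo-decode entry is purely illustrative; real
 * drivers also fill in the per-operation capability fields.
 *
 *	static const struct rte_bbdev_op_cap pmd_capabilities[] = {
 *		{ .type = RTE_BBDEV_OP_TURBO_DEC },
 *		RTE_BBDEV_END_OF_CAPABILITIES_LIST()
 *	};
 */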
325 * Device information structure used by an application to discover a device's
326 * capabilities and current configuration
328 struct rte_bbdev_info {
329 int socket_id; /**< NUMA socket that device is on */
330 const char *dev_name; /**< Unique device name */
331 const struct rte_device *device; /**< Device Information */
332 uint16_t num_queues; /**< Number of queues currently configured */
333 bool started; /**< Set if device is currently started */
334 struct rte_bbdev_driver_info drv; /**< Info from device driver */
338 * Retrieve information about a device.
341 * The identifier of the device.
343 * Pointer to the structure where information will be copied. On error, this
344 * location may or may not have been modified.
348 * - EINVAL if invalid parameter pointer is provided
352 rte_bbdev_info_get(uint16_t dev_id, struct rte_bbdev_info *dev_info);
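/*
 * Example (editor's sketch): querying a device and walking its capability
 * list, which is terminated by an RTE_BBDEV_OP_NONE entry.
 *
 *	struct rte_bbdev_info info;
 *	const struct rte_bbdev_op_cap *cap;
 *
 *	if (rte_bbdev_info_get(dev_id, &info) != 0)
 *		return;
 *	printf("%s: max queues %u, HW accelerated: %d\n", info.dev_name,
 *			info.drv.max_num_queues, info.drv.hardware_accelerated);
 *	for (cap = info.drv.capabilities; cap->type != RTE_BBDEV_OP_NONE; cap++)
 *		printf("  supports operation type %d\n", cap->type);
 */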
354 /** Queue information */
355 struct rte_bbdev_queue_info {
356 /** Current device configuration */
357 struct rte_bbdev_queue_conf conf;
358 /** Set if queue is currently started */
363 * Retrieve information about a specific queue on a device.
366 * The identifier of the device.
368 * The index of the queue.
370 * Pointer to the structure where information will be copied. On error, this
371 * location may or may not have been modified.
375 * - EINVAL if invalid parameter pointer is provided
379 rte_bbdev_queue_info_get(uint16_t dev_id, uint16_t queue_id,
380 struct rte_bbdev_queue_info *queue_info);
382 /** @internal The data structure associated with each queue of a device. */
383 struct rte_bbdev_queue_data {
384 void *queue_private; /**< Driver-specific per-queue data */
385 struct rte_bbdev_queue_conf conf; /**< Current configuration */
386 struct rte_bbdev_stats queue_stats; /**< Queue statistics */
387 bool started; /**< Queue state */
390 /** @internal Enqueue encode operations for processing on queue of a device. */
391 typedef uint16_t (*rte_bbdev_enqueue_enc_ops_t)(
392 struct rte_bbdev_queue_data *q_data,
393 struct rte_bbdev_enc_op **ops,
396 /** @internal Enqueue decode operations for processing on queue of a device. */
397 typedef uint16_t (*rte_bbdev_enqueue_dec_ops_t)(
398 struct rte_bbdev_queue_data *q_data,
399 struct rte_bbdev_dec_op **ops,
402 /** @internal Dequeue encode operations from a queue of a device. */
403 typedef uint16_t (*rte_bbdev_dequeue_enc_ops_t)(
404 struct rte_bbdev_queue_data *q_data,
405 struct rte_bbdev_enc_op **ops, uint16_t num);
407 /** @internal Dequeue decode operations from a queue of a device. */
408 typedef uint16_t (*rte_bbdev_dequeue_dec_ops_t)(
409 struct rte_bbdev_queue_data *q_data,
410 struct rte_bbdev_dec_op **ops, uint16_t num);
412 #define RTE_BBDEV_NAME_MAX_LEN 64 /**< Max length of device name */
415 * @internal The data associated with a device, with no function pointers.
416 * This structure is safe to place in shared memory to be common among
417 * different processes in a multi-process configuration. Drivers can access
418 * these fields, but should never write to them!
420 struct rte_bbdev_data {
421 char name[RTE_BBDEV_NAME_MAX_LEN]; /**< Unique identifier name */
422 void *dev_private; /**< Driver-specific private data */
423 uint16_t num_queues; /**< Number of currently configured queues */
424 struct rte_bbdev_queue_data *queues; /**< Queue structures */
425 uint16_t dev_id; /**< Device ID */
426 int socket_id; /**< NUMA socket that device is on */
427 bool started; /**< Device run-time state */
428 /** Counter of processes using the device */
429 rte_atomic16_t process_cnt;
432 /* Forward declarations */
433 struct rte_bbdev_ops;
434 struct rte_bbdev_callback;
435 struct rte_intr_handle;
437 /** Structure to keep track of registered callbacks */
438 TAILQ_HEAD(rte_bbdev_cb_list, rte_bbdev_callback);
441 * @internal The data structure associated with a device. Drivers can access
442 * these fields, but should only write to the *_ops fields.
444 struct __rte_cache_aligned rte_bbdev {
445 /** Enqueue encode function */
446 rte_bbdev_enqueue_enc_ops_t enqueue_enc_ops;
447 /** Enqueue decode function */
448 rte_bbdev_enqueue_dec_ops_t enqueue_dec_ops;
449 /** Dequeue encode function */
450 rte_bbdev_dequeue_enc_ops_t dequeue_enc_ops;
451 /** Dequeue decode function */
452 rte_bbdev_dequeue_dec_ops_t dequeue_dec_ops;
453 /** Enqueue LDPC encode function */
454 rte_bbdev_enqueue_enc_ops_t enqueue_ldpc_enc_ops;
455 /** Enqueue LDPC decode function */
456 rte_bbdev_enqueue_dec_ops_t enqueue_ldpc_dec_ops;
457 /** Dequeue LDPC encode function */
458 rte_bbdev_dequeue_enc_ops_t dequeue_ldpc_enc_ops;
459 /** Dequeue LDPC decode function */
460 rte_bbdev_dequeue_dec_ops_t dequeue_ldpc_dec_ops;
461 const struct rte_bbdev_ops *dev_ops; /**< Functions exported by PMD */
462 struct rte_bbdev_data *data; /**< Pointer to device data */
463 enum rte_bbdev_state state; /**< If device is currently used or not */
464 struct rte_device *device; /**< Backing device */
465 /** User application callback for interrupts if present */
466 struct rte_bbdev_cb_list list_cbs;
467 struct rte_intr_handle *intr_handle; /**< Device interrupt handle */
470 /** @internal array of all devices */
471 extern struct rte_bbdev rte_bbdev_devices[];
474 * Enqueue a burst of encode operations for processing to a queue of the device.
475 * This function only enqueues as many operations as currently possible and
476 * does not block until @p num_ops entries in the queue are available.
477 * This function does not provide any error notification to avoid the
478 * corresponding overhead.
481 * The identifier of the device.
483 * The index of the queue.
485 * Pointer array containing operations to be enqueued. Must have at least
488 * The maximum number of operations to enqueue.
491 * The number of operations actually enqueued (this is the number of processed
492 * entries in the @p ops array).
495 static inline uint16_t
496 rte_bbdev_enqueue_enc_ops(uint16_t dev_id, uint16_t queue_id,
497 struct rte_bbdev_enc_op **ops, uint16_t num_ops)
499 struct rte_bbdev *dev = &rte_bbdev_devices[dev_id];
500 struct rte_bbdev_queue_data *q_data = &dev->data->queues[queue_id];
501 return dev->enqueue_enc_ops(q_data, ops, num_ops);
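/*
 * Example (editor's sketch): enqueueing a burst of encode operations, retrying
 * the tail of the burst if the queue could only accept part of it. The ops
 * array (e.g. allocated with rte_bbdev_enc_op_alloc_bulk()), dev_id, queue_id
 * and burst_size are assumptions; a real application would normally bound the
 * retry loop.
 *
 *	uint16_t enq = 0;
 *
 *	while (enq < burst_size)
 *		enq += rte_bbdev_enqueue_enc_ops(dev_id, queue_id,
 *				&ops[enq], burst_size - enq);
 */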
505 * Enqueue a burst of decode operations for processing to a queue of the device.
506 * This function only enqueues as many operations as currently possible and
507 * does not block until @p num_ops entries in the queue are available.
508 * This function does not provide any error notification to avoid the
509 * corresponding overhead.
512 * The identifier of the device.
514 * The index of the queue.
516 * Pointer array containing operations to be enqueued. Must have at least
519 * The maximum number of operations to enqueue.
522 * The number of operations actually enqueued (this is the number of processed
523 * entries in the @p ops array).
526 static inline uint16_t
527 rte_bbdev_enqueue_dec_ops(uint16_t dev_id, uint16_t queue_id,
528 struct rte_bbdev_dec_op **ops, uint16_t num_ops)
530 struct rte_bbdev *dev = &rte_bbdev_devices[dev_id];
531 struct rte_bbdev_queue_data *q_data = &dev->data->queues[queue_id];
532 return dev->enqueue_dec_ops(q_data, ops, num_ops);
536 * Enqueue a burst of LDPC encode operations for processing to a queue of the
537 * device. This function only enqueues as many operations as currently possible and
538 * does not block until @p num_ops entries in the queue are available.
539 * This function does not provide any error notification to avoid the
540 * corresponding overhead.
543 * The identifier of the device.
545 * The index of the queue.
547 * Pointer array containing operations to be enqueued. Must have at least
550 * The maximum number of operations to enqueue.
553 * The number of operations actually enqueued (this is the number of processed
554 * entries in the @p ops array).
557 static inline uint16_t
558 rte_bbdev_enqueue_ldpc_enc_ops(uint16_t dev_id, uint16_t queue_id,
559 struct rte_bbdev_enc_op **ops, uint16_t num_ops)
561 struct rte_bbdev *dev = &rte_bbdev_devices[dev_id];
562 struct rte_bbdev_queue_data *q_data = &dev->data->queues[queue_id];
563 return dev->enqueue_ldpc_enc_ops(q_data, ops, num_ops);
567 * Enqueue a burst of LDPC decode operations for processing to a queue of the
568 * device. This function only enqueues as many operations as currently possible and
569 * does not block until @p num_ops entries in the queue are available.
570 * This function does not provide any error notification to avoid the
571 * corresponding overhead.
574 * The identifier of the device.
576 * The index of the queue.
578 * Pointer array containing operations to be enqueued. Must have at least
581 * The maximum number of operations to enqueue.
584 * The number of operations actually enqueued (this is the number of processed
585 * entries in the @p ops array).
588 static inline uint16_t
589 rte_bbdev_enqueue_ldpc_dec_ops(uint16_t dev_id, uint16_t queue_id,
590 struct rte_bbdev_dec_op **ops, uint16_t num_ops)
592 struct rte_bbdev *dev = &rte_bbdev_devices[dev_id];
593 struct rte_bbdev_queue_data *q_data = &dev->data->queues[queue_id];
594 return dev->enqueue_ldpc_dec_ops(q_data, ops, num_ops);
599 * Dequeue a burst of processed encode operations from a queue of the device.
600 * This function returns only the current contents of the queue, and does not
601 * block until @p num_ops operations are available.
602 * This function does not provide any error notification to avoid the
603 * corresponding overhead.
606 * The identifier of the device.
608 * The index of the queue.
610 * Pointer array where operations will be dequeued to. Must have at least
612 * i.e. a pointer to a table of operation pointers (ops) that will be filled.
614 * The maximum number of operations to dequeue.
617 * The number of operations actually dequeued (this is the number of entries
618 * copied into the @p ops array).
621 static inline uint16_t
622 rte_bbdev_dequeue_enc_ops(uint16_t dev_id, uint16_t queue_id,
623 struct rte_bbdev_enc_op **ops, uint16_t num_ops)
625 struct rte_bbdev *dev = &rte_bbdev_devices[dev_id];
626 struct rte_bbdev_queue_data *q_data = &dev->data->queues[queue_id];
627 return dev->dequeue_enc_ops(q_data, ops, num_ops);
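/*
 * Example (editor's sketch): polling a queue for completed encode operations,
 * checking each operation's status field and returning the operations to
 * their mempool with rte_bbdev_enc_op_free_bulk(). MAX_BURST is an assumed
 * application-defined constant.
 *
 *	struct rte_bbdev_enc_op *deq_ops[MAX_BURST];
 *	uint16_t i, deq;
 *
 *	deq = rte_bbdev_dequeue_enc_ops(dev_id, queue_id, deq_ops, MAX_BURST);
 *	for (i = 0; i < deq; i++)
 *		if (deq_ops[i]->status != 0)
 *			printf("op %u completed with an error status\n", i);
 *	rte_bbdev_enc_op_free_bulk(deq_ops, deq);
 */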
631 * Dequeue a burst of processed decode operations from a queue of the device.
632 * This function returns only the current contents of the queue, and does not
633 * block until @p num_ops operations are available.
634 * This function does not provide any error notification to avoid the
635 * corresponding overhead.
638 * The identifier of the device.
640 * The index of the queue.
642 * Pointer array where operations will be dequeued to. Must have at least
644 * i.e. a pointer to a table of operation pointers (ops) that will be filled.
646 * The maximum number of operations to dequeue.
649 * The number of operations actually dequeued (this is the number of entries
650 * copied into the @p ops array).
654 static inline uint16_t
655 rte_bbdev_dequeue_dec_ops(uint16_t dev_id, uint16_t queue_id,
656 struct rte_bbdev_dec_op **ops, uint16_t num_ops)
658 struct rte_bbdev *dev = &rte_bbdev_devices[dev_id];
659 struct rte_bbdev_queue_data *q_data = &dev->data->queues[queue_id];
660 return dev->dequeue_dec_ops(q_data, ops, num_ops);
665 * Dequeue a burst of processed LDPC encode operations from a queue of the
666 * device. This function returns only the current contents of the queue, and
667 * does not block until @p num_ops operations are available.
668 * This function does not provide any error notification to avoid the
669 * corresponding overhead.
672 * The identifier of the device.
674 * The index of the queue.
676 * Pointer array where operations will be dequeued to. Must have at least
679 * The maximum number of operations to dequeue.
682 * The number of operations actually dequeued (this is the number of entries
683 * copied into the @p ops array).
686 static inline uint16_t
687 rte_bbdev_dequeue_ldpc_enc_ops(uint16_t dev_id, uint16_t queue_id,
688 struct rte_bbdev_enc_op **ops, uint16_t num_ops)
690 struct rte_bbdev *dev = &rte_bbdev_devices[dev_id];
691 struct rte_bbdev_queue_data *q_data = &dev->data->queues[queue_id];
692 return dev->dequeue_ldpc_enc_ops(q_data, ops, num_ops);
696 * Dequeue a burst of processed LDPC decode operations from a queue of the
697 * device. This function returns only the current contents of the queue, and
698 * does not block until @p num_ops operations are available.
699 * This function does not provide any error notification to avoid the
700 * corresponding overhead.
703 * The identifier of the device.
705 * The index of the queue.
707 * Pointer array where operations will be dequeued to. Must have at least
710 * The maximum number of operations to dequeue.
713 * The number of operations actually dequeued (this is the number of entries
714 * copied into the @p ops array).
717 static inline uint16_t
718 rte_bbdev_dequeue_ldpc_dec_ops(uint16_t dev_id, uint16_t queue_id,
719 struct rte_bbdev_dec_op **ops, uint16_t num_ops)
721 struct rte_bbdev *dev = &rte_bbdev_devices[dev_id];
722 struct rte_bbdev_queue_data *q_data = &dev->data->queues[queue_id];
723 return dev->dequeue_ldpc_dec_ops(q_data, ops, num_ops);
726 /** Definitions of device event types */
727 enum rte_bbdev_event_type {
728 RTE_BBDEV_EVENT_UNKNOWN, /**< unknown event type */
729 RTE_BBDEV_EVENT_ERROR, /**< error interrupt event */
730 RTE_BBDEV_EVENT_DEQUEUE, /**< dequeue event */
731 RTE_BBDEV_EVENT_MAX /**< max value of this enum */
735 * Typedef for an application callback function, registered by the application,
736 * for notification of device events
741 * Device event to register for notification of.
743 * User specified parameter to be passed to user's callback function.
745 * To pass data back to user application.
747 typedef void (*rte_bbdev_cb_fn)(uint16_t dev_id,
748 enum rte_bbdev_event_type event, void *cb_arg,
752 * Register a callback function for a specific device id. Multiple callbacks can
753 * be added and will be called in the order they are added when an event is
754 * triggered. Callbacks are called in a separate thread created by the DPDK EAL.
759 * The event that the callback will be registered for.
761 * User supplied callback function to be called.
763 * Pointer to parameter that will be passed to the callback.
766 * Zero on success, negative value on failure.
770 rte_bbdev_callback_register(uint16_t dev_id, enum rte_bbdev_event_type event,
771 rte_bbdev_cb_fn cb_fn, void *cb_arg);
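/*
 * Example (editor's sketch): a minimal error-event callback and its
 * registration. The callback body is illustrative only.
 *
 *	static void
 *	error_cb(uint16_t dev_id, enum rte_bbdev_event_type event, void *cb_arg,
 *			void *ret_param)
 *	{
 *		if (event == RTE_BBDEV_EVENT_ERROR)
 *			printf("bbdev %u reported an error event\n", dev_id);
 *	}
 *
 *	rte_bbdev_callback_register(dev_id, RTE_BBDEV_EVENT_ERROR, error_cb, NULL);
 */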
774 * Unregister a callback function for a specific device id.
777 * The device identifier.
779 * The event that the callback will be unregistered for.
781 * User supplied callback function to be unregistered.
783 * Pointer to the parameter supplied when registering the callback.
784 * (void *)-1 means to remove all registered callbacks with the specified
789 * - EINVAL if invalid parameter pointer is provided
790 * - EAGAIN if the provided callback pointer does not exist
794 rte_bbdev_callback_unregister(uint16_t dev_id, enum rte_bbdev_event_type event,
795 rte_bbdev_cb_fn cb_fn, void *cb_arg);
798 * Enable a one-shot interrupt on the next operation enqueued to a particular
799 * queue. The interrupt will be triggered when the operation is ready to be
800 * dequeued. To handle the interrupt, an epoll file descriptor must be
801 * registered using rte_bbdev_queue_intr_ctl(), and then an application
802 * thread/lcore can wait for the interrupt using rte_epoll_wait().
805 * The device identifier.
807 * The index of the queue.
811 * - negative value on failure - as returned from PMD driver
815 rte_bbdev_queue_intr_enable(uint16_t dev_id, uint16_t queue_id);
818 * Disable a one-shot interrupt on the next operation enqueued to a particular
819 * queue (if it has been enabled).
822 * The device identifier.
824 * The index of the queue.
828 * - negative value on failure - as returned from PMD driver
832 rte_bbdev_queue_intr_disable(uint16_t dev_id, uint16_t queue_id);
835 * Control interface for per-queue interrupts.
838 * The device identifier.
840 * The index of the queue.
842 * Epoll file descriptor that will be associated with the interrupt source.
843 * If the special value RTE_EPOLL_PER_THREAD is provided, a per thread epoll
844 * file descriptor created by the EAL is used (RTE_EPOLL_PER_THREAD can also
845 * be used when calling rte_epoll_wait()).
847 * The operation to be performed for the vector: RTE_INTR_EVENT_ADD or
848 * RTE_INTR_EVENT_DEL.
850 * User context, that will be returned in the epdata.data field of the
851 * rte_epoll_event structure filled in by rte_epoll_wait().
855 * - ENOTSUP if interrupts are not supported by the identified device
856 * - negative value on failure - as returned from PMD driver
860 rte_bbdev_queue_intr_ctl(uint16_t dev_id, uint16_t queue_id, int epfd, int op,
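/*
 * Example (editor's sketch): waiting for a completion interrupt on a queue
 * instead of busy polling. Assumes interrupts were enabled with
 * rte_bbdev_intr_enable() before the device was started, and that the
 * rte_epoll API from <rte_interrupts.h> is available. The enqueue step and
 * the deq_ops array/MAX_BURST constant are assumed.
 *
 *	struct rte_epoll_event ev;
 *	uint16_t deq;
 *
 *	rte_bbdev_queue_intr_ctl(dev_id, queue_id, RTE_EPOLL_PER_THREAD,
 *			RTE_INTR_EVENT_ADD, NULL);
 *	rte_bbdev_queue_intr_enable(dev_id, queue_id);
 *	(enqueue a burst of operations here)
 *	while (rte_epoll_wait(RTE_EPOLL_PER_THREAD, &ev, 1, -1) <= 0)
 *		;
 *	deq = rte_bbdev_dequeue_enc_ops(dev_id, queue_id, deq_ops, MAX_BURST);
 */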
867 #endif /* _RTE_BBDEV_H_ */