/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2017 Intel Corporation
 */

#include <stdint.h>
#include <string.h>
#include <stdbool.h>

#include <rte_compat.h>
#include <rte_common.h>
#include <rte_errno.h>
#include <rte_log.h>
#include <rte_debug.h>
#include <rte_eal.h>
#include <rte_malloc.h>
#include <rte_mempool.h>
#include <rte_memzone.h>
#include <rte_lcore.h>
#include <rte_dev.h>
#include <rte_spinlock.h>
#include <rte_tailq.h>
#include <rte_interrupts.h>

#include "rte_bbdev_op.h"
#include "rte_bbdev.h"
#include "rte_bbdev_pmd.h"

#define DEV_NAME "BBDEV"

/* BBDev library logging ID */
static int bbdev_logtype;

/* Helper macro for logging */
#define rte_bbdev_log(level, fmt, ...) \
	rte_log(RTE_LOG_ ## level, bbdev_logtype, fmt "\n", ##__VA_ARGS__)

/* Helper macro for debug logging with source line and function info */
#define rte_bbdev_log_debug(fmt, ...) \
	rte_bbdev_log(DEBUG, RTE_STR(__LINE__) ":%s() " fmt, __func__, \
		##__VA_ARGS__)

/* Helper macro to check dev_id is valid */
#define VALID_DEV_OR_RET_ERR(dev, dev_id) do { \
	if (dev == NULL) { \
		rte_bbdev_log(ERR, "device %u is invalid", dev_id); \
		return -ENODEV; \
	} \
} while (0)

/* Helper macro to check dev_ops is valid */
#define VALID_DEV_OPS_OR_RET_ERR(dev, dev_id) do { \
	if (dev->dev_ops == NULL) { \
		rte_bbdev_log(ERR, "NULL dev_ops structure in device %u", \
				dev_id); \
		return -ENODEV; \
	} \
} while (0)

/* Helper macro to check that driver implements required function pointer */
#define VALID_FUNC_OR_RET_ERR(func, dev_id) do { \
	if (func == NULL) { \
		rte_bbdev_log(ERR, "device %u does not support %s", \
				dev_id, #func); \
		return -ENOTSUP; \
	} \
} while (0)

/* Helper macro to check that queue is valid */
#define VALID_QUEUE_OR_RET_ERR(queue_id, dev) do { \
	if (queue_id >= dev->data->num_queues) { \
		rte_bbdev_log(ERR, "Invalid queue_id %u for device %u", \
				queue_id, dev->data->dev_id); \
		return -ERANGE; \
	} \
} while (0)

/* List of callback functions registered by an application */
struct rte_bbdev_callback {
	TAILQ_ENTRY(rte_bbdev_callback) next;  /* Callbacks list */
	rte_bbdev_cb_fn cb_fn;  /* Callback address */
	void *cb_arg;  /* Parameter for callback */
	void *ret_param;  /* Return parameter */
	enum rte_bbdev_event_type event;  /* Interrupt event type */
	uint32_t active;  /* Callback is executing */
};

/* spinlock for bbdev device callbacks */
static rte_spinlock_t rte_bbdev_cb_lock = RTE_SPINLOCK_INITIALIZER;

/*
 * Global array of all devices. This is not static because it's used by the
 * inline enqueue and dequeue functions.
 */
struct rte_bbdev rte_bbdev_devices[RTE_BBDEV_MAX_DEVS];

/* Global array with rte_bbdev_data structures */
static struct rte_bbdev_data *rte_bbdev_data;

/* Memzone name for global bbdev data pool */
static const char *MZ_RTE_BBDEV_DATA = "rte_bbdev_data";

/* Number of currently valid devices */
static uint16_t num_devs;

/* Return pointer to device structure, with validity check */
static struct rte_bbdev *
get_dev(uint16_t dev_id)
{
	if (rte_bbdev_is_valid(dev_id))
		return &rte_bbdev_devices[dev_id];

	return NULL;
}

/* Allocate global data array */
static int
rte_bbdev_data_alloc(void)
{
	const unsigned int flags = 0;
	const struct rte_memzone *mz;

	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
		mz = rte_memzone_reserve(MZ_RTE_BBDEV_DATA,
				RTE_BBDEV_MAX_DEVS * sizeof(*rte_bbdev_data),
				rte_socket_id(), flags);
	} else
		mz = rte_memzone_lookup(MZ_RTE_BBDEV_DATA);
	if (mz == NULL) {
		rte_bbdev_log(CRIT,
				"Cannot allocate memzone for bbdev port data");
		return -ENOMEM;
	}

	rte_bbdev_data = mz->addr;
	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
		memset(rte_bbdev_data, 0,
				RTE_BBDEV_MAX_DEVS * sizeof(*rte_bbdev_data));
	return 0;
}

/*
 * Find data allocated for the device or, if not found, return the first
 * unused bbdev data slot. If all structures are in use and none belongs to
 * the device, return NULL.
 */
static struct rte_bbdev_data *
find_bbdev_data(const char *name)
{
	uint16_t data_id;

	for (data_id = 0; data_id < RTE_BBDEV_MAX_DEVS; ++data_id) {
		if (strlen(rte_bbdev_data[data_id].name) == 0) {
			memset(&rte_bbdev_data[data_id], 0,
					sizeof(struct rte_bbdev_data));
			return &rte_bbdev_data[data_id];
		} else if (strncmp(rte_bbdev_data[data_id].name, name,
				RTE_BBDEV_NAME_MAX_LEN) == 0)
			return &rte_bbdev_data[data_id];
	}

	return NULL;
}

/* Find lowest device id with no attached device */
static uint16_t
find_free_dev_id(void)
{
	uint16_t i;

	for (i = 0; i < RTE_BBDEV_MAX_DEVS; i++) {
		if (rte_bbdev_devices[i].state == RTE_BBDEV_UNUSED)
			return i;
	}
	return RTE_BBDEV_MAX_DEVS;
}

struct rte_bbdev * __rte_experimental
rte_bbdev_allocate(const char *name)
{
	int ret;
	struct rte_bbdev *bbdev;
	uint16_t dev_id;

	if (name == NULL) {
		rte_bbdev_log(ERR, "Invalid null device name");
		return NULL;
	}

	if (rte_bbdev_get_named_dev(name) != NULL) {
		rte_bbdev_log(ERR, "Device \"%s\" is already allocated", name);
		return NULL;
	}

	dev_id = find_free_dev_id();
	if (dev_id == RTE_BBDEV_MAX_DEVS) {
		rte_bbdev_log(ERR, "Reached maximum number of devices");
		return NULL;
	}

	bbdev = &rte_bbdev_devices[dev_id];

	if (rte_bbdev_data == NULL) {
		ret = rte_bbdev_data_alloc();
		if (ret != 0)
			return NULL;
	}

	bbdev->data = find_bbdev_data(name);
	if (bbdev->data == NULL) {
		rte_bbdev_log(ERR,
				"Max BBDevs already allocated in multi-process environment!");
		return NULL;
	}

	rte_atomic16_inc(&bbdev->data->process_cnt);
	bbdev->data->dev_id = dev_id;
	bbdev->state = RTE_BBDEV_INITIALIZED;

	ret = snprintf(bbdev->data->name, RTE_BBDEV_NAME_MAX_LEN, "%s", name);
	if ((ret < 0) || (ret >= RTE_BBDEV_NAME_MAX_LEN)) {
		rte_bbdev_log(ERR, "Copying device name \"%s\" failed", name);
		return NULL;
	}

	/* init user callbacks */
	TAILQ_INIT(&(bbdev->list_cbs));

	num_devs++;

	rte_bbdev_log_debug("Initialised device %s (id = %u). Num devices = %u",
			name, dev_id, num_devs);

	return bbdev;
}
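
/*
 * Usage sketch (illustrative only, not part of the library): how a PMD
 * would typically pair rte_bbdev_allocate() with an ops table in its probe
 * hook. The names my_bbdev_probe and my_bbdev_ops are hypothetical.
 *
 *	static int
 *	my_bbdev_probe(struct rte_vdev_device *vdev)
 *	{
 *		struct rte_bbdev *bbdev =
 *			rte_bbdev_allocate(rte_vdev_device_name(vdev));
 *		if (bbdev == NULL)
 *			return -ENODEV;
 *		bbdev->dev_ops = &my_bbdev_ops;
 *		return 0;
 *	}
 *
 * The matching remove hook would call rte_bbdev_release(bbdev).
 */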

int __rte_experimental
rte_bbdev_release(struct rte_bbdev *bbdev)
{
	uint16_t dev_id;
	struct rte_bbdev_callback *cb, *next;

	if (bbdev == NULL) {
		rte_bbdev_log(ERR, "NULL bbdev");
		return -ENODEV;
	}
	dev_id = bbdev->data->dev_id;

	/* free all callbacks from the device's list */
	for (cb = TAILQ_FIRST(&bbdev->list_cbs); cb != NULL; cb = next) {
		next = TAILQ_NEXT(cb, next);
		TAILQ_REMOVE(&(bbdev->list_cbs), cb, next);
		rte_free(cb);
	}

	/* clear shared BBDev Data if no process is using the device anymore */
	if (rte_atomic16_dec_and_test(&bbdev->data->process_cnt))
		memset(bbdev->data, 0, sizeof(*bbdev->data));

	memset(bbdev, 0, sizeof(*bbdev));
	num_devs--;
	bbdev->state = RTE_BBDEV_UNUSED;

	rte_bbdev_log_debug(
			"Un-initialised device id = %u. Num devices = %u",
			dev_id, num_devs);
	return 0;
}

struct rte_bbdev * __rte_experimental
rte_bbdev_get_named_dev(const char *name)
{
	unsigned int i;

	if (name == NULL) {
		rte_bbdev_log(ERR, "NULL driver name");
		return NULL;
	}

	for (i = 0; i < RTE_BBDEV_MAX_DEVS; i++) {
		struct rte_bbdev *dev = get_dev(i);
		if (dev && (strncmp(dev->data->name,
				name, RTE_BBDEV_NAME_MAX_LEN) == 0))
			return dev;
	}

	return NULL;
}

uint16_t __rte_experimental
rte_bbdev_count(void)
{
	return num_devs;
}

bool __rte_experimental
rte_bbdev_is_valid(uint16_t dev_id)
{
	if ((dev_id < RTE_BBDEV_MAX_DEVS) &&
		rte_bbdev_devices[dev_id].state == RTE_BBDEV_INITIALIZED)
		return true;
	return false;
}

uint16_t __rte_experimental
rte_bbdev_find_next(uint16_t dev_id)
{
	dev_id++;
	for (; dev_id < RTE_BBDEV_MAX_DEVS; dev_id++)
		if (rte_bbdev_is_valid(dev_id))
			break;
	return dev_id;
}

int __rte_experimental
rte_bbdev_setup_queues(uint16_t dev_id, uint16_t num_queues, int socket_id)
{
	unsigned int i;
	int ret;
	struct rte_bbdev_driver_info dev_info;
	struct rte_bbdev *dev = get_dev(dev_id);
	VALID_DEV_OR_RET_ERR(dev, dev_id);

	VALID_DEV_OPS_OR_RET_ERR(dev, dev_id);

	if (dev->data->started) {
		rte_bbdev_log(ERR,
				"Device %u cannot be configured when started",
				dev_id);
		return -EBUSY;
	}

	/* Get device driver information to get max number of queues */
	VALID_FUNC_OR_RET_ERR(dev->dev_ops->info_get, dev_id);
	memset(&dev_info, 0, sizeof(dev_info));
	dev->dev_ops->info_get(dev, &dev_info);

	if ((num_queues == 0) || (num_queues > dev_info.max_num_queues)) {
		rte_bbdev_log(ERR,
				"Device %u supports 0 < N <= %u queues, not %u",
				dev_id, dev_info.max_num_queues, num_queues);
		return -EINVAL;
	}

	/* If re-configuration, get driver to free existing internal memory */
	if (dev->data->queues != NULL) {
		VALID_FUNC_OR_RET_ERR(dev->dev_ops->queue_release, dev_id);
		for (i = 0; i < dev->data->num_queues; i++) {
			int ret = dev->dev_ops->queue_release(dev, i);
			if (ret < 0) {
				rte_bbdev_log(ERR,
						"Device %u queue %u release failed",
						dev_id, i);
				return -ENOENT;
			}
		}
		/* Call optional device close */
		if (dev->dev_ops->close) {
			ret = dev->dev_ops->close(dev);
			if (ret < 0) {
				rte_bbdev_log(ERR,
						"Device %u couldn't be closed",
						dev_id);
				return -ENOENT;
			}
		}
		rte_free(dev->data->queues);
	}

	/* Allocate queue pointers */
	dev->data->queues = rte_calloc_socket(DEV_NAME, num_queues,
			sizeof(dev->data->queues[0]), RTE_CACHE_LINE_SIZE,
			dev->data->socket_id);
	if (dev->data->queues == NULL) {
		rte_bbdev_log(ERR,
				"calloc of %u queues for device %u on socket %i failed",
				num_queues, dev_id, dev->data->socket_id);
		return -ENOMEM;
	}

	dev->data->num_queues = num_queues;

	/* Call optional device configuration */
	if (dev->dev_ops->setup_queues) {
		ret = dev->dev_ops->setup_queues(dev, num_queues, socket_id);
		if (ret < 0) {
			rte_bbdev_log(ERR,
					"Device %u memory configuration failed",
					dev_id);
			goto error;
		}
	}

	rte_bbdev_log_debug("Device %u set up with %u queues", dev_id,
			num_queues);
	return 0;

error:
	dev->data->num_queues = 0;
	rte_free(dev->data->queues);
	dev->data->queues = NULL;
	return ret;
}
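
/*
 * Usage sketch (illustrative only): configuring a stopped device from an
 * application, sized from the limits the driver reports. Error handling
 * is trimmed and dev_id is assumed valid.
 *
 *	struct rte_bbdev_info info;
 *	uint16_t q_num;
 *
 *	rte_bbdev_info_get(dev_id, &info);
 *	q_num = RTE_MIN(rte_lcore_count(), info.drv.max_num_queues);
 *	if (rte_bbdev_setup_queues(dev_id, q_num, rte_socket_id()) != 0)
 *		rte_exit(EXIT_FAILURE, "setup_queues failed\n");
 */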

int __rte_experimental
rte_bbdev_intr_enable(uint16_t dev_id)
{
	int ret;
	struct rte_bbdev *dev = get_dev(dev_id);
	VALID_DEV_OR_RET_ERR(dev, dev_id);

	VALID_DEV_OPS_OR_RET_ERR(dev, dev_id);

	if (dev->data->started) {
		rte_bbdev_log(ERR,
				"Device %u cannot be configured when started",
				dev_id);
		return -EBUSY;
	}

	if (dev->dev_ops->intr_enable) {
		ret = dev->dev_ops->intr_enable(dev);
		if (ret < 0) {
			rte_bbdev_log(ERR,
					"Device %u interrupts configuration failed",
					dev_id);
			return ret;
		}
		rte_bbdev_log_debug("Enabled interrupts for dev %u", dev_id);
		return 0;
	}

	rte_bbdev_log(ERR, "Device %u doesn't support interrupts", dev_id);
	return -ENOTSUP;
}

int __rte_experimental
rte_bbdev_queue_configure(uint16_t dev_id, uint16_t queue_id,
		const struct rte_bbdev_queue_conf *conf)
{
	int ret = 0;
	struct rte_bbdev_driver_info dev_info;
	struct rte_bbdev *dev = get_dev(dev_id);
	const struct rte_bbdev_op_cap *p;
	struct rte_bbdev_queue_conf *stored_conf;
	const char *op_type_str;
	VALID_DEV_OR_RET_ERR(dev, dev_id);

	VALID_DEV_OPS_OR_RET_ERR(dev, dev_id);

	VALID_QUEUE_OR_RET_ERR(queue_id, dev);

	if (dev->data->queues[queue_id].started || dev->data->started) {
		rte_bbdev_log(ERR,
				"Queue %u of device %u cannot be configured when started",
				queue_id, dev_id);
		return -EBUSY;
	}

	VALID_FUNC_OR_RET_ERR(dev->dev_ops->queue_release, dev_id);
	VALID_FUNC_OR_RET_ERR(dev->dev_ops->queue_setup, dev_id);

	/* Get device driver information to verify config is valid */
	VALID_FUNC_OR_RET_ERR(dev->dev_ops->info_get, dev_id);
	memset(&dev_info, 0, sizeof(dev_info));
	dev->dev_ops->info_get(dev, &dev_info);

	/* Check configuration is valid */
	if (conf != NULL) {
		if ((conf->op_type == RTE_BBDEV_OP_NONE) &&
				(dev_info.capabilities[0].type ==
				RTE_BBDEV_OP_NONE)) {
			ret = 1;
		} else {
			for (p = dev_info.capabilities;
					p->type != RTE_BBDEV_OP_NONE; p++) {
				if (conf->op_type == p->type) {
					ret = 1;
					break;
				}
			}
		}
		if (ret == 0) {
			rte_bbdev_log(ERR, "Invalid operation type");
			return -EINVAL;
		}
		if (conf->queue_size > dev_info.queue_size_lim) {
			rte_bbdev_log(ERR,
					"Size (%u) of queue %u of device %u must be: <= %u",
					conf->queue_size, queue_id, dev_id,
					dev_info.queue_size_lim);
			return -EINVAL;
		}
		if (!rte_is_power_of_2(conf->queue_size)) {
			rte_bbdev_log(ERR,
					"Size (%u) of queue %u of device %u must be a power of 2",
					conf->queue_size, queue_id, dev_id);
			return -EINVAL;
		}
		if (conf->op_type == RTE_BBDEV_OP_TURBO_DEC &&
			conf->priority > dev_info.max_ul_queue_priority) {
			rte_bbdev_log(ERR,
					"Priority (%u) of queue %u of device %u must be <= %u",
					conf->priority, queue_id, dev_id,
					dev_info.max_ul_queue_priority);
			return -EINVAL;
		}
		if (conf->op_type == RTE_BBDEV_OP_TURBO_ENC &&
			conf->priority > dev_info.max_dl_queue_priority) {
			rte_bbdev_log(ERR,
					"Priority (%u) of queue %u of device %u must be <= %u",
					conf->priority, queue_id, dev_id,
					dev_info.max_dl_queue_priority);
			return -EINVAL;
		}
	}

	/* Release existing queue (in case of queue reconfiguration) */
	if (dev->data->queues[queue_id].queue_private != NULL) {
		ret = dev->dev_ops->queue_release(dev, queue_id);
		if (ret < 0) {
			rte_bbdev_log(ERR, "Device %u queue %u release failed",
					dev_id, queue_id);
			return -ENOENT;
		}
	}

	/* Get driver to setup the queue */
	ret = dev->dev_ops->queue_setup(dev, queue_id, (conf != NULL) ?
			conf : &dev_info.default_queue_conf);
	if (ret < 0) {
		rte_bbdev_log(ERR,
				"Device %u queue %u setup failed", dev_id,
				queue_id);
		return -ENOENT;
	}

	/* Store configuration */
	stored_conf = &dev->data->queues[queue_id].conf;
	memcpy(stored_conf,
			(conf != NULL) ? conf : &dev_info.default_queue_conf,
			sizeof(*stored_conf));

	op_type_str = rte_bbdev_op_type_str(stored_conf->op_type);
	if (op_type_str == NULL)
		return -EINVAL;

	rte_bbdev_log_debug("Configured dev%uq%u (size=%u, type=%s, prio=%u)",
			dev_id, queue_id, stored_conf->queue_size, op_type_str,
			stored_conf->priority);

	return 0;
}
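
/*
 * Usage sketch (illustrative only): configuring every queue for Turbo
 * decode, using the driver's default queue size. `info` and `q_num` are
 * as obtained in the sketch after rte_bbdev_setup_queues(); only fields
 * validated above are set, the rest keep their zero defaults.
 *
 *	uint16_t q;
 *	struct rte_bbdev_queue_conf qconf = {
 *		.socket = rte_socket_id(),
 *		.queue_size = info.drv.default_queue_conf.queue_size,
 *		.op_type = RTE_BBDEV_OP_TURBO_DEC,
 *	};
 *	for (q = 0; q < q_num; q++)
 *		if (rte_bbdev_queue_configure(dev_id, q, &qconf) != 0)
 *			rte_exit(EXIT_FAILURE, "queue %u failed\n", q);
 */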

int __rte_experimental
rte_bbdev_start(uint16_t dev_id)
{
	int i;
	struct rte_bbdev *dev = get_dev(dev_id);
	VALID_DEV_OR_RET_ERR(dev, dev_id);

	VALID_DEV_OPS_OR_RET_ERR(dev, dev_id);

	if (dev->data->started) {
		rte_bbdev_log_debug("Device %u is already started", dev_id);
		return 0;
	}

	if (dev->dev_ops->start) {
		int ret = dev->dev_ops->start(dev);
		if (ret < 0) {
			rte_bbdev_log(ERR, "Device %u start failed", dev_id);
			return ret;
		}
	}

	/* Store new state */
	for (i = 0; i < dev->data->num_queues; i++)
		if (!dev->data->queues[i].conf.deferred_start)
			dev->data->queues[i].started = true;
	dev->data->started = true;

	rte_bbdev_log_debug("Started device %u", dev_id);
	return 0;
}

int __rte_experimental
rte_bbdev_stop(uint16_t dev_id)
{
	struct rte_bbdev *dev = get_dev(dev_id);
	VALID_DEV_OR_RET_ERR(dev, dev_id);

	VALID_DEV_OPS_OR_RET_ERR(dev, dev_id);

	if (!dev->data->started) {
		rte_bbdev_log_debug("Device %u is already stopped", dev_id);
		return 0;
	}

	if (dev->dev_ops->stop)
		dev->dev_ops->stop(dev);
	dev->data->started = false;

	rte_bbdev_log_debug("Stopped device %u", dev_id);
	return 0;
}

int __rte_experimental
rte_bbdev_close(uint16_t dev_id)
{
	int ret;
	uint16_t i;
	struct rte_bbdev *dev = get_dev(dev_id);
	VALID_DEV_OR_RET_ERR(dev, dev_id);

	VALID_DEV_OPS_OR_RET_ERR(dev, dev_id);

	if (dev->data->started) {
		ret = rte_bbdev_stop(dev_id);
		if (ret < 0) {
			rte_bbdev_log(ERR, "Device %u stop failed", dev_id);
			return ret;
		}
	}

	/* Free memory used by queues */
	for (i = 0; i < dev->data->num_queues; i++) {
		ret = dev->dev_ops->queue_release(dev, i);
		if (ret < 0) {
			rte_bbdev_log(ERR, "Device %u queue %u release failed",
					dev_id, i);
			return -ENOENT;
		}
	}
	rte_free(dev->data->queues);

	if (dev->dev_ops->close) {
		ret = dev->dev_ops->close(dev);
		if (ret < 0) {
			rte_bbdev_log(ERR, "Device %u close failed", dev_id);
			return ret;
		}
	}

	/* Clear configuration */
	dev->data->queues = NULL;
	dev->data->num_queues = 0;

	rte_bbdev_log_debug("Closed device %u", dev_id);
	return 0;
}
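
/*
 * Usage sketch (illustrative only): the device lifecycle as enforced by
 * the checks above -- queues are configured before start, and a started
 * device is stopped automatically by close, which also frees the queues.
 *
 *	rte_bbdev_start(dev_id);
 *	// ... enqueue/dequeue bursts on the started queues ...
 *	rte_bbdev_stop(dev_id);
 *	rte_bbdev_close(dev_id);
 */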

int __rte_experimental
rte_bbdev_queue_start(uint16_t dev_id, uint16_t queue_id)
{
	struct rte_bbdev *dev = get_dev(dev_id);
	VALID_DEV_OR_RET_ERR(dev, dev_id);

	VALID_DEV_OPS_OR_RET_ERR(dev, dev_id);

	VALID_QUEUE_OR_RET_ERR(queue_id, dev);

	if (dev->data->queues[queue_id].started) {
		rte_bbdev_log_debug("Queue %u of device %u already started",
				queue_id, dev_id);
		return 0;
	}

	if (dev->dev_ops->queue_start) {
		int ret = dev->dev_ops->queue_start(dev, queue_id);
		if (ret < 0) {
			rte_bbdev_log(ERR, "Device %u queue %u start failed",
					dev_id, queue_id);
			return ret;
		}
	}
	dev->data->queues[queue_id].started = true;

	rte_bbdev_log_debug("Started queue %u of device %u", queue_id, dev_id);
	return 0;
}

int __rte_experimental
rte_bbdev_queue_stop(uint16_t dev_id, uint16_t queue_id)
{
	struct rte_bbdev *dev = get_dev(dev_id);
	VALID_DEV_OR_RET_ERR(dev, dev_id);

	VALID_DEV_OPS_OR_RET_ERR(dev, dev_id);

	VALID_QUEUE_OR_RET_ERR(queue_id, dev);

	if (!dev->data->queues[queue_id].started) {
		rte_bbdev_log_debug("Queue %u of device %u already stopped",
				queue_id, dev_id);
		return 0;
	}

	if (dev->dev_ops->queue_stop) {
		int ret = dev->dev_ops->queue_stop(dev, queue_id);
		if (ret < 0) {
			rte_bbdev_log(ERR, "Device %u queue %u stop failed",
					dev_id, queue_id);
			return ret;
		}
	}
	dev->data->queues[queue_id].started = false;

	rte_bbdev_log_debug("Stopped queue %u of device %u", queue_id, dev_id);
	return 0;
}

/* Sum per-queue counters into the device-wide statistics */
static void
get_stats_from_queues(struct rte_bbdev *dev, struct rte_bbdev_stats *stats)
{
	unsigned int q_id;

	for (q_id = 0; q_id < dev->data->num_queues; q_id++) {
		struct rte_bbdev_stats *q_stats =
				&dev->data->queues[q_id].queue_stats;

		stats->enqueued_count += q_stats->enqueued_count;
		stats->dequeued_count += q_stats->dequeued_count;
		stats->enqueue_err_count += q_stats->enqueue_err_count;
		stats->dequeue_err_count += q_stats->dequeue_err_count;
	}
	rte_bbdev_log_debug("Got stats on %u", dev->data->dev_id);
}

/* Zero the per-queue statistics of every queue */
static void
reset_stats_in_queues(struct rte_bbdev *dev)
{
	unsigned int q_id;

	for (q_id = 0; q_id < dev->data->num_queues; q_id++) {
		struct rte_bbdev_stats *q_stats =
				&dev->data->queues[q_id].queue_stats;

		memset(q_stats, 0, sizeof(*q_stats));
	}
	rte_bbdev_log_debug("Reset stats on %u", dev->data->dev_id);
}

int __rte_experimental
rte_bbdev_stats_get(uint16_t dev_id, struct rte_bbdev_stats *stats)
{
	struct rte_bbdev *dev = get_dev(dev_id);
	VALID_DEV_OR_RET_ERR(dev, dev_id);

	VALID_DEV_OPS_OR_RET_ERR(dev, dev_id);

	if (stats == NULL) {
		rte_bbdev_log(ERR, "NULL stats structure");
		return -EINVAL;
	}

	memset(stats, 0, sizeof(*stats));
	if (dev->dev_ops->stats_get != NULL)
		dev->dev_ops->stats_get(dev, stats);
	else
		get_stats_from_queues(dev, stats);

	rte_bbdev_log_debug("Retrieved stats of device %u", dev_id);
	return 0;
}
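
/*
 * Usage sketch (illustrative only): polling the aggregate counters to
 * detect enqueue back-pressure. If the driver implements stats_get its
 * numbers are used; otherwise the per-queue counters are summed.
 *
 *	struct rte_bbdev_stats stats;
 *
 *	if (rte_bbdev_stats_get(dev_id, &stats) == 0 &&
 *			stats.enqueue_err_count > 0)
 *		printf("%" PRIu64 " enqueues rejected\n",
 *				stats.enqueue_err_count);
 */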

int __rte_experimental
rte_bbdev_stats_reset(uint16_t dev_id)
{
	struct rte_bbdev *dev = get_dev(dev_id);
	VALID_DEV_OR_RET_ERR(dev, dev_id);

	VALID_DEV_OPS_OR_RET_ERR(dev, dev_id);

	if (dev->dev_ops->stats_reset != NULL)
		dev->dev_ops->stats_reset(dev);
	else
		reset_stats_in_queues(dev);

	rte_bbdev_log_debug("Reset stats of device %u", dev_id);
	return 0;
}

int __rte_experimental
rte_bbdev_info_get(uint16_t dev_id, struct rte_bbdev_info *dev_info)
{
	struct rte_bbdev *dev = get_dev(dev_id);
	VALID_DEV_OR_RET_ERR(dev, dev_id);

	VALID_FUNC_OR_RET_ERR(dev->dev_ops->info_get, dev_id);

	if (dev_info == NULL) {
		rte_bbdev_log(ERR, "NULL dev info structure");
		return -EINVAL;
	}

	/* Copy data maintained by device interface layer */
	memset(dev_info, 0, sizeof(*dev_info));
	dev_info->dev_name = dev->data->name;
	dev_info->num_queues = dev->data->num_queues;
	dev_info->bus = rte_bus_find_by_device(dev->device);
	dev_info->socket_id = dev->data->socket_id;
	dev_info->started = dev->data->started;

	/* Copy data maintained by device driver layer */
	dev->dev_ops->info_get(dev, &dev_info->drv);

	rte_bbdev_log_debug("Retrieved info of device %u", dev_id);
	return 0;
}

int __rte_experimental
rte_bbdev_queue_info_get(uint16_t dev_id, uint16_t queue_id,
		struct rte_bbdev_queue_info *queue_info)
{
	struct rte_bbdev *dev = get_dev(dev_id);
	VALID_DEV_OR_RET_ERR(dev, dev_id);

	VALID_QUEUE_OR_RET_ERR(queue_id, dev);

	if (queue_info == NULL) {
		rte_bbdev_log(ERR, "NULL queue info structure");
		return -EINVAL;
	}

	/* Copy data to output */
	memset(queue_info, 0, sizeof(*queue_info));
	queue_info->conf = dev->data->queues[queue_id].conf;
	queue_info->started = dev->data->queues[queue_id].started;

	rte_bbdev_log_debug("Retrieved info of queue %u of device %u",
			queue_id, dev_id);
	return 0;
}

/* Calculate size needed to store bbdev_op, depending on type */
static unsigned int
get_bbdev_op_size(enum rte_bbdev_op_type type)
{
	unsigned int result = 0;

	switch (type) {
	case RTE_BBDEV_OP_NONE:
		result = RTE_MAX(sizeof(struct rte_bbdev_dec_op),
				sizeof(struct rte_bbdev_enc_op));
		break;
	case RTE_BBDEV_OP_TURBO_DEC:
		result = sizeof(struct rte_bbdev_dec_op);
		break;
	case RTE_BBDEV_OP_TURBO_ENC:
		result = sizeof(struct rte_bbdev_enc_op);
		break;
	default:
		break;
	}

	return result;
}

/* Initialise a bbdev_op structure */
static void
bbdev_op_init(struct rte_mempool *mempool, void *arg, void *element,
		__rte_unused unsigned int n)
{
	enum rte_bbdev_op_type type = *(enum rte_bbdev_op_type *)arg;

	if (type == RTE_BBDEV_OP_TURBO_DEC) {
		struct rte_bbdev_dec_op *op = element;
		memset(op, 0, mempool->elt_size);
		op->mempool = mempool;
	} else if (type == RTE_BBDEV_OP_TURBO_ENC) {
		struct rte_bbdev_enc_op *op = element;
		memset(op, 0, mempool->elt_size);
		op->mempool = mempool;
	}
}

struct rte_mempool * __rte_experimental
rte_bbdev_op_pool_create(const char *name, enum rte_bbdev_op_type type,
		unsigned int num_elements, unsigned int cache_size,
		int socket_id)
{
	struct rte_bbdev_op_pool_private *priv;
	struct rte_mempool *mp;
	const char *op_type_str;

	if (name == NULL) {
		rte_bbdev_log(ERR, "NULL name for op pool");
		return NULL;
	}

	if (type >= RTE_BBDEV_OP_TYPE_COUNT) {
		rte_bbdev_log(ERR,
				"Invalid op type (%u), should be less than %u",
				type, RTE_BBDEV_OP_TYPE_COUNT);
		return NULL;
	}

	mp = rte_mempool_create(name, num_elements, get_bbdev_op_size(type),
			cache_size, sizeof(struct rte_bbdev_op_pool_private),
			NULL, NULL, bbdev_op_init, &type, socket_id, 0);
	if (mp == NULL) {
		rte_bbdev_log(ERR,
				"Failed to create op pool %s (num ops=%u, op size=%u) with error: %s",
				name, num_elements, get_bbdev_op_size(type),
				rte_strerror(rte_errno));
		return NULL;
	}

	op_type_str = rte_bbdev_op_type_str(type);
	if (op_type_str == NULL)
		return NULL;

	rte_bbdev_log_debug(
			"Op pool %s created for %u ops (type=%s, cache=%u, socket=%u, size=%u)",
			name, num_elements, op_type_str, cache_size, socket_id,
			get_bbdev_op_size(type));

	priv = (struct rte_bbdev_op_pool_private *)rte_mempool_get_priv(mp);
	priv->type = type;

	return mp;
}
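
/*
 * Usage sketch (illustrative only): creating a pool of decode ops and
 * drawing a burst from it. The pool name, size and cache values are
 * arbitrary placeholders.
 *
 *	struct rte_bbdev_dec_op *ops[16];
 *	struct rte_mempool *pool = rte_bbdev_op_pool_create("dec_pool",
 *			RTE_BBDEV_OP_TURBO_DEC, 1024, 64, rte_socket_id());
 *	if (pool == NULL ||
 *			rte_bbdev_dec_op_alloc_bulk(pool, ops, 16) != 0)
 *		rte_exit(EXIT_FAILURE, "op pool setup failed\n");
 */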

int __rte_experimental
rte_bbdev_callback_register(uint16_t dev_id, enum rte_bbdev_event_type event,
		rte_bbdev_cb_fn cb_fn, void *cb_arg)
{
	struct rte_bbdev_callback *user_cb;
	struct rte_bbdev *dev = get_dev(dev_id);
	VALID_DEV_OR_RET_ERR(dev, dev_id);

	if (event >= RTE_BBDEV_EVENT_MAX) {
		rte_bbdev_log(ERR,
				"Invalid event type (%u), should be less than %u",
				event, RTE_BBDEV_EVENT_MAX);
		return -EINVAL;
	}

	if (cb_fn == NULL) {
		rte_bbdev_log(ERR, "NULL callback function");
		return -EINVAL;
	}

	rte_spinlock_lock(&rte_bbdev_cb_lock);

	TAILQ_FOREACH(user_cb, &(dev->list_cbs), next) {
		if (user_cb->cb_fn == cb_fn &&
				user_cb->cb_arg == cb_arg &&
				user_cb->event == event)
			break;
	}

	/* create a new callback. */
	if (user_cb == NULL) {
		user_cb = rte_zmalloc("INTR_USER_CALLBACK",
				sizeof(struct rte_bbdev_callback), 0);
		if (user_cb != NULL) {
			user_cb->cb_fn = cb_fn;
			user_cb->cb_arg = cb_arg;
			user_cb->event = event;
			TAILQ_INSERT_TAIL(&(dev->list_cbs), user_cb, next);
		}
	}

	rte_spinlock_unlock(&rte_bbdev_cb_lock);
	return (user_cb == NULL) ? -ENOMEM : 0;
}
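
/*
 * Usage sketch (illustrative only): registering an error handler. The
 * handler name on_bbdev_error is hypothetical; the signature matches
 * rte_bbdev_cb_fn.
 *
 *	static void
 *	on_bbdev_error(uint16_t dev_id, enum rte_bbdev_event_type event,
 *			void *cb_arg, void *ret_param)
 *	{
 *		printf("device %u raised an error event\n", dev_id);
 *	}
 *	...
 *	rte_bbdev_callback_register(dev_id, RTE_BBDEV_EVENT_ERROR,
 *			on_bbdev_error, NULL);
 */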

int __rte_experimental
rte_bbdev_callback_unregister(uint16_t dev_id, enum rte_bbdev_event_type event,
		rte_bbdev_cb_fn cb_fn, void *cb_arg)
{
	int ret = 0;
	struct rte_bbdev_callback *cb, *next;
	struct rte_bbdev *dev = get_dev(dev_id);
	VALID_DEV_OR_RET_ERR(dev, dev_id);

	if (event >= RTE_BBDEV_EVENT_MAX) {
		rte_bbdev_log(ERR,
				"Invalid event type (%u), should be less than %u",
				event, RTE_BBDEV_EVENT_MAX);
		return -EINVAL;
	}

	if (cb_fn == NULL) {
		rte_bbdev_log(ERR,
				"NULL callback function cannot be unregistered");
		return -EINVAL;
	}

	dev = &rte_bbdev_devices[dev_id];
	rte_spinlock_lock(&rte_bbdev_cb_lock);

	for (cb = TAILQ_FIRST(&dev->list_cbs); cb != NULL; cb = next) {

		next = TAILQ_NEXT(cb, next);

		if (cb->cb_fn != cb_fn || cb->event != event ||
				(cb_arg != (void *)-1 && cb->cb_arg != cb_arg))
			continue;

		/* If this callback is not executing right now, remove it. */
		if (cb->active == 0) {
			TAILQ_REMOVE(&(dev->list_cbs), cb, next);
			rte_free(cb);
		} else
			ret = -EAGAIN;
	}

	rte_spinlock_unlock(&rte_bbdev_cb_lock);
	return ret;
}

void __rte_experimental
rte_bbdev_pmd_callback_process(struct rte_bbdev *dev,
		enum rte_bbdev_event_type event, void *ret_param)
{
	struct rte_bbdev_callback *cb_lst;
	struct rte_bbdev_callback dev_cb;

	if (dev == NULL) {
		rte_bbdev_log(ERR, "NULL device");
		return;
	}

	if (dev->data == NULL) {
		rte_bbdev_log(ERR, "NULL data structure");
		return;
	}

	if (event >= RTE_BBDEV_EVENT_MAX) {
		rte_bbdev_log(ERR,
				"Invalid event type (%u), should be less than %u",
				event, RTE_BBDEV_EVENT_MAX);
		return;
	}

	rte_spinlock_lock(&rte_bbdev_cb_lock);
	TAILQ_FOREACH(cb_lst, &(dev->list_cbs), next) {
		if (cb_lst->cb_fn == NULL || cb_lst->event != event)
			continue;
		dev_cb = *cb_lst;
		cb_lst->active = 1;
		if (ret_param != NULL)
			dev_cb.ret_param = ret_param;

		rte_spinlock_unlock(&rte_bbdev_cb_lock);
		dev_cb.cb_fn(dev->data->dev_id, dev_cb.event,
				dev_cb.cb_arg, dev_cb.ret_param);
		rte_spinlock_lock(&rte_bbdev_cb_lock);
		cb_lst->active = 0;
	}
	rte_spinlock_unlock(&rte_bbdev_cb_lock);
}

int __rte_experimental
rte_bbdev_queue_intr_enable(uint16_t dev_id, uint16_t queue_id)
{
	struct rte_bbdev *dev = get_dev(dev_id);
	VALID_DEV_OR_RET_ERR(dev, dev_id);
	VALID_QUEUE_OR_RET_ERR(queue_id, dev);
	VALID_DEV_OPS_OR_RET_ERR(dev, dev_id);
	VALID_FUNC_OR_RET_ERR(dev->dev_ops->queue_intr_enable, dev_id);
	return dev->dev_ops->queue_intr_enable(dev, queue_id);
}

int __rte_experimental
rte_bbdev_queue_intr_disable(uint16_t dev_id, uint16_t queue_id)
{
	struct rte_bbdev *dev = get_dev(dev_id);
	VALID_DEV_OR_RET_ERR(dev, dev_id);
	VALID_QUEUE_OR_RET_ERR(queue_id, dev);
	VALID_DEV_OPS_OR_RET_ERR(dev, dev_id);
	VALID_FUNC_OR_RET_ERR(dev->dev_ops->queue_intr_disable, dev_id);
	return dev->dev_ops->queue_intr_disable(dev, queue_id);
}

int __rte_experimental
rte_bbdev_queue_intr_ctl(uint16_t dev_id, uint16_t queue_id, int epfd, int op,
		void *data)
{
	uint32_t vec;
	struct rte_bbdev *dev = get_dev(dev_id);
	struct rte_intr_handle *intr_handle;
	int ret;

	VALID_DEV_OR_RET_ERR(dev, dev_id);
	VALID_QUEUE_OR_RET_ERR(queue_id, dev);

	intr_handle = dev->intr_handle;
	if (!intr_handle || !intr_handle->intr_vec) {
		rte_bbdev_log(ERR, "Device %u intr handle unset", dev_id);
		return -ENOTSUP;
	}

	if (queue_id >= RTE_MAX_RXTX_INTR_VEC_ID) {
		rte_bbdev_log(ERR, "Device %u queue_id %u is too big",
				dev_id, queue_id);
		return -ENOTSUP;
	}

	vec = intr_handle->intr_vec[queue_id];
	ret = rte_intr_rx_ctl(intr_handle, epfd, op, vec, data);
	if (ret && (ret != -EEXIST)) {
		rte_bbdev_log(ERR,
				"dev %u q %u int ctl error op %d epfd %d vec %u",
				dev_id, queue_id, op, epfd, vec);
		return ret;
	}

	return 0;
}
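
/*
 * Usage sketch (illustrative only): arming a queue interrupt on the
 * calling thread's epoll fd and blocking until work is ready, following
 * the DPDK rx-interrupt pattern. RTE_EPOLL_PER_THREAD selects the
 * per-thread epoll fd managed by the EAL.
 *
 *	struct rte_epoll_event ev;
 *
 *	rte_bbdev_queue_intr_enable(dev_id, queue_id);
 *	rte_bbdev_queue_intr_ctl(dev_id, queue_id, RTE_EPOLL_PER_THREAD,
 *			RTE_INTR_EVENT_ADD, NULL);
 *	rte_epoll_wait(RTE_EPOLL_PER_THREAD, &ev, 1, -1);
 */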

const char * __rte_experimental
rte_bbdev_op_type_str(enum rte_bbdev_op_type op_type)
{
	static const char * const op_types[] = {
		"RTE_BBDEV_OP_NONE",
		"RTE_BBDEV_OP_TURBO_DEC",
		"RTE_BBDEV_OP_TURBO_ENC",
	};

	if (op_type < RTE_BBDEV_OP_TYPE_COUNT)
		return op_types[op_type];

	rte_bbdev_log(ERR, "Invalid operation type");
	return NULL;
}

RTE_INIT(rte_bbdev_init_log);
static void
rte_bbdev_init_log(void)
{
	bbdev_logtype = rte_log_register("lib.bbdev");
	if (bbdev_logtype >= 0)
		rte_log_set_level(bbdev_logtype, RTE_LOG_NOTICE);
}