1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2017 Intel Corporation
9 #include <rte_common.h>
10 #include <rte_errno.h>
13 #include <rte_malloc.h>
14 #include <rte_mempool.h>
15 #include <rte_memzone.h>
16 #include <rte_lcore.h>
17 #include <rte_spinlock.h>
18 #include <rte_interrupts.h>
20 #include "rte_bbdev_op.h"
21 #include "rte_bbdev.h"
22 #include "rte_bbdev_pmd.h"
#define DEV_NAME "BBDEV"

/* BBDev library logging ID */
RTE_LOG_REGISTER_DEFAULT(bbdev_logtype, NOTICE);

/* Helper macro for logging */
#define rte_bbdev_log(level, fmt, ...) \
	rte_log(RTE_LOG_ ## level, bbdev_logtype, fmt "\n", ##__VA_ARGS__)

/* Debug log with source line and function name prefixed */
#define rte_bbdev_log_debug(fmt, ...) \
	rte_bbdev_log(DEBUG, RTE_STR(__LINE__) ":%s() " fmt, __func__, \
		##__VA_ARGS__)

/* Helper macro to check dev_id is valid */
#define VALID_DEV_OR_RET_ERR(dev, dev_id) do { \
	if (dev == NULL) { \
		rte_bbdev_log(ERR, "device %u is invalid", dev_id); \
		return -ENODEV; \
	} \
} while (0)

/* Helper macro to check dev_ops is valid */
#define VALID_DEV_OPS_OR_RET_ERR(dev, dev_id) do { \
	if (dev->dev_ops == NULL) { \
		rte_bbdev_log(ERR, "NULL dev_ops structure in device %u", \
				dev_id); \
		return -ENODEV; \
	} \
} while (0)

/* Helper macro to check that driver implements required function pointer */
#define VALID_FUNC_OR_RET_ERR(func, dev_id) do { \
	if (func == NULL) { \
		rte_bbdev_log(ERR, "device %u does not support %s", \
				dev_id, #func); \
		return -ENOTSUP; \
	} \
} while (0)

/* Helper macro to check that queue is valid */
#define VALID_QUEUE_OR_RET_ERR(queue_id, dev) do { \
	if (queue_id >= dev->data->num_queues) { \
		rte_bbdev_log(ERR, "Invalid queue_id %u for device %u", \
				queue_id, dev->data->dev_id); \
		return -ERANGE; \
	} \
} while (0)
73 /* List of callback functions registered by an application */
74 struct rte_bbdev_callback {
75 TAILQ_ENTRY(rte_bbdev_callback) next; /* Callbacks list */
76 rte_bbdev_cb_fn cb_fn; /* Callback address */
77 void *cb_arg; /* Parameter for callback */
78 void *ret_param; /* Return parameter */
79 enum rte_bbdev_event_type event; /* Interrupt event type */
80 uint32_t active; /* Callback is executing */
83 /* spinlock for bbdev device callbacks */
84 static rte_spinlock_t rte_bbdev_cb_lock = RTE_SPINLOCK_INITIALIZER;
87 * Global array of all devices. This is not static because it's used by the
88 * inline enqueue and dequeue functions
90 struct rte_bbdev rte_bbdev_devices[RTE_BBDEV_MAX_DEVS];
92 /* Global array with rte_bbdev_data structures */
93 static struct rte_bbdev_data *rte_bbdev_data;
95 /* Memzone name for global bbdev data pool */
96 static const char *MZ_RTE_BBDEV_DATA = "rte_bbdev_data";
98 /* Number of currently valid devices */
99 static uint16_t num_devs;
101 /* Return pointer to device structure, with validity check */
102 static struct rte_bbdev *
103 get_dev(uint16_t dev_id)
105 if (rte_bbdev_is_valid(dev_id))
106 return &rte_bbdev_devices[dev_id];
110 /* Allocate global data array */
112 rte_bbdev_data_alloc(void)
114 const unsigned int flags = 0;
115 const struct rte_memzone *mz;
117 if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
118 mz = rte_memzone_reserve(MZ_RTE_BBDEV_DATA,
119 RTE_BBDEV_MAX_DEVS * sizeof(*rte_bbdev_data),
120 rte_socket_id(), flags);
122 mz = rte_memzone_lookup(MZ_RTE_BBDEV_DATA);
125 "Cannot allocate memzone for bbdev port data");
129 rte_bbdev_data = mz->addr;
130 if (rte_eal_process_type() == RTE_PROC_PRIMARY)
131 memset(rte_bbdev_data, 0,
132 RTE_BBDEV_MAX_DEVS * sizeof(*rte_bbdev_data));
137 * Find data allocated for the device or if not found return first unused bbdev
138 * data. If all structures are in use and none is used by the device return
141 static struct rte_bbdev_data *
142 find_bbdev_data(const char *name)
146 for (data_id = 0; data_id < RTE_BBDEV_MAX_DEVS; ++data_id) {
147 if (strlen(rte_bbdev_data[data_id].name) == 0) {
148 memset(&rte_bbdev_data[data_id], 0,
149 sizeof(struct rte_bbdev_data));
150 return &rte_bbdev_data[data_id];
151 } else if (strncmp(rte_bbdev_data[data_id].name, name,
152 RTE_BBDEV_NAME_MAX_LEN) == 0)
153 return &rte_bbdev_data[data_id];
159 /* Find lowest device id with no attached device */
161 find_free_dev_id(void)
164 for (i = 0; i < RTE_BBDEV_MAX_DEVS; i++) {
165 if (rte_bbdev_devices[i].state == RTE_BBDEV_UNUSED)
168 return RTE_BBDEV_MAX_DEVS;
172 rte_bbdev_allocate(const char *name)
175 struct rte_bbdev *bbdev;
179 rte_bbdev_log(ERR, "Invalid null device name");
183 if (rte_bbdev_get_named_dev(name) != NULL) {
184 rte_bbdev_log(ERR, "Device \"%s\" is already allocated", name);
188 dev_id = find_free_dev_id();
189 if (dev_id == RTE_BBDEV_MAX_DEVS) {
190 rte_bbdev_log(ERR, "Reached maximum number of devices");
194 bbdev = &rte_bbdev_devices[dev_id];
196 if (rte_bbdev_data == NULL) {
197 ret = rte_bbdev_data_alloc();
202 bbdev->data = find_bbdev_data(name);
203 if (bbdev->data == NULL) {
205 "Max BBDevs already allocated in multi-process environment!");
209 __atomic_add_fetch(&bbdev->data->process_cnt, 1, __ATOMIC_RELAXED);
210 bbdev->data->dev_id = dev_id;
211 bbdev->state = RTE_BBDEV_INITIALIZED;
213 ret = snprintf(bbdev->data->name, RTE_BBDEV_NAME_MAX_LEN, "%s", name);
214 if ((ret < 0) || (ret >= RTE_BBDEV_NAME_MAX_LEN)) {
215 rte_bbdev_log(ERR, "Copying device name \"%s\" failed", name);
219 /* init user callbacks */
220 TAILQ_INIT(&(bbdev->list_cbs));
224 rte_bbdev_log_debug("Initialised device %s (id = %u). Num devices = %u",
225 name, dev_id, num_devs);
231 rte_bbdev_release(struct rte_bbdev *bbdev)
234 struct rte_bbdev_callback *cb, *next;
237 rte_bbdev_log(ERR, "NULL bbdev");
240 dev_id = bbdev->data->dev_id;
242 /* free all callbacks from the device's list */
243 for (cb = TAILQ_FIRST(&bbdev->list_cbs); cb != NULL; cb = next) {
245 next = TAILQ_NEXT(cb, next);
246 TAILQ_REMOVE(&(bbdev->list_cbs), cb, next);
250 /* clear shared BBDev Data if no process is using the device anymore */
251 if (__atomic_sub_fetch(&bbdev->data->process_cnt, 1,
252 __ATOMIC_RELAXED) == 0)
253 memset(bbdev->data, 0, sizeof(*bbdev->data));
255 memset(bbdev, 0, sizeof(*bbdev));
257 bbdev->state = RTE_BBDEV_UNUSED;
260 "Un-initialised device id = %u. Num devices = %u",
266 rte_bbdev_get_named_dev(const char *name)
271 rte_bbdev_log(ERR, "NULL driver name");
275 for (i = 0; i < RTE_BBDEV_MAX_DEVS; i++) {
276 struct rte_bbdev *dev = get_dev(i);
277 if (dev && (strncmp(dev->data->name,
278 name, RTE_BBDEV_NAME_MAX_LEN) == 0))
286 rte_bbdev_count(void)
292 rte_bbdev_is_valid(uint16_t dev_id)
294 if ((dev_id < RTE_BBDEV_MAX_DEVS) &&
295 rte_bbdev_devices[dev_id].state == RTE_BBDEV_INITIALIZED)
301 rte_bbdev_find_next(uint16_t dev_id)
304 for (; dev_id < RTE_BBDEV_MAX_DEVS; dev_id++)
305 if (rte_bbdev_is_valid(dev_id))
311 rte_bbdev_setup_queues(uint16_t dev_id, uint16_t num_queues, int socket_id)
315 struct rte_bbdev_driver_info dev_info;
316 struct rte_bbdev *dev = get_dev(dev_id);
317 VALID_DEV_OR_RET_ERR(dev, dev_id);
319 VALID_DEV_OPS_OR_RET_ERR(dev, dev_id);
321 if (dev->data->started) {
323 "Device %u cannot be configured when started",
328 /* Get device driver information to get max number of queues */
329 VALID_FUNC_OR_RET_ERR(dev->dev_ops->info_get, dev_id);
330 memset(&dev_info, 0, sizeof(dev_info));
331 dev->dev_ops->info_get(dev, &dev_info);
333 if ((num_queues == 0) || (num_queues > dev_info.max_num_queues)) {
335 "Device %u supports 0 < N <= %u queues, not %u",
336 dev_id, dev_info.max_num_queues, num_queues);
340 /* If re-configuration, get driver to free existing internal memory */
341 if (dev->data->queues != NULL) {
342 VALID_FUNC_OR_RET_ERR(dev->dev_ops->queue_release, dev_id);
343 for (i = 0; i < dev->data->num_queues; i++) {
344 int ret = dev->dev_ops->queue_release(dev, i);
347 "Device %u queue %u release failed",
352 /* Call optional device close */
353 if (dev->dev_ops->close) {
354 ret = dev->dev_ops->close(dev);
357 "Device %u couldn't be closed",
362 rte_free(dev->data->queues);
365 /* Allocate queue pointers */
366 dev->data->queues = rte_calloc_socket(DEV_NAME, num_queues,
367 sizeof(dev->data->queues[0]), RTE_CACHE_LINE_SIZE,
368 dev->data->socket_id);
369 if (dev->data->queues == NULL) {
371 "calloc of %u queues for device %u on socket %i failed",
372 num_queues, dev_id, dev->data->socket_id);
376 dev->data->num_queues = num_queues;
378 /* Call optional device configuration */
379 if (dev->dev_ops->setup_queues) {
380 ret = dev->dev_ops->setup_queues(dev, num_queues, socket_id);
383 "Device %u memory configuration failed",
389 rte_bbdev_log_debug("Device %u set up with %u queues", dev_id,
394 dev->data->num_queues = 0;
395 rte_free(dev->data->queues);
396 dev->data->queues = NULL;
401 rte_bbdev_intr_enable(uint16_t dev_id)
404 struct rte_bbdev *dev = get_dev(dev_id);
405 VALID_DEV_OR_RET_ERR(dev, dev_id);
407 VALID_DEV_OPS_OR_RET_ERR(dev, dev_id);
409 if (dev->data->started) {
411 "Device %u cannot be configured when started",
416 if (dev->dev_ops->intr_enable) {
417 ret = dev->dev_ops->intr_enable(dev);
420 "Device %u interrupts configuration failed",
424 rte_bbdev_log_debug("Enabled interrupts for dev %u", dev_id);
428 rte_bbdev_log(ERR, "Device %u doesn't support interrupts", dev_id);
433 rte_bbdev_queue_configure(uint16_t dev_id, uint16_t queue_id,
434 const struct rte_bbdev_queue_conf *conf)
437 struct rte_bbdev_driver_info dev_info;
438 struct rte_bbdev *dev = get_dev(dev_id);
439 const struct rte_bbdev_op_cap *p;
440 struct rte_bbdev_queue_conf *stored_conf;
441 const char *op_type_str;
442 VALID_DEV_OR_RET_ERR(dev, dev_id);
444 VALID_DEV_OPS_OR_RET_ERR(dev, dev_id);
446 VALID_QUEUE_OR_RET_ERR(queue_id, dev);
448 if (dev->data->queues[queue_id].started || dev->data->started) {
450 "Queue %u of device %u cannot be configured when started",
455 VALID_FUNC_OR_RET_ERR(dev->dev_ops->queue_release, dev_id);
456 VALID_FUNC_OR_RET_ERR(dev->dev_ops->queue_setup, dev_id);
458 /* Get device driver information to verify config is valid */
459 VALID_FUNC_OR_RET_ERR(dev->dev_ops->info_get, dev_id);
460 memset(&dev_info, 0, sizeof(dev_info));
461 dev->dev_ops->info_get(dev, &dev_info);
463 /* Check configuration is valid */
465 if ((conf->op_type == RTE_BBDEV_OP_NONE) &&
466 (dev_info.capabilities[0].type ==
467 RTE_BBDEV_OP_NONE)) {
470 for (p = dev_info.capabilities;
471 p->type != RTE_BBDEV_OP_NONE; p++) {
472 if (conf->op_type == p->type) {
479 rte_bbdev_log(ERR, "Invalid operation type");
482 if (conf->queue_size > dev_info.queue_size_lim) {
484 "Size (%u) of queue %u of device %u must be: <= %u",
485 conf->queue_size, queue_id, dev_id,
486 dev_info.queue_size_lim);
489 if (!rte_is_power_of_2(conf->queue_size)) {
491 "Size (%u) of queue %u of device %u must be a power of 2",
492 conf->queue_size, queue_id, dev_id);
495 if (conf->op_type == RTE_BBDEV_OP_TURBO_DEC &&
496 conf->priority > dev_info.max_ul_queue_priority) {
498 "Priority (%u) of queue %u of bbdev %u must be <= %u",
499 conf->priority, queue_id, dev_id,
500 dev_info.max_ul_queue_priority);
503 if (conf->op_type == RTE_BBDEV_OP_TURBO_ENC &&
504 conf->priority > dev_info.max_dl_queue_priority) {
506 "Priority (%u) of queue %u of bbdev %u must be <= %u",
507 conf->priority, queue_id, dev_id,
508 dev_info.max_dl_queue_priority);
513 /* Release existing queue (in case of queue reconfiguration) */
514 if (dev->data->queues[queue_id].queue_private != NULL) {
515 ret = dev->dev_ops->queue_release(dev, queue_id);
517 rte_bbdev_log(ERR, "Device %u queue %u release failed",
523 /* Get driver to setup the queue */
524 ret = dev->dev_ops->queue_setup(dev, queue_id, (conf != NULL) ?
525 conf : &dev_info.default_queue_conf);
527 /* This may happen when trying different priority levels */
529 "Device %u queue %u setup failed",
534 /* Store configuration */
535 stored_conf = &dev->data->queues[queue_id].conf;
537 (conf != NULL) ? conf : &dev_info.default_queue_conf,
538 sizeof(*stored_conf));
540 op_type_str = rte_bbdev_op_type_str(stored_conf->op_type);
541 if (op_type_str == NULL)
544 rte_bbdev_log_debug("Configured dev%uq%u (size=%u, type=%s, prio=%u)",
545 dev_id, queue_id, stored_conf->queue_size, op_type_str,
546 stored_conf->priority);
552 rte_bbdev_start(uint16_t dev_id)
555 struct rte_bbdev *dev = get_dev(dev_id);
556 VALID_DEV_OR_RET_ERR(dev, dev_id);
558 VALID_DEV_OPS_OR_RET_ERR(dev, dev_id);
560 if (dev->data->started) {
561 rte_bbdev_log_debug("Device %u is already started", dev_id);
565 if (dev->dev_ops->start) {
566 int ret = dev->dev_ops->start(dev);
568 rte_bbdev_log(ERR, "Device %u start failed", dev_id);
573 /* Store new state */
574 for (i = 0; i < dev->data->num_queues; i++)
575 if (!dev->data->queues[i].conf.deferred_start)
576 dev->data->queues[i].started = true;
577 dev->data->started = true;
579 rte_bbdev_log_debug("Started device %u", dev_id);
584 rte_bbdev_stop(uint16_t dev_id)
586 struct rte_bbdev *dev = get_dev(dev_id);
587 VALID_DEV_OR_RET_ERR(dev, dev_id);
589 VALID_DEV_OPS_OR_RET_ERR(dev, dev_id);
591 if (!dev->data->started) {
592 rte_bbdev_log_debug("Device %u is already stopped", dev_id);
596 if (dev->dev_ops->stop)
597 dev->dev_ops->stop(dev);
598 dev->data->started = false;
600 rte_bbdev_log_debug("Stopped device %u", dev_id);
605 rte_bbdev_close(uint16_t dev_id)
609 struct rte_bbdev *dev = get_dev(dev_id);
610 VALID_DEV_OR_RET_ERR(dev, dev_id);
612 VALID_DEV_OPS_OR_RET_ERR(dev, dev_id);
614 if (dev->data->started) {
615 ret = rte_bbdev_stop(dev_id);
617 rte_bbdev_log(ERR, "Device %u stop failed", dev_id);
622 /* Free memory used by queues */
623 for (i = 0; i < dev->data->num_queues; i++) {
624 ret = dev->dev_ops->queue_release(dev, i);
626 rte_bbdev_log(ERR, "Device %u queue %u release failed",
631 rte_free(dev->data->queues);
633 if (dev->dev_ops->close) {
634 ret = dev->dev_ops->close(dev);
636 rte_bbdev_log(ERR, "Device %u close failed", dev_id);
641 /* Clear configuration */
642 dev->data->queues = NULL;
643 dev->data->num_queues = 0;
645 rte_bbdev_log_debug("Closed device %u", dev_id);
650 rte_bbdev_queue_start(uint16_t dev_id, uint16_t queue_id)
652 struct rte_bbdev *dev = get_dev(dev_id);
653 VALID_DEV_OR_RET_ERR(dev, dev_id);
655 VALID_DEV_OPS_OR_RET_ERR(dev, dev_id);
657 VALID_QUEUE_OR_RET_ERR(queue_id, dev);
659 if (dev->data->queues[queue_id].started) {
660 rte_bbdev_log_debug("Queue %u of device %u already started",
665 if (dev->dev_ops->queue_start) {
666 int ret = dev->dev_ops->queue_start(dev, queue_id);
668 rte_bbdev_log(ERR, "Device %u queue %u start failed",
673 dev->data->queues[queue_id].started = true;
675 rte_bbdev_log_debug("Started queue %u of device %u", queue_id, dev_id);
680 rte_bbdev_queue_stop(uint16_t dev_id, uint16_t queue_id)
682 struct rte_bbdev *dev = get_dev(dev_id);
683 VALID_DEV_OR_RET_ERR(dev, dev_id);
685 VALID_DEV_OPS_OR_RET_ERR(dev, dev_id);
687 VALID_QUEUE_OR_RET_ERR(queue_id, dev);
689 if (!dev->data->queues[queue_id].started) {
690 rte_bbdev_log_debug("Queue %u of device %u already stopped",
695 if (dev->dev_ops->queue_stop) {
696 int ret = dev->dev_ops->queue_stop(dev, queue_id);
698 rte_bbdev_log(ERR, "Device %u queue %u stop failed",
703 dev->data->queues[queue_id].started = false;
705 rte_bbdev_log_debug("Stopped queue %u of device %u", queue_id, dev_id);
709 /* Get device statistics */
711 get_stats_from_queues(struct rte_bbdev *dev, struct rte_bbdev_stats *stats)
714 for (q_id = 0; q_id < dev->data->num_queues; q_id++) {
715 struct rte_bbdev_stats *q_stats =
716 &dev->data->queues[q_id].queue_stats;
718 stats->enqueued_count += q_stats->enqueued_count;
719 stats->dequeued_count += q_stats->dequeued_count;
720 stats->enqueue_err_count += q_stats->enqueue_err_count;
721 stats->dequeue_err_count += q_stats->dequeue_err_count;
723 rte_bbdev_log_debug("Got stats on %u", dev->data->dev_id);
727 reset_stats_in_queues(struct rte_bbdev *dev)
730 for (q_id = 0; q_id < dev->data->num_queues; q_id++) {
731 struct rte_bbdev_stats *q_stats =
732 &dev->data->queues[q_id].queue_stats;
734 memset(q_stats, 0, sizeof(*q_stats));
736 rte_bbdev_log_debug("Reset stats on %u", dev->data->dev_id);
740 rte_bbdev_stats_get(uint16_t dev_id, struct rte_bbdev_stats *stats)
742 struct rte_bbdev *dev = get_dev(dev_id);
743 VALID_DEV_OR_RET_ERR(dev, dev_id);
745 VALID_DEV_OPS_OR_RET_ERR(dev, dev_id);
748 rte_bbdev_log(ERR, "NULL stats structure");
752 memset(stats, 0, sizeof(*stats));
753 if (dev->dev_ops->stats_get != NULL)
754 dev->dev_ops->stats_get(dev, stats);
756 get_stats_from_queues(dev, stats);
758 rte_bbdev_log_debug("Retrieved stats of device %u", dev_id);
763 rte_bbdev_stats_reset(uint16_t dev_id)
765 struct rte_bbdev *dev = get_dev(dev_id);
766 VALID_DEV_OR_RET_ERR(dev, dev_id);
768 VALID_DEV_OPS_OR_RET_ERR(dev, dev_id);
770 if (dev->dev_ops->stats_reset != NULL)
771 dev->dev_ops->stats_reset(dev);
773 reset_stats_in_queues(dev);
775 rte_bbdev_log_debug("Reset stats of device %u", dev_id);
780 rte_bbdev_info_get(uint16_t dev_id, struct rte_bbdev_info *dev_info)
782 struct rte_bbdev *dev = get_dev(dev_id);
783 VALID_DEV_OR_RET_ERR(dev, dev_id);
785 VALID_FUNC_OR_RET_ERR(dev->dev_ops->info_get, dev_id);
787 if (dev_info == NULL) {
788 rte_bbdev_log(ERR, "NULL dev info structure");
792 /* Copy data maintained by device interface layer */
793 memset(dev_info, 0, sizeof(*dev_info));
794 dev_info->dev_name = dev->data->name;
795 dev_info->num_queues = dev->data->num_queues;
796 dev_info->device = dev->device;
797 dev_info->socket_id = dev->data->socket_id;
798 dev_info->started = dev->data->started;
800 /* Copy data maintained by device driver layer */
801 dev->dev_ops->info_get(dev, &dev_info->drv);
803 rte_bbdev_log_debug("Retrieved info of device %u", dev_id);
808 rte_bbdev_queue_info_get(uint16_t dev_id, uint16_t queue_id,
809 struct rte_bbdev_queue_info *queue_info)
811 struct rte_bbdev *dev = get_dev(dev_id);
812 VALID_DEV_OR_RET_ERR(dev, dev_id);
814 VALID_QUEUE_OR_RET_ERR(queue_id, dev);
816 if (queue_info == NULL) {
817 rte_bbdev_log(ERR, "NULL queue info structure");
821 /* Copy data to output */
822 memset(queue_info, 0, sizeof(*queue_info));
823 queue_info->conf = dev->data->queues[queue_id].conf;
824 queue_info->started = dev->data->queues[queue_id].started;
826 rte_bbdev_log_debug("Retrieved info of queue %u of device %u",
831 /* Calculate size needed to store bbdev_op, depending on type */
833 get_bbdev_op_size(enum rte_bbdev_op_type type)
835 unsigned int result = 0;
837 case RTE_BBDEV_OP_NONE:
838 result = RTE_MAX(sizeof(struct rte_bbdev_dec_op),
839 sizeof(struct rte_bbdev_enc_op));
841 case RTE_BBDEV_OP_TURBO_DEC:
842 result = sizeof(struct rte_bbdev_dec_op);
844 case RTE_BBDEV_OP_TURBO_ENC:
845 result = sizeof(struct rte_bbdev_enc_op);
847 case RTE_BBDEV_OP_LDPC_DEC:
848 result = sizeof(struct rte_bbdev_dec_op);
850 case RTE_BBDEV_OP_LDPC_ENC:
851 result = sizeof(struct rte_bbdev_enc_op);
860 /* Initialise a bbdev_op structure */
862 bbdev_op_init(struct rte_mempool *mempool, void *arg, void *element,
863 __rte_unused unsigned int n)
865 enum rte_bbdev_op_type type = *(enum rte_bbdev_op_type *)arg;
867 if (type == RTE_BBDEV_OP_TURBO_DEC || type == RTE_BBDEV_OP_LDPC_DEC) {
868 struct rte_bbdev_dec_op *op = element;
869 memset(op, 0, mempool->elt_size);
870 op->mempool = mempool;
871 } else if (type == RTE_BBDEV_OP_TURBO_ENC ||
872 type == RTE_BBDEV_OP_LDPC_ENC) {
873 struct rte_bbdev_enc_op *op = element;
874 memset(op, 0, mempool->elt_size);
875 op->mempool = mempool;
880 rte_bbdev_op_pool_create(const char *name, enum rte_bbdev_op_type type,
881 unsigned int num_elements, unsigned int cache_size,
884 struct rte_bbdev_op_pool_private *priv;
885 struct rte_mempool *mp;
886 const char *op_type_str;
889 rte_bbdev_log(ERR, "NULL name for op pool");
893 if (type >= RTE_BBDEV_OP_TYPE_COUNT) {
895 "Invalid op type (%u), should be less than %u",
896 type, RTE_BBDEV_OP_TYPE_COUNT);
900 mp = rte_mempool_create(name, num_elements, get_bbdev_op_size(type),
901 cache_size, sizeof(struct rte_bbdev_op_pool_private),
902 NULL, NULL, bbdev_op_init, &type, socket_id, 0);
905 "Failed to create op pool %s (num ops=%u, op size=%u) with error: %s",
906 name, num_elements, get_bbdev_op_size(type),
907 rte_strerror(rte_errno));
911 op_type_str = rte_bbdev_op_type_str(type);
912 if (op_type_str == NULL)
916 "Op pool %s created for %u ops (type=%s, cache=%u, socket=%u, size=%u)",
917 name, num_elements, op_type_str, cache_size, socket_id,
918 get_bbdev_op_size(type));
920 priv = (struct rte_bbdev_op_pool_private *)rte_mempool_get_priv(mp);
927 rte_bbdev_callback_register(uint16_t dev_id, enum rte_bbdev_event_type event,
928 rte_bbdev_cb_fn cb_fn, void *cb_arg)
930 struct rte_bbdev_callback *user_cb;
931 struct rte_bbdev *dev = get_dev(dev_id);
932 VALID_DEV_OR_RET_ERR(dev, dev_id);
934 if (event >= RTE_BBDEV_EVENT_MAX) {
936 "Invalid event type (%u), should be less than %u",
937 event, RTE_BBDEV_EVENT_MAX);
942 rte_bbdev_log(ERR, "NULL callback function");
946 rte_spinlock_lock(&rte_bbdev_cb_lock);
948 TAILQ_FOREACH(user_cb, &(dev->list_cbs), next) {
949 if (user_cb->cb_fn == cb_fn &&
950 user_cb->cb_arg == cb_arg &&
951 user_cb->event == event)
955 /* create a new callback. */
956 if (user_cb == NULL) {
957 user_cb = rte_zmalloc("INTR_USER_CALLBACK",
958 sizeof(struct rte_bbdev_callback), 0);
959 if (user_cb != NULL) {
960 user_cb->cb_fn = cb_fn;
961 user_cb->cb_arg = cb_arg;
962 user_cb->event = event;
963 TAILQ_INSERT_TAIL(&(dev->list_cbs), user_cb, next);
967 rte_spinlock_unlock(&rte_bbdev_cb_lock);
968 return (user_cb == NULL) ? -ENOMEM : 0;
972 rte_bbdev_callback_unregister(uint16_t dev_id, enum rte_bbdev_event_type event,
973 rte_bbdev_cb_fn cb_fn, void *cb_arg)
976 struct rte_bbdev_callback *cb, *next;
977 struct rte_bbdev *dev = get_dev(dev_id);
978 VALID_DEV_OR_RET_ERR(dev, dev_id);
980 if (event >= RTE_BBDEV_EVENT_MAX) {
982 "Invalid event type (%u), should be less than %u",
983 event, RTE_BBDEV_EVENT_MAX);
989 "NULL callback function cannot be unregistered");
993 dev = &rte_bbdev_devices[dev_id];
994 rte_spinlock_lock(&rte_bbdev_cb_lock);
996 for (cb = TAILQ_FIRST(&dev->list_cbs); cb != NULL; cb = next) {
998 next = TAILQ_NEXT(cb, next);
1000 if (cb->cb_fn != cb_fn || cb->event != event ||
1001 (cb_arg != (void *)-1 && cb->cb_arg != cb_arg))
1004 /* If this callback is not executing right now, remove it. */
1005 if (cb->active == 0) {
1006 TAILQ_REMOVE(&(dev->list_cbs), cb, next);
1012 rte_spinlock_unlock(&rte_bbdev_cb_lock);
1017 rte_bbdev_pmd_callback_process(struct rte_bbdev *dev,
1018 enum rte_bbdev_event_type event, void *ret_param)
1020 struct rte_bbdev_callback *cb_lst;
1021 struct rte_bbdev_callback dev_cb;
1024 rte_bbdev_log(ERR, "NULL device");
1028 if (dev->data == NULL) {
1029 rte_bbdev_log(ERR, "NULL data structure");
1033 if (event >= RTE_BBDEV_EVENT_MAX) {
1035 "Invalid event type (%u), should be less than %u",
1036 event, RTE_BBDEV_EVENT_MAX);
1040 rte_spinlock_lock(&rte_bbdev_cb_lock);
1041 TAILQ_FOREACH(cb_lst, &(dev->list_cbs), next) {
1042 if (cb_lst->cb_fn == NULL || cb_lst->event != event)
1046 if (ret_param != NULL)
1047 dev_cb.ret_param = ret_param;
1049 rte_spinlock_unlock(&rte_bbdev_cb_lock);
1050 dev_cb.cb_fn(dev->data->dev_id, dev_cb.event,
1051 dev_cb.cb_arg, dev_cb.ret_param);
1052 rte_spinlock_lock(&rte_bbdev_cb_lock);
1055 rte_spinlock_unlock(&rte_bbdev_cb_lock);
1059 rte_bbdev_queue_intr_enable(uint16_t dev_id, uint16_t queue_id)
1061 struct rte_bbdev *dev = get_dev(dev_id);
1062 VALID_DEV_OR_RET_ERR(dev, dev_id);
1063 VALID_QUEUE_OR_RET_ERR(queue_id, dev);
1064 VALID_DEV_OPS_OR_RET_ERR(dev, dev_id);
1065 VALID_FUNC_OR_RET_ERR(dev->dev_ops->queue_intr_enable, dev_id);
1066 return dev->dev_ops->queue_intr_enable(dev, queue_id);
1070 rte_bbdev_queue_intr_disable(uint16_t dev_id, uint16_t queue_id)
1072 struct rte_bbdev *dev = get_dev(dev_id);
1073 VALID_DEV_OR_RET_ERR(dev, dev_id);
1074 VALID_QUEUE_OR_RET_ERR(queue_id, dev);
1075 VALID_DEV_OPS_OR_RET_ERR(dev, dev_id);
1076 VALID_FUNC_OR_RET_ERR(dev->dev_ops->queue_intr_disable, dev_id);
1077 return dev->dev_ops->queue_intr_disable(dev, queue_id);
1081 rte_bbdev_queue_intr_ctl(uint16_t dev_id, uint16_t queue_id, int epfd, int op,
1085 struct rte_bbdev *dev = get_dev(dev_id);
1086 struct rte_intr_handle *intr_handle;
1089 VALID_DEV_OR_RET_ERR(dev, dev_id);
1090 VALID_QUEUE_OR_RET_ERR(queue_id, dev);
1092 intr_handle = dev->intr_handle;
1093 if (intr_handle == NULL) {
1094 rte_bbdev_log(ERR, "Device %u intr handle unset\n", dev_id);
1098 if (queue_id >= RTE_MAX_RXTX_INTR_VEC_ID) {
1099 rte_bbdev_log(ERR, "Device %u queue_id %u is too big\n",
1104 vec = rte_intr_vec_list_index_get(intr_handle, queue_id);
1105 ret = rte_intr_rx_ctl(intr_handle, epfd, op, vec, data);
1106 if (ret && (ret != -EEXIST)) {
1108 "dev %u q %u int ctl error op %d epfd %d vec %u\n",
1109 dev_id, queue_id, op, epfd, vec);
1118 rte_bbdev_op_type_str(enum rte_bbdev_op_type op_type)
1120 static const char * const op_types[] = {
1121 "RTE_BBDEV_OP_NONE",
1122 "RTE_BBDEV_OP_TURBO_DEC",
1123 "RTE_BBDEV_OP_TURBO_ENC",
1124 "RTE_BBDEV_OP_LDPC_DEC",
1125 "RTE_BBDEV_OP_LDPC_ENC",
1128 if (op_type < RTE_BBDEV_OP_TYPE_COUNT)
1129 return op_types[op_type];
1131 rte_bbdev_log(ERR, "Invalid operation type");