/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2017 Intel Corporation
 */

#include <stdint.h>
#include <string.h>
#include <stdbool.h>

#include <rte_compat.h>
#include <rte_common.h>
#include <rte_errno.h>
#include <rte_log.h>
#include <rte_debug.h>
#include <rte_eal.h>
#include <rte_malloc.h>
#include <rte_mempool.h>
#include <rte_memzone.h>
#include <rte_lcore.h>
#include <rte_dev.h>
#include <rte_spinlock.h>
#include <rte_tailq.h>
#include <rte_interrupts.h>

#include "rte_bbdev_op.h"
#include "rte_bbdev.h"
#include "rte_bbdev_pmd.h"

#define DEV_NAME "BBDEV"

/* BBDev library logging ID */
RTE_LOG_REGISTER_DEFAULT(bbdev_logtype, NOTICE);

/* Helper macro for logging */
#define rte_bbdev_log(level, fmt, ...) \
	rte_log(RTE_LOG_ ## level, bbdev_logtype, fmt "\n", ##__VA_ARGS__)

#define rte_bbdev_log_debug(fmt, ...) \
	rte_bbdev_log(DEBUG, RTE_STR(__LINE__) ":%s() " fmt, __func__, \
		##__VA_ARGS__)

/* Helper macro to check dev_id is valid */
#define VALID_DEV_OR_RET_ERR(dev, dev_id) do { \
	if (dev == NULL) { \
		rte_bbdev_log(ERR, "device %u is invalid", dev_id); \
		return -ENODEV; \
	} \
} while (0)

/* Helper macro to check dev_ops is valid */
#define VALID_DEV_OPS_OR_RET_ERR(dev, dev_id) do { \
	if (dev->dev_ops == NULL) { \
		rte_bbdev_log(ERR, "NULL dev_ops structure in device %u", \
				dev_id); \
		return -ENODEV; \
	} \
} while (0)

/* Helper macro to check that driver implements required function pointer */
#define VALID_FUNC_OR_RET_ERR(func, dev_id) do { \
	if (func == NULL) { \
		rte_bbdev_log(ERR, "device %u does not support %s", \
				dev_id, #func); \
		return -ENOTSUP; \
	} \
} while (0)

/* Helper macro to check that queue is valid */
#define VALID_QUEUE_OR_RET_ERR(queue_id, dev) do { \
	if (queue_id >= dev->data->num_queues) { \
		rte_bbdev_log(ERR, "Invalid queue_id %u for device %u", \
				queue_id, dev->data->dev_id); \
		return -ERANGE; \
	} \
} while (0)

/* List of callback functions registered by an application */
struct rte_bbdev_callback {
	TAILQ_ENTRY(rte_bbdev_callback) next;  /* Callbacks list */
	rte_bbdev_cb_fn cb_fn;  /* Callback address */
	void *cb_arg;  /* Parameter for callback */
	void *ret_param;  /* Return parameter */
	enum rte_bbdev_event_type event;  /* Interrupt event type */
	uint32_t active;  /* Callback is executing */
};

/* spinlock for bbdev device callbacks */
static rte_spinlock_t rte_bbdev_cb_lock = RTE_SPINLOCK_INITIALIZER;

/*
 * Global array of all devices. This is not static because it's used by the
 * inline enqueue and dequeue functions
 */
struct rte_bbdev rte_bbdev_devices[RTE_BBDEV_MAX_DEVS];

/* Global array with rte_bbdev_data structures */
static struct rte_bbdev_data *rte_bbdev_data;

/* Memzone name for global bbdev data pool */
static const char *MZ_RTE_BBDEV_DATA = "rte_bbdev_data";

/* Number of currently valid devices */
static uint16_t num_devs;

/* Return pointer to device structure, with validity check */
static struct rte_bbdev *
get_dev(uint16_t dev_id)
{
	if (rte_bbdev_is_valid(dev_id))
		return &rte_bbdev_devices[dev_id];
	return NULL;
}

/* Allocate global data array */
static int
rte_bbdev_data_alloc(void)
{
	const unsigned int flags = 0;
	const struct rte_memzone *mz;

	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
		mz = rte_memzone_reserve(MZ_RTE_BBDEV_DATA,
				RTE_BBDEV_MAX_DEVS * sizeof(*rte_bbdev_data),
				rte_socket_id(), flags);
	} else
		mz = rte_memzone_lookup(MZ_RTE_BBDEV_DATA);
	if (mz == NULL) {
		rte_bbdev_log(CRIT,
				"Cannot allocate memzone for bbdev port data");
		return -ENOMEM;
	}

	rte_bbdev_data = mz->addr;
	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
		memset(rte_bbdev_data, 0,
				RTE_BBDEV_MAX_DEVS * sizeof(*rte_bbdev_data));
	return 0;
}

/*
 * Find data allocated for the device or if not found return first unused bbdev
 * data. If all structures are in use and none is used by the device return
 * NULL.
 */
static struct rte_bbdev_data *
find_bbdev_data(const char *name)
{
	uint16_t data_id;

	for (data_id = 0; data_id < RTE_BBDEV_MAX_DEVS; ++data_id) {
		if (strlen(rte_bbdev_data[data_id].name) == 0) {
			memset(&rte_bbdev_data[data_id], 0,
					sizeof(struct rte_bbdev_data));
			return &rte_bbdev_data[data_id];
		} else if (strncmp(rte_bbdev_data[data_id].name, name,
				RTE_BBDEV_NAME_MAX_LEN) == 0)
			return &rte_bbdev_data[data_id];
	}

	return NULL;
}

/* Find lowest device id with no attached device */
static uint16_t
find_free_dev_id(void)
{
	uint16_t i;

	for (i = 0; i < RTE_BBDEV_MAX_DEVS; i++) {
		if (rte_bbdev_devices[i].state == RTE_BBDEV_UNUSED)
			return i;
	}
	return RTE_BBDEV_MAX_DEVS;
}

struct rte_bbdev *
rte_bbdev_allocate(const char *name)
{
	int ret;
	struct rte_bbdev *bbdev;
	uint16_t dev_id;

	if (name == NULL) {
		rte_bbdev_log(ERR, "Invalid null device name");
		return NULL;
	}

	if (rte_bbdev_get_named_dev(name) != NULL) {
		rte_bbdev_log(ERR, "Device \"%s\" is already allocated", name);
		return NULL;
	}

	dev_id = find_free_dev_id();
	if (dev_id == RTE_BBDEV_MAX_DEVS) {
		rte_bbdev_log(ERR, "Reached maximum number of devices");
		return NULL;
	}

	bbdev = &rte_bbdev_devices[dev_id];

	if (rte_bbdev_data == NULL) {
		ret = rte_bbdev_data_alloc();
		if (ret != 0)
			return NULL;
	}

	bbdev->data = find_bbdev_data(name);
	if (bbdev->data == NULL) {
		rte_bbdev_log(ERR,
				"Max BBDevs already allocated in multi-process environment!");
		return NULL;
	}

	__atomic_add_fetch(&bbdev->data->process_cnt, 1, __ATOMIC_RELAXED);
	bbdev->data->dev_id = dev_id;
	bbdev->state = RTE_BBDEV_INITIALIZED;

	ret = snprintf(bbdev->data->name, RTE_BBDEV_NAME_MAX_LEN, "%s", name);
	if ((ret < 0) || (ret >= RTE_BBDEV_NAME_MAX_LEN)) {
		rte_bbdev_log(ERR, "Copying device name \"%s\" failed", name);
		return NULL;
	}

	/* init user callbacks */
	TAILQ_INIT(&(bbdev->list_cbs));

	num_devs++;

	rte_bbdev_log_debug("Initialised device %s (id = %u). Num devices = %u",
			name, dev_id, num_devs);

	return bbdev;
}
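
/*
 * Illustrative sketch (not part of the library): a PMD would typically call
 * rte_bbdev_allocate() from its probe routine and then fill in its driver
 * hooks. The names "my_bbdev_ops", "my_enqueue_enc_ops" and
 * "my_dequeue_enc_ops" are hypothetical placeholders, not symbols defined
 * here:
 *
 *	struct rte_bbdev *bbdev = rte_bbdev_allocate(name);
 *	if (bbdev == NULL)
 *		return -ENODEV;
 *	bbdev->dev_ops = &my_bbdev_ops;
 *	bbdev->enqueue_enc_ops = my_enqueue_enc_ops;
 *	bbdev->dequeue_enc_ops = my_dequeue_enc_ops;
 */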

int
rte_bbdev_release(struct rte_bbdev *bbdev)
{
	uint16_t dev_id;
	struct rte_bbdev_callback *cb, *next;

	if (bbdev == NULL) {
		rte_bbdev_log(ERR, "NULL bbdev");
		return -ENODEV;
	}
	dev_id = bbdev->data->dev_id;

	/* free all callbacks from the device's list */
	for (cb = TAILQ_FIRST(&bbdev->list_cbs); cb != NULL; cb = next) {
		next = TAILQ_NEXT(cb, next);
		TAILQ_REMOVE(&(bbdev->list_cbs), cb, next);
		rte_free(cb);
	}

	/* clear shared BBDev Data if no process is using the device anymore */
	if (__atomic_sub_fetch(&bbdev->data->process_cnt, 1,
			__ATOMIC_RELAXED) == 0)
		memset(bbdev->data, 0, sizeof(*bbdev->data));

	memset(bbdev, 0, sizeof(*bbdev));
	num_devs--;
	bbdev->state = RTE_BBDEV_UNUSED;

	rte_bbdev_log_debug(
			"Un-initialised device id = %u. Num devices = %u",
			dev_id, num_devs);
	return 0;
}

struct rte_bbdev *
rte_bbdev_get_named_dev(const char *name)
{
	unsigned int i;

	if (name == NULL) {
		rte_bbdev_log(ERR, "NULL driver name");
		return NULL;
	}

	for (i = 0; i < RTE_BBDEV_MAX_DEVS; i++) {
		struct rte_bbdev *dev = get_dev(i);
		if (dev && (strncmp(dev->data->name,
				name, RTE_BBDEV_NAME_MAX_LEN) == 0))
			return dev;
	}

	return NULL;
}

uint16_t
rte_bbdev_count(void)
{
	return num_devs;
}

bool
rte_bbdev_is_valid(uint16_t dev_id)
{
	if ((dev_id < RTE_BBDEV_MAX_DEVS) &&
		rte_bbdev_devices[dev_id].state == RTE_BBDEV_INITIALIZED)
		return true;
	return false;
}

uint16_t
rte_bbdev_find_next(uint16_t dev_id)
{
	dev_id++;
	for (; dev_id < RTE_BBDEV_MAX_DEVS; dev_id++)
		if (rte_bbdev_is_valid(dev_id))
			break;
	return dev_id;
}

int
rte_bbdev_setup_queues(uint16_t dev_id, uint16_t num_queues, int socket_id)
{
	unsigned int i;
	int ret;
	struct rte_bbdev_driver_info dev_info;
	struct rte_bbdev *dev = get_dev(dev_id);
	VALID_DEV_OR_RET_ERR(dev, dev_id);

	VALID_DEV_OPS_OR_RET_ERR(dev, dev_id);

	if (dev->data->started) {
		rte_bbdev_log(ERR,
				"Device %u cannot be configured when started",
				dev_id);
		return -EBUSY;
	}

	/* Get device driver information to get max number of queues */
	VALID_FUNC_OR_RET_ERR(dev->dev_ops->info_get, dev_id);
	memset(&dev_info, 0, sizeof(dev_info));
	dev->dev_ops->info_get(dev, &dev_info);

	if ((num_queues == 0) || (num_queues > dev_info.max_num_queues)) {
		rte_bbdev_log(ERR,
				"Device %u supports 0 < N <= %u queues, not %u",
				dev_id, dev_info.max_num_queues, num_queues);
		return -EINVAL;
	}

	/* If re-configuration, get driver to free existing internal memory */
	if (dev->data->queues != NULL) {
		VALID_FUNC_OR_RET_ERR(dev->dev_ops->queue_release, dev_id);
		for (i = 0; i < dev->data->num_queues; i++) {
			int ret = dev->dev_ops->queue_release(dev, i);
			if (ret < 0) {
				rte_bbdev_log(ERR,
						"Device %u queue %u release failed",
						dev_id, i);
				return ret;
			}
		}
		/* Call optional device close */
		if (dev->dev_ops->close) {
			ret = dev->dev_ops->close(dev);
			if (ret < 0) {
				rte_bbdev_log(ERR,
						"Device %u couldn't be closed",
						dev_id);
				return ret;
			}
		}
		rte_free(dev->data->queues);
	}

	/* Allocate queue pointers */
	dev->data->queues = rte_calloc_socket(DEV_NAME, num_queues,
			sizeof(dev->data->queues[0]), RTE_CACHE_LINE_SIZE,
			dev->data->socket_id);
	if (dev->data->queues == NULL) {
		rte_bbdev_log(ERR,
				"calloc of %u queues for device %u on socket %i failed",
				num_queues, dev_id, dev->data->socket_id);
		return -ENOMEM;
	}

	dev->data->num_queues = num_queues;

	/* Call optional device configuration */
	if (dev->dev_ops->setup_queues) {
		ret = dev->dev_ops->setup_queues(dev, num_queues, socket_id);
		if (ret < 0) {
			rte_bbdev_log(ERR,
					"Device %u memory configuration failed",
					dev_id);
			goto error;
		}
	}

	rte_bbdev_log_debug("Device %u set up with %u queues", dev_id,
			num_queues);
	return 0;

error:
	dev->data->num_queues = 0;
	rte_free(dev->data->queues);
	dev->data->queues = NULL;
	return ret;
}

int
rte_bbdev_intr_enable(uint16_t dev_id)
{
	int ret;
	struct rte_bbdev *dev = get_dev(dev_id);
	VALID_DEV_OR_RET_ERR(dev, dev_id);

	VALID_DEV_OPS_OR_RET_ERR(dev, dev_id);

	if (dev->data->started) {
		rte_bbdev_log(ERR,
				"Device %u cannot be configured when started",
				dev_id);
		return -EBUSY;
	}

	if (dev->dev_ops->intr_enable) {
		ret = dev->dev_ops->intr_enable(dev);
		if (ret < 0) {
			rte_bbdev_log(ERR,
					"Device %u interrupts configuration failed",
					dev_id);
			return ret;
		}
		rte_bbdev_log_debug("Enabled interrupts for dev %u", dev_id);
		return 0;
	}

	rte_bbdev_log(ERR, "Device %u doesn't support interrupts", dev_id);
	return -ENOTSUP;
}

int
rte_bbdev_queue_configure(uint16_t dev_id, uint16_t queue_id,
		const struct rte_bbdev_queue_conf *conf)
{
	int ret = 0;
	struct rte_bbdev_driver_info dev_info;
	struct rte_bbdev *dev = get_dev(dev_id);
	const struct rte_bbdev_op_cap *p;
	struct rte_bbdev_queue_conf *stored_conf;
	const char *op_type_str;
	VALID_DEV_OR_RET_ERR(dev, dev_id);

	VALID_DEV_OPS_OR_RET_ERR(dev, dev_id);

	VALID_QUEUE_OR_RET_ERR(queue_id, dev);

	if (dev->data->queues[queue_id].started || dev->data->started) {
		rte_bbdev_log(ERR,
				"Queue %u of device %u cannot be configured when started",
				queue_id, dev_id);
		return -EBUSY;
	}

	VALID_FUNC_OR_RET_ERR(dev->dev_ops->queue_release, dev_id);
	VALID_FUNC_OR_RET_ERR(dev->dev_ops->queue_setup, dev_id);

	/* Get device driver information to verify config is valid */
	VALID_FUNC_OR_RET_ERR(dev->dev_ops->info_get, dev_id);
	memset(&dev_info, 0, sizeof(dev_info));
	dev->dev_ops->info_get(dev, &dev_info);

	/* Check configuration is valid */
	if (conf != NULL) {
		if ((conf->op_type == RTE_BBDEV_OP_NONE) &&
				(dev_info.capabilities[0].type ==
				RTE_BBDEV_OP_NONE)) {
			ret = 1;
		} else {
			for (p = dev_info.capabilities;
					p->type != RTE_BBDEV_OP_NONE; p++) {
				if (conf->op_type == p->type) {
					ret = 1;
					break;
				}
			}
		}
		if (ret == 0) {
			rte_bbdev_log(ERR, "Invalid operation type");
			return -EINVAL;
		}
		if (conf->queue_size > dev_info.queue_size_lim) {
			rte_bbdev_log(ERR,
					"Size (%u) of queue %u of device %u must be: <= %u",
					conf->queue_size, queue_id, dev_id,
					dev_info.queue_size_lim);
			return -EINVAL;
		}
		if (!rte_is_power_of_2(conf->queue_size)) {
			rte_bbdev_log(ERR,
					"Size (%u) of queue %u of device %u must be a power of 2",
					conf->queue_size, queue_id, dev_id);
			return -EINVAL;
		}
		if (conf->op_type == RTE_BBDEV_OP_TURBO_DEC &&
			conf->priority > dev_info.max_ul_queue_priority) {
			rte_bbdev_log(ERR,
					"Priority (%u) of queue %u of bbdev %u must be <= %u",
					conf->priority, queue_id, dev_id,
					dev_info.max_ul_queue_priority);
			return -EINVAL;
		}
		if (conf->op_type == RTE_BBDEV_OP_TURBO_ENC &&
			conf->priority > dev_info.max_dl_queue_priority) {
			rte_bbdev_log(ERR,
					"Priority (%u) of queue %u of bbdev %u must be <= %u",
					conf->priority, queue_id, dev_id,
					dev_info.max_dl_queue_priority);
			return -EINVAL;
		}
	}

	/* Release existing queue (in case of queue reconfiguration) */
	if (dev->data->queues[queue_id].queue_private != NULL) {
		ret = dev->dev_ops->queue_release(dev, queue_id);
		if (ret < 0) {
			rte_bbdev_log(ERR, "Device %u queue %u release failed",
					dev_id, queue_id);
			return ret;
		}
	}

	/* Get driver to setup the queue */
	ret = dev->dev_ops->queue_setup(dev, queue_id, (conf != NULL) ?
			conf : &dev_info.default_queue_conf);
	if (ret < 0) {
		/* This may happen when trying different priority levels */
		rte_bbdev_log(INFO,
				"Device %u queue %u setup failed",
				dev_id, queue_id);
		return ret;
	}

	/* Store configuration */
	stored_conf = &dev->data->queues[queue_id].conf;
	memcpy(stored_conf,
			(conf != NULL) ? conf : &dev_info.default_queue_conf,
			sizeof(*stored_conf));

	op_type_str = rte_bbdev_op_type_str(stored_conf->op_type);
	if (op_type_str == NULL)
		return -EINVAL;

	rte_bbdev_log_debug("Configured dev%uq%u (size=%u, type=%s, prio=%u)",
			dev_id, queue_id, stored_conf->queue_size, op_type_str,
			stored_conf->priority);

	return 0;
}
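
/*
 * Illustrative sketch (not part of the library): a minimal application-side
 * configuration flow, assuming a valid dev_id and that a single encode queue
 * is wanted. The queue count, queue id and op type below are placeholders:
 *
 *	struct rte_bbdev_info info;
 *	struct rte_bbdev_queue_conf qconf;
 *
 *	rte_bbdev_info_get(dev_id, &info);
 *	rte_bbdev_setup_queues(dev_id, 1, rte_socket_id());
 *	qconf = info.drv.default_queue_conf;
 *	qconf.op_type = RTE_BBDEV_OP_TURBO_ENC;
 *	rte_bbdev_queue_configure(dev_id, 0, &qconf);
 *	rte_bbdev_start(dev_id);
 */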

int
rte_bbdev_start(uint16_t dev_id)
{
	int i;
	struct rte_bbdev *dev = get_dev(dev_id);
	VALID_DEV_OR_RET_ERR(dev, dev_id);

	VALID_DEV_OPS_OR_RET_ERR(dev, dev_id);

	if (dev->data->started) {
		rte_bbdev_log_debug("Device %u is already started", dev_id);
		return 0;
	}

	if (dev->dev_ops->start) {
		int ret = dev->dev_ops->start(dev);
		if (ret < 0) {
			rte_bbdev_log(ERR, "Device %u start failed", dev_id);
			return ret;
		}
	}

	/* Store new state */
	for (i = 0; i < dev->data->num_queues; i++)
		if (!dev->data->queues[i].conf.deferred_start)
			dev->data->queues[i].started = true;
	dev->data->started = true;

	rte_bbdev_log_debug("Started device %u", dev_id);
	return 0;
}

int
rte_bbdev_stop(uint16_t dev_id)
{
	struct rte_bbdev *dev = get_dev(dev_id);
	VALID_DEV_OR_RET_ERR(dev, dev_id);

	VALID_DEV_OPS_OR_RET_ERR(dev, dev_id);

	if (!dev->data->started) {
		rte_bbdev_log_debug("Device %u is already stopped", dev_id);
		return 0;
	}

	if (dev->dev_ops->stop)
		dev->dev_ops->stop(dev);
	dev->data->started = false;

	rte_bbdev_log_debug("Stopped device %u", dev_id);
	return 0;
}

int
rte_bbdev_close(uint16_t dev_id)
{
	int ret;
	uint16_t i;
	struct rte_bbdev *dev = get_dev(dev_id);
	VALID_DEV_OR_RET_ERR(dev, dev_id);

	VALID_DEV_OPS_OR_RET_ERR(dev, dev_id);

	if (dev->data->started) {
		ret = rte_bbdev_stop(dev_id);
		if (ret < 0) {
			rte_bbdev_log(ERR, "Device %u stop failed", dev_id);
			return ret;
		}
	}

	/* Free memory used by queues */
	for (i = 0; i < dev->data->num_queues; i++) {
		ret = dev->dev_ops->queue_release(dev, i);
		if (ret < 0) {
			rte_bbdev_log(ERR, "Device %u queue %u release failed",
					dev_id, i);
			return ret;
		}
	}
	rte_free(dev->data->queues);

	if (dev->dev_ops->close) {
		ret = dev->dev_ops->close(dev);
		if (ret < 0) {
			rte_bbdev_log(ERR, "Device %u close failed", dev_id);
			return ret;
		}
	}

	/* Clear configuration */
	dev->data->queues = NULL;
	dev->data->num_queues = 0;

	rte_bbdev_log_debug("Closed device %u", dev_id);
	return 0;
}

int
rte_bbdev_queue_start(uint16_t dev_id, uint16_t queue_id)
{
	struct rte_bbdev *dev = get_dev(dev_id);
	VALID_DEV_OR_RET_ERR(dev, dev_id);

	VALID_DEV_OPS_OR_RET_ERR(dev, dev_id);

	VALID_QUEUE_OR_RET_ERR(queue_id, dev);

	if (dev->data->queues[queue_id].started) {
		rte_bbdev_log_debug("Queue %u of device %u already started",
				queue_id, dev_id);
		return 0;
	}

	if (dev->dev_ops->queue_start) {
		int ret = dev->dev_ops->queue_start(dev, queue_id);
		if (ret < 0) {
			rte_bbdev_log(ERR, "Device %u queue %u start failed",
					dev_id, queue_id);
			return ret;
		}
	}
	dev->data->queues[queue_id].started = true;

	rte_bbdev_log_debug("Started queue %u of device %u", queue_id, dev_id);
	return 0;
}

int
rte_bbdev_queue_stop(uint16_t dev_id, uint16_t queue_id)
{
	struct rte_bbdev *dev = get_dev(dev_id);
	VALID_DEV_OR_RET_ERR(dev, dev_id);

	VALID_DEV_OPS_OR_RET_ERR(dev, dev_id);

	VALID_QUEUE_OR_RET_ERR(queue_id, dev);

	if (!dev->data->queues[queue_id].started) {
		rte_bbdev_log_debug("Queue %u of device %u already stopped",
				queue_id, dev_id);
		return 0;
	}

	if (dev->dev_ops->queue_stop) {
		int ret = dev->dev_ops->queue_stop(dev, queue_id);
		if (ret < 0) {
			rte_bbdev_log(ERR, "Device %u queue %u stop failed",
					dev_id, queue_id);
			return ret;
		}
	}
	dev->data->queues[queue_id].started = false;

	rte_bbdev_log_debug("Stopped queue %u of device %u", queue_id, dev_id);
	return 0;
}

/* Get device statistics */
static void
get_stats_from_queues(struct rte_bbdev *dev, struct rte_bbdev_stats *stats)
{
	unsigned int q_id;
	for (q_id = 0; q_id < dev->data->num_queues; q_id++) {
		struct rte_bbdev_stats *q_stats =
				&dev->data->queues[q_id].queue_stats;

		stats->enqueued_count += q_stats->enqueued_count;
		stats->dequeued_count += q_stats->dequeued_count;
		stats->enqueue_err_count += q_stats->enqueue_err_count;
		stats->dequeue_err_count += q_stats->dequeue_err_count;
	}
	rte_bbdev_log_debug("Got stats on %u", dev->data->dev_id);
}

static void
reset_stats_in_queues(struct rte_bbdev *dev)
{
	unsigned int q_id;
	for (q_id = 0; q_id < dev->data->num_queues; q_id++) {
		struct rte_bbdev_stats *q_stats =
				&dev->data->queues[q_id].queue_stats;

		memset(q_stats, 0, sizeof(*q_stats));
	}
	rte_bbdev_log_debug("Reset stats on %u", dev->data->dev_id);
}

int
rte_bbdev_stats_get(uint16_t dev_id, struct rte_bbdev_stats *stats)
{
	struct rte_bbdev *dev = get_dev(dev_id);
	VALID_DEV_OR_RET_ERR(dev, dev_id);

	VALID_DEV_OPS_OR_RET_ERR(dev, dev_id);

	if (stats == NULL) {
		rte_bbdev_log(ERR, "NULL stats structure");
		return -EINVAL;
	}

	memset(stats, 0, sizeof(*stats));
	if (dev->dev_ops->stats_get != NULL)
		dev->dev_ops->stats_get(dev, stats);
	else
		get_stats_from_queues(dev, stats);

	rte_bbdev_log_debug("Retrieved stats of device %u", dev_id);
	return 0;
}

int
rte_bbdev_stats_reset(uint16_t dev_id)
{
	struct rte_bbdev *dev = get_dev(dev_id);
	VALID_DEV_OR_RET_ERR(dev, dev_id);

	VALID_DEV_OPS_OR_RET_ERR(dev, dev_id);

	if (dev->dev_ops->stats_reset != NULL)
		dev->dev_ops->stats_reset(dev);
	else
		reset_stats_in_queues(dev);

	rte_bbdev_log_debug("Reset stats of device %u", dev_id);
	return 0;
}

int
rte_bbdev_info_get(uint16_t dev_id, struct rte_bbdev_info *dev_info)
{
	struct rte_bbdev *dev = get_dev(dev_id);
	VALID_DEV_OR_RET_ERR(dev, dev_id);

	VALID_FUNC_OR_RET_ERR(dev->dev_ops->info_get, dev_id);

	if (dev_info == NULL) {
		rte_bbdev_log(ERR, "NULL dev info structure");
		return -EINVAL;
	}

	/* Copy data maintained by device interface layer */
	memset(dev_info, 0, sizeof(*dev_info));
	dev_info->dev_name = dev->data->name;
	dev_info->num_queues = dev->data->num_queues;
	dev_info->device = dev->device;
	dev_info->socket_id = dev->data->socket_id;
	dev_info->started = dev->data->started;

	/* Copy data maintained by device driver layer */
	dev->dev_ops->info_get(dev, &dev_info->drv);

	rte_bbdev_log_debug("Retrieved info of device %u", dev_id);
	return 0;
}

int
rte_bbdev_queue_info_get(uint16_t dev_id, uint16_t queue_id,
		struct rte_bbdev_queue_info *queue_info)
{
	struct rte_bbdev *dev = get_dev(dev_id);
	VALID_DEV_OR_RET_ERR(dev, dev_id);

	VALID_QUEUE_OR_RET_ERR(queue_id, dev);

	if (queue_info == NULL) {
		rte_bbdev_log(ERR, "NULL queue info structure");
		return -EINVAL;
	}

	/* Copy data to output */
	memset(queue_info, 0, sizeof(*queue_info));
	queue_info->conf = dev->data->queues[queue_id].conf;
	queue_info->started = dev->data->queues[queue_id].started;

	rte_bbdev_log_debug("Retrieved info of queue %u of device %u",
			queue_id, dev_id);
	return 0;
}

/* Calculate size needed to store bbdev_op, depending on type */
static unsigned int
get_bbdev_op_size(enum rte_bbdev_op_type type)
{
	unsigned int result = 0;
	switch (type) {
	case RTE_BBDEV_OP_NONE:
		result = RTE_MAX(sizeof(struct rte_bbdev_dec_op),
				sizeof(struct rte_bbdev_enc_op));
		break;
	case RTE_BBDEV_OP_TURBO_DEC:
		result = sizeof(struct rte_bbdev_dec_op);
		break;
	case RTE_BBDEV_OP_TURBO_ENC:
		result = sizeof(struct rte_bbdev_enc_op);
		break;
	case RTE_BBDEV_OP_LDPC_DEC:
		result = sizeof(struct rte_bbdev_dec_op);
		break;
	case RTE_BBDEV_OP_LDPC_ENC:
		result = sizeof(struct rte_bbdev_enc_op);
		break;
	default:
		break;
	}

	return result;
}

/* Initialise a bbdev_op structure */
static void
bbdev_op_init(struct rte_mempool *mempool, void *arg, void *element,
		__rte_unused unsigned int n)
{
	enum rte_bbdev_op_type type = *(enum rte_bbdev_op_type *)arg;

	if (type == RTE_BBDEV_OP_TURBO_DEC || type == RTE_BBDEV_OP_LDPC_DEC) {
		struct rte_bbdev_dec_op *op = element;
		memset(op, 0, mempool->elt_size);
		op->mempool = mempool;
	} else if (type == RTE_BBDEV_OP_TURBO_ENC ||
			type == RTE_BBDEV_OP_LDPC_ENC) {
		struct rte_bbdev_enc_op *op = element;
		memset(op, 0, mempool->elt_size);
		op->mempool = mempool;
	}
}

struct rte_mempool *
rte_bbdev_op_pool_create(const char *name, enum rte_bbdev_op_type type,
		unsigned int num_elements, unsigned int cache_size,
		int socket_id)
{
	struct rte_bbdev_op_pool_private *priv;
	struct rte_mempool *mp;
	const char *op_type_str;

	if (name == NULL) {
		rte_bbdev_log(ERR, "NULL name for op pool");
		return NULL;
	}

	if (type >= RTE_BBDEV_OP_TYPE_COUNT) {
		rte_bbdev_log(ERR,
				"Invalid op type (%u), should be less than %u",
				type, RTE_BBDEV_OP_TYPE_COUNT);
		return NULL;
	}

	mp = rte_mempool_create(name, num_elements, get_bbdev_op_size(type),
			cache_size, sizeof(struct rte_bbdev_op_pool_private),
			NULL, NULL, bbdev_op_init, &type, socket_id, 0);
	if (mp == NULL) {
		rte_bbdev_log(ERR,
				"Failed to create op pool %s (num ops=%u, op size=%u) with error: %s",
				name, num_elements, get_bbdev_op_size(type),
				rte_strerror(rte_errno));
		return NULL;
	}

	op_type_str = rte_bbdev_op_type_str(type);
	if (op_type_str == NULL)
		return NULL;

	rte_bbdev_log_debug(
			"Op pool %s created for %u ops (type=%s, cache=%u, socket=%u, size=%u)",
			name, num_elements, op_type_str, cache_size, socket_id,
			get_bbdev_op_size(type));

	priv = (struct rte_bbdev_op_pool_private *)rte_mempool_get_priv(mp);
	priv->type = type;

	return mp;
}
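
/*
 * Illustrative sketch (not part of the library): creating a pool of decode
 * operations and drawing a burst from it. The pool name, element counts and
 * the length of the "ops" array are placeholders chosen for the example:
 *
 *	struct rte_bbdev_dec_op *ops[16];
 *	struct rte_mempool *pool = rte_bbdev_op_pool_create("dec_op_pool",
 *			RTE_BBDEV_OP_TURBO_DEC, 1024, 128, rte_socket_id());
 *
 *	if (pool != NULL && rte_bbdev_dec_op_alloc_bulk(pool, ops, 16) == 0) {
 *		... fill ops and enqueue with rte_bbdev_enqueue_dec_ops() ...
 *		rte_bbdev_dec_op_free_bulk(ops, 16);
 *	}
 */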

int
rte_bbdev_callback_register(uint16_t dev_id, enum rte_bbdev_event_type event,
		rte_bbdev_cb_fn cb_fn, void *cb_arg)
{
	struct rte_bbdev_callback *user_cb;
	struct rte_bbdev *dev = get_dev(dev_id);
	VALID_DEV_OR_RET_ERR(dev, dev_id);

	if (event >= RTE_BBDEV_EVENT_MAX) {
		rte_bbdev_log(ERR,
				"Invalid event type (%u), should be less than %u",
				event, RTE_BBDEV_EVENT_MAX);
		return -EINVAL;
	}

	if (cb_fn == NULL) {
		rte_bbdev_log(ERR, "NULL callback function");
		return -EINVAL;
	}

	rte_spinlock_lock(&rte_bbdev_cb_lock);

	TAILQ_FOREACH(user_cb, &(dev->list_cbs), next) {
		if (user_cb->cb_fn == cb_fn &&
				user_cb->cb_arg == cb_arg &&
				user_cb->event == event)
			break;
	}

	/* create a new callback. */
	if (user_cb == NULL) {
		user_cb = rte_zmalloc("INTR_USER_CALLBACK",
				sizeof(struct rte_bbdev_callback), 0);
		if (user_cb != NULL) {
			user_cb->cb_fn = cb_fn;
			user_cb->cb_arg = cb_arg;
			user_cb->event = event;
			TAILQ_INSERT_TAIL(&(dev->list_cbs), user_cb, next);
		}
	}

	rte_spinlock_unlock(&rte_bbdev_cb_lock);
	return (user_cb == NULL) ? -ENOMEM : 0;
}
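
/*
 * Illustrative sketch (not part of the library): an application callback for
 * device error events. The handler name and the registration call below are
 * hypothetical:
 *
 *	static void
 *	error_cb(uint16_t dev_id, enum rte_bbdev_event_type event,
 *			void *cb_arg, void *ret_param)
 *	{
 *		RTE_SET_USED(cb_arg);
 *		RTE_SET_USED(ret_param);
 *		printf("bbdev %u reported event %d\n", dev_id, event);
 *	}
 *
 *	rte_bbdev_callback_register(dev_id, RTE_BBDEV_EVENT_ERROR,
 *			error_cb, NULL);
 */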

int
rte_bbdev_callback_unregister(uint16_t dev_id, enum rte_bbdev_event_type event,
		rte_bbdev_cb_fn cb_fn, void *cb_arg)
{
	int ret = 0;
	struct rte_bbdev_callback *cb, *next;
	struct rte_bbdev *dev = get_dev(dev_id);
	VALID_DEV_OR_RET_ERR(dev, dev_id);

	if (event >= RTE_BBDEV_EVENT_MAX) {
		rte_bbdev_log(ERR,
				"Invalid event type (%u), should be less than %u",
				event, RTE_BBDEV_EVENT_MAX);
		return -EINVAL;
	}

	if (cb_fn == NULL) {
		rte_bbdev_log(ERR,
				"NULL callback function cannot be unregistered");
		return -EINVAL;
	}

	dev = &rte_bbdev_devices[dev_id];
	rte_spinlock_lock(&rte_bbdev_cb_lock);

	for (cb = TAILQ_FIRST(&dev->list_cbs); cb != NULL; cb = next) {

		next = TAILQ_NEXT(cb, next);

		if (cb->cb_fn != cb_fn || cb->event != event ||
				(cb_arg != (void *)-1 && cb->cb_arg != cb_arg))
			continue;

		/* If this callback is not executing right now, remove it. */
		if (cb->active == 0) {
			TAILQ_REMOVE(&(dev->list_cbs), cb, next);
			rte_free(cb);
		} else
			ret = -EAGAIN;
	}

	rte_spinlock_unlock(&rte_bbdev_cb_lock);
	return ret;
}

void
rte_bbdev_pmd_callback_process(struct rte_bbdev *dev,
		enum rte_bbdev_event_type event, void *ret_param)
{
	struct rte_bbdev_callback *cb_lst;
	struct rte_bbdev_callback dev_cb;

	if (dev == NULL) {
		rte_bbdev_log(ERR, "NULL device");
		return;
	}

	if (dev->data == NULL) {
		rte_bbdev_log(ERR, "NULL data structure");
		return;
	}

	if (event >= RTE_BBDEV_EVENT_MAX) {
		rte_bbdev_log(ERR,
				"Invalid event type (%u), should be less than %u",
				event, RTE_BBDEV_EVENT_MAX);
		return;
	}

	rte_spinlock_lock(&rte_bbdev_cb_lock);
	TAILQ_FOREACH(cb_lst, &(dev->list_cbs), next) {
		if (cb_lst->cb_fn == NULL || cb_lst->event != event)
			continue;
		dev_cb = *cb_lst;
		cb_lst->active = 1;
		if (ret_param != NULL)
			dev_cb.ret_param = ret_param;

		rte_spinlock_unlock(&rte_bbdev_cb_lock);
		dev_cb.cb_fn(dev->data->dev_id, dev_cb.event,
				dev_cb.cb_arg, dev_cb.ret_param);
		rte_spinlock_lock(&rte_bbdev_cb_lock);
		cb_lst->active = 0;
	}
	rte_spinlock_unlock(&rte_bbdev_cb_lock);
}

int
rte_bbdev_queue_intr_enable(uint16_t dev_id, uint16_t queue_id)
{
	struct rte_bbdev *dev = get_dev(dev_id);
	VALID_DEV_OR_RET_ERR(dev, dev_id);
	VALID_QUEUE_OR_RET_ERR(queue_id, dev);
	VALID_DEV_OPS_OR_RET_ERR(dev, dev_id);
	VALID_FUNC_OR_RET_ERR(dev->dev_ops->queue_intr_enable, dev_id);
	return dev->dev_ops->queue_intr_enable(dev, queue_id);
}

int
rte_bbdev_queue_intr_disable(uint16_t dev_id, uint16_t queue_id)
{
	struct rte_bbdev *dev = get_dev(dev_id);
	VALID_DEV_OR_RET_ERR(dev, dev_id);
	VALID_QUEUE_OR_RET_ERR(queue_id, dev);
	VALID_DEV_OPS_OR_RET_ERR(dev, dev_id);
	VALID_FUNC_OR_RET_ERR(dev->dev_ops->queue_intr_disable, dev_id);
	return dev->dev_ops->queue_intr_disable(dev, queue_id);
}

int
rte_bbdev_queue_intr_ctl(uint16_t dev_id, uint16_t queue_id, int epfd, int op,
		void *data)
{
	uint32_t vec;
	struct rte_bbdev *dev = get_dev(dev_id);
	struct rte_intr_handle *intr_handle;
	int ret;

	VALID_DEV_OR_RET_ERR(dev, dev_id);
	VALID_QUEUE_OR_RET_ERR(queue_id, dev);

	intr_handle = dev->intr_handle;
	if (!intr_handle || !intr_handle->intr_vec) {
		rte_bbdev_log(ERR, "Device %u intr handle unset", dev_id);
		return -ENOTSUP;
	}

	if (queue_id >= RTE_MAX_RXTX_INTR_VEC_ID) {
		rte_bbdev_log(ERR, "Device %u queue_id %u is too big",
				dev_id, queue_id);
		return -ENOTSUP;
	}

	vec = intr_handle->intr_vec[queue_id];
	ret = rte_intr_rx_ctl(intr_handle, epfd, op, vec, data);
	if (ret && (ret != -EEXIST)) {
		rte_bbdev_log(ERR,
				"dev %u q %u int ctl error op %d epfd %d vec %u",
				dev_id, queue_id, op, epfd, vec);
		return ret;
	}

	return 0;
}
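
/*
 * Illustrative sketch (not part of the library): arming a queue interrupt on
 * the per-thread epoll fd and waiting for it. The queue id and the blocking
 * timeout are placeholders, and error handling is omitted for brevity:
 *
 *	struct rte_epoll_event ev;
 *
 *	rte_bbdev_queue_intr_enable(dev_id, queue_id);
 *	rte_bbdev_queue_intr_ctl(dev_id, queue_id, RTE_EPOLL_PER_THREAD,
 *			RTE_INTR_EVENT_ADD, NULL);
 *	rte_epoll_wait(RTE_EPOLL_PER_THREAD, &ev, 1, -1);
 */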

const char *
rte_bbdev_op_type_str(enum rte_bbdev_op_type op_type)
{
	static const char * const op_types[] = {
		"RTE_BBDEV_OP_NONE",
		"RTE_BBDEV_OP_TURBO_DEC",
		"RTE_BBDEV_OP_TURBO_ENC",
		"RTE_BBDEV_OP_LDPC_DEC",
		"RTE_BBDEV_OP_LDPC_ENC",
	};

	if (op_type < RTE_BBDEV_OP_TYPE_COUNT)
		return op_types[op_type];

	rte_bbdev_log(ERR, "Invalid operation type");
	return NULL;
}