/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2017 Intel Corporation
 */

#include <stdint.h>
#include <string.h>
#include <stdbool.h>

#include <rte_compat.h>
#include <rte_common.h>
#include <rte_errno.h>
#include <rte_log.h>
#include <rte_debug.h>
#include <rte_eal.h>
#include <rte_malloc.h>
#include <rte_mempool.h>
#include <rte_memzone.h>
#include <rte_lcore.h>
#include <rte_dev.h>
#include <rte_spinlock.h>
#include <rte_tailq.h>
#include <rte_interrupts.h>

#include "rte_bbdev_op.h"
#include "rte_bbdev.h"
#include "rte_bbdev_pmd.h"

#define DEV_NAME "BBDEV"


/* Helper macro to check dev_id is valid */
#define VALID_DEV_OR_RET_ERR(dev, dev_id) do { \
        if (dev == NULL) { \
                rte_bbdev_log(ERR, "device %u is invalid", dev_id); \
                return -ENODEV; \
        } \
} while (0)

/* Helper macro to check dev_ops is valid */
#define VALID_DEV_OPS_OR_RET_ERR(dev, dev_id) do { \
        if (dev->dev_ops == NULL) { \
                rte_bbdev_log(ERR, "NULL dev_ops structure in device %u", \
                                dev_id); \
                return -ENODEV; \
        } \
} while (0)

/* Helper macro to check that driver implements required function pointer */
#define VALID_FUNC_OR_RET_ERR(func, dev_id) do { \
        if (func == NULL) { \
                rte_bbdev_log(ERR, "device %u does not support %s", \
                                dev_id, #func); \
                return -ENOTSUP; \
        } \
} while (0)

/* Helper macro to check that queue is valid */
#define VALID_QUEUE_OR_RET_ERR(queue_id, dev) do { \
        if (queue_id >= dev->data->num_queues) { \
                rte_bbdev_log(ERR, "Invalid queue_id %u for device %u", \
                                queue_id, dev->data->dev_id); \
                return -ERANGE; \
        } \
} while (0)

/* List of callback functions registered by an application */
struct rte_bbdev_callback {
        TAILQ_ENTRY(rte_bbdev_callback) next;  /* Callbacks list */
        rte_bbdev_cb_fn cb_fn;  /* Callback address */
        void *cb_arg;  /* Parameter for callback */
        void *ret_param;  /* Return parameter */
        enum rte_bbdev_event_type event; /* Interrupt event type */
        uint32_t active; /* Callback is executing */
};

/* spinlock for bbdev device callbacks */
static rte_spinlock_t rte_bbdev_cb_lock = RTE_SPINLOCK_INITIALIZER;

/*
 * Global array of all devices. This is not static because it's used by the
 * inline enqueue and dequeue functions
 */
struct rte_bbdev rte_bbdev_devices[RTE_BBDEV_MAX_DEVS];

/* Global array with rte_bbdev_data structures */
static struct rte_bbdev_data *rte_bbdev_data;

/* Memzone name for global bbdev data pool */
static const char *MZ_RTE_BBDEV_DATA = "rte_bbdev_data";

/* Number of currently valid devices */
static uint16_t num_devs;

/* Return pointer to device structure, with validity check */
static struct rte_bbdev *
get_dev(uint16_t dev_id)
{
        if (rte_bbdev_is_valid(dev_id))
                return &rte_bbdev_devices[dev_id];
        return NULL;
}

/* Allocate global data array */
static int
rte_bbdev_data_alloc(void)
{
        const unsigned int flags = 0;
        const struct rte_memzone *mz;

        if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
                mz = rte_memzone_reserve(MZ_RTE_BBDEV_DATA,
                                RTE_BBDEV_MAX_DEVS * sizeof(*rte_bbdev_data),
                                rte_socket_id(), flags);
        } else
                mz = rte_memzone_lookup(MZ_RTE_BBDEV_DATA);
        if (mz == NULL) {
                rte_bbdev_log(CRIT,
                                "Cannot allocate memzone for bbdev port data");
                return -ENOMEM;
        }

        rte_bbdev_data = mz->addr;
        if (rte_eal_process_type() == RTE_PROC_PRIMARY)
                memset(rte_bbdev_data, 0,
                                RTE_BBDEV_MAX_DEVS * sizeof(*rte_bbdev_data));
        return 0;
}

/*
 * Find the data allocated for the device or, if none is found, return the
 * first unused bbdev data slot. If all structures are in use and none belongs
 * to the device, return NULL.
 */
static struct rte_bbdev_data *
find_bbdev_data(const char *name)
{
        uint16_t data_id;

        for (data_id = 0; data_id < RTE_BBDEV_MAX_DEVS; ++data_id) {
                if (strlen(rte_bbdev_data[data_id].name) == 0) {
                        memset(&rte_bbdev_data[data_id], 0,
                                        sizeof(struct rte_bbdev_data));
                        return &rte_bbdev_data[data_id];
                } else if (strncmp(rte_bbdev_data[data_id].name, name,
                                RTE_BBDEV_NAME_MAX_LEN) == 0)
                        return &rte_bbdev_data[data_id];
        }

        return NULL;
}

/* Find lowest device id with no attached device */
static uint16_t
find_free_dev_id(void)
{
        uint16_t i;
        for (i = 0; i < RTE_BBDEV_MAX_DEVS; i++) {
                if (rte_bbdev_devices[i].state == RTE_BBDEV_UNUSED)
                        return i;
        }
        return RTE_BBDEV_MAX_DEVS;
}

struct rte_bbdev * __rte_experimental
rte_bbdev_allocate(const char *name)
{
        int ret;
        struct rte_bbdev *bbdev;
        uint16_t dev_id;

        if (name == NULL) {
                rte_bbdev_log(ERR, "Invalid null device name");
                return NULL;
        }

        if (rte_bbdev_get_named_dev(name) != NULL) {
                rte_bbdev_log(ERR, "Device \"%s\" is already allocated", name);
                return NULL;
        }

        dev_id = find_free_dev_id();
        if (dev_id == RTE_BBDEV_MAX_DEVS) {
                rte_bbdev_log(ERR, "Reached maximum number of devices");
                return NULL;
        }

        bbdev = &rte_bbdev_devices[dev_id];

        if (rte_bbdev_data == NULL) {
                ret = rte_bbdev_data_alloc();
                if (ret != 0)
                        return NULL;
        }

        bbdev->data = find_bbdev_data(name);
        if (bbdev->data == NULL) {
                rte_bbdev_log(ERR,
                                "Max BBDevs already allocated in multi-process environment!");
                return NULL;
        }

        rte_atomic16_inc(&bbdev->data->process_cnt);
        bbdev->data->dev_id = dev_id;
        bbdev->state = RTE_BBDEV_INITIALIZED;

        ret = snprintf(bbdev->data->name, RTE_BBDEV_NAME_MAX_LEN, "%s", name);
        if ((ret < 0) || (ret >= RTE_BBDEV_NAME_MAX_LEN)) {
                rte_bbdev_log(ERR, "Copying device name \"%s\" failed", name);
                return NULL;
        }

        /* init user callbacks */
        TAILQ_INIT(&(bbdev->list_cbs));

        num_devs++;

        rte_bbdev_log_debug("Initialised device %s (id = %u). Num devices = %u",
                        name, dev_id, num_devs);

        return bbdev;
}
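
/*
 * Usage sketch (illustrative only, not part of the library): a PMD probe
 * routine would typically allocate a device like this. The function name
 * "my_pmd_probe" and the ops table "my_pmd_ops" are hypothetical.
 *
 *      static int
 *      my_pmd_probe(const char *name)
 *      {
 *              struct rte_bbdev *bbdev = rte_bbdev_allocate(name);
 *
 *              if (bbdev == NULL)
 *                      return -ENODEV;
 *              bbdev->dev_ops = &my_pmd_ops;
 *              bbdev->data->socket_id = rte_socket_id();
 *              return 0;
 *      }
 */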

int __rte_experimental
rte_bbdev_release(struct rte_bbdev *bbdev)
{
        uint16_t dev_id;
        struct rte_bbdev_callback *cb, *next;

        if (bbdev == NULL) {
                rte_bbdev_log(ERR, "NULL bbdev");
                return -ENODEV;
        }
        dev_id = bbdev->data->dev_id;

        /* free all callbacks from the device's list */
        for (cb = TAILQ_FIRST(&bbdev->list_cbs); cb != NULL; cb = next) {

                next = TAILQ_NEXT(cb, next);
                TAILQ_REMOVE(&(bbdev->list_cbs), cb, next);
                rte_free(cb);
        }

        /* clear shared BBDev Data if no process is using the device anymore */
        if (rte_atomic16_dec_and_test(&bbdev->data->process_cnt))
                memset(bbdev->data, 0, sizeof(*bbdev->data));

        memset(bbdev, 0, sizeof(*bbdev));
        num_devs--;
        bbdev->state = RTE_BBDEV_UNUSED;

        rte_bbdev_log_debug(
                        "Un-initialised device id = %u. Num devices = %u",
                        dev_id, num_devs);
        return 0;
}

struct rte_bbdev * __rte_experimental
rte_bbdev_get_named_dev(const char *name)
{
        unsigned int i;

        if (name == NULL) {
                rte_bbdev_log(ERR, "NULL driver name");
                return NULL;
        }

        for (i = 0; i < RTE_BBDEV_MAX_DEVS; i++) {
                struct rte_bbdev *dev = get_dev(i);
                if (dev && (strncmp(dev->data->name,
                                name, RTE_BBDEV_NAME_MAX_LEN) == 0))
                        return dev;
        }

        return NULL;
}

uint16_t __rte_experimental
rte_bbdev_count(void)
{
        return num_devs;
}

bool __rte_experimental
rte_bbdev_is_valid(uint16_t dev_id)
{
        if ((dev_id < RTE_BBDEV_MAX_DEVS) &&
                rte_bbdev_devices[dev_id].state == RTE_BBDEV_INITIALIZED)
                return true;
        return false;
}

uint16_t __rte_experimental
rte_bbdev_find_next(uint16_t dev_id)
{
        dev_id++;
        for (; dev_id < RTE_BBDEV_MAX_DEVS; dev_id++)
                if (rte_bbdev_is_valid(dev_id))
                        break;
        return dev_id;
}
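
/*
 * Usage sketch (illustrative only, not part of the library): enumerating
 * every initialised device using only the accessors defined above.
 *
 *      uint16_t dev_id;
 *
 *      for (dev_id = 0; dev_id < RTE_BBDEV_MAX_DEVS; dev_id++) {
 *              if (rte_bbdev_is_valid(dev_id))
 *                      printf("bbdev %u of %u is initialised\n",
 *                                      dev_id, rte_bbdev_count());
 *      }
 */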

int __rte_experimental
rte_bbdev_setup_queues(uint16_t dev_id, uint16_t num_queues, int socket_id)
{
        unsigned int i;
        int ret;
        struct rte_bbdev_driver_info dev_info;
        struct rte_bbdev *dev = get_dev(dev_id);
        VALID_DEV_OR_RET_ERR(dev, dev_id);

        VALID_DEV_OPS_OR_RET_ERR(dev, dev_id);

        if (dev->data->started) {
                rte_bbdev_log(ERR,
                                "Device %u cannot be configured when started",
                                dev_id);
                return -EBUSY;
        }

        /* Get device driver information to get max number of queues */
        VALID_FUNC_OR_RET_ERR(dev->dev_ops->info_get, dev_id);
        memset(&dev_info, 0, sizeof(dev_info));
        dev->dev_ops->info_get(dev, &dev_info);

        if ((num_queues == 0) || (num_queues > dev_info.max_num_queues)) {
                rte_bbdev_log(ERR,
                                "Device %u supports 0 < N <= %u queues, not %u",
                                dev_id, dev_info.max_num_queues, num_queues);
                return -EINVAL;
        }

        /* If re-configuration, get driver to free existing internal memory */
        if (dev->data->queues != NULL) {
                VALID_FUNC_OR_RET_ERR(dev->dev_ops->queue_release, dev_id);
                for (i = 0; i < dev->data->num_queues; i++) {
                        int ret = dev->dev_ops->queue_release(dev, i);
                        if (ret < 0) {
                                rte_bbdev_log(ERR,
                                                "Device %u queue %u release failed",
                                                dev_id, i);
                                return ret;
                        }
                }
                /* Call optional device close */
                if (dev->dev_ops->close) {
                        ret = dev->dev_ops->close(dev);
                        if (ret < 0) {
                                rte_bbdev_log(ERR,
                                                "Device %u couldn't be closed",
                                                dev_id);
                                return ret;
                        }
                }
                rte_free(dev->data->queues);
        }

        /* Allocate queue pointers */
        dev->data->queues = rte_calloc_socket(DEV_NAME, num_queues,
                        sizeof(dev->data->queues[0]), RTE_CACHE_LINE_SIZE,
                        dev->data->socket_id);
        if (dev->data->queues == NULL) {
                rte_bbdev_log(ERR,
                                "calloc of %u queues for device %u on socket %i failed",
                                num_queues, dev_id, dev->data->socket_id);
                return -ENOMEM;
        }

        dev->data->num_queues = num_queues;

        /* Call optional device configuration */
        if (dev->dev_ops->setup_queues) {
                ret = dev->dev_ops->setup_queues(dev, num_queues, socket_id);
                if (ret < 0) {
                        rte_bbdev_log(ERR,
                                        "Device %u memory configuration failed",
                                        dev_id);
                        goto error;
                }
        }

        rte_bbdev_log_debug("Device %u set up with %u queues", dev_id,
                        num_queues);
        return 0;

error:
        dev->data->num_queues = 0;
        rte_free(dev->data->queues);
        dev->data->queues = NULL;
        return ret;
}
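
/*
 * Usage sketch (illustrative only, not part of the library): the expected
 * configuration order for an application. Queues must be set up and
 * configured before rte_bbdev_start(); "NUM_QUEUES" is a hypothetical
 * application constant, and passing NULL to rte_bbdev_queue_configure()
 * selects the driver's default_queue_conf (see below).
 *
 *      uint16_t q;
 *      int ret;
 *
 *      ret = rte_bbdev_setup_queues(dev_id, NUM_QUEUES, rte_socket_id());
 *      if (ret < 0)
 *              return ret;
 *      for (q = 0; q < NUM_QUEUES; q++) {
 *              ret = rte_bbdev_queue_configure(dev_id, q, NULL);
 *              if (ret < 0)
 *                      return ret;
 *      }
 *      return rte_bbdev_start(dev_id);
 */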

int __rte_experimental
rte_bbdev_intr_enable(uint16_t dev_id)
{
        int ret;
        struct rte_bbdev *dev = get_dev(dev_id);
        VALID_DEV_OR_RET_ERR(dev, dev_id);

        VALID_DEV_OPS_OR_RET_ERR(dev, dev_id);

        if (dev->data->started) {
                rte_bbdev_log(ERR,
                                "Device %u cannot be configured when started",
                                dev_id);
                return -EBUSY;
        }

        if (dev->dev_ops->intr_enable) {
                ret = dev->dev_ops->intr_enable(dev);
                if (ret < 0) {
                        rte_bbdev_log(ERR,
                                        "Device %u interrupts configuration failed",
                                        dev_id);
                        return ret;
                }
                rte_bbdev_log_debug("Enabled interrupts for dev %u", dev_id);
                return 0;
        }

        rte_bbdev_log(ERR, "Device %u doesn't support interrupts", dev_id);
        return -ENOTSUP;
}

int __rte_experimental
rte_bbdev_queue_configure(uint16_t dev_id, uint16_t queue_id,
                const struct rte_bbdev_queue_conf *conf)
{
        int ret = 0;
        struct rte_bbdev_driver_info dev_info;
        struct rte_bbdev *dev = get_dev(dev_id);
        const struct rte_bbdev_op_cap *p;
        struct rte_bbdev_queue_conf *stored_conf;
        const char *op_type_str;
        VALID_DEV_OR_RET_ERR(dev, dev_id);

        VALID_DEV_OPS_OR_RET_ERR(dev, dev_id);

        VALID_QUEUE_OR_RET_ERR(queue_id, dev);

        if (dev->data->queues[queue_id].started || dev->data->started) {
                rte_bbdev_log(ERR,
                                "Queue %u of device %u cannot be configured when started",
                                queue_id, dev_id);
                return -EBUSY;
        }

        VALID_FUNC_OR_RET_ERR(dev->dev_ops->queue_release, dev_id);
        VALID_FUNC_OR_RET_ERR(dev->dev_ops->queue_setup, dev_id);

        /* Get device driver information to verify config is valid */
        VALID_FUNC_OR_RET_ERR(dev->dev_ops->info_get, dev_id);
        memset(&dev_info, 0, sizeof(dev_info));
        dev->dev_ops->info_get(dev, &dev_info);

        /* Check configuration is valid */
        if (conf != NULL) {
                if ((conf->op_type == RTE_BBDEV_OP_NONE) &&
                                (dev_info.capabilities[0].type ==
                                RTE_BBDEV_OP_NONE)) {
                        ret = 1;
                } else {
                        for (p = dev_info.capabilities;
                                        p->type != RTE_BBDEV_OP_NONE; p++) {
                                if (conf->op_type == p->type) {
                                        ret = 1;
                                        break;
                                }
                        }
                }
                if (ret == 0) {
                        rte_bbdev_log(ERR, "Invalid operation type");
                        return -EINVAL;
                }
                if (conf->queue_size > dev_info.queue_size_lim) {
                        rte_bbdev_log(ERR,
                                        "Size (%u) of queue %u of device %u must be: <= %u",
                                        conf->queue_size, queue_id, dev_id,
                                        dev_info.queue_size_lim);
                        return -EINVAL;
                }
                if (!rte_is_power_of_2(conf->queue_size)) {
                        rte_bbdev_log(ERR,
                                        "Size (%u) of queue %u of device %u must be a power of 2",
                                        conf->queue_size, queue_id, dev_id);
                        return -EINVAL;
                }
                if (conf->priority > dev_info.max_queue_priority) {
                        rte_bbdev_log(ERR,
                                        "Priority (%u) of queue %u of bbdev %u must be <= %u",
                                        conf->priority, queue_id, dev_id,
                                        dev_info.max_queue_priority);
                        return -EINVAL;
                }
        }

        /* Release existing queue (in case of queue reconfiguration) */
        if (dev->data->queues[queue_id].queue_private != NULL) {
                ret = dev->dev_ops->queue_release(dev, queue_id);
                if (ret < 0) {
                        rte_bbdev_log(ERR, "Device %u queue %u release failed",
                                        dev_id, queue_id);
                        return ret;
                }
        }

        /* Get driver to setup the queue */
        ret = dev->dev_ops->queue_setup(dev, queue_id, (conf != NULL) ?
                        conf : &dev_info.default_queue_conf);
        if (ret < 0) {
                rte_bbdev_log(ERR,
                                "Device %u queue %u setup failed", dev_id,
                                queue_id);
                return ret;
        }

        /* Store configuration */
        stored_conf = &dev->data->queues[queue_id].conf;
        memcpy(stored_conf,
                        (conf != NULL) ? conf : &dev_info.default_queue_conf,
                        sizeof(*stored_conf));

        op_type_str = rte_bbdev_op_type_str(stored_conf->op_type);
        if (op_type_str == NULL)
                return -EINVAL;

        rte_bbdev_log_debug("Configured dev%uq%u (size=%u, type=%s, prio=%u)",
                        dev_id, queue_id, stored_conf->queue_size, op_type_str,
                        stored_conf->priority);

        return 0;
}
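
/*
 * Usage sketch (illustrative only, not part of the library): deriving an
 * explicit queue configuration from the driver defaults instead of passing
 * NULL. The queue size of 64 is an arbitrary example value; it must be a
 * power of 2 and no larger than info.drv.queue_size_lim.
 *
 *      struct rte_bbdev_info info;
 *      struct rte_bbdev_queue_conf conf;
 *
 *      rte_bbdev_info_get(dev_id, &info);
 *      conf = info.drv.default_queue_conf;
 *      conf.queue_size = 64;
 *      return rte_bbdev_queue_configure(dev_id, queue_id, &conf);
 */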

int __rte_experimental
rte_bbdev_start(uint16_t dev_id)
{
        int i;
        struct rte_bbdev *dev = get_dev(dev_id);
        VALID_DEV_OR_RET_ERR(dev, dev_id);

        VALID_DEV_OPS_OR_RET_ERR(dev, dev_id);

        if (dev->data->started) {
                rte_bbdev_log_debug("Device %u is already started", dev_id);
                return 0;
        }

        if (dev->dev_ops->start) {
                int ret = dev->dev_ops->start(dev);
                if (ret < 0) {
                        rte_bbdev_log(ERR, "Device %u start failed", dev_id);
                        return ret;
                }
        }

        /* Store new state */
        for (i = 0; i < dev->data->num_queues; i++)
                if (!dev->data->queues[i].conf.deferred_start)
                        dev->data->queues[i].started = true;
        dev->data->started = true;

        rte_bbdev_log_debug("Started device %u", dev_id);
        return 0;
}

int __rte_experimental
rte_bbdev_stop(uint16_t dev_id)
{
        struct rte_bbdev *dev = get_dev(dev_id);
        VALID_DEV_OR_RET_ERR(dev, dev_id);

        VALID_DEV_OPS_OR_RET_ERR(dev, dev_id);

        if (!dev->data->started) {
                rte_bbdev_log_debug("Device %u is already stopped", dev_id);
                return 0;
        }

        if (dev->dev_ops->stop)
                dev->dev_ops->stop(dev);
        dev->data->started = false;

        rte_bbdev_log_debug("Stopped device %u", dev_id);
        return 0;
}

int __rte_experimental
rte_bbdev_close(uint16_t dev_id)
{
        int ret;
        uint16_t i;
        struct rte_bbdev *dev = get_dev(dev_id);
        VALID_DEV_OR_RET_ERR(dev, dev_id);

        VALID_DEV_OPS_OR_RET_ERR(dev, dev_id);

        if (dev->data->started) {
                ret = rte_bbdev_stop(dev_id);
                if (ret < 0) {
                        rte_bbdev_log(ERR, "Device %u stop failed", dev_id);
                        return ret;
                }
        }

        /* Free memory used by queues */
        for (i = 0; i < dev->data->num_queues; i++) {
                ret = dev->dev_ops->queue_release(dev, i);
                if (ret < 0) {
                        rte_bbdev_log(ERR, "Device %u queue %u release failed",
                                        dev_id, i);
                        return ret;
                }
        }
        rte_free(dev->data->queues);

        if (dev->dev_ops->close) {
                ret = dev->dev_ops->close(dev);
                if (ret < 0) {
                        rte_bbdev_log(ERR, "Device %u close failed", dev_id);
                        return ret;
                }
        }

        /* Clear configuration */
        dev->data->queues = NULL;
        dev->data->num_queues = 0;

        rte_bbdev_log_debug("Closed device %u", dev_id);
        return 0;
}
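
/*
 * Usage sketch (illustrative only, not part of the library): tearing a
 * device down. An explicit rte_bbdev_stop() is optional here, since
 * rte_bbdev_close() stops a started device itself.
 *
 *      rte_bbdev_stop(dev_id);
 *      if (rte_bbdev_close(dev_id) < 0)
 *              printf("closing bbdev %u failed\n", dev_id);
 */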

int __rte_experimental
rte_bbdev_queue_start(uint16_t dev_id, uint16_t queue_id)
{
        struct rte_bbdev *dev = get_dev(dev_id);
        VALID_DEV_OR_RET_ERR(dev, dev_id);

        VALID_DEV_OPS_OR_RET_ERR(dev, dev_id);

        VALID_QUEUE_OR_RET_ERR(queue_id, dev);

        if (dev->data->queues[queue_id].started) {
                rte_bbdev_log_debug("Queue %u of device %u already started",
                                queue_id, dev_id);
                return 0;
        }

        if (dev->dev_ops->queue_start) {
                int ret = dev->dev_ops->queue_start(dev, queue_id);
                if (ret < 0) {
                        rte_bbdev_log(ERR, "Device %u queue %u start failed",
                                        dev_id, queue_id);
                        return ret;
                }
        }
        dev->data->queues[queue_id].started = true;

        rte_bbdev_log_debug("Started queue %u of device %u", queue_id, dev_id);
        return 0;
}

int __rte_experimental
rte_bbdev_queue_stop(uint16_t dev_id, uint16_t queue_id)
{
        struct rte_bbdev *dev = get_dev(dev_id);
        VALID_DEV_OR_RET_ERR(dev, dev_id);

        VALID_DEV_OPS_OR_RET_ERR(dev, dev_id);

        VALID_QUEUE_OR_RET_ERR(queue_id, dev);

        if (!dev->data->queues[queue_id].started) {
                rte_bbdev_log_debug("Queue %u of device %u already stopped",
                                queue_id, dev_id);
                return 0;
        }

        if (dev->dev_ops->queue_stop) {
                int ret = dev->dev_ops->queue_stop(dev, queue_id);
                if (ret < 0) {
                        rte_bbdev_log(ERR, "Device %u queue %u stop failed",
                                        dev_id, queue_id);
                        return ret;
                }
        }
        dev->data->queues[queue_id].started = false;

        rte_bbdev_log_debug("Stopped queue %u of device %u", queue_id, dev_id);
        return 0;
}

/* Get device statistics */
static void
get_stats_from_queues(struct rte_bbdev *dev, struct rte_bbdev_stats *stats)
{
        unsigned int q_id;
        for (q_id = 0; q_id < dev->data->num_queues; q_id++) {
                struct rte_bbdev_stats *q_stats =
                                &dev->data->queues[q_id].queue_stats;

                stats->enqueued_count += q_stats->enqueued_count;
                stats->dequeued_count += q_stats->dequeued_count;
                stats->enqueue_err_count += q_stats->enqueue_err_count;
                stats->dequeue_err_count += q_stats->dequeue_err_count;
        }
        rte_bbdev_log_debug("Got stats on %u", dev->data->dev_id);
}

static void
reset_stats_in_queues(struct rte_bbdev *dev)
{
        unsigned int q_id;
        for (q_id = 0; q_id < dev->data->num_queues; q_id++) {
                struct rte_bbdev_stats *q_stats =
                                &dev->data->queues[q_id].queue_stats;

                memset(q_stats, 0, sizeof(*q_stats));
        }
        rte_bbdev_log_debug("Reset stats on %u", dev->data->dev_id);
}

int __rte_experimental
rte_bbdev_stats_get(uint16_t dev_id, struct rte_bbdev_stats *stats)
{
        struct rte_bbdev *dev = get_dev(dev_id);
        VALID_DEV_OR_RET_ERR(dev, dev_id);

        VALID_DEV_OPS_OR_RET_ERR(dev, dev_id);

        if (stats == NULL) {
                rte_bbdev_log(ERR, "NULL stats structure");
                return -EINVAL;
        }

        memset(stats, 0, sizeof(*stats));
        if (dev->dev_ops->stats_get != NULL)
                dev->dev_ops->stats_get(dev, stats);
        else
                get_stats_from_queues(dev, stats);

        rte_bbdev_log_debug("Retrieved stats of device %u", dev_id);
        return 0;
}
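
/*
 * Usage sketch (illustrative only, not part of the library): taking a
 * statistics snapshot. The structure is zeroed by rte_bbdev_stats_get()
 * before being filled, so no caller-side memset is needed. <stdio.h> and
 * <inttypes.h> are assumed for printf() and PRIu64.
 *
 *      struct rte_bbdev_stats stats;
 *
 *      if (rte_bbdev_stats_get(dev_id, &stats) == 0)
 *              printf("enq=%" PRIu64 " deq=%" PRIu64 "\n",
 *                              stats.enqueued_count, stats.dequeued_count);
 */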

int __rte_experimental
rte_bbdev_stats_reset(uint16_t dev_id)
{
        struct rte_bbdev *dev = get_dev(dev_id);
        VALID_DEV_OR_RET_ERR(dev, dev_id);

        VALID_DEV_OPS_OR_RET_ERR(dev, dev_id);

        if (dev->dev_ops->stats_reset != NULL)
                dev->dev_ops->stats_reset(dev);
        else
                reset_stats_in_queues(dev);

        rte_bbdev_log_debug("Reset stats of device %u", dev_id);
        return 0;
}

int __rte_experimental
rte_bbdev_info_get(uint16_t dev_id, struct rte_bbdev_info *dev_info)
{
        struct rte_bbdev *dev = get_dev(dev_id);
        VALID_DEV_OR_RET_ERR(dev, dev_id);

        VALID_FUNC_OR_RET_ERR(dev->dev_ops->info_get, dev_id);

        if (dev_info == NULL) {
                rte_bbdev_log(ERR, "NULL dev info structure");
                return -EINVAL;
        }

        /* Copy data maintained by device interface layer */
        memset(dev_info, 0, sizeof(*dev_info));
        dev_info->dev_name = dev->data->name;
        dev_info->num_queues = dev->data->num_queues;
        dev_info->bus = rte_bus_find_by_device(dev->device);
        dev_info->socket_id = dev->data->socket_id;
        dev_info->started = dev->data->started;

        /* Copy data maintained by device driver layer */
        dev->dev_ops->info_get(dev, &dev_info->drv);

        rte_bbdev_log_debug("Retrieved info of device %u", dev_id);
        return 0;
}
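
/*
 * Usage sketch (illustrative only, not part of the library): listing a
 * device's capabilities. The drv.capabilities array is terminated by an
 * entry of type RTE_BBDEV_OP_NONE, the same convention relied on in
 * rte_bbdev_queue_configure() above.
 *
 *      struct rte_bbdev_info info;
 *      const struct rte_bbdev_op_cap *cap;
 *
 *      rte_bbdev_info_get(dev_id, &info);
 *      for (cap = info.drv.capabilities; cap->type != RTE_BBDEV_OP_NONE; cap++)
 *              printf("%s supports %s\n", info.dev_name,
 *                              rte_bbdev_op_type_str(cap->type));
 */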

int __rte_experimental
rte_bbdev_queue_info_get(uint16_t dev_id, uint16_t queue_id,
                struct rte_bbdev_queue_info *queue_info)
{
        struct rte_bbdev *dev = get_dev(dev_id);
        VALID_DEV_OR_RET_ERR(dev, dev_id);

        VALID_QUEUE_OR_RET_ERR(queue_id, dev);

        if (queue_info == NULL) {
                rte_bbdev_log(ERR, "NULL queue info structure");
                return -EINVAL;
        }

        /* Copy data to output */
        memset(queue_info, 0, sizeof(*queue_info));
        queue_info->conf = dev->data->queues[queue_id].conf;
        queue_info->started = dev->data->queues[queue_id].started;

        rte_bbdev_log_debug("Retrieved info of queue %u of device %u",
                        queue_id, dev_id);
        return 0;
}

/* Calculate size needed to store bbdev_op, depending on type */
static unsigned int
get_bbdev_op_size(enum rte_bbdev_op_type type)
{
        unsigned int result = 0;
        switch (type) {
        case RTE_BBDEV_OP_NONE:
                result = RTE_MAX(sizeof(struct rte_bbdev_dec_op),
                                sizeof(struct rte_bbdev_enc_op));
                break;
        case RTE_BBDEV_OP_TURBO_DEC:
                result = sizeof(struct rte_bbdev_dec_op);
                break;
        case RTE_BBDEV_OP_TURBO_ENC:
                result = sizeof(struct rte_bbdev_enc_op);
                break;
        default:
                break;
        }

        return result;
}

/* Initialise a bbdev_op structure */
static void
bbdev_op_init(struct rte_mempool *mempool, void *arg, void *element,
                __rte_unused unsigned int n)
{
        enum rte_bbdev_op_type type = *(enum rte_bbdev_op_type *)arg;

        if (type == RTE_BBDEV_OP_TURBO_DEC) {
                struct rte_bbdev_dec_op *op = element;
                memset(op, 0, mempool->elt_size);
                op->mempool = mempool;
        } else if (type == RTE_BBDEV_OP_TURBO_ENC) {
                struct rte_bbdev_enc_op *op = element;
                memset(op, 0, mempool->elt_size);
                op->mempool = mempool;
        }
}

struct rte_mempool * __rte_experimental
rte_bbdev_op_pool_create(const char *name, enum rte_bbdev_op_type type,
                unsigned int num_elements, unsigned int cache_size,
                int socket_id)
{
        struct rte_bbdev_op_pool_private *priv;
        struct rte_mempool *mp;
        const char *op_type_str;

        if (name == NULL) {
                rte_bbdev_log(ERR, "NULL name for op pool");
                return NULL;
        }

        if (type >= RTE_BBDEV_OP_TYPE_COUNT) {
                rte_bbdev_log(ERR,
                                "Invalid op type (%u), should be less than %u",
                                type, RTE_BBDEV_OP_TYPE_COUNT);
                return NULL;
        }

        mp = rte_mempool_create(name, num_elements, get_bbdev_op_size(type),
                        cache_size, sizeof(struct rte_bbdev_op_pool_private),
                        NULL, NULL, bbdev_op_init, &type, socket_id, 0);
        if (mp == NULL) {
                rte_bbdev_log(ERR,
                                "Failed to create op pool %s (num ops=%u, op size=%u) with error: %s",
                                name, num_elements, get_bbdev_op_size(type),
                                rte_strerror(rte_errno));
                return NULL;
        }

        op_type_str = rte_bbdev_op_type_str(type);
        if (op_type_str == NULL)
                return NULL;

        rte_bbdev_log_debug(
                        "Op pool %s created for %u ops (type=%s, cache=%u, socket=%u, size=%u)",
                        name, num_elements, op_type_str, cache_size, socket_id,
                        get_bbdev_op_size(type));

        priv = (struct rte_bbdev_op_pool_private *)rte_mempool_get_priv(mp);
        priv->type = type;

        return mp;
}
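
/*
 * Usage sketch (illustrative only, not part of the library): creating an op
 * pool and drawing an operation from it with the generic mempool API. The
 * pool name and sizing values are arbitrary example choices.
 *
 *      struct rte_mempool *pool = rte_bbdev_op_pool_create("dec_pool",
 *                      RTE_BBDEV_OP_TURBO_DEC, 4096, 128, rte_socket_id());
 *      struct rte_bbdev_dec_op *op;
 *
 *      if (pool != NULL && rte_mempool_get(pool, (void **)&op) == 0) {
 *              ... fill and enqueue op, then return it when done ...
 *              rte_mempool_put(pool, op);
 *      }
 */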

int __rte_experimental
rte_bbdev_callback_register(uint16_t dev_id, enum rte_bbdev_event_type event,
                rte_bbdev_cb_fn cb_fn, void *cb_arg)
{
        struct rte_bbdev_callback *user_cb;
        struct rte_bbdev *dev = get_dev(dev_id);
        VALID_DEV_OR_RET_ERR(dev, dev_id);

        if (event >= RTE_BBDEV_EVENT_MAX) {
                rte_bbdev_log(ERR,
                                "Invalid event type (%u), should be less than %u",
                                event, RTE_BBDEV_EVENT_MAX);
                return -EINVAL;
        }

        if (cb_fn == NULL) {
                rte_bbdev_log(ERR, "NULL callback function");
                return -EINVAL;
        }

        rte_spinlock_lock(&rte_bbdev_cb_lock);

        TAILQ_FOREACH(user_cb, &(dev->list_cbs), next) {
                if (user_cb->cb_fn == cb_fn &&
                                user_cb->cb_arg == cb_arg &&
                                user_cb->event == event)
                        break;
        }

        /* create a new callback. */
        if (user_cb == NULL) {
                user_cb = rte_zmalloc("INTR_USER_CALLBACK",
                                sizeof(struct rte_bbdev_callback), 0);
                if (user_cb != NULL) {
                        user_cb->cb_fn = cb_fn;
                        user_cb->cb_arg = cb_arg;
                        user_cb->event = event;
                        TAILQ_INSERT_TAIL(&(dev->list_cbs), user_cb, next);
                }
        }

        rte_spinlock_unlock(&rte_bbdev_cb_lock);
        return (user_cb == NULL) ? -ENOMEM : 0;
}
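
/*
 * Usage sketch (illustrative only, not part of the library): an application
 * callback matching the rte_bbdev_cb_fn signature invoked by
 * rte_bbdev_pmd_callback_process() below. Registering for
 * RTE_BBDEV_EVENT_ERROR is just an example; any event below
 * RTE_BBDEV_EVENT_MAX may be used.
 *
 *      static void
 *      error_cb(uint16_t dev_id, enum rte_bbdev_event_type event,
 *                      void *cb_arg, void *ret_param)
 *      {
 *              printf("event %d on bbdev %u\n", event, dev_id);
 *      }
 *
 *      rte_bbdev_callback_register(dev_id, RTE_BBDEV_EVENT_ERROR,
 *                      error_cb, NULL);
 */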

int __rte_experimental
rte_bbdev_callback_unregister(uint16_t dev_id, enum rte_bbdev_event_type event,
                rte_bbdev_cb_fn cb_fn, void *cb_arg)
{
        int ret = 0;
        struct rte_bbdev_callback *cb, *next;
        struct rte_bbdev *dev = get_dev(dev_id);
        VALID_DEV_OR_RET_ERR(dev, dev_id);

        if (event >= RTE_BBDEV_EVENT_MAX) {
                rte_bbdev_log(ERR,
                                "Invalid event type (%u), should be less than %u",
                                event, RTE_BBDEV_EVENT_MAX);
                return -EINVAL;
        }

        if (cb_fn == NULL) {
                rte_bbdev_log(ERR,
                                "NULL callback function cannot be unregistered");
                return -EINVAL;
        }

        dev = &rte_bbdev_devices[dev_id];
        rte_spinlock_lock(&rte_bbdev_cb_lock);

        for (cb = TAILQ_FIRST(&dev->list_cbs); cb != NULL; cb = next) {

                next = TAILQ_NEXT(cb, next);

                if (cb->cb_fn != cb_fn || cb->event != event ||
                                (cb_arg != (void *)-1 && cb->cb_arg != cb_arg))
                        continue;

                /* If this callback is not executing right now, remove it. */
                if (cb->active == 0) {
                        TAILQ_REMOVE(&(dev->list_cbs), cb, next);
                        rte_free(cb);
                } else
                        ret = -EAGAIN;
        }

        rte_spinlock_unlock(&rte_bbdev_cb_lock);
        return ret;
}

void __rte_experimental
rte_bbdev_pmd_callback_process(struct rte_bbdev *dev,
        enum rte_bbdev_event_type event, void *ret_param)
{
        struct rte_bbdev_callback *cb_lst;
        struct rte_bbdev_callback dev_cb;

        if (dev == NULL) {
                rte_bbdev_log(ERR, "NULL device");
                return;
        }

        if (dev->data == NULL) {
                rte_bbdev_log(ERR, "NULL data structure");
                return;
        }

        if (event >= RTE_BBDEV_EVENT_MAX) {
                rte_bbdev_log(ERR,
                                "Invalid event type (%u), should be less than %u",
                                event, RTE_BBDEV_EVENT_MAX);
                return;
        }

        rte_spinlock_lock(&rte_bbdev_cb_lock);
        TAILQ_FOREACH(cb_lst, &(dev->list_cbs), next) {
                if (cb_lst->cb_fn == NULL || cb_lst->event != event)
                        continue;
                dev_cb = *cb_lst;
                cb_lst->active = 1;
                if (ret_param != NULL)
                        dev_cb.ret_param = ret_param;

                rte_spinlock_unlock(&rte_bbdev_cb_lock);
                dev_cb.cb_fn(dev->data->dev_id, dev_cb.event,
                                dev_cb.cb_arg, dev_cb.ret_param);
                rte_spinlock_lock(&rte_bbdev_cb_lock);
                cb_lst->active = 0;
        }
        rte_spinlock_unlock(&rte_bbdev_cb_lock);
}
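
/*
 * Usage sketch (illustrative only, not part of the library): how a PMD's
 * interrupt handler might hand an event to the callbacks registered above.
 * The handler name and registration mechanism are hypothetical.
 *
 *      static void
 *      my_pmd_irq_handler(void *arg)
 *      {
 *              struct rte_bbdev *dev = arg;
 *
 *              rte_bbdev_pmd_callback_process(dev, RTE_BBDEV_EVENT_ERROR,
 *                              NULL);
 *      }
 */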

int __rte_experimental
rte_bbdev_queue_intr_enable(uint16_t dev_id, uint16_t queue_id)
{
        struct rte_bbdev *dev = get_dev(dev_id);
        VALID_DEV_OR_RET_ERR(dev, dev_id);
        VALID_QUEUE_OR_RET_ERR(queue_id, dev);
        VALID_DEV_OPS_OR_RET_ERR(dev, dev_id);
        VALID_FUNC_OR_RET_ERR(dev->dev_ops->queue_intr_enable, dev_id);
        return dev->dev_ops->queue_intr_enable(dev, queue_id);
}

int __rte_experimental
rte_bbdev_queue_intr_disable(uint16_t dev_id, uint16_t queue_id)
{
        struct rte_bbdev *dev = get_dev(dev_id);
        VALID_DEV_OR_RET_ERR(dev, dev_id);
        VALID_QUEUE_OR_RET_ERR(queue_id, dev);
        VALID_DEV_OPS_OR_RET_ERR(dev, dev_id);
        VALID_FUNC_OR_RET_ERR(dev->dev_ops->queue_intr_disable, dev_id);
        return dev->dev_ops->queue_intr_disable(dev, queue_id);
}

int __rte_experimental
rte_bbdev_queue_intr_ctl(uint16_t dev_id, uint16_t queue_id, int epfd, int op,
                void *data)
{
        uint32_t vec;
        struct rte_bbdev *dev = get_dev(dev_id);
        struct rte_intr_handle *intr_handle;
        int ret;

        VALID_DEV_OR_RET_ERR(dev, dev_id);
        VALID_QUEUE_OR_RET_ERR(queue_id, dev);

        intr_handle = dev->intr_handle;
        if (!intr_handle || !intr_handle->intr_vec) {
                rte_bbdev_log(ERR, "Device %u intr handle unset\n", dev_id);
                return -ENOTSUP;
        }

        if (queue_id >= RTE_MAX_RXTX_INTR_VEC_ID) {
                rte_bbdev_log(ERR, "Device %u queue_id %u is too big\n",
                                dev_id, queue_id);
                return -ENOTSUP;
        }

        vec = intr_handle->intr_vec[queue_id];
        ret = rte_intr_rx_ctl(intr_handle, epfd, op, vec, data);
        if (ret && (ret != -EEXIST)) {
                rte_bbdev_log(ERR,
                                "dev %u q %u int ctl error op %d epfd %d vec %u\n",
                                dev_id, queue_id, op, epfd, vec);
                return ret;
        }

        return 0;
}
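
/*
 * Usage sketch (illustrative only, not part of the library): arming a queue
 * interrupt on the calling thread's epoll instance and blocking on it. The
 * use of RTE_EPOLL_PER_THREAD, RTE_INTR_EVENT_ADD and rte_epoll_wait()
 * mirrors the ethdev Rx interrupt pattern and is an assumption here.
 *
 *      struct rte_epoll_event ev;
 *      int ret;
 *
 *      ret = rte_bbdev_queue_intr_ctl(dev_id, queue_id,
 *                      RTE_EPOLL_PER_THREAD, RTE_INTR_EVENT_ADD, NULL);
 *      if (ret == 0)
 *              rte_epoll_wait(RTE_EPOLL_PER_THREAD, &ev, 1, -1);
 */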


const char * __rte_experimental
rte_bbdev_op_type_str(enum rte_bbdev_op_type op_type)
{
        static const char * const op_types[] = {
                "RTE_BBDEV_OP_NONE",
                "RTE_BBDEV_OP_TURBO_DEC",
                "RTE_BBDEV_OP_TURBO_ENC",
        };

        if (op_type < RTE_BBDEV_OP_TYPE_COUNT)
                return op_types[op_type];

        rte_bbdev_log(ERR, "Invalid operation type");
        return NULL;
}


int bbdev_logtype;

RTE_INIT(rte_bbdev_init_log);
static void
rte_bbdev_init_log(void)
{
        bbdev_logtype = rte_log_register("lib.bbdev");
        if (bbdev_logtype >= 0)
                rte_log_set_level(bbdev_logtype, RTE_LOG_NOTICE);
}