eventdev/crypto: store operations in circular buffer
[dpdk.git] / lib / eventdev / rte_event_crypto_adapter.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2018 Intel Corporation.
3  * All rights reserved.
4  */
5
6 #include <string.h>
7 #include <stdbool.h>
8 #include <rte_common.h>
9 #include <rte_dev.h>
10 #include <rte_errno.h>
11 #include <rte_cryptodev.h>
12 #include <cryptodev_pmd.h>
13 #include <rte_log.h>
14 #include <rte_malloc.h>
15 #include <rte_service_component.h>
16
17 #include "rte_eventdev.h"
18 #include "eventdev_pmd.h"
19 #include "eventdev_trace.h"
20 #include "rte_event_crypto_adapter.h"
21
22 #define BATCH_SIZE 32
23 #define DEFAULT_MAX_NB 128
24 #define CRYPTO_ADAPTER_NAME_LEN 32
25 #define CRYPTO_ADAPTER_MEM_NAME_LEN 32
26 #define CRYPTO_ADAPTER_MAX_EV_ENQ_RETRIES 100
27
28 #define CRYPTO_ADAPTER_OPS_BUFFER_SZ (BATCH_SIZE + BATCH_SIZE)
29 #define CRYPTO_ADAPTER_BUFFER_SZ 1024
30
31 /* Flush an instance's enqueue buffers every CRYPTO_ENQ_FLUSH_THRESHOLD
32  * iterations of eca_crypto_adapter_enq_run()
33  */
34 #define CRYPTO_ENQ_FLUSH_THRESHOLD 1024
35
36 struct crypto_ops_circular_buffer {
37         /* index of head element in circular buffer */
38         uint16_t head;
39         /* index of tail element in circular buffer */
40         uint16_t tail;
41         /* number of elements in buffer */
42         uint16_t count;
43         /* size of circular buffer */
44         uint16_t size;
45         /* Pointer to hold rte_crypto_ops for batching */
46         struct rte_crypto_op **op_buffer;
47 } __rte_cache_aligned;
48
49 struct event_crypto_adapter {
50         /* Event device identifier */
51         uint8_t eventdev_id;
52         /* Event port identifier */
53         uint8_t event_port_id;
54         /* Store event device's implicit release capability */
55         uint8_t implicit_release_disabled;
56         /* Flag to indicate backpressure at cryptodev
57          * Stop further dequeuing events from eventdev
58          */
59         bool stop_enq_to_cryptodev;
60         /* Max crypto ops processed in any service function invocation */
61         uint32_t max_nb;
62         /* Lock to serialize config updates with service function */
63         rte_spinlock_t lock;
64         /* Next crypto device to be processed */
65         uint16_t next_cdev_id;
66         /* Per crypto device structure */
67         struct crypto_device_info *cdevs;
68         /* Loop counter to flush crypto ops */
69         uint16_t transmit_loop_count;
70         /* Circular buffer for batching crypto ops to eventdev */
71         struct crypto_ops_circular_buffer ebuf;
72         /* Per instance stats structure */
73         struct rte_event_crypto_adapter_stats crypto_stats;
74         /* Configuration callback for rte_service configuration */
75         rte_event_crypto_adapter_conf_cb conf_cb;
76         /* Configuration callback argument */
77         void *conf_arg;
78         /* Set if the default conf callback is being used */
79         int default_cb_arg;
80         /* Service initialization state */
81         uint8_t service_inited;
82         /* Memory allocation name */
83         char mem_name[CRYPTO_ADAPTER_MEM_NAME_LEN];
84         /* Socket identifier cached from eventdev */
85         int socket_id;
86         /* Per adapter EAL service */
87         uint32_t service_id;
88         /* No. of queue pairs configured */
89         uint16_t nb_qps;
90         /* Adapter mode */
91         enum rte_event_crypto_adapter_mode mode;
92 } __rte_cache_aligned;
93
94 /* Per crypto device information */
95 struct crypto_device_info {
96         /* Pointer to cryptodev */
97         struct rte_cryptodev *dev;
98         /* Pointer to queue pair info */
99         struct crypto_queue_pair_info *qpairs;
100         /* Next queue pair to be processed */
101         uint16_t next_queue_pair_id;
102         /* Set to indicate cryptodev->eventdev packet
103          * transfer uses a hardware mechanism
104          */
105         uint8_t internal_event_port;
106         /* Set to indicate processing has been started */
107         uint8_t dev_started;
108         /* Number of queue pairs added to the adapter; the start
109          * callback is invoked only when num_qpairs > 0
110          */
111         uint16_t num_qpairs;
112 } __rte_cache_aligned;
113
114 /* Per queue pair information */
115 struct crypto_queue_pair_info {
116         /* Set to indicate queue pair is enabled */
117         bool qp_enabled;
118         /* Circular buffer for batching crypto ops to cdev */
119         struct crypto_ops_circular_buffer cbuf;
120 } __rte_cache_aligned;
121
122 static struct event_crypto_adapter **event_crypto_adapter;
123
124 /* Macros to check for valid adapter */
125 #define EVENT_CRYPTO_ADAPTER_ID_VALID_OR_ERR_RET(id, retval) do { \
126         if (!eca_valid_id(id)) { \
127                 RTE_EDEV_LOG_ERR("Invalid crypto adapter id = %d", id); \
128                 return retval; \
129         } \
130 } while (0)
131
132 static inline int
133 eca_valid_id(uint8_t id)
134 {
135         return id < RTE_EVENT_CRYPTO_ADAPTER_MAX_INSTANCE;
136 }
137
138 static int
139 eca_init(void)
140 {
141         const char *name = "crypto_adapter_array";
142         const struct rte_memzone *mz;
143         unsigned int sz;
144
145         sz = sizeof(*event_crypto_adapter) *
146             RTE_EVENT_CRYPTO_ADAPTER_MAX_INSTANCE;
147         sz = RTE_ALIGN(sz, RTE_CACHE_LINE_SIZE);
148
149         mz = rte_memzone_lookup(name);
150         if (mz == NULL) {
151                 mz = rte_memzone_reserve_aligned(name, sz, rte_socket_id(), 0,
152                                                  RTE_CACHE_LINE_SIZE);
153                 if (mz == NULL) {
154                         RTE_EDEV_LOG_ERR("failed to reserve memzone err = %"
155                                         PRId32, rte_errno);
156                         return -rte_errno;
157                 }
158         }
159
160         event_crypto_adapter = mz->addr;
161         return 0;
162 }
163
164 static inline bool
165 eca_circular_buffer_batch_ready(struct crypto_ops_circular_buffer *bufp)
166 {
167         return bufp->count >= BATCH_SIZE;
168 }
169
170 static inline bool
171 eca_circular_buffer_space_for_batch(struct crypto_ops_circular_buffer *bufp)
172 {
173         return (bufp->size - bufp->count) >= BATCH_SIZE;
174 }
175
176 static inline void
177 eca_circular_buffer_free(struct crypto_ops_circular_buffer *bufp)
178 {
179         rte_free(bufp->op_buffer);
180 }
181
182 static inline int
183 eca_circular_buffer_init(const char *name,
184                          struct crypto_ops_circular_buffer *bufp,
185                          uint16_t sz)
186 {
187         bufp->op_buffer = rte_zmalloc(name,
188                                       sizeof(struct rte_crypto_op *) * sz,
189                                       0);
190         if (bufp->op_buffer == NULL)
191                 return -ENOMEM;
192
193         bufp->size = sz;
194         return 0;
195 }
196
197 static inline int
198 eca_circular_buffer_add(struct crypto_ops_circular_buffer *bufp,
199                         struct rte_crypto_op *op)
200 {
201         uint16_t *tailp = &bufp->tail;
202
203         bufp->op_buffer[*tailp] = op;
204         /* circular buffer, go round */
205         *tailp = (*tailp + 1) % bufp->size;
206         bufp->count++;
207
208         return 0;
209 }
210
211 static inline int
212 eca_circular_buffer_flush_to_cdev(struct crypto_ops_circular_buffer *bufp,
213                                   uint8_t cdev_id, uint16_t qp_id,
214                                   uint16_t *nb_ops_flushed)
215 {
216         uint16_t n = 0;
217         uint16_t *headp = &bufp->head;
218         uint16_t *tailp = &bufp->tail;
219         struct rte_crypto_op **ops = bufp->op_buffer;
220
221         if (*tailp > *headp)
222                 n = *tailp - *headp;
223         else if (*tailp < *headp)
224                 n = bufp->size - *headp;
225         else {
226                 *nb_ops_flushed = 0;
227                 return 0;  /* buffer empty */
228         }
229
230         *nb_ops_flushed = rte_cryptodev_enqueue_burst(cdev_id, qp_id,
231                                                       &ops[*headp], n);
232         bufp->count -= *nb_ops_flushed;
233         if (!bufp->count) {
234                 *headp = 0;
235                 *tailp = 0;
236         } else
237                 *headp = (*headp + *nb_ops_flushed) % bufp->size;
238
239         return *nb_ops_flushed == n ? 0 : -1;
240 }
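/* A worked example of the wrap-around handling above (illustrative only):
 * with size = 8, head = 6, tail = 2 and count = 4, the valid ops occupy
 * indices 6, 7, 0 and 1. Since tail < head, a single call flushes only the
 * contiguous run at the end of the array, n = size - head = 2 (indices 6
 * and 7); the remaining ops at indices 0 and 1 satisfy tail > head on the
 * next call and are flushed then. A call therefore never passes a wrapped,
 * non-contiguous range to rte_cryptodev_enqueue_burst(), at the cost of
 * needing up to two calls to drain a wrapped buffer. Note also that
 * eca_circular_buffer_add() never checks for free space itself; callers
 * are expected to gate on eca_circular_buffer_space_for_batch() or on the
 * buffer's count before adding.
 */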
241
242 static inline struct event_crypto_adapter *
243 eca_id_to_adapter(uint8_t id)
244 {
245         return event_crypto_adapter ?
246                 event_crypto_adapter[id] : NULL;
247 }
248
249 static int
250 eca_default_config_cb(uint8_t id, uint8_t dev_id,
251                         struct rte_event_crypto_adapter_conf *conf, void *arg)
252 {
253         struct rte_event_dev_config dev_conf;
254         struct rte_eventdev *dev;
255         uint8_t port_id;
256         int started;
257         int ret;
258         struct rte_event_port_conf *port_conf = arg;
259         struct event_crypto_adapter *adapter = eca_id_to_adapter(id);
260
261         if (adapter == NULL)
262                 return -EINVAL;
263
264         dev = &rte_eventdevs[adapter->eventdev_id];
265         dev_conf = dev->data->dev_conf;
266
267         started = dev->data->dev_started;
268         if (started)
269                 rte_event_dev_stop(dev_id);
270         port_id = dev_conf.nb_event_ports;
271         dev_conf.nb_event_ports += 1;
272         ret = rte_event_dev_configure(dev_id, &dev_conf);
273         if (ret) {
274                 RTE_EDEV_LOG_ERR("failed to configure event dev %u", dev_id);
275                 if (started) {
276                         if (rte_event_dev_start(dev_id))
277                                 return -EIO;
278                 }
279                 return ret;
280         }
281
282         ret = rte_event_port_setup(dev_id, port_id, port_conf);
283         if (ret) {
284                 RTE_EDEV_LOG_ERR("failed to setup event port %u", port_id);
285                 return ret;
286         }
287
288         conf->event_port_id = port_id;
289         conf->max_nb = DEFAULT_MAX_NB;
290         if (started)
291                 ret = rte_event_dev_start(dev_id);
292
293         adapter->default_cb_arg = 1;
294         return ret;
295 }
296
297 int
298 rte_event_crypto_adapter_create_ext(uint8_t id, uint8_t dev_id,
299                                 rte_event_crypto_adapter_conf_cb conf_cb,
300                                 enum rte_event_crypto_adapter_mode mode,
301                                 void *conf_arg)
302 {
303         struct event_crypto_adapter *adapter;
304         char mem_name[CRYPTO_ADAPTER_NAME_LEN];
305         struct rte_event_dev_info dev_info;
306         int socket_id;
307         uint8_t i;
308         int ret;
309
310         EVENT_CRYPTO_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);
311         RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
312         if (conf_cb == NULL)
313                 return -EINVAL;
314
315         if (event_crypto_adapter == NULL) {
316                 ret = eca_init();
317                 if (ret)
318                         return ret;
319         }
320
321         adapter = eca_id_to_adapter(id);
322         if (adapter != NULL) {
323                 RTE_EDEV_LOG_ERR("Crypto adapter id %u already exists!", id);
324                 return -EEXIST;
325         }
326
327         socket_id = rte_event_dev_socket_id(dev_id);
328         snprintf(mem_name, sizeof(mem_name),
329                  "rte_event_crypto_adapter_%d", id);
330
331         adapter = rte_zmalloc_socket(mem_name, sizeof(*adapter),
332                         RTE_CACHE_LINE_SIZE, socket_id);
333         if (adapter == NULL) {
334                 RTE_EDEV_LOG_ERR("Failed to get mem for event crypto adapter!");
335                 return -ENOMEM;
336         }
337
338         if (eca_circular_buffer_init("eca_edev_circular_buffer",
339                                      &adapter->ebuf,
340                                      CRYPTO_ADAPTER_BUFFER_SZ)) {
341                 RTE_EDEV_LOG_ERR("Failed to get memory for eventdev buffer");
342                 rte_free(adapter);
343                 return -ENOMEM;
344         }
345
346         ret = rte_event_dev_info_get(dev_id, &dev_info);
347         if (ret < 0) {
348                 RTE_EDEV_LOG_ERR("Failed to get info for eventdev %d!",
349                                  dev_id);
350                 eca_circular_buffer_free(&adapter->ebuf);
351                 rte_free(adapter);
352                 return ret;
353         }
354
355         adapter->implicit_release_disabled = (dev_info.event_dev_cap &
356                         RTE_EVENT_DEV_CAP_IMPLICIT_RELEASE_DISABLE);
357         adapter->eventdev_id = dev_id;
358         adapter->socket_id = socket_id;
359         adapter->conf_cb = conf_cb;
360         adapter->conf_arg = conf_arg;
361         adapter->mode = mode;
362         strcpy(adapter->mem_name, mem_name);
363         adapter->cdevs = rte_zmalloc_socket(adapter->mem_name,
364                                         rte_cryptodev_count() *
365                                         sizeof(struct crypto_device_info), 0,
366                                         socket_id);
367         if (adapter->cdevs == NULL) {
368                 RTE_EDEV_LOG_ERR("Failed to get mem for crypto devices");
369                 eca_circular_buffer_free(&adapter->ebuf);
370                 rte_free(adapter);
371                 return -ENOMEM;
372         }
373
374         rte_spinlock_init(&adapter->lock);
375         for (i = 0; i < rte_cryptodev_count(); i++)
376                 adapter->cdevs[i].dev = rte_cryptodev_pmd_get_dev(i);
377
378         event_crypto_adapter[id] = adapter;
379
380         rte_eventdev_trace_crypto_adapter_create(id, dev_id, adapter, conf_arg,
381                 mode);
382         return 0;
383 }
384
385
386 int
387 rte_event_crypto_adapter_create(uint8_t id, uint8_t dev_id,
388                                 struct rte_event_port_conf *port_config,
389                                 enum rte_event_crypto_adapter_mode mode)
390 {
391         struct rte_event_port_conf *pc;
392         int ret;
393
394         if (port_config == NULL)
395                 return -EINVAL;
396         EVENT_CRYPTO_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);
397
398         pc = rte_malloc(NULL, sizeof(*pc), 0);
399         if (pc == NULL)
400                 return -ENOMEM;
401         *pc = *port_config;
402         ret = rte_event_crypto_adapter_create_ext(id, dev_id,
403                                                   eca_default_config_cb,
404                                                   mode,
405                                                   pc);
406         if (ret)
407                 rte_free(pc);
408
409         return ret;
410 }
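/* A minimal usage sketch for the create API above (illustrative; the event
 * device setup, the port configuration values and the `adapter_id` and
 * `evdev_id` names are application-side assumptions, not part of this
 * file):
 *
 *	struct rte_event_port_conf port_conf = {
 *		.new_event_threshold = 4096,
 *		.dequeue_depth = 32,
 *		.enqueue_depth = 32,
 *	};
 *	uint8_t adapter_id = 0, evdev_id = 0;
 *
 *	if (rte_event_crypto_adapter_create(adapter_id, evdev_id, &port_conf,
 *			RTE_EVENT_CRYPTO_ADAPTER_OP_FORWARD) < 0)
 *		rte_panic("cannot create crypto adapter\n");
 *
 * The port_conf is copied into heap memory and handed to
 * eca_default_config_cb(), which reconfigures the eventdev with one extra
 * event port for the adapter's own use.
 */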
411
412 int
413 rte_event_crypto_adapter_free(uint8_t id)
414 {
415         struct event_crypto_adapter *adapter;
416
417         EVENT_CRYPTO_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);
418
419         adapter = eca_id_to_adapter(id);
420         if (adapter == NULL)
421                 return -EINVAL;
422
423         if (adapter->nb_qps) {
424                 RTE_EDEV_LOG_ERR("%" PRIu16 " queue pairs not deleted",
425                                 adapter->nb_qps);
426                 return -EBUSY;
427         }
428
429         rte_eventdev_trace_crypto_adapter_free(id, adapter);
430         if (adapter->default_cb_arg)
431                 rte_free(adapter->conf_arg);
432         rte_free(adapter->cdevs);
433         rte_free(adapter);
434         event_crypto_adapter[id] = NULL;
435
436         return 0;
437 }
438
439 static inline unsigned int
440 eca_enq_to_cryptodev(struct event_crypto_adapter *adapter, struct rte_event *ev,
441                      unsigned int cnt)
442 {
443         struct rte_event_crypto_adapter_stats *stats = &adapter->crypto_stats;
444         union rte_event_crypto_metadata *m_data = NULL;
445         struct crypto_queue_pair_info *qp_info = NULL;
446         struct rte_crypto_op *crypto_op;
447         unsigned int i, n;
448         uint16_t qp_id, nb_enqueued = 0;
449         uint8_t cdev_id;
450         int ret;
451
452         ret = 0;
453         n = 0;
454         stats->event_deq_count += cnt;
455
456         for (i = 0; i < cnt; i++) {
457                 crypto_op = ev[i].event_ptr;
458                 if (crypto_op == NULL)
459                         continue;
460                 if (crypto_op->sess_type == RTE_CRYPTO_OP_WITH_SESSION) {
461                         m_data = rte_cryptodev_sym_session_get_user_data(
462                                         crypto_op->sym->session);
463                         if (m_data == NULL) {
464                                 rte_pktmbuf_free(crypto_op->sym->m_src);
465                                 rte_crypto_op_free(crypto_op);
466                                 continue;
467                         }
468
469                         cdev_id = m_data->request_info.cdev_id;
470                         qp_id = m_data->request_info.queue_pair_id;
471                         qp_info = &adapter->cdevs[cdev_id].qpairs[qp_id];
472                         if (!qp_info->qp_enabled) {
473                                 rte_pktmbuf_free(crypto_op->sym->m_src);
474                                 rte_crypto_op_free(crypto_op);
475                                 continue;
476                         }
477                         eca_circular_buffer_add(&qp_info->cbuf, crypto_op);
478                 } else if (crypto_op->sess_type == RTE_CRYPTO_OP_SESSIONLESS &&
479                                 crypto_op->private_data_offset) {
480                         m_data = (union rte_event_crypto_metadata *)
481                                  ((uint8_t *)crypto_op +
482                                         crypto_op->private_data_offset);
483                         cdev_id = m_data->request_info.cdev_id;
484                         qp_id = m_data->request_info.queue_pair_id;
485                         qp_info = &adapter->cdevs[cdev_id].qpairs[qp_id];
486                         if (!qp_info->qp_enabled) {
487                                 rte_pktmbuf_free(crypto_op->sym->m_src);
488                                 rte_crypto_op_free(crypto_op);
489                                 continue;
490                         }
491                         eca_circular_buffer_add(&qp_info->cbuf, crypto_op);
492                 } else {
493                         rte_pktmbuf_free(crypto_op->sym->m_src);
494                         rte_crypto_op_free(crypto_op);
495                         continue;
496                 }
497
498                 if (eca_circular_buffer_batch_ready(&qp_info->cbuf)) {
499                         ret = eca_circular_buffer_flush_to_cdev(&qp_info->cbuf,
500                                                                 cdev_id,
501                                                                 qp_id,
502                                                                 &nb_enqueued);
503                         stats->crypto_enq_count += nb_enqueued;
504                         n += nb_enqueued;
505
506                         /**
507                          * If some crypto ops failed to flush to cdev and
508                          * space for another batch is not available, stop
509                          * dequeue from eventdev momentarily
510                          */
511                         if (unlikely(ret < 0 &&
512                                 !eca_circular_buffer_space_for_batch(
513                                                         &qp_info->cbuf)))
514                                 adapter->stop_enq_to_cryptodev = true;
515                 }
516         }
517
518         return n;
519 }
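/* The routing above depends on metadata the application attaches before
 * submitting ops. A hedged sketch of that application-side step for the
 * session case (`sess`, `cdev_id`, `qp_id` and `ev_qid` are illustrative
 * names, not part of this file):
 *
 *	union rte_event_crypto_metadata m_data;
 *
 *	memset(&m_data, 0, sizeof(m_data));
 *	m_data.request_info.cdev_id = cdev_id;
 *	m_data.request_info.queue_pair_id = qp_id;
 *	m_data.response_info.sched_type = RTE_SCHED_TYPE_ATOMIC;
 *	m_data.response_info.queue_id = ev_qid;
 *	rte_cryptodev_sym_session_set_user_data(sess, &m_data,
 *						sizeof(m_data));
 *
 * Ops without such metadata cannot be routed to a queue pair and are freed
 * above rather than enqueued.
 */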
520
521 static unsigned int
522 eca_crypto_cdev_flush(struct event_crypto_adapter *adapter,
523                       uint8_t cdev_id, uint16_t *nb_ops_flushed)
524 {
525         struct crypto_device_info *curr_dev;
526         struct crypto_queue_pair_info *curr_queue;
527         struct rte_cryptodev *dev;
528         uint16_t nb = 0, nb_enqueued = 0;
529         uint16_t qp;
530
531         curr_dev = &adapter->cdevs[cdev_id];
532         if (unlikely(curr_dev->qpairs == NULL))
533                 return 0;
534
535         dev = rte_cryptodev_pmd_get_dev(cdev_id);
536         for (qp = 0; qp < dev->data->nb_queue_pairs; qp++) {
537
538                 curr_queue = &curr_dev->qpairs[qp];
539                 if (unlikely(!curr_queue->qp_enabled))
540                         continue;
541
542                 eca_circular_buffer_flush_to_cdev(&curr_queue->cbuf,
543                                                   cdev_id,
544                                                   qp,
545                                                   &nb_enqueued);
546                 *nb_ops_flushed += curr_queue->cbuf.count; /* ops still queued */
547                 nb += nb_enqueued;
548         }
549
550         return nb;
551 }
552
553 static unsigned int
554 eca_crypto_enq_flush(struct event_crypto_adapter *adapter)
555 {
556         struct rte_event_crypto_adapter_stats *stats = &adapter->crypto_stats;
557         uint8_t cdev_id;
558         uint16_t nb_enqueued = 0;
559         uint16_t nb_ops_flushed = 0;
560         uint16_t num_cdev = rte_cryptodev_count();
561
562         for (cdev_id = 0; cdev_id < num_cdev; cdev_id++)
563                 nb_enqueued += eca_crypto_cdev_flush(adapter,
564                                                     cdev_id,
565                                                     &nb_ops_flushed);
566         /**
567          * Enable dequeue from eventdev if all ops from circular
568          * buffer flushed to cdev
569          */
570         if (!nb_ops_flushed)
571                 adapter->stop_enq_to_cryptodev = false;
572
573         stats->crypto_enq_count += nb_enqueued;
574
575         return nb_enqueued;
576 }
577
578 static int
579 eca_crypto_adapter_enq_run(struct event_crypto_adapter *adapter,
580                            unsigned int max_enq)
581 {
582         struct rte_event_crypto_adapter_stats *stats = &adapter->crypto_stats;
583         struct rte_event ev[BATCH_SIZE];
584         unsigned int nb_enq, nb_enqueued;
585         uint16_t n;
586         uint8_t event_dev_id = adapter->eventdev_id;
587         uint8_t event_port_id = adapter->event_port_id;
588
589         nb_enqueued = 0;
590         if (adapter->mode == RTE_EVENT_CRYPTO_ADAPTER_OP_NEW)
591                 return 0;
592
593         if (unlikely(adapter->stop_enq_to_cryptodev)) {
594                 nb_enqueued += eca_crypto_enq_flush(adapter);
595
596                 if (unlikely(adapter->stop_enq_to_cryptodev))
597                         goto skip_event_dequeue_burst;
598         }
599
600         for (nb_enq = 0; nb_enq < max_enq; nb_enq += n) {
601                 stats->event_poll_count++;
602                 n = rte_event_dequeue_burst(event_dev_id,
603                                             event_port_id, ev, BATCH_SIZE, 0);
604
605                 if (!n)
606                         break;
607
608                 nb_enqueued += eca_enq_to_cryptodev(adapter, ev, n);
609         }
610
611 skip_event_dequeue_burst:
612
613         if ((++adapter->transmit_loop_count &
614                 (CRYPTO_ENQ_FLUSH_THRESHOLD - 1)) == 0) {
615                 nb_enqueued += eca_crypto_enq_flush(adapter);
616         }
617
618         return nb_enqueued;
619 }
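/* The flush above relies on CRYPTO_ENQ_FLUSH_THRESHOLD being a power of
 * two: `(++transmit_loop_count & (THRESHOLD - 1)) == 0` is then equivalent
 * to `transmit_loop_count % THRESHOLD == 0`, so partially filled per-qp
 * buffers are drained once every 1024 service invocations even when no
 * batch ever fills.
 */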
620
621 static inline uint16_t
622 eca_ops_enqueue_burst(struct event_crypto_adapter *adapter,
623                   struct rte_crypto_op **ops, uint16_t num)
624 {
625         struct rte_event_crypto_adapter_stats *stats = &adapter->crypto_stats;
626         union rte_event_crypto_metadata *m_data = NULL;
627         uint8_t event_dev_id = adapter->eventdev_id;
628         uint8_t event_port_id = adapter->event_port_id;
629         struct rte_event events[BATCH_SIZE];
630         uint16_t nb_enqueued, nb_ev;
631         uint8_t retry;
632         uint8_t i;
633
634         nb_ev = 0;
635         retry = 0;
636         nb_enqueued = 0;
637         num = RTE_MIN(num, BATCH_SIZE);
638         for (i = 0; i < num; i++) {
639                 struct rte_event *ev;
640
641                 m_data = NULL;
642                 if (ops[i]->sess_type == RTE_CRYPTO_OP_WITH_SESSION) {
643                         m_data = rte_cryptodev_sym_session_get_user_data(
644                                         ops[i]->sym->session);
645                 } else if (ops[i]->sess_type == RTE_CRYPTO_OP_SESSIONLESS &&
646                                 ops[i]->private_data_offset) {
647                         m_data = (union rte_event_crypto_metadata *)
648                                  ((uint8_t *)ops[i] +
649                                   ops[i]->private_data_offset);
650                 }
651
652                 if (unlikely(m_data == NULL)) {
653                         rte_pktmbuf_free(ops[i]->sym->m_src);
654                         rte_crypto_op_free(ops[i]);
655                         continue;
656                 }
657                 ev = &events[nb_ev++];
658                 rte_memcpy(ev, &m_data->response_info, sizeof(*ev));
659                 ev->event_ptr = ops[i];
660                 ev->event_type = RTE_EVENT_TYPE_CRYPTODEV;
661                 if (adapter->implicit_release_disabled)
662                         ev->op = RTE_EVENT_OP_FORWARD;
663                 else
664                         ev->op = RTE_EVENT_OP_NEW;
665         }
666
667         do {
668                 nb_enqueued += rte_event_enqueue_burst(event_dev_id,
669                                                   event_port_id,
670                                                   &events[nb_enqueued],
671                                                   nb_ev - nb_enqueued);
672
673         } while (retry++ < CRYPTO_ADAPTER_MAX_EV_ENQ_RETRIES &&
674                  nb_enqueued < nb_ev);
675
676         stats->event_enq_fail_count += nb_ev - nb_enqueued;
677         stats->event_enq_count += nb_enqueued;
678         stats->event_enq_retry_count += retry - 1;
679
680         return nb_enqueued;
681 }
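/* Note on the retry loop above: events that still cannot be enqueued after
 * CRYPTO_ADAPTER_MAX_EV_ENQ_RETRIES attempts are only accounted in
 * event_enq_fail_count; the corresponding crypto ops remain owned by the
 * caller, which keeps them buffered (or re-buffers them) for a later
 * attempt rather than freeing them here.
 */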
682
683 static int
684 eca_circular_buffer_flush_to_evdev(struct event_crypto_adapter *adapter,
685                                    struct crypto_ops_circular_buffer *bufp)
686 {
687         uint16_t n = 0, nb_ops_flushed;
688         uint16_t *headp = &bufp->head;
689         uint16_t *tailp = &bufp->tail;
690         struct rte_crypto_op **ops = bufp->op_buffer;
691
692         if (*tailp > *headp)
693                 n = *tailp - *headp;
694         else if (*tailp < *headp)
695                 n = bufp->size - *headp;
696         else
697                 return 0;  /* buffer empty */
698
699         nb_ops_flushed = eca_ops_enqueue_burst(adapter, ops, n);
700         bufp->count -= nb_ops_flushed;
701         if (!bufp->count) {
702                 *headp = 0;
703                 *tailp = 0;
704                 return 0;  /* buffer empty */
705         }
706
707         *headp = (*headp + nb_ops_flushed) % bufp->size;
708         return 1;
709 }
710
711
712 static void
713 eca_ops_buffer_flush(struct event_crypto_adapter *adapter)
714 {
715         if (likely(adapter->ebuf.count == 0))
716                 return;
717
718         while (eca_circular_buffer_flush_to_evdev(adapter,
719                                                   &adapter->ebuf))
720                 ;
721 }
722 static inline unsigned int
723 eca_crypto_adapter_deq_run(struct event_crypto_adapter *adapter,
724                            unsigned int max_deq)
725 {
726         struct rte_event_crypto_adapter_stats *stats = &adapter->crypto_stats;
727         struct crypto_device_info *curr_dev;
728         struct crypto_queue_pair_info *curr_queue;
729         struct rte_crypto_op *ops[BATCH_SIZE];
730         uint16_t n, nb_deq, nb_enqueued, i;
731         struct rte_cryptodev *dev;
732         uint8_t cdev_id;
733         uint16_t qp, dev_qps;
734         bool done;
735         uint16_t num_cdev = rte_cryptodev_count();
736
737         nb_deq = 0;
738         eca_ops_buffer_flush(adapter);
739
740         do {
741                 done = true;
742
743                 for (cdev_id = adapter->next_cdev_id;
744                         cdev_id < num_cdev; cdev_id++) {
745                         uint16_t queues = 0;
746
747                         curr_dev = &adapter->cdevs[cdev_id];
748                         dev = curr_dev->dev;
749                         if (unlikely(dev == NULL))
750                                 continue;
751
752                         dev_qps = dev->data->nb_queue_pairs;
753
754                         for (qp = curr_dev->next_queue_pair_id;
755                                 queues < dev_qps; qp = (qp + 1) % dev_qps,
756                                 queues++) {
757
758                                 curr_queue = &curr_dev->qpairs[qp];
759                                 if (unlikely(curr_dev->qpairs == NULL ||
760                                     !curr_queue->qp_enabled))
761                                         continue;
762
763                                 n = rte_cryptodev_dequeue_burst(cdev_id, qp,
764                                         ops, BATCH_SIZE);
765                                 if (!n)
766                                         continue;
767
768                                 done = false;
769                                 nb_enqueued = 0;
770
771                                 stats->crypto_deq_count += n;
772
773                                 if (unlikely(!adapter->ebuf.count))
774                                         nb_enqueued = eca_ops_enqueue_burst(
775                                                         adapter, ops, n);
776
777                                 if (likely(nb_enqueued == n))
778                                         goto check;
779
780                                 /* Failed to enqueue events case */
781                                 for (i = nb_enqueued; i < n; i++)
782                                         eca_circular_buffer_add(
783                                                 &adapter->ebuf,
784                                                 ops[i]);
785
786 check:
787                                 nb_deq += n;
788
789                                 if (nb_deq >= max_deq) {
790                                         if ((qp + 1) == dev_qps) {
791                                                 adapter->next_cdev_id =
792                                                         (cdev_id + 1)
793                                                         % num_cdev;
794                                         }
795                                         curr_dev->next_queue_pair_id = (qp + 1)
796                                                 % dev->data->nb_queue_pairs;
797
798                                         return nb_deq;
799                                 }
800                         }
801                 }
802                 adapter->next_cdev_id = 0;
803         } while (done == false);
804         return nb_deq;
805 }
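/* Dequeue above resumes round-robin from adapter->next_cdev_id and each
 * device's next_queue_pair_id, so exhausting the max_deq budget on one
 * invocation does not starve later queue pairs on the next one. Ops that
 * cannot be forwarded to the eventdev immediately are parked in
 * adapter->ebuf and drained first on the next run; a direct enqueue is
 * attempted only while ebuf is empty, so ordering between the buffered and
 * direct paths is preserved.
 */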
806
807 static void
808 eca_crypto_adapter_run(struct event_crypto_adapter *adapter,
809                        unsigned int max_ops)
810 {
811         unsigned int ops_left = max_ops;
812
813         while (ops_left > 0) {
814                 unsigned int e_cnt, d_cnt;
815
816                 e_cnt = eca_crypto_adapter_deq_run(adapter, ops_left);
817                 ops_left -= RTE_MIN(ops_left, e_cnt);
818
819                 d_cnt = eca_crypto_adapter_enq_run(adapter, ops_left);
820                 ops_left -= RTE_MIN(ops_left, d_cnt);
821
822                 if (e_cnt == 0 && d_cnt == 0)
823                         break;
824
825         }
826
827         if (ops_left == max_ops)
828                 rte_event_maintain(adapter->eventdev_id,
829                                    adapter->event_port_id, 0);
830 }
831
832 static int
833 eca_service_func(void *args)
834 {
835         struct event_crypto_adapter *adapter = args;
836
837         if (rte_spinlock_trylock(&adapter->lock) == 0)
838                 return 0;
839         eca_crypto_adapter_run(adapter, adapter->max_nb);
840         rte_spinlock_unlock(&adapter->lock);
841
842         return 0;
843 }
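/* The trylock above keeps a service lcore from blocking behind a
 * control-path thread that holds the lock inside queue pair add/del; a
 * skipped iteration costs nothing and is simply retried on the next
 * service invocation.
 */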
844
845 static int
846 eca_init_service(struct event_crypto_adapter *adapter, uint8_t id)
847 {
848         struct rte_event_crypto_adapter_conf adapter_conf;
849         struct rte_service_spec service;
850         int ret;
851
852         if (adapter->service_inited)
853                 return 0;
854
855         memset(&service, 0, sizeof(service));
856         snprintf(service.name, CRYPTO_ADAPTER_NAME_LEN,
857                 "rte_event_crypto_adapter_%d", id);
858         service.socket_id = adapter->socket_id;
859         service.callback = eca_service_func;
860         service.callback_userdata = adapter;
861         /* Service function handles locking for queue add/del updates */
862         service.capabilities = RTE_SERVICE_CAP_MT_SAFE;
863         ret = rte_service_component_register(&service, &adapter->service_id);
864         if (ret) {
865                 RTE_EDEV_LOG_ERR("failed to register service %s err = %" PRId32,
866                         service.name, ret);
867                 return ret;
868         }
869
870         ret = adapter->conf_cb(id, adapter->eventdev_id,
871                 &adapter_conf, adapter->conf_arg);
872         if (ret) {
873                 RTE_EDEV_LOG_ERR("configuration callback failed err = %" PRId32,
874                         ret);
875                 return ret;
876         }
877
878         adapter->max_nb = adapter_conf.max_nb;
879         adapter->event_port_id = adapter_conf.event_port_id;
880         adapter->service_inited = 1;
881
882         return ret;
883 }
884
885 static void
886 eca_update_qp_info(struct event_crypto_adapter *adapter,
887                    struct crypto_device_info *dev_info, int32_t queue_pair_id,
888                    uint8_t add)
889 {
890         struct crypto_queue_pair_info *qp_info;
891         int enabled;
892         uint16_t i;
893
894         if (dev_info->qpairs == NULL)
895                 return;
896
897         if (queue_pair_id == -1) {
898                 for (i = 0; i < dev_info->dev->data->nb_queue_pairs; i++)
899                         eca_update_qp_info(adapter, dev_info, i, add);
900         } else {
901                 qp_info = &dev_info->qpairs[queue_pair_id];
902                 enabled = qp_info->qp_enabled;
903                 if (add) {
904                         adapter->nb_qps += !enabled;
905                         dev_info->num_qpairs += !enabled;
906                 } else {
907                         adapter->nb_qps -= enabled;
908                         dev_info->num_qpairs -= enabled;
909                 }
910                 qp_info->qp_enabled = !!add;
911         }
912 }
913
914 static int
915 eca_add_queue_pair(struct event_crypto_adapter *adapter, uint8_t cdev_id,
916                    int queue_pair_id)
917 {
918         struct crypto_device_info *dev_info = &adapter->cdevs[cdev_id];
919         struct crypto_queue_pair_info *qpairs;
920         uint32_t i;
921
922         if (dev_info->qpairs == NULL) {
923                 dev_info->qpairs =
924                     rte_zmalloc_socket(adapter->mem_name,
925                                         dev_info->dev->data->nb_queue_pairs *
926                                         sizeof(struct crypto_queue_pair_info),
927                                         0, adapter->socket_id);
928                 if (dev_info->qpairs == NULL)
929                         return -ENOMEM;
930
931                 qpairs = dev_info->qpairs;
932
933                 for (i = 0; i < dev_info->dev->data->nb_queue_pairs; i++)
934                         if (eca_circular_buffer_init("eca_cdev_circular_buffer",
935                                         &qpairs[i].cbuf,
936                                         CRYPTO_ADAPTER_OPS_BUFFER_SZ)) {
937                                 RTE_EDEV_LOG_ERR("Failed to get memory for cryptodev buffer");
938                                 rte_free(qpairs);
939                                 return -ENOMEM;
940                         }
941         }
942
943         if (queue_pair_id == -1) {
944                 for (i = 0; i < dev_info->dev->data->nb_queue_pairs; i++)
945                         eca_update_qp_info(adapter, dev_info, i, 1);
946         } else
947                 eca_update_qp_info(adapter, dev_info,
948                                         (uint16_t)queue_pair_id, 1);
949
950         return 0;
951 }
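/* Sizing note: each per-qp buffer holds CRYPTO_ADAPTER_OPS_BUFFER_SZ
 * (2 * BATCH_SIZE) ops, so one full batch can sit unflushed after a
 * partial enqueue to the cryptodev while a second batch accumulates;
 * eca_circular_buffer_space_for_batch() gates further dequeue from the
 * eventdev on exactly this headroom.
 */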
952
953 int
954 rte_event_crypto_adapter_queue_pair_add(uint8_t id,
955                         uint8_t cdev_id,
956                         int32_t queue_pair_id,
957                         const struct rte_event *event)
958 {
959         struct event_crypto_adapter *adapter;
960         struct rte_eventdev *dev;
961         struct crypto_device_info *dev_info;
962         uint32_t cap;
963         int ret;
964
965         EVENT_CRYPTO_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);
966
967         if (!rte_cryptodev_is_valid_dev(cdev_id)) {
968                 RTE_EDEV_LOG_ERR("Invalid dev_id=%" PRIu8, cdev_id);
969                 return -EINVAL;
970         }
971
972         adapter = eca_id_to_adapter(id);
973         if (adapter == NULL)
974                 return -EINVAL;
975
976         dev = &rte_eventdevs[adapter->eventdev_id];
977         ret = rte_event_crypto_adapter_caps_get(adapter->eventdev_id,
978                                                 cdev_id,
979                                                 &cap);
980         if (ret) {
981                 RTE_EDEV_LOG_ERR("Failed to get adapter caps dev %" PRIu8
982                         " cdev %" PRIu8, id, cdev_id);
983                 return ret;
984         }
985
986         if ((cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_QP_EV_BIND) &&
987             (event == NULL)) {
988                 RTE_EDEV_LOG_ERR("Conf value can not be NULL for dev_id=%u",
989                                   cdev_id);
990                 return -EINVAL;
991         }
992
993         dev_info = &adapter->cdevs[cdev_id];
994
995         if (queue_pair_id != -1 &&
996             (uint16_t)queue_pair_id >= dev_info->dev->data->nb_queue_pairs) {
997                 RTE_EDEV_LOG_ERR("Invalid queue_pair_id %" PRIu16,
998                                  (uint16_t)queue_pair_id);
999                 return -EINVAL;
1000         }
1001
1002         /* In case HW cap is RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_FWD,
1003          * no service core is needed, as HW supports event forward capability.
1004          */
1005         if ((cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_FWD) ||
1006             (cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_QP_EV_BIND &&
1007              adapter->mode == RTE_EVENT_CRYPTO_ADAPTER_OP_NEW) ||
1008             (cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_NEW &&
1009              adapter->mode == RTE_EVENT_CRYPTO_ADAPTER_OP_NEW)) {
1010                 RTE_FUNC_PTR_OR_ERR_RET(
1011                         *dev->dev_ops->crypto_adapter_queue_pair_add,
1012                         -ENOTSUP);
1013                 if (dev_info->qpairs == NULL) {
1014                         dev_info->qpairs =
1015                             rte_zmalloc_socket(adapter->mem_name,
1016                                         dev_info->dev->data->nb_queue_pairs *
1017                                         sizeof(struct crypto_queue_pair_info),
1018                                         0, adapter->socket_id);
1019                         if (dev_info->qpairs == NULL)
1020                                 return -ENOMEM;
1021                 }
1022
1023                 ret = (*dev->dev_ops->crypto_adapter_queue_pair_add)(dev,
1024                                 dev_info->dev,
1025                                 queue_pair_id,
1026                                 event);
1027                 if (ret)
1028                         return ret;
1029
1030                 eca_update_qp_info(adapter,
1031                                    &adapter->cdevs[cdev_id],
1032                                    queue_pair_id, 1);
1033         }
1034
1035         /* In case HW cap is RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_NEW,
1036          * or SW adapter, initiate services so the application can choose
1037          * whichever way it wants to use the adapter.
1038          * Case 1: RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_NEW
1039          *         Application may want to use one of the below two modes
1040          *          a. OP_FORWARD mode -> HW Dequeue + SW enqueue
1041          *          b. OP_NEW mode -> HW Dequeue
1042          * Case 2: No HW caps, use SW adapter
1043          *          a. OP_FORWARD mode -> SW enqueue & dequeue
1044          *          b. OP_NEW mode -> SW Dequeue
1045          */
1046         if ((cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_NEW &&
1047              !(cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_FWD) &&
1048              adapter->mode == RTE_EVENT_CRYPTO_ADAPTER_OP_FORWARD) ||
1049              (!(cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_NEW) &&
1050               !(cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_FWD) &&
1051               !(cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_QP_EV_BIND) &&
1052                (cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_SESSION_PRIVATE_DATA))) {
1053                 rte_spinlock_lock(&adapter->lock);
1054                 ret = eca_init_service(adapter, id);
1055                 if (ret == 0)
1056                         ret = eca_add_queue_pair(adapter, cdev_id,
1057                                                  queue_pair_id);
1058                 rte_spinlock_unlock(&adapter->lock);
1059
1060                 if (ret)
1061                         return ret;
1062
1063                 rte_service_component_runstate_set(adapter->service_id, 1);
1064         }
1065
1066         rte_eventdev_trace_crypto_adapter_queue_pair_add(id, cdev_id, event,
1067                 queue_pair_id);
1068         return 0;
1069 }
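/* A hedged usage sketch for the add API above (the adapter and cryptodev
 * are assumed to be configured already; `adapter_id` and `cdev_id` are
 * illustrative names):
 *
 *	int ret;
 *
 *	ret = rte_event_crypto_adapter_queue_pair_add(adapter_id, cdev_id,
 *						      -1, NULL);
 *	if (ret == 0)
 *		ret = rte_event_crypto_adapter_start(adapter_id);
 *
 * Passing -1 adds every queue pair of the cryptodev. The `event` argument
 * is mandatory only when the PMD reports
 * RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_QP_EV_BIND.
 */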
1070
1071 int
1072 rte_event_crypto_adapter_queue_pair_del(uint8_t id, uint8_t cdev_id,
1073                                         int32_t queue_pair_id)
1074 {
1075         struct event_crypto_adapter *adapter;
1076         struct crypto_device_info *dev_info;
1077         struct rte_eventdev *dev;
1078         int ret;
1079         uint32_t cap;
1080         uint16_t i;
1081
1082         EVENT_CRYPTO_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);
1083
1084         if (!rte_cryptodev_is_valid_dev(cdev_id)) {
1085                 RTE_EDEV_LOG_ERR("Invalid dev_id=%" PRIu8, cdev_id);
1086                 return -EINVAL;
1087         }
1088
1089         adapter = eca_id_to_adapter(id);
1090         if (adapter == NULL)
1091                 return -EINVAL;
1092
1093         dev = &rte_eventdevs[adapter->eventdev_id];
1094         ret = rte_event_crypto_adapter_caps_get(adapter->eventdev_id,
1095                                                 cdev_id,
1096                                                 &cap);
1097         if (ret)
1098                 return ret;
1099
1100         dev_info = &adapter->cdevs[cdev_id];
1101
1102         if (queue_pair_id != -1 &&
1103             (uint16_t)queue_pair_id >= dev_info->dev->data->nb_queue_pairs) {
1104                 RTE_EDEV_LOG_ERR("Invalid queue_pair_id %" PRIu16,
1105                                  (uint16_t)queue_pair_id);
1106                 return -EINVAL;
1107         }
1108
1109         if ((cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_FWD) ||
1110             (cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_NEW &&
1111              adapter->mode == RTE_EVENT_CRYPTO_ADAPTER_OP_NEW)) {
1112                 RTE_FUNC_PTR_OR_ERR_RET(
1113                         *dev->dev_ops->crypto_adapter_queue_pair_del,
1114                         -ENOTSUP);
1115                 ret = (*dev->dev_ops->crypto_adapter_queue_pair_del)(dev,
1116                                                 dev_info->dev,
1117                                                 queue_pair_id);
1118                 if (ret == 0) {
1119                         eca_update_qp_info(adapter,
1120                                         &adapter->cdevs[cdev_id],
1121                                         queue_pair_id,
1122                                         0);
1123                         if (dev_info->num_qpairs == 0) {
1124                                 rte_free(dev_info->qpairs);
1125                                 dev_info->qpairs = NULL;
1126                         }
1127                 }
1128         } else {
1129                 if (adapter->nb_qps == 0)
1130                         return 0;
1131
1132                 rte_spinlock_lock(&adapter->lock);
1133                 if (queue_pair_id == -1) {
1134                         for (i = 0; i < dev_info->dev->data->nb_queue_pairs;
1135                                 i++)
1136                                 eca_update_qp_info(adapter, dev_info,
1137                                                         i, 0);
1138                 } else {
1139                         eca_update_qp_info(adapter, dev_info,
1140                                                 (uint16_t)queue_pair_id, 0);
1141                 }
1142
1143                 if (dev_info->num_qpairs == 0) {
1144                         rte_free(dev_info->qpairs);
1145                         dev_info->qpairs = NULL;
1146                 }
1147
1148                 rte_spinlock_unlock(&adapter->lock);
1149                 rte_service_component_runstate_set(adapter->service_id,
1150                                 adapter->nb_qps);
1151         }
1152
1153         rte_eventdev_trace_crypto_adapter_queue_pair_del(id, cdev_id,
1154                 queue_pair_id, ret);
1155         return ret;
1156 }
1157
1158 static int
1159 eca_adapter_ctrl(uint8_t id, int start)
1160 {
1161         struct event_crypto_adapter *adapter;
1162         struct crypto_device_info *dev_info;
1163         struct rte_eventdev *dev;
1164         uint32_t i;
1165         int use_service;
1166         int stop = !start;
1167
1168         use_service = 0;
1169         EVENT_CRYPTO_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);
1170         adapter = eca_id_to_adapter(id);
1171         if (adapter == NULL)
1172                 return -EINVAL;
1173
1174         dev = &rte_eventdevs[adapter->eventdev_id];
1175
1176         for (i = 0; i < rte_cryptodev_count(); i++) {
1177                 dev_info = &adapter->cdevs[i];
1178                 /* if start  check for num queue pairs */
1179                 /* if start, check for num queue pairs */
1180                         continue;
1181                 /* if stop check if dev has been started */
1182                 if (stop && !dev_info->dev_started)
1183                         continue;
1184                 use_service |= !dev_info->internal_event_port;
1185                 dev_info->dev_started = start;
1186                 if (dev_info->internal_event_port == 0)
1187                         continue;
1188                 start ? (*dev->dev_ops->crypto_adapter_start)(dev,
1189                                                 dev_info->dev) :
1190                         (*dev->dev_ops->crypto_adapter_stop)(dev,
1191                                                 dev_info->dev);
1192         }
1193
1194         if (use_service)
1195                 rte_service_runstate_set(adapter->service_id, start);
1196
1197         return 0;
1198 }
1199
1200 int
1201 rte_event_crypto_adapter_start(uint8_t id)
1202 {
1203         struct event_crypto_adapter *adapter;
1204
1205         EVENT_CRYPTO_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);
1206         adapter = eca_id_to_adapter(id);
1207         if (adapter == NULL)
1208                 return -EINVAL;
1209
1210         rte_eventdev_trace_crypto_adapter_start(id, adapter);
1211         return eca_adapter_ctrl(id, 1);
1212 }
1213
1214 int
1215 rte_event_crypto_adapter_stop(uint8_t id)
1216 {
1217         rte_eventdev_trace_crypto_adapter_stop(id);
1218         return eca_adapter_ctrl(id, 0);
1219 }
1220
1221 int
1222 rte_event_crypto_adapter_stats_get(uint8_t id,
1223                                 struct rte_event_crypto_adapter_stats *stats)
1224 {
1225         struct event_crypto_adapter *adapter;
1226         struct rte_event_crypto_adapter_stats dev_stats_sum = { 0 };
1227         struct rte_event_crypto_adapter_stats dev_stats;
1228         struct rte_eventdev *dev;
1229         struct crypto_device_info *dev_info;
1230         uint32_t i;
1231         int ret;
1232
1233         EVENT_CRYPTO_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);
1234
1235         adapter = eca_id_to_adapter(id);
1236         if (adapter == NULL || stats == NULL)
1237                 return -EINVAL;
1238
1239         dev = &rte_eventdevs[adapter->eventdev_id];
1240         memset(stats, 0, sizeof(*stats));
1241         for (i = 0; i < rte_cryptodev_count(); i++) {
1242                 dev_info = &adapter->cdevs[i];
1243                 if (dev_info->internal_event_port == 0 ||
1244                         dev->dev_ops->crypto_adapter_stats_get == NULL)
1245                         continue;
1246                 ret = (*dev->dev_ops->crypto_adapter_stats_get)(dev,
1247                                                 dev_info->dev,
1248                                                 &dev_stats);
1249                 if (ret)
1250                         continue;
1251
1252                 dev_stats_sum.crypto_deq_count += dev_stats.crypto_deq_count;
1253                 dev_stats_sum.event_enq_count +=
1254                         dev_stats.event_enq_count;
1255         }
1256
1257         if (adapter->service_inited)
1258                 *stats = adapter->crypto_stats;
1259
1260         stats->crypto_deq_count += dev_stats_sum.crypto_deq_count;
1261         stats->event_enq_count += dev_stats_sum.event_enq_count;
1262
1263         return 0;
1264 }
1265
1266 int
1267 rte_event_crypto_adapter_stats_reset(uint8_t id)
1268 {
1269         struct event_crypto_adapter *adapter;
1270         struct crypto_device_info *dev_info;
1271         struct rte_eventdev *dev;
1272         uint32_t i;
1273
1274         EVENT_CRYPTO_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);
1275
1276         adapter = eca_id_to_adapter(id);
1277         if (adapter == NULL)
1278                 return -EINVAL;
1279
1280         dev = &rte_eventdevs[adapter->eventdev_id];
1281         for (i = 0; i < rte_cryptodev_count(); i++) {
1282                 dev_info = &adapter->cdevs[i];
1283                 if (dev_info->internal_event_port == 0 ||
1284                         dev->dev_ops->crypto_adapter_stats_reset == NULL)
1285                         continue;
1286                 (*dev->dev_ops->crypto_adapter_stats_reset)(dev,
1287                                                 dev_info->dev);
1288         }
1289
1290         memset(&adapter->crypto_stats, 0, sizeof(adapter->crypto_stats));
1291         return 0;
1292 }
1293
1294 int
1295 rte_event_crypto_adapter_service_id_get(uint8_t id, uint32_t *service_id)
1296 {
1297         struct event_crypto_adapter *adapter;
1298
1299         EVENT_CRYPTO_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);
1300
1301         adapter = eca_id_to_adapter(id);
1302         if (adapter == NULL || service_id == NULL)
1303                 return -EINVAL;
1304
1305         if (adapter->service_inited)
1306                 *service_id = adapter->service_id;
1307
1308         return adapter->service_inited ? 0 : -ESRCH;
1309 }
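/* For the software adapter the service returned above must still be mapped
 * to a service lcore by the application. A minimal sketch (lcore 1 is an
 * arbitrary choice for illustration):
 *
 *	uint32_t service_id;
 *
 *	if (rte_event_crypto_adapter_service_id_get(adapter_id,
 *						    &service_id) == 0) {
 *		rte_service_lcore_add(1);
 *		rte_service_map_lcore_set(service_id, 1, 1);
 *		rte_service_lcore_start(1);
 *	}
 *
 * rte_event_crypto_adapter_start() toggles the service runstate itself, so
 * only the lcore mapping is left to the application.
 */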
1310
1311 int
1312 rte_event_crypto_adapter_event_port_get(uint8_t id, uint8_t *event_port_id)
1313 {
1314         struct event_crypto_adapter *adapter;
1315
1316         EVENT_CRYPTO_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);
1317
1318         adapter = eca_id_to_adapter(id);
1319         if (adapter == NULL || event_port_id == NULL)
1320                 return -EINVAL;
1321
1322         *event_port_id = adapter->event_port_id;
1323
1324         return 0;
1325 }