lib/librte_eventdev/rte_event_crypto_adapter.c
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2018 Intel Corporation.
 * All rights reserved.
 */

#include <string.h>
#include <stdbool.h>
#include <rte_common.h>
#include <rte_dev.h>
#include <rte_errno.h>
#include <rte_cryptodev.h>
#include <rte_cryptodev_pmd.h>
#include <rte_log.h>
#include <rte_malloc.h>
#include <rte_service_component.h>

#include "rte_eventdev.h"
#include "rte_eventdev_pmd.h"
#include "rte_eventdev_trace.h"
#include "rte_event_crypto_adapter.h"

#define BATCH_SIZE 32
#define DEFAULT_MAX_NB 128
#define CRYPTO_ADAPTER_NAME_LEN 32
#define CRYPTO_ADAPTER_MEM_NAME_LEN 32
#define CRYPTO_ADAPTER_MAX_EV_ENQ_RETRIES 100

/* Flush an instance's enqueue buffers every CRYPTO_ENQ_FLUSH_THRESHOLD
 * iterations of eca_crypto_adapter_enq_run()
 */
#define CRYPTO_ENQ_FLUSH_THRESHOLD 1024

struct rte_event_crypto_adapter {
        /* Event device identifier */
        uint8_t eventdev_id;
        /* Event port identifier */
        uint8_t event_port_id;
        /* Store event device's implicit release capability */
        uint8_t implicit_release_disabled;
        /* Max crypto ops processed in any service function invocation */
        uint32_t max_nb;
        /* Lock to serialize config updates with service function */
        rte_spinlock_t lock;
        /* Next crypto device to be processed */
        uint16_t next_cdev_id;
        /* Per crypto device structure */
        struct crypto_device_info *cdevs;
        /* Loop counter to flush crypto ops */
        uint16_t transmit_loop_count;
        /* Per instance stats structure */
        struct rte_event_crypto_adapter_stats crypto_stats;
        /* Configuration callback for rte_service configuration */
        rte_event_crypto_adapter_conf_cb conf_cb;
        /* Configuration callback argument */
        void *conf_arg;
        /* Set if default_cb is being used */
        int default_cb_arg;
        /* Service initialization state */
        uint8_t service_inited;
        /* Memory allocation name */
        char mem_name[CRYPTO_ADAPTER_MEM_NAME_LEN];
        /* Socket identifier cached from eventdev */
        int socket_id;
        /* Per adapter EAL service */
        uint32_t service_id;
        /* No. of queue pairs configured */
        uint16_t nb_qps;
        /* Adapter mode */
        enum rte_event_crypto_adapter_mode mode;
} __rte_cache_aligned;

/* Per crypto device information */
struct crypto_device_info {
        /* Pointer to cryptodev */
        struct rte_cryptodev *dev;
        /* Pointer to queue pair info */
        struct crypto_queue_pair_info *qpairs;
        /* Next queue pair to be processed */
        uint16_t next_queue_pair_id;
        /* Set to indicate cryptodev->eventdev packet
         * transfer uses a hardware mechanism
         */
        uint8_t internal_event_port;
        /* Set to indicate processing has been started */
        uint8_t dev_started;
        /* Number of queue pairs added to the adapter; while this is
         * non-zero, the start callback is invoked if not already invoked
         */
        uint16_t num_qpairs;
} __rte_cache_aligned;

/* Per queue pair information */
struct crypto_queue_pair_info {
        /* Set to indicate queue pair is enabled */
        bool qp_enabled;
        /* Pointer to hold rte_crypto_ops for batching */
        struct rte_crypto_op **op_buffer;
        /* Number of crypto ops accumulated */
        uint8_t len;
} __rte_cache_aligned;

static struct rte_event_crypto_adapter **event_crypto_adapter;

/* Macro to check for a valid adapter id */
#define EVENT_CRYPTO_ADAPTER_ID_VALID_OR_ERR_RET(id, retval) do { \
        if (!eca_valid_id(id)) { \
                RTE_EDEV_LOG_ERR("Invalid crypto adapter id = %d", id); \
                return retval; \
        } \
} while (0)

static inline int
eca_valid_id(uint8_t id)
{
        return id < RTE_EVENT_CRYPTO_ADAPTER_MAX_INSTANCE;
}

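/* Allocate, or look up if it already exists, the memzone that holds the
 * array of adapter instance pointers.
 */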
static int
eca_init(void)
{
        const char *name = "crypto_adapter_array";
        const struct rte_memzone *mz;
        unsigned int sz;

        sz = sizeof(*event_crypto_adapter) *
            RTE_EVENT_CRYPTO_ADAPTER_MAX_INSTANCE;
        sz = RTE_ALIGN(sz, RTE_CACHE_LINE_SIZE);

        mz = rte_memzone_lookup(name);
        if (mz == NULL) {
                mz = rte_memzone_reserve_aligned(name, sz, rte_socket_id(), 0,
                                                 RTE_CACHE_LINE_SIZE);
                if (mz == NULL) {
                        RTE_EDEV_LOG_ERR("failed to reserve memzone err = %"
                                        PRId32, rte_errno);
                        return -rte_errno;
                }
        }

        event_crypto_adapter = mz->addr;
        return 0;
}

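/* Return the adapter instance for the given id; NULL if the adapter
 * array has not been initialized or no such instance exists.
 */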
static inline struct rte_event_crypto_adapter *
eca_id_to_adapter(uint8_t id)
{
        return event_crypto_adapter ?
                event_crypto_adapter[id] : NULL;
}

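/* Default adapter configuration callback: stop the event device if it is
 * running, grow its port count by one, set up the extra port for the
 * adapter and restart the device.
 */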
static int
eca_default_config_cb(uint8_t id, uint8_t dev_id,
                        struct rte_event_crypto_adapter_conf *conf, void *arg)
{
        struct rte_event_dev_config dev_conf;
        struct rte_eventdev *dev;
        uint8_t port_id;
        int started;
        int ret;
        struct rte_event_port_conf *port_conf = arg;
        struct rte_event_crypto_adapter *adapter = eca_id_to_adapter(id);

        if (adapter == NULL)
                return -EINVAL;

        dev = &rte_eventdevs[adapter->eventdev_id];
        dev_conf = dev->data->dev_conf;

        started = dev->data->dev_started;
        if (started)
                rte_event_dev_stop(dev_id);
        port_id = dev_conf.nb_event_ports;
        dev_conf.nb_event_ports += 1;
        ret = rte_event_dev_configure(dev_id, &dev_conf);
        if (ret) {
                RTE_EDEV_LOG_ERR("failed to configure event dev %u", dev_id);
                if (started) {
                        if (rte_event_dev_start(dev_id))
                                return -EIO;
                }
                return ret;
        }

        ret = rte_event_port_setup(dev_id, port_id, port_conf);
        if (ret) {
                RTE_EDEV_LOG_ERR("failed to setup event port %u", port_id);
                if (started) {
                        if (rte_event_dev_start(dev_id))
                                return -EIO;
                }
                return ret;
        }

        conf->event_port_id = port_id;
        conf->max_nb = DEFAULT_MAX_NB;
        if (started)
                ret = rte_event_dev_start(dev_id);

        adapter->default_cb_arg = 1;
        return ret;
}

int
rte_event_crypto_adapter_create_ext(uint8_t id, uint8_t dev_id,
                                rte_event_crypto_adapter_conf_cb conf_cb,
                                enum rte_event_crypto_adapter_mode mode,
                                void *conf_arg)
{
        struct rte_event_crypto_adapter *adapter;
        char mem_name[CRYPTO_ADAPTER_NAME_LEN];
        struct rte_event_dev_info dev_info;
        int socket_id;
        uint8_t i;
        int ret;

        EVENT_CRYPTO_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);
        RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
        if (conf_cb == NULL)
                return -EINVAL;

        if (event_crypto_adapter == NULL) {
                ret = eca_init();
                if (ret)
                        return ret;
        }

        adapter = eca_id_to_adapter(id);
        if (adapter != NULL) {
                RTE_EDEV_LOG_ERR("Crypto adapter id %u already exists!", id);
                return -EEXIST;
        }

        socket_id = rte_event_dev_socket_id(dev_id);
        snprintf(mem_name, sizeof(mem_name),
                 "rte_event_crypto_adapter_%d", id);

        adapter = rte_zmalloc_socket(mem_name, sizeof(*adapter),
                        RTE_CACHE_LINE_SIZE, socket_id);
        if (adapter == NULL) {
                RTE_EDEV_LOG_ERR("Failed to get mem for event crypto adapter!");
                return -ENOMEM;
        }

        ret = rte_event_dev_info_get(dev_id, &dev_info);
        if (ret < 0) {
                RTE_EDEV_LOG_ERR("Failed to get info for eventdev %d: %s!",
                                 dev_id, dev_info.driver_name);
                rte_free(adapter);
                return ret;
        }

        adapter->implicit_release_disabled = (dev_info.event_dev_cap &
                        RTE_EVENT_DEV_CAP_IMPLICIT_RELEASE_DISABLE);
        adapter->eventdev_id = dev_id;
        adapter->socket_id = socket_id;
        adapter->conf_cb = conf_cb;
        adapter->conf_arg = conf_arg;
        adapter->mode = mode;
        strcpy(adapter->mem_name, mem_name);
        adapter->cdevs = rte_zmalloc_socket(adapter->mem_name,
                                        rte_cryptodev_count() *
                                        sizeof(struct crypto_device_info), 0,
                                        socket_id);
        if (adapter->cdevs == NULL) {
                RTE_EDEV_LOG_ERR("Failed to get mem for crypto devices");
                rte_free(adapter);
                return -ENOMEM;
        }

        rte_spinlock_init(&adapter->lock);
        for (i = 0; i < rte_cryptodev_count(); i++)
                adapter->cdevs[i].dev = rte_cryptodev_pmd_get_dev(i);

        event_crypto_adapter[id] = adapter;

        rte_eventdev_trace_crypto_adapter_create(id, dev_id, adapter, conf_arg,
                mode);
        return 0;
}

int
rte_event_crypto_adapter_create(uint8_t id, uint8_t dev_id,
                                struct rte_event_port_conf *port_config,
                                enum rte_event_crypto_adapter_mode mode)
{
        struct rte_event_port_conf *pc;
        int ret;

        if (port_config == NULL)
                return -EINVAL;
        EVENT_CRYPTO_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);

        pc = rte_malloc(NULL, sizeof(*pc), 0);
        if (pc == NULL)
                return -ENOMEM;
        *pc = *port_config;
        ret = rte_event_crypto_adapter_create_ext(id, dev_id,
                                                  eca_default_config_cb,
                                                  mode,
                                                  pc);
        if (ret)
                rte_free(pc);

        return ret;
}

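/*
 * A minimal usage sketch for rte_event_crypto_adapter_create() (application
 * code; "evdev_id" and the port_conf values are placeholders, not
 * recommendations):
 *
 *      struct rte_event_port_conf port_conf = {
 *              .new_event_threshold = 4096,
 *              .dequeue_depth = 32,
 *              .enqueue_depth = 32,
 *      };
 *      if (rte_event_crypto_adapter_create(0, evdev_id, &port_conf,
 *                      RTE_EVENT_CRYPTO_ADAPTER_OP_NEW) < 0)
 *              rte_exit(EXIT_FAILURE, "adapter create failed");
 */
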
int
rte_event_crypto_adapter_free(uint8_t id)
{
        struct rte_event_crypto_adapter *adapter;

        EVENT_CRYPTO_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);

        adapter = eca_id_to_adapter(id);
        if (adapter == NULL)
                return -EINVAL;

        if (adapter->nb_qps) {
                RTE_EDEV_LOG_ERR("%" PRIu16 " queue pairs not deleted",
                                adapter->nb_qps);
                return -EBUSY;
        }

        rte_eventdev_trace_crypto_adapter_free(id, adapter);
        if (adapter->default_cb_arg)
                rte_free(adapter->conf_arg);
        rte_free(adapter->cdevs);
        rte_free(adapter);
        event_crypto_adapter[id] = NULL;

        return 0;
}

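/* Transfer a burst of events to the cryptodevs. Each event carries an
 * rte_crypto_op whose request metadata (session user data for session ops,
 * the private data area for sessionless ops) selects the target cryptodev
 * and queue pair. Ops are buffered per queue pair and enqueued in bursts
 * of BATCH_SIZE. Returns the number of ops enqueued to the cryptodevs.
 */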
static inline unsigned int
eca_enq_to_cryptodev(struct rte_event_crypto_adapter *adapter,
                 struct rte_event *ev, unsigned int cnt)
{
        struct rte_event_crypto_adapter_stats *stats = &adapter->crypto_stats;
        union rte_event_crypto_metadata *m_data = NULL;
        struct crypto_queue_pair_info *qp_info = NULL;
        struct rte_crypto_op *crypto_op;
        unsigned int i, n;
        uint16_t qp_id, len, ret;
        uint8_t cdev_id;

        len = 0;
        n = 0;
        stats->event_deq_count += cnt;

        for (i = 0; i < cnt; i++) {
                crypto_op = ev[i].event_ptr;
                if (crypto_op == NULL)
                        continue;
                if (crypto_op->sess_type == RTE_CRYPTO_OP_WITH_SESSION) {
                        m_data = rte_cryptodev_sym_session_get_user_data(
                                        crypto_op->sym->session);
                        if (m_data == NULL) {
                                rte_pktmbuf_free(crypto_op->sym->m_src);
                                rte_crypto_op_free(crypto_op);
                                continue;
                        }
                } else if (crypto_op->sess_type == RTE_CRYPTO_OP_SESSIONLESS &&
                                crypto_op->private_data_offset) {
                        m_data = (union rte_event_crypto_metadata *)
                                 ((uint8_t *)crypto_op +
                                        crypto_op->private_data_offset);
                } else {
                        rte_pktmbuf_free(crypto_op->sym->m_src);
                        rte_crypto_op_free(crypto_op);
                        continue;
                }

                cdev_id = m_data->request_info.cdev_id;
                qp_id = m_data->request_info.queue_pair_id;
                qp_info = &adapter->cdevs[cdev_id].qpairs[qp_id];
                if (!qp_info->qp_enabled) {
                        rte_pktmbuf_free(crypto_op->sym->m_src);
                        rte_crypto_op_free(crypto_op);
                        continue;
                }
                len = qp_info->len;
                qp_info->op_buffer[len] = crypto_op;
                len++;

                if (len == BATCH_SIZE) {
                        struct rte_crypto_op **op_buffer = qp_info->op_buffer;
                        ret = rte_cryptodev_enqueue_burst(cdev_id,
                                                          qp_id,
                                                          op_buffer,
                                                          BATCH_SIZE);

                        stats->crypto_enq_count += ret;
                        /* Count only the ops accepted in this burst; the
                         * remainder are freed below as enqueue failures.
                         */
                        n += ret;

                        while (ret < len) {
                                struct rte_crypto_op *op;
                                op = op_buffer[ret++];
                                stats->crypto_enq_fail++;
                                rte_pktmbuf_free(op->sym->m_src);
                                rte_crypto_op_free(op);
                        }

                        len = 0;
                }

                qp_info->len = len;
        }

        return n;
}

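/* Flush any ops still buffered for the enabled queue pairs of all
 * cryptodevs. Ops the cryptodev does not accept are freed and counted as
 * enqueue failures.
 */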
static unsigned int
eca_crypto_enq_flush(struct rte_event_crypto_adapter *adapter)
{
        struct rte_event_crypto_adapter_stats *stats = &adapter->crypto_stats;
        struct crypto_device_info *curr_dev;
        struct crypto_queue_pair_info *curr_queue;
        struct rte_crypto_op **op_buffer;
        struct rte_cryptodev *dev;
        uint8_t cdev_id;
        uint16_t qp;
        uint16_t ret;
        uint16_t nb_enqueued = 0;
        uint16_t num_cdev = rte_cryptodev_count();

        for (cdev_id = 0; cdev_id < num_cdev; cdev_id++) {
                curr_dev = &adapter->cdevs[cdev_id];
                dev = curr_dev->dev;
                if (dev == NULL || curr_dev->qpairs == NULL)
                        continue;
                for (qp = 0; qp < dev->data->nb_queue_pairs; qp++) {

                        curr_queue = &curr_dev->qpairs[qp];
                        if (!curr_queue->qp_enabled)
                                continue;

                        op_buffer = curr_queue->op_buffer;
                        ret = rte_cryptodev_enqueue_burst(cdev_id,
                                                          qp,
                                                          op_buffer,
                                                          curr_queue->len);
                        stats->crypto_enq_count += ret;
                        nb_enqueued += ret;

                        while (ret < curr_queue->len) {
                                struct rte_crypto_op *op;
                                op = op_buffer[ret++];
                                stats->crypto_enq_fail++;
                                rte_pktmbuf_free(op->sym->m_src);
                                rte_crypto_op_free(op);
                        }
                        curr_queue->len = 0;
                }
        }

        return nb_enqueued;
}

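/* Enqueue-side service loop for OP_FORWARD mode: dequeue events from the
 * adapter's event port and pass them to the cryptodevs, flushing the op
 * buffers every CRYPTO_ENQ_FLUSH_THRESHOLD iterations.
 */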
static int
eca_crypto_adapter_enq_run(struct rte_event_crypto_adapter *adapter,
                        unsigned int max_enq)
{
        struct rte_event_crypto_adapter_stats *stats = &adapter->crypto_stats;
        struct rte_event ev[BATCH_SIZE];
        unsigned int nb_enq, nb_enqueued;
        uint16_t n;
        uint8_t event_dev_id = adapter->eventdev_id;
        uint8_t event_port_id = adapter->event_port_id;

        nb_enqueued = 0;
        if (adapter->mode == RTE_EVENT_CRYPTO_ADAPTER_OP_NEW)
                return 0;

        for (nb_enq = 0; nb_enq < max_enq; nb_enq += n) {
                stats->event_poll_count++;
                n = rte_event_dequeue_burst(event_dev_id,
                                            event_port_id, ev, BATCH_SIZE, 0);

                if (!n)
                        break;

                nb_enqueued += eca_enq_to_cryptodev(adapter, ev, n);
        }

        if ((++adapter->transmit_loop_count &
                (CRYPTO_ENQ_FLUSH_THRESHOLD - 1)) == 0) {
                nb_enqueued += eca_crypto_enq_flush(adapter);
        }

        return nb_enqueued;
}

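/* Convert completed crypto ops back into events, using the response
 * metadata stored with each op, and enqueue them to the event device,
 * retrying a bounded number of times. Ops without metadata and events
 * that cannot be enqueued are freed.
 */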
static inline void
eca_ops_enqueue_burst(struct rte_event_crypto_adapter *adapter,
                  struct rte_crypto_op **ops, uint16_t num)
{
        struct rte_event_crypto_adapter_stats *stats = &adapter->crypto_stats;
        union rte_event_crypto_metadata *m_data = NULL;
        uint8_t event_dev_id = adapter->eventdev_id;
        uint8_t event_port_id = adapter->event_port_id;
        struct rte_event events[BATCH_SIZE];
        uint16_t nb_enqueued, nb_ev;
        uint8_t retry;
        uint8_t i;

        nb_ev = 0;
        retry = 0;
        nb_enqueued = 0;
        num = RTE_MIN(num, BATCH_SIZE);
        for (i = 0; i < num; i++) {
                struct rte_event *ev = &events[nb_ev];

                /* Reset for each op so stale metadata from a previous op
                 * is never reused.
                 */
                m_data = NULL;
                if (ops[i]->sess_type == RTE_CRYPTO_OP_WITH_SESSION) {
                        m_data = rte_cryptodev_sym_session_get_user_data(
                                        ops[i]->sym->session);
                } else if (ops[i]->sess_type == RTE_CRYPTO_OP_SESSIONLESS &&
                                ops[i]->private_data_offset) {
                        m_data = (union rte_event_crypto_metadata *)
                                 ((uint8_t *)ops[i] +
                                  ops[i]->private_data_offset);
                }

                if (unlikely(m_data == NULL)) {
                        rte_pktmbuf_free(ops[i]->sym->m_src);
                        rte_crypto_op_free(ops[i]);
                        continue;
                }

                rte_memcpy(ev, &m_data->response_info, sizeof(*ev));
                ev->event_ptr = ops[i];
                ev->event_type = RTE_EVENT_TYPE_CRYPTODEV;
                if (adapter->implicit_release_disabled)
                        ev->op = RTE_EVENT_OP_FORWARD;
                else
                        ev->op = RTE_EVENT_OP_NEW;
                /* Count the event only once it has been fully filled in */
                nb_ev++;
        }

        do {
                nb_enqueued += rte_event_enqueue_burst(event_dev_id,
                                                  event_port_id,
                                                  &events[nb_enqueued],
                                                  nb_ev - nb_enqueued);
        } while (retry++ < CRYPTO_ADAPTER_MAX_EV_ENQ_RETRIES &&
                 nb_enqueued < nb_ev);

        /* Free mbufs and rte_crypto_ops for failed events */
        for (i = nb_enqueued; i < nb_ev; i++) {
                struct rte_crypto_op *op = events[i].event_ptr;
                rte_pktmbuf_free(op->sym->m_src);
                rte_crypto_op_free(op);
        }

        stats->event_enq_fail_count += nb_ev - nb_enqueued;
        stats->event_enq_count += nb_enqueued;
        stats->event_enq_retry_count += retry - 1;
}

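/* Dequeue-side service loop: poll the enabled queue pairs of all
 * cryptodevs round-robin for completed ops and forward them to the event
 * device, resuming from where the previous run left off once max_deq ops
 * have been processed.
 */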
static inline unsigned int
eca_crypto_adapter_deq_run(struct rte_event_crypto_adapter *adapter,
                        unsigned int max_deq)
{
        struct rte_event_crypto_adapter_stats *stats = &adapter->crypto_stats;
        struct crypto_device_info *curr_dev;
        struct crypto_queue_pair_info *curr_queue;
        struct rte_crypto_op *ops[BATCH_SIZE];
        uint16_t n, nb_deq;
        struct rte_cryptodev *dev;
        uint8_t cdev_id;
        uint16_t qp, dev_qps;
        bool done;
        uint16_t num_cdev = rte_cryptodev_count();

        nb_deq = 0;
        do {
                uint16_t queues = 0;
                done = true;

                for (cdev_id = adapter->next_cdev_id;
                        cdev_id < num_cdev; cdev_id++) {
                        curr_dev = &adapter->cdevs[cdev_id];
                        dev = curr_dev->dev;
                        if (dev == NULL || curr_dev->qpairs == NULL)
                                continue;
                        dev_qps = dev->data->nb_queue_pairs;

                        for (qp = curr_dev->next_queue_pair_id;
                                queues < dev_qps; qp = (qp + 1) % dev_qps,
                                queues++) {

                                curr_queue = &curr_dev->qpairs[qp];
                                if (!curr_queue->qp_enabled)
                                        continue;

                                n = rte_cryptodev_dequeue_burst(cdev_id, qp,
                                        ops, BATCH_SIZE);
                                if (!n)
                                        continue;

                                done = false;
                                stats->crypto_deq_count += n;
                                eca_ops_enqueue_burst(adapter, ops, n);
                                nb_deq += n;

                                if (nb_deq > max_deq) {
                                        if ((qp + 1) == dev_qps) {
                                                adapter->next_cdev_id =
                                                        (cdev_id + 1)
                                                        % num_cdev;
                                        }
                                        curr_dev->next_queue_pair_id = (qp + 1)
                                                % dev->data->nb_queue_pairs;

                                        return nb_deq;
                                }
                        }
                }
        } while (done == false);
        return nb_deq;
}

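/* Run the dequeue and enqueue sides alternately until max_ops ops have
 * been processed or neither side makes progress.
 */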
static void
eca_crypto_adapter_run(struct rte_event_crypto_adapter *adapter,
                        unsigned int max_ops)
{
        while (max_ops) {
                unsigned int e_cnt, d_cnt;

                e_cnt = eca_crypto_adapter_deq_run(adapter, max_ops);
                max_ops -= RTE_MIN(max_ops, e_cnt);

                d_cnt = eca_crypto_adapter_enq_run(adapter, max_ops);
                max_ops -= RTE_MIN(max_ops, d_cnt);

                if (e_cnt == 0 && d_cnt == 0)
                        break;
        }
}

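/* Adapter service function; skips the run entirely rather than block if
 * a control-path operation currently holds the adapter lock.
 */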
static int
eca_service_func(void *args)
{
        struct rte_event_crypto_adapter *adapter = args;

        if (rte_spinlock_trylock(&adapter->lock) == 0)
                return 0;
        eca_crypto_adapter_run(adapter, adapter->max_nb);
        rte_spinlock_unlock(&adapter->lock);

        return 0;
}

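/* One-time service setup: register the adapter's service component and
 * invoke the configuration callback to obtain the event port and the
 * max_nb processing quantum.
 */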
static int
eca_init_service(struct rte_event_crypto_adapter *adapter, uint8_t id)
{
        struct rte_event_crypto_adapter_conf adapter_conf;
        struct rte_service_spec service;
        int ret;

        if (adapter->service_inited)
                return 0;

        memset(&service, 0, sizeof(service));
        snprintf(service.name, CRYPTO_ADAPTER_NAME_LEN,
                "rte_event_crypto_adapter_%d", id);
        service.socket_id = adapter->socket_id;
        service.callback = eca_service_func;
        service.callback_userdata = adapter;
        /* Service function handles locking for queue add/del updates */
        service.capabilities = RTE_SERVICE_CAP_MT_SAFE;
        ret = rte_service_component_register(&service, &adapter->service_id);
        if (ret) {
                RTE_EDEV_LOG_ERR("failed to register service %s err = %" PRId32,
                        service.name, ret);
                return ret;
        }

        ret = adapter->conf_cb(id, adapter->eventdev_id,
                &adapter_conf, adapter->conf_arg);
        if (ret) {
                RTE_EDEV_LOG_ERR("configuration callback failed err = %" PRId32,
                        ret);
                return ret;
        }

        adapter->max_nb = adapter_conf.max_nb;
        adapter->event_port_id = adapter_conf.event_port_id;
        adapter->service_inited = 1;

        return ret;
}

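/* Enable or disable one queue pair (or all of them when
 * queue_pair_id == -1), keeping the per-device and per-adapter queue
 * pair counts consistent.
 */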
static void
eca_update_qp_info(struct rte_event_crypto_adapter *adapter,
                        struct crypto_device_info *dev_info,
                        int32_t queue_pair_id,
                        uint8_t add)
{
        struct crypto_queue_pair_info *qp_info;
        int enabled;
        uint16_t i;

        if (dev_info->qpairs == NULL)
                return;

        if (queue_pair_id == -1) {
                for (i = 0; i < dev_info->dev->data->nb_queue_pairs; i++)
                        eca_update_qp_info(adapter, dev_info, i, add);
        } else {
                qp_info = &dev_info->qpairs[queue_pair_id];
                enabled = qp_info->qp_enabled;
                if (add) {
                        adapter->nb_qps += !enabled;
                        dev_info->num_qpairs += !enabled;
                } else {
                        adapter->nb_qps -= enabled;
                        dev_info->num_qpairs -= enabled;
                }
                qp_info->qp_enabled = !!add;
        }
}

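/* Software-path queue pair add: allocate the per-queue-pair state and op
 * buffers on first use, then mark the requested queue pair(s) enabled.
 */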
static int
eca_add_queue_pair(struct rte_event_crypto_adapter *adapter,
                uint8_t cdev_id,
                int queue_pair_id)
{
        struct crypto_device_info *dev_info = &adapter->cdevs[cdev_id];
        struct crypto_queue_pair_info *qpairs;
        uint32_t i;

        if (dev_info->qpairs == NULL) {
                dev_info->qpairs =
                    rte_zmalloc_socket(adapter->mem_name,
                                        dev_info->dev->data->nb_queue_pairs *
                                        sizeof(struct crypto_queue_pair_info),
                                        0, adapter->socket_id);
                if (dev_info->qpairs == NULL)
                        return -ENOMEM;

                /* Allocate a batching buffer for every queue pair */
                for (i = 0; i < dev_info->dev->data->nb_queue_pairs; i++) {
                        qpairs = &dev_info->qpairs[i];
                        qpairs->op_buffer = rte_zmalloc_socket(
                                        adapter->mem_name,
                                        BATCH_SIZE *
                                        sizeof(struct rte_crypto_op *),
                                        0, adapter->socket_id);
                        if (qpairs->op_buffer == NULL) {
                                while (i-- > 0)
                                        rte_free(
                                            dev_info->qpairs[i].op_buffer);
                                rte_free(dev_info->qpairs);
                                dev_info->qpairs = NULL;
                                return -ENOMEM;
                        }
                }
        }

        if (queue_pair_id == -1) {
                for (i = 0; i < dev_info->dev->data->nb_queue_pairs; i++)
                        eca_update_qp_info(adapter, dev_info, i, 1);
        } else
                eca_update_qp_info(adapter, dev_info,
                                        (uint16_t)queue_pair_id, 1);

        return 0;
}

int
rte_event_crypto_adapter_queue_pair_add(uint8_t id,
                        uint8_t cdev_id,
                        int32_t queue_pair_id,
                        const struct rte_event *event)
{
        struct rte_event_crypto_adapter *adapter;
        struct rte_eventdev *dev;
        struct crypto_device_info *dev_info;
        uint32_t cap;
        int ret;

        EVENT_CRYPTO_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);

        if (!rte_cryptodev_pmd_is_valid_dev(cdev_id)) {
                RTE_EDEV_LOG_ERR("Invalid dev_id=%" PRIu8, cdev_id);
                return -EINVAL;
        }

        adapter = eca_id_to_adapter(id);
        if (adapter == NULL)
                return -EINVAL;

        dev = &rte_eventdevs[adapter->eventdev_id];
        ret = rte_event_crypto_adapter_caps_get(adapter->eventdev_id,
                                                cdev_id,
                                                &cap);
        if (ret) {
                RTE_EDEV_LOG_ERR("Failed to get adapter caps dev %" PRIu8
                        " cdev %" PRIu8, id, cdev_id);
                return ret;
        }

        if ((cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_QP_EV_BIND) &&
            (event == NULL)) {
                RTE_EDEV_LOG_ERR("Event cannot be NULL for cdev_id=%u",
                                  cdev_id);
                return -EINVAL;
        }

        dev_info = &adapter->cdevs[cdev_id];

        if (queue_pair_id != -1 &&
            (uint16_t)queue_pair_id >= dev_info->dev->data->nb_queue_pairs) {
                RTE_EDEV_LOG_ERR("Invalid queue_pair_id %" PRIu16,
                                 (uint16_t)queue_pair_id);
                return -EINVAL;
        }

        /* In case HW cap is RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_FWD,
         * no need of a service core as the HW supports event forward
         * capability.
         */
        if ((cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_FWD) ||
            (cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_QP_EV_BIND &&
             adapter->mode == RTE_EVENT_CRYPTO_ADAPTER_OP_NEW) ||
            (cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_NEW &&
             adapter->mode == RTE_EVENT_CRYPTO_ADAPTER_OP_NEW)) {
                RTE_FUNC_PTR_OR_ERR_RET(
                        *dev->dev_ops->crypto_adapter_queue_pair_add,
                        -ENOTSUP);
                if (dev_info->qpairs == NULL) {
                        dev_info->qpairs =
                            rte_zmalloc_socket(adapter->mem_name,
                                        dev_info->dev->data->nb_queue_pairs *
                                        sizeof(struct crypto_queue_pair_info),
                                        0, adapter->socket_id);
                        if (dev_info->qpairs == NULL)
                                return -ENOMEM;
                }

                ret = (*dev->dev_ops->crypto_adapter_queue_pair_add)(dev,
                                dev_info->dev,
                                queue_pair_id,
                                event);
                if (ret)
                        return ret;

                eca_update_qp_info(adapter, &adapter->cdevs[cdev_id],
                                   queue_pair_id, 1);
        }

        /* In case HW cap is RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_NEW,
         * or for a SW adapter, initiate the service so the application can
         * choose whichever way it wants to use the adapter.
         * Case 1: RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_NEW
         *         Application may want to use one of the two modes below:
         *          a. OP_FORWARD mode -> HW dequeue + SW enqueue
         *          b. OP_NEW mode -> HW dequeue
         * Case 2: No HW caps, use SW adapter
         *          a. OP_FORWARD mode -> SW enqueue & dequeue
         *          b. OP_NEW mode -> SW dequeue
         */
        if ((cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_NEW &&
             adapter->mode == RTE_EVENT_CRYPTO_ADAPTER_OP_FORWARD) ||
             (!(cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_NEW) &&
              !(cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_FWD) &&
              !(cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_QP_EV_BIND) &&
               (cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_SESSION_PRIVATE_DATA))) {
                rte_spinlock_lock(&adapter->lock);
                ret = eca_init_service(adapter, id);
                if (ret == 0)
                        ret = eca_add_queue_pair(adapter, cdev_id,
                                                 queue_pair_id);
                rte_spinlock_unlock(&adapter->lock);

                if (ret)
                        return ret;

                rte_service_component_runstate_set(adapter->service_id, 1);
        }

        rte_eventdev_trace_crypto_adapter_queue_pair_add(id, cdev_id, event,
                queue_pair_id);
        return 0;
}

int
rte_event_crypto_adapter_queue_pair_del(uint8_t id, uint8_t cdev_id,
                                        int32_t queue_pair_id)
{
        struct rte_event_crypto_adapter *adapter;
        struct crypto_device_info *dev_info;
        struct rte_eventdev *dev;
        int ret;
        uint32_t cap;
        uint16_t i;

        EVENT_CRYPTO_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);

        if (!rte_cryptodev_pmd_is_valid_dev(cdev_id)) {
                RTE_EDEV_LOG_ERR("Invalid dev_id=%" PRIu8, cdev_id);
                return -EINVAL;
        }

        adapter = eca_id_to_adapter(id);
        if (adapter == NULL)
                return -EINVAL;

        dev = &rte_eventdevs[adapter->eventdev_id];
        ret = rte_event_crypto_adapter_caps_get(adapter->eventdev_id,
                                                cdev_id,
                                                &cap);
        if (ret)
                return ret;

        dev_info = &adapter->cdevs[cdev_id];

        if (queue_pair_id != -1 &&
            (uint16_t)queue_pair_id >= dev_info->dev->data->nb_queue_pairs) {
                RTE_EDEV_LOG_ERR("Invalid queue_pair_id %" PRIu16,
                                 (uint16_t)queue_pair_id);
                return -EINVAL;
        }

        if ((cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_FWD) ||
            (cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_NEW &&
             adapter->mode == RTE_EVENT_CRYPTO_ADAPTER_OP_NEW)) {
                RTE_FUNC_PTR_OR_ERR_RET(
                        *dev->dev_ops->crypto_adapter_queue_pair_del,
                        -ENOTSUP);
                ret = (*dev->dev_ops->crypto_adapter_queue_pair_del)(dev,
                                                dev_info->dev,
                                                queue_pair_id);
                if (ret == 0) {
                        eca_update_qp_info(adapter,
                                        &adapter->cdevs[cdev_id],
                                        queue_pair_id,
                                        0);
                        if (dev_info->num_qpairs == 0) {
                                rte_free(dev_info->qpairs);
                                dev_info->qpairs = NULL;
                        }
                }
        } else {
                if (adapter->nb_qps == 0)
                        return 0;

                rte_spinlock_lock(&adapter->lock);
                if (queue_pair_id == -1) {
                        for (i = 0; i < dev_info->dev->data->nb_queue_pairs;
                                i++)
                                eca_update_qp_info(adapter, dev_info, i, 0);
                } else {
                        eca_update_qp_info(adapter, dev_info,
                                                (uint16_t)queue_pair_id, 0);
                }

                if (dev_info->num_qpairs == 0) {
                        for (i = 0; i < dev_info->dev->data->nb_queue_pairs;
                                i++)
                                rte_free(dev_info->qpairs[i].op_buffer);
                        rte_free(dev_info->qpairs);
                        dev_info->qpairs = NULL;
                }

                rte_spinlock_unlock(&adapter->lock);
                rte_service_component_runstate_set(adapter->service_id,
                                adapter->nb_qps);
        }

        rte_eventdev_trace_crypto_adapter_queue_pair_del(id, cdev_id,
                queue_pair_id, ret);
        return ret;
}

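/* Common start/stop handler: flag the state change on every cryptodev
 * that has queue pairs with the adapter, invoke the eventdev PMD
 * callbacks for devices with an internal event port, and set the service
 * run state for the rest.
 */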
static int
eca_adapter_ctrl(uint8_t id, int start)
{
        struct rte_event_crypto_adapter *adapter;
        struct crypto_device_info *dev_info;
        struct rte_eventdev *dev;
        uint32_t i;
        int use_service;
        int stop = !start;

        use_service = 0;
        EVENT_CRYPTO_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);
        adapter = eca_id_to_adapter(id);
        if (adapter == NULL)
                return -EINVAL;

        dev = &rte_eventdevs[adapter->eventdev_id];

        for (i = 0; i < rte_cryptodev_count(); i++) {
                dev_info = &adapter->cdevs[i];
                /* if start, check for num queue pairs */
                if (start && !dev_info->num_qpairs)
                        continue;
                /* if stop, check if dev has been started */
                if (stop && !dev_info->dev_started)
                        continue;
                use_service |= !dev_info->internal_event_port;
                dev_info->dev_started = start;
                if (dev_info->internal_event_port == 0)
                        continue;
                start ? (*dev->dev_ops->crypto_adapter_start)(dev,
                                                dev_info->dev) :
                        (*dev->dev_ops->crypto_adapter_stop)(dev,
                                                dev_info->dev);
        }

        if (use_service)
                rte_service_runstate_set(adapter->service_id, start);

        return 0;
}

int
rte_event_crypto_adapter_start(uint8_t id)
{
        struct rte_event_crypto_adapter *adapter;

        EVENT_CRYPTO_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);
        adapter = eca_id_to_adapter(id);
        if (adapter == NULL)
                return -EINVAL;

        rte_eventdev_trace_crypto_adapter_start(id, adapter);
        return eca_adapter_ctrl(id, 1);
}

int
rte_event_crypto_adapter_stop(uint8_t id)
{
        rte_eventdev_trace_crypto_adapter_stop(id);
        return eca_adapter_ctrl(id, 0);
}

int
rte_event_crypto_adapter_stats_get(uint8_t id,
                                struct rte_event_crypto_adapter_stats *stats)
{
        struct rte_event_crypto_adapter *adapter;
        struct rte_event_crypto_adapter_stats dev_stats_sum = { 0 };
        struct rte_event_crypto_adapter_stats dev_stats;
        struct rte_eventdev *dev;
        struct crypto_device_info *dev_info;
        uint32_t i;
        int ret;

        EVENT_CRYPTO_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);

        adapter = eca_id_to_adapter(id);
        if (adapter == NULL || stats == NULL)
                return -EINVAL;

        dev = &rte_eventdevs[adapter->eventdev_id];
        memset(stats, 0, sizeof(*stats));
        for (i = 0; i < rte_cryptodev_count(); i++) {
                dev_info = &adapter->cdevs[i];
                if (dev_info->internal_event_port == 0 ||
                        dev->dev_ops->crypto_adapter_stats_get == NULL)
                        continue;
                ret = (*dev->dev_ops->crypto_adapter_stats_get)(dev,
                                                dev_info->dev,
                                                &dev_stats);
                if (ret)
                        continue;

                dev_stats_sum.crypto_deq_count += dev_stats.crypto_deq_count;
                dev_stats_sum.event_enq_count +=
                        dev_stats.event_enq_count;
        }

        if (adapter->service_inited)
                *stats = adapter->crypto_stats;

        stats->crypto_deq_count += dev_stats_sum.crypto_deq_count;
        stats->event_enq_count += dev_stats_sum.event_enq_count;

        return 0;
}

int
rte_event_crypto_adapter_stats_reset(uint8_t id)
{
        struct rte_event_crypto_adapter *adapter;
        struct crypto_device_info *dev_info;
        struct rte_eventdev *dev;
        uint32_t i;

        EVENT_CRYPTO_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);

        adapter = eca_id_to_adapter(id);
        if (adapter == NULL)
                return -EINVAL;

        dev = &rte_eventdevs[adapter->eventdev_id];
        for (i = 0; i < rte_cryptodev_count(); i++) {
                dev_info = &adapter->cdevs[i];
                if (dev_info->internal_event_port == 0 ||
                        dev->dev_ops->crypto_adapter_stats_reset == NULL)
                        continue;
                (*dev->dev_ops->crypto_adapter_stats_reset)(dev,
                                                dev_info->dev);
        }

        memset(&adapter->crypto_stats, 0, sizeof(adapter->crypto_stats));
        return 0;
}

int
rte_event_crypto_adapter_service_id_get(uint8_t id, uint32_t *service_id)
{
        struct rte_event_crypto_adapter *adapter;

        EVENT_CRYPTO_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);

        adapter = eca_id_to_adapter(id);
        if (adapter == NULL || service_id == NULL)
                return -EINVAL;

        if (adapter->service_inited)
                *service_id = adapter->service_id;

        return adapter->service_inited ? 0 : -ESRCH;
}

int
rte_event_crypto_adapter_event_port_get(uint8_t id, uint8_t *event_port_id)
{
        struct rte_event_crypto_adapter *adapter;

        EVENT_CRYPTO_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);

        adapter = eca_id_to_adapter(id);
        if (adapter == NULL || event_port_id == NULL)
                return -EINVAL;

        *event_port_id = adapter->event_port_id;

        return 0;
}