dpdk.git: lib/eventdev/rte_event_crypto_adapter.c
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2018 Intel Corporation.
 * All rights reserved.
 */

#include <string.h>
#include <stdbool.h>
#include <rte_common.h>
#include <rte_dev.h>
#include <rte_errno.h>
#include <rte_cryptodev.h>
#include <cryptodev_pmd.h>
#include <rte_log.h>
#include <rte_malloc.h>
#include <rte_service_component.h>

#include "rte_eventdev.h"
#include "eventdev_pmd.h"
#include "eventdev_trace.h"
#include "rte_event_crypto_adapter.h"

#define BATCH_SIZE 32
#define DEFAULT_MAX_NB 128
#define CRYPTO_ADAPTER_NAME_LEN 32
#define CRYPTO_ADAPTER_MEM_NAME_LEN 32
#define CRYPTO_ADAPTER_MAX_EV_ENQ_RETRIES 100

/* Flush an instance's enqueue buffers every CRYPTO_ENQ_FLUSH_THRESHOLD
 * iterations of eca_crypto_adapter_enq_run()
 */
#define CRYPTO_ENQ_FLUSH_THRESHOLD 1024
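/* The flush check in eca_crypto_adapter_enq_run() masks
 * transmit_loop_count with (CRYPTO_ENQ_FLUSH_THRESHOLD - 1), so this
 * threshold must remain a power of two.
 */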

struct event_crypto_adapter {
	/* Event device identifier */
	uint8_t eventdev_id;
	/* Event port identifier */
	uint8_t event_port_id;
	/* Store event device's implicit release capability */
	uint8_t implicit_release_disabled;
	/* Max crypto ops processed in any service function invocation */
	uint32_t max_nb;
	/* Lock to serialize config updates with service function */
	rte_spinlock_t lock;
	/* Next crypto device to be processed */
	uint16_t next_cdev_id;
	/* Per crypto device structure */
	struct crypto_device_info *cdevs;
	/* Loop counter to flush crypto ops */
	uint16_t transmit_loop_count;
	/* Per instance stats structure */
	struct rte_event_crypto_adapter_stats crypto_stats;
	/* Configuration callback for rte_service configuration */
	rte_event_crypto_adapter_conf_cb conf_cb;
	/* Configuration callback argument */
	void *conf_arg;
	/* Set if default_cb is being used */
	int default_cb_arg;
	/* Service initialization state */
	uint8_t service_inited;
	/* Memory allocation name */
	char mem_name[CRYPTO_ADAPTER_MEM_NAME_LEN];
	/* Socket identifier cached from eventdev */
	int socket_id;
	/* Per adapter EAL service */
	uint32_t service_id;
	/* No. of queue pairs configured */
	uint16_t nb_qps;
	/* Adapter mode */
	enum rte_event_crypto_adapter_mode mode;
} __rte_cache_aligned;

/* Per crypto device information */
struct crypto_device_info {
	/* Pointer to cryptodev */
	struct rte_cryptodev *dev;
	/* Pointer to queue pair info */
	struct crypto_queue_pair_info *qpairs;
	/* Next queue pair to be processed */
	uint16_t next_queue_pair_id;
	/* Set to indicate cryptodev->eventdev packet
	 * transfer uses a hardware mechanism
	 */
	uint8_t internal_event_port;
	/* Set to indicate processing has been started */
	uint8_t dev_started;
	/* Number of queue pairs added to the adapter for this device;
	 * the start callback is only invoked when this is non-zero
	 */
	uint16_t num_qpairs;
} __rte_cache_aligned;

/* Per queue pair information */
struct crypto_queue_pair_info {
	/* Set to indicate queue pair is enabled */
	bool qp_enabled;
	/* Pointer to hold rte_crypto_ops for batching */
	struct rte_crypto_op **op_buffer;
	/* Number of crypto ops accumulated */
	uint8_t len;
} __rte_cache_aligned;

static struct event_crypto_adapter **event_crypto_adapter;

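/* Minimal usage sketch (application side), assuming an eventdev
 * "evdev_id" and a cryptodev "cdev_id" are already configured;
 * error handling omitted:
 *
 *	struct rte_event_port_conf port_conf = {
 *		.new_event_threshold = 4096,
 *		.dequeue_depth = 32,
 *		.enqueue_depth = 32,
 *	};
 *
 *	rte_event_crypto_adapter_create(0, evdev_id, &port_conf,
 *					RTE_EVENT_CRYPTO_ADAPTER_OP_NEW);
 *	rte_event_crypto_adapter_queue_pair_add(0, cdev_id, -1, NULL);
 *	rte_event_crypto_adapter_start(0);
 */
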
/* Macros to check for valid adapter */
#define EVENT_CRYPTO_ADAPTER_ID_VALID_OR_ERR_RET(id, retval) do { \
	if (!eca_valid_id(id)) { \
		RTE_EDEV_LOG_ERR("Invalid crypto adapter id = %d", id); \
		return retval; \
	} \
} while (0)

static inline int
eca_valid_id(uint8_t id)
{
	return id < RTE_EVENT_CRYPTO_ADAPTER_MAX_INSTANCE;
}

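/* The adapter pointer array lives in a memzone (looked up before it is
 * reserved) so that primary and secondary processes resolve the same
 * adapter instances by id.
 */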
static int
eca_init(void)
{
	const char *name = "crypto_adapter_array";
	const struct rte_memzone *mz;
	unsigned int sz;

	sz = sizeof(*event_crypto_adapter) *
	    RTE_EVENT_CRYPTO_ADAPTER_MAX_INSTANCE;
	sz = RTE_ALIGN(sz, RTE_CACHE_LINE_SIZE);

	mz = rte_memzone_lookup(name);
	if (mz == NULL) {
		mz = rte_memzone_reserve_aligned(name, sz, rte_socket_id(), 0,
						 RTE_CACHE_LINE_SIZE);
		if (mz == NULL) {
			RTE_EDEV_LOG_ERR("failed to reserve memzone err = %"
					PRId32, rte_errno);
			return -rte_errno;
		}
	}

	event_crypto_adapter = mz->addr;
	return 0;
}

static inline struct event_crypto_adapter *
eca_id_to_adapter(uint8_t id)
{
	return event_crypto_adapter ?
		event_crypto_adapter[id] : NULL;
}

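/* Default configuration callback: grow the eventdev by one event port
 * for the adapter's own use, reconfiguring (and, if it was running,
 * restarting) the device around the change.
 */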
static int
eca_default_config_cb(uint8_t id, uint8_t dev_id,
			struct rte_event_crypto_adapter_conf *conf, void *arg)
{
	struct rte_event_dev_config dev_conf;
	struct rte_eventdev *dev;
	uint8_t port_id;
	int started;
	int ret;
	struct rte_event_port_conf *port_conf = arg;
	struct event_crypto_adapter *adapter = eca_id_to_adapter(id);

	if (adapter == NULL)
		return -EINVAL;

	dev = &rte_eventdevs[adapter->eventdev_id];
	dev_conf = dev->data->dev_conf;

	started = dev->data->dev_started;
	if (started)
		rte_event_dev_stop(dev_id);
	port_id = dev_conf.nb_event_ports;
	dev_conf.nb_event_ports += 1;
	ret = rte_event_dev_configure(dev_id, &dev_conf);
	if (ret) {
		RTE_EDEV_LOG_ERR("failed to configure event dev %u", dev_id);
		if (started) {
			if (rte_event_dev_start(dev_id))
				return -EIO;
		}
		return ret;
	}

	ret = rte_event_port_setup(dev_id, port_id, port_conf);
	if (ret) {
		RTE_EDEV_LOG_ERR("failed to setup event port %u", port_id);
		return ret;
	}

	conf->event_port_id = port_id;
	conf->max_nb = DEFAULT_MAX_NB;
	if (started)
		ret = rte_event_dev_start(dev_id);

	adapter->default_cb_arg = 1;
	return ret;
}

int
rte_event_crypto_adapter_create_ext(uint8_t id, uint8_t dev_id,
				rte_event_crypto_adapter_conf_cb conf_cb,
				enum rte_event_crypto_adapter_mode mode,
				void *conf_arg)
{
	struct event_crypto_adapter *adapter;
	char mem_name[CRYPTO_ADAPTER_NAME_LEN];
	struct rte_event_dev_info dev_info;
	int socket_id;
	uint8_t i;
	int ret;

	EVENT_CRYPTO_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);
	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
	if (conf_cb == NULL)
		return -EINVAL;

	if (event_crypto_adapter == NULL) {
		ret = eca_init();
		if (ret)
			return ret;
	}

	adapter = eca_id_to_adapter(id);
	if (adapter != NULL) {
		RTE_EDEV_LOG_ERR("Crypto adapter id %u already exists!", id);
		return -EEXIST;
	}

	socket_id = rte_event_dev_socket_id(dev_id);
	snprintf(mem_name, sizeof(mem_name),
		 "rte_event_crypto_adapter_%d", id);

	adapter = rte_zmalloc_socket(mem_name, sizeof(*adapter),
			RTE_CACHE_LINE_SIZE, socket_id);
	if (adapter == NULL) {
		RTE_EDEV_LOG_ERR("Failed to get mem for event crypto adapter!");
		return -ENOMEM;
	}

	ret = rte_event_dev_info_get(dev_id, &dev_info);
	if (ret < 0) {
		RTE_EDEV_LOG_ERR("Failed to get info for eventdev %d, err = %d",
				 dev_id, ret);
		rte_free(adapter);
		return ret;
	}

	adapter->implicit_release_disabled = (dev_info.event_dev_cap &
			RTE_EVENT_DEV_CAP_IMPLICIT_RELEASE_DISABLE);
	adapter->eventdev_id = dev_id;
	adapter->socket_id = socket_id;
	adapter->conf_cb = conf_cb;
	adapter->conf_arg = conf_arg;
	adapter->mode = mode;
	strcpy(adapter->mem_name, mem_name);
	adapter->cdevs = rte_zmalloc_socket(adapter->mem_name,
					rte_cryptodev_count() *
					sizeof(struct crypto_device_info), 0,
					socket_id);
	if (adapter->cdevs == NULL) {
		RTE_EDEV_LOG_ERR("Failed to get mem for crypto devices");
		rte_free(adapter);
		return -ENOMEM;
	}

	rte_spinlock_init(&adapter->lock);
	for (i = 0; i < rte_cryptodev_count(); i++)
		adapter->cdevs[i].dev = rte_cryptodev_pmd_get_dev(i);

	event_crypto_adapter[id] = adapter;

	rte_eventdev_trace_crypto_adapter_create(id, dev_id, adapter, conf_arg,
		mode);
	return 0;
}

int
rte_event_crypto_adapter_create(uint8_t id, uint8_t dev_id,
				struct rte_event_port_conf *port_config,
				enum rte_event_crypto_adapter_mode mode)
{
	struct rte_event_port_conf *pc;
	int ret;

	if (port_config == NULL)
		return -EINVAL;
	EVENT_CRYPTO_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);

	pc = rte_malloc(NULL, sizeof(*pc), 0);
	if (pc == NULL)
		return -ENOMEM;
	*pc = *port_config;
	ret = rte_event_crypto_adapter_create_ext(id, dev_id,
						  eca_default_config_cb,
						  mode,
						  pc);
	if (ret)
		rte_free(pc);

	return ret;
}

int
rte_event_crypto_adapter_free(uint8_t id)
{
	struct event_crypto_adapter *adapter;

	EVENT_CRYPTO_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);

	adapter = eca_id_to_adapter(id);
	if (adapter == NULL)
		return -EINVAL;

	if (adapter->nb_qps) {
		RTE_EDEV_LOG_ERR("%" PRIu16 " queue pairs not deleted",
				adapter->nb_qps);
		return -EBUSY;
	}

	rte_eventdev_trace_crypto_adapter_free(id, adapter);
	if (adapter->default_cb_arg)
		rte_free(adapter->conf_arg);
	rte_free(adapter->cdevs);
	rte_free(adapter);
	event_crypto_adapter[id] = NULL;

	return 0;
}

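/* Move events carrying crypto ops to the cryptodev queue pairs they
 * request. Request metadata comes from the session user data for
 * session-based ops and from private_data_offset for sessionless ops;
 * ops without usable metadata or with a disabled queue pair are freed.
 */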
static inline unsigned int
eca_enq_to_cryptodev(struct event_crypto_adapter *adapter, struct rte_event *ev,
		     unsigned int cnt)
{
	struct rte_event_crypto_adapter_stats *stats = &adapter->crypto_stats;
	union rte_event_crypto_metadata *m_data = NULL;
	struct crypto_queue_pair_info *qp_info = NULL;
	struct rte_crypto_op *crypto_op;
	unsigned int i, n;
	uint16_t qp_id, len, ret;
	uint8_t cdev_id;

	len = 0;
	ret = 0;
	n = 0;
	stats->event_deq_count += cnt;

	for (i = 0; i < cnt; i++) {
		crypto_op = ev[i].event_ptr;
		if (crypto_op == NULL)
			continue;
		if (crypto_op->sess_type == RTE_CRYPTO_OP_WITH_SESSION) {
			m_data = rte_cryptodev_sym_session_get_user_data(
					crypto_op->sym->session);
			if (m_data == NULL) {
				rte_pktmbuf_free(crypto_op->sym->m_src);
				rte_crypto_op_free(crypto_op);
				continue;
			}

			cdev_id = m_data->request_info.cdev_id;
			qp_id = m_data->request_info.queue_pair_id;
			qp_info = &adapter->cdevs[cdev_id].qpairs[qp_id];
			if (!qp_info->qp_enabled) {
				rte_pktmbuf_free(crypto_op->sym->m_src);
				rte_crypto_op_free(crypto_op);
				continue;
			}
			len = qp_info->len;
			qp_info->op_buffer[len] = crypto_op;
			len++;
		} else if (crypto_op->sess_type == RTE_CRYPTO_OP_SESSIONLESS &&
				crypto_op->private_data_offset) {
			m_data = (union rte_event_crypto_metadata *)
				 ((uint8_t *)crypto_op +
					crypto_op->private_data_offset);
			cdev_id = m_data->request_info.cdev_id;
			qp_id = m_data->request_info.queue_pair_id;
			qp_info = &adapter->cdevs[cdev_id].qpairs[qp_id];
			if (!qp_info->qp_enabled) {
				rte_pktmbuf_free(crypto_op->sym->m_src);
				rte_crypto_op_free(crypto_op);
				continue;
			}
			len = qp_info->len;
			qp_info->op_buffer[len] = crypto_op;
			len++;
		} else {
			rte_pktmbuf_free(crypto_op->sym->m_src);
			rte_crypto_op_free(crypto_op);
			continue;
		}

		if (len == BATCH_SIZE) {
			struct rte_crypto_op **op_buffer = qp_info->op_buffer;
			ret = rte_cryptodev_enqueue_burst(cdev_id,
							  qp_id,
							  op_buffer,
							  BATCH_SIZE);

			stats->crypto_enq_count += ret;
			/* Count the enqueued ops here, before the loop
			 * below advances ret while freeing the ops the
			 * cryptodev did not accept; counting ret once
			 * per iteration would overstate the total.
			 */
			n += ret;

			while (ret < len) {
				struct rte_crypto_op *op;
				op = op_buffer[ret++];
				stats->crypto_enq_fail++;
				rte_pktmbuf_free(op->sym->m_src);
				rte_crypto_op_free(op);
			}

			len = 0;
		}

		if (qp_info)
			qp_info->len = len;
	}

	return n;
}

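/* Flush the partially filled op buffer of every enabled queue pair. */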
static unsigned int
eca_crypto_enq_flush(struct event_crypto_adapter *adapter)
{
	struct rte_event_crypto_adapter_stats *stats = &adapter->crypto_stats;
	struct crypto_device_info *curr_dev;
	struct crypto_queue_pair_info *curr_queue;
	struct rte_crypto_op **op_buffer;
	struct rte_cryptodev *dev;
	uint8_t cdev_id;
	uint16_t qp;
	uint16_t ret;
	uint16_t nb_enqueued = 0;
	uint16_t num_cdev = rte_cryptodev_count();

	for (cdev_id = 0; cdev_id < num_cdev; cdev_id++) {
		curr_dev = &adapter->cdevs[cdev_id];
		dev = curr_dev->dev;
		if (dev == NULL)
			continue;
		for (qp = 0; qp < dev->data->nb_queue_pairs; qp++) {

			curr_queue = &curr_dev->qpairs[qp];
			if (!curr_queue->qp_enabled)
				continue;

			op_buffer = curr_queue->op_buffer;
			ret = rte_cryptodev_enqueue_burst(cdev_id,
							  qp,
							  op_buffer,
							  curr_queue->len);
			stats->crypto_enq_count += ret;
			nb_enqueued += ret;

			while (ret < curr_queue->len) {
				struct rte_crypto_op *op;
				op = op_buffer[ret++];
				stats->crypto_enq_fail++;
				rte_pktmbuf_free(op->sym->m_src);
				rte_crypto_op_free(op);
			}
			curr_queue->len = 0;
		}
	}

	/* Return the total enqueued across all queue pairs rather than
	 * only the last burst's count.
	 */
	return nb_enqueued;
}

static int
eca_crypto_adapter_enq_run(struct event_crypto_adapter *adapter,
			   unsigned int max_enq)
{
	struct rte_event_crypto_adapter_stats *stats = &adapter->crypto_stats;
	struct rte_event ev[BATCH_SIZE];
	unsigned int nb_enq, nb_enqueued;
	uint16_t n;
	uint8_t event_dev_id = adapter->eventdev_id;
	uint8_t event_port_id = adapter->event_port_id;

	nb_enqueued = 0;
	if (adapter->mode == RTE_EVENT_CRYPTO_ADAPTER_OP_NEW)
		return 0;

	for (nb_enq = 0; nb_enq < max_enq; nb_enq += n) {
		stats->event_poll_count++;
		n = rte_event_dequeue_burst(event_dev_id,
					    event_port_id, ev, BATCH_SIZE, 0);

		if (!n)
			break;

		nb_enqueued += eca_enq_to_cryptodev(adapter, ev, n);
	}

	if ((++adapter->transmit_loop_count &
		(CRYPTO_ENQ_FLUSH_THRESHOLD - 1)) == 0) {
		nb_enqueued += eca_crypto_enq_flush(adapter);
	}

	return nb_enqueued;
}

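/* Convert completed crypto ops back into events using the
 * response_info from the op metadata and enqueue them to the adapter's
 * event port. OP_FORWARD is used when the event device supports
 * disabling implicit release, otherwise OP_NEW.
 */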
static inline void
eca_ops_enqueue_burst(struct event_crypto_adapter *adapter,
		      struct rte_crypto_op **ops, uint16_t num)
{
	struct rte_event_crypto_adapter_stats *stats = &adapter->crypto_stats;
	union rte_event_crypto_metadata *m_data = NULL;
	uint8_t event_dev_id = adapter->eventdev_id;
	uint8_t event_port_id = adapter->event_port_id;
	struct rte_event events[BATCH_SIZE];
	uint16_t nb_enqueued, nb_ev;
	uint8_t retry;
	uint8_t i;

	nb_ev = 0;
	retry = 0;
	nb_enqueued = 0;
	num = RTE_MIN(num, BATCH_SIZE);
	for (i = 0; i < num; i++) {
		struct rte_event *ev = &events[nb_ev];

		/* Reset m_data each iteration so a stale pointer from a
		 * previous op is never reused.
		 */
		m_data = NULL;
		if (ops[i]->sess_type == RTE_CRYPTO_OP_WITH_SESSION) {
			m_data = rte_cryptodev_sym_session_get_user_data(
					ops[i]->sym->session);
		} else if (ops[i]->sess_type == RTE_CRYPTO_OP_SESSIONLESS &&
				ops[i]->private_data_offset) {
			m_data = (union rte_event_crypto_metadata *)
				 ((uint8_t *)ops[i] +
				  ops[i]->private_data_offset);
		}

		if (unlikely(m_data == NULL)) {
			rte_pktmbuf_free(ops[i]->sym->m_src);
			rte_crypto_op_free(ops[i]);
			continue;
		}

		/* Claim the event slot only for ops with valid metadata,
		 * so dropped ops never leave an uninitialized event in
		 * events[].
		 */
		nb_ev++;
		rte_memcpy(ev, &m_data->response_info, sizeof(*ev));
		ev->event_ptr = ops[i];
		ev->event_type = RTE_EVENT_TYPE_CRYPTODEV;
		if (adapter->implicit_release_disabled)
			ev->op = RTE_EVENT_OP_FORWARD;
		else
			ev->op = RTE_EVENT_OP_NEW;
	}

	do {
		nb_enqueued += rte_event_enqueue_burst(event_dev_id,
						  event_port_id,
						  &events[nb_enqueued],
						  nb_ev - nb_enqueued);
	} while (retry++ < CRYPTO_ADAPTER_MAX_EV_ENQ_RETRIES &&
		 nb_enqueued < nb_ev);

	/* Free mbufs and rte_crypto_ops for failed events */
	for (i = nb_enqueued; i < nb_ev; i++) {
		struct rte_crypto_op *op = events[i].event_ptr;
		rte_pktmbuf_free(op->sym->m_src);
		rte_crypto_op_free(op);
	}

	stats->event_enq_fail_count += nb_ev - nb_enqueued;
	stats->event_enq_count += nb_enqueued;
	stats->event_enq_retry_count += retry - 1;
}

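/* Poll cryptodev queue pairs round-robin for completed ops, resuming
 * from the device and queue pair recorded on the previous invocation
 * once max_deq ops have been handled.
 */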
static inline unsigned int
eca_crypto_adapter_deq_run(struct event_crypto_adapter *adapter,
			   unsigned int max_deq)
{
	struct rte_event_crypto_adapter_stats *stats = &adapter->crypto_stats;
	struct crypto_device_info *curr_dev;
	struct crypto_queue_pair_info *curr_queue;
	struct rte_crypto_op *ops[BATCH_SIZE];
	uint16_t n, nb_deq;
	struct rte_cryptodev *dev;
	uint8_t cdev_id;
	uint16_t qp, dev_qps;
	bool done;
	uint16_t num_cdev = rte_cryptodev_count();

	nb_deq = 0;
	do {
		uint16_t queues = 0;
		done = true;

		for (cdev_id = adapter->next_cdev_id;
			cdev_id < num_cdev; cdev_id++) {
			curr_dev = &adapter->cdevs[cdev_id];
			dev = curr_dev->dev;
			if (dev == NULL)
				continue;
			dev_qps = dev->data->nb_queue_pairs;

			for (qp = curr_dev->next_queue_pair_id;
				queues < dev_qps; qp = (qp + 1) % dev_qps,
				queues++) {

				curr_queue = &curr_dev->qpairs[qp];
				if (!curr_queue->qp_enabled)
					continue;

				n = rte_cryptodev_dequeue_burst(cdev_id, qp,
					ops, BATCH_SIZE);
				if (!n)
					continue;

				done = false;
				stats->crypto_deq_count += n;
				eca_ops_enqueue_burst(adapter, ops, n);
				nb_deq += n;

				if (nb_deq > max_deq) {
					if ((qp + 1) == dev_qps) {
						adapter->next_cdev_id =
							(cdev_id + 1)
							% num_cdev;
					}
					curr_dev->next_queue_pair_id = (qp + 1)
						% dev->data->nb_queue_pairs;

					return nb_deq;
				}
			}
		}
	} while (done == false);
	return nb_deq;
}

static void
eca_crypto_adapter_run(struct event_crypto_adapter *adapter,
		       unsigned int max_ops)
{
	while (max_ops) {
		unsigned int deq_cnt, enq_cnt;

		deq_cnt = eca_crypto_adapter_deq_run(adapter, max_ops);
		max_ops -= RTE_MIN(max_ops, deq_cnt);

		enq_cnt = eca_crypto_adapter_enq_run(adapter, max_ops);
		max_ops -= RTE_MIN(max_ops, enq_cnt);

		if (deq_cnt == 0 && enq_cnt == 0)
			break;
	}
}

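/* Adapter service function: skip the iteration rather than blocking
 * when a control-path thread holds the adapter lock.
 */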
static int
eca_service_func(void *args)
{
	struct event_crypto_adapter *adapter = args;

	if (rte_spinlock_trylock(&adapter->lock) == 0)
		return 0;
	eca_crypto_adapter_run(adapter, adapter->max_nb);
	rte_spinlock_unlock(&adapter->lock);

	return 0;
}

static int
eca_init_service(struct event_crypto_adapter *adapter, uint8_t id)
{
	struct rte_event_crypto_adapter_conf adapter_conf;
	struct rte_service_spec service;
	int ret;

	if (adapter->service_inited)
		return 0;

	memset(&service, 0, sizeof(service));
	snprintf(service.name, CRYPTO_ADAPTER_NAME_LEN,
		"rte_event_crypto_adapter_%d", id);
	service.socket_id = adapter->socket_id;
	service.callback = eca_service_func;
	service.callback_userdata = adapter;
	/* Service function handles locking for queue add/del updates */
	service.capabilities = RTE_SERVICE_CAP_MT_SAFE;
	ret = rte_service_component_register(&service, &adapter->service_id);
	if (ret) {
		RTE_EDEV_LOG_ERR("failed to register service %s err = %" PRId32,
			service.name, ret);
		return ret;
	}

	ret = adapter->conf_cb(id, adapter->eventdev_id,
		&adapter_conf, adapter->conf_arg);
	if (ret) {
		RTE_EDEV_LOG_ERR("configuration callback failed err = %" PRId32,
			ret);
		return ret;
	}

	adapter->max_nb = adapter_conf.max_nb;
	adapter->event_port_id = adapter_conf.event_port_id;
	adapter->service_inited = 1;

	return ret;
}

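/* Enable or disable a queue pair (all pairs when queue_pair_id is -1),
 * adjusting the per-device and per-adapter counts only when the
 * enabled state actually changes.
 */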
static void
eca_update_qp_info(struct event_crypto_adapter *adapter,
		   struct crypto_device_info *dev_info, int32_t queue_pair_id,
		   uint8_t add)
{
	struct crypto_queue_pair_info *qp_info;
	int enabled;
	uint16_t i;

	if (dev_info->qpairs == NULL)
		return;

	if (queue_pair_id == -1) {
		for (i = 0; i < dev_info->dev->data->nb_queue_pairs; i++)
			eca_update_qp_info(adapter, dev_info, i, add);
	} else {
		qp_info = &dev_info->qpairs[queue_pair_id];
		enabled = qp_info->qp_enabled;
		if (add) {
			adapter->nb_qps += !enabled;
			dev_info->num_qpairs += !enabled;
		} else {
			adapter->nb_qps -= enabled;
			dev_info->num_qpairs -= enabled;
		}
		qp_info->qp_enabled = !!add;
	}
}

static int
eca_add_queue_pair(struct event_crypto_adapter *adapter, uint8_t cdev_id,
		   int queue_pair_id)
{
	struct crypto_device_info *dev_info = &adapter->cdevs[cdev_id];
	struct crypto_queue_pair_info *qpairs;
	uint32_t i;

	if (dev_info->qpairs == NULL) {
		dev_info->qpairs =
		    rte_zmalloc_socket(adapter->mem_name,
					dev_info->dev->data->nb_queue_pairs *
					sizeof(struct crypto_queue_pair_info),
					0, adapter->socket_id);
		if (dev_info->qpairs == NULL)
			return -ENOMEM;

		/* Allocate a batching buffer for every queue pair up
		 * front: eca_enq_to_cryptodev() writes to op_buffer on
		 * whichever queue pair an op requests, not only the
		 * first one.
		 */
		for (i = 0; i < dev_info->dev->data->nb_queue_pairs; i++) {
			qpairs = &dev_info->qpairs[i];
			qpairs->op_buffer = rte_zmalloc_socket(
						adapter->mem_name,
						BATCH_SIZE *
						sizeof(struct rte_crypto_op *),
						0, adapter->socket_id);
			if (qpairs->op_buffer == NULL) {
				while (i-- > 0)
					rte_free(
					    dev_info->qpairs[i].op_buffer);
				rte_free(dev_info->qpairs);
				dev_info->qpairs = NULL;
				return -ENOMEM;
			}
		}
	}

	if (queue_pair_id == -1) {
		for (i = 0; i < dev_info->dev->data->nb_queue_pairs; i++)
			eca_update_qp_info(adapter, dev_info, i, 1);
	} else
		eca_update_qp_info(adapter, dev_info,
					(uint16_t)queue_pair_id, 1);

	return 0;
}

int
rte_event_crypto_adapter_queue_pair_add(uint8_t id,
			uint8_t cdev_id,
			int32_t queue_pair_id,
			const struct rte_event *event)
{
	struct event_crypto_adapter *adapter;
	struct rte_eventdev *dev;
	struct crypto_device_info *dev_info;
	uint32_t cap;
	int ret;

	EVENT_CRYPTO_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);

	if (!rte_cryptodev_is_valid_dev(cdev_id)) {
		RTE_EDEV_LOG_ERR("Invalid dev_id=%" PRIu8, cdev_id);
		return -EINVAL;
	}

	adapter = eca_id_to_adapter(id);
	if (adapter == NULL)
		return -EINVAL;

	dev = &rte_eventdevs[adapter->eventdev_id];
	ret = rte_event_crypto_adapter_caps_get(adapter->eventdev_id,
						cdev_id,
						&cap);
	if (ret) {
		RTE_EDEV_LOG_ERR("Failed to get adapter caps dev %" PRIu8
			" cdev %" PRIu8, id, cdev_id);
		return ret;
	}

	if ((cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_QP_EV_BIND) &&
	    (event == NULL)) {
		RTE_EDEV_LOG_ERR("Conf value cannot be NULL for dev_id=%u",
				  cdev_id);
		return -EINVAL;
	}

	dev_info = &adapter->cdevs[cdev_id];

	if (queue_pair_id != -1 &&
	    (uint16_t)queue_pair_id >= dev_info->dev->data->nb_queue_pairs) {
		RTE_EDEV_LOG_ERR("Invalid queue_pair_id %" PRIu16,
				 (uint16_t)queue_pair_id);
		return -EINVAL;
	}

	/* In case HW cap is RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_FWD,
	 * no service core is needed as the HW supports event forwarding.
	 */
	if ((cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_FWD) ||
	    (cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_QP_EV_BIND &&
	     adapter->mode == RTE_EVENT_CRYPTO_ADAPTER_OP_NEW) ||
	    (cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_NEW &&
	     adapter->mode == RTE_EVENT_CRYPTO_ADAPTER_OP_NEW)) {
		RTE_FUNC_PTR_OR_ERR_RET(
			*dev->dev_ops->crypto_adapter_queue_pair_add,
			-ENOTSUP);
		if (dev_info->qpairs == NULL) {
			dev_info->qpairs =
			    rte_zmalloc_socket(adapter->mem_name,
					dev_info->dev->data->nb_queue_pairs *
					sizeof(struct crypto_queue_pair_info),
					0, adapter->socket_id);
			if (dev_info->qpairs == NULL)
				return -ENOMEM;
		}

		ret = (*dev->dev_ops->crypto_adapter_queue_pair_add)(dev,
				dev_info->dev,
				queue_pair_id,
				event);
		if (ret)
			return ret;

		eca_update_qp_info(adapter, &adapter->cdevs[cdev_id],
				   queue_pair_id, 1);
	}

	/* In case HW cap is RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_NEW,
	 * or for a SW adapter, initiate the service so the application can
	 * choose whichever way it wants to use the adapter.
	 * Case 1: RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_NEW
	 *         Application may want to use one of the two modes below:
	 *          a. OP_FORWARD mode -> HW dequeue + SW enqueue
	 *          b. OP_NEW mode -> HW dequeue
	 * Case 2: No HW caps, use SW adapter
	 *          a. OP_FORWARD mode -> SW enqueue & dequeue
	 *          b. OP_NEW mode -> SW dequeue
	 */
	if ((cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_NEW &&
	     !(cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_FWD) &&
	     adapter->mode == RTE_EVENT_CRYPTO_ADAPTER_OP_FORWARD) ||
	     (!(cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_NEW) &&
	      !(cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_FWD) &&
	      !(cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_QP_EV_BIND) &&
	       (cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_SESSION_PRIVATE_DATA))) {
		rte_spinlock_lock(&adapter->lock);
		ret = eca_init_service(adapter, id);
		if (ret == 0)
			ret = eca_add_queue_pair(adapter, cdev_id,
						 queue_pair_id);
		rte_spinlock_unlock(&adapter->lock);

		if (ret)
			return ret;

		rte_service_component_runstate_set(adapter->service_id, 1);
	}

	rte_eventdev_trace_crypto_adapter_queue_pair_add(id, cdev_id, event,
		queue_pair_id);
	return 0;
}

int
rte_event_crypto_adapter_queue_pair_del(uint8_t id, uint8_t cdev_id,
					int32_t queue_pair_id)
{
	struct event_crypto_adapter *adapter;
	struct crypto_device_info *dev_info;
	struct rte_eventdev *dev;
	int ret;
	uint32_t cap;
	uint16_t i;

	EVENT_CRYPTO_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);

	if (!rte_cryptodev_is_valid_dev(cdev_id)) {
		RTE_EDEV_LOG_ERR("Invalid dev_id=%" PRIu8, cdev_id);
		return -EINVAL;
	}

	adapter = eca_id_to_adapter(id);
	if (adapter == NULL)
		return -EINVAL;

	dev = &rte_eventdevs[adapter->eventdev_id];
	ret = rte_event_crypto_adapter_caps_get(adapter->eventdev_id,
						cdev_id,
						&cap);
	if (ret)
		return ret;

	dev_info = &adapter->cdevs[cdev_id];

	if (queue_pair_id != -1 &&
	    (uint16_t)queue_pair_id >= dev_info->dev->data->nb_queue_pairs) {
		RTE_EDEV_LOG_ERR("Invalid queue_pair_id %" PRIu16,
				 (uint16_t)queue_pair_id);
		return -EINVAL;
	}

	if ((cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_FWD) ||
	    (cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_NEW &&
	     adapter->mode == RTE_EVENT_CRYPTO_ADAPTER_OP_NEW)) {
		RTE_FUNC_PTR_OR_ERR_RET(
			*dev->dev_ops->crypto_adapter_queue_pair_del,
			-ENOTSUP);
		ret = (*dev->dev_ops->crypto_adapter_queue_pair_del)(dev,
						dev_info->dev,
						queue_pair_id);
		if (ret == 0) {
			eca_update_qp_info(adapter,
					&adapter->cdevs[cdev_id],
					queue_pair_id,
					0);
			if (dev_info->num_qpairs == 0) {
				rte_free(dev_info->qpairs);
				dev_info->qpairs = NULL;
			}
		}
	} else {
		if (adapter->nb_qps == 0)
			return 0;

		rte_spinlock_lock(&adapter->lock);
		/* eca_update_qp_info() handles queue_pair_id == -1 by
		 * iterating over all queue pairs itself.
		 */
		eca_update_qp_info(adapter, dev_info, queue_pair_id, 0);

		if (dev_info->num_qpairs == 0) {
			/* Free the per queue pair batching buffers
			 * along with the queue pair array.
			 */
			for (i = 0; i < dev_info->dev->data->nb_queue_pairs;
			     i++)
				rte_free(dev_info->qpairs[i].op_buffer);
			rte_free(dev_info->qpairs);
			dev_info->qpairs = NULL;
		}

		rte_spinlock_unlock(&adapter->lock);
		rte_service_component_runstate_set(adapter->service_id,
				adapter->nb_qps);
	}

	rte_eventdev_trace_crypto_adapter_queue_pair_del(id, cdev_id,
		queue_pair_id, ret);
	return ret;
}

static int
eca_adapter_ctrl(uint8_t id, int start)
{
	struct event_crypto_adapter *adapter;
	struct crypto_device_info *dev_info;
	struct rte_eventdev *dev;
	uint32_t i;
	int use_service;
	int stop = !start;

	use_service = 0;
	EVENT_CRYPTO_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);
	adapter = eca_id_to_adapter(id);
	if (adapter == NULL)
		return -EINVAL;

	dev = &rte_eventdevs[adapter->eventdev_id];

	for (i = 0; i < rte_cryptodev_count(); i++) {
		dev_info = &adapter->cdevs[i];
		/* if start, check for num queue pairs */
		if (start && !dev_info->num_qpairs)
			continue;
		/* if stop, check if dev has been started */
		if (stop && !dev_info->dev_started)
			continue;
		use_service |= !dev_info->internal_event_port;
		dev_info->dev_started = start;
		if (dev_info->internal_event_port == 0)
			continue;
		/* Pass the cryptodev itself: dev_info->dev already
		 * points at device i, so no further indexing is needed.
		 */
		start ? (*dev->dev_ops->crypto_adapter_start)(dev,
						dev_info->dev) :
			(*dev->dev_ops->crypto_adapter_stop)(dev,
						dev_info->dev);
	}

	if (use_service)
		rte_service_runstate_set(adapter->service_id, start);

	return 0;
}

int
rte_event_crypto_adapter_start(uint8_t id)
{
	struct event_crypto_adapter *adapter;

	EVENT_CRYPTO_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);
	adapter = eca_id_to_adapter(id);
	if (adapter == NULL)
		return -EINVAL;

	rte_eventdev_trace_crypto_adapter_start(id, adapter);
	return eca_adapter_ctrl(id, 1);
}

int
rte_event_crypto_adapter_stop(uint8_t id)
{
	rte_eventdev_trace_crypto_adapter_stop(id);
	return eca_adapter_ctrl(id, 0);
}

int
rte_event_crypto_adapter_stats_get(uint8_t id,
				struct rte_event_crypto_adapter_stats *stats)
{
	struct event_crypto_adapter *adapter;
	struct rte_event_crypto_adapter_stats dev_stats_sum = { 0 };
	struct rte_event_crypto_adapter_stats dev_stats;
	struct rte_eventdev *dev;
	struct crypto_device_info *dev_info;
	uint32_t i;
	int ret;

	EVENT_CRYPTO_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);

	adapter = eca_id_to_adapter(id);
	if (adapter == NULL || stats == NULL)
		return -EINVAL;

	dev = &rte_eventdevs[adapter->eventdev_id];
	memset(stats, 0, sizeof(*stats));
	for (i = 0; i < rte_cryptodev_count(); i++) {
		dev_info = &adapter->cdevs[i];
		if (dev_info->internal_event_port == 0 ||
			dev->dev_ops->crypto_adapter_stats_get == NULL)
			continue;
		ret = (*dev->dev_ops->crypto_adapter_stats_get)(dev,
						dev_info->dev,
						&dev_stats);
		if (ret)
			continue;

		dev_stats_sum.crypto_deq_count += dev_stats.crypto_deq_count;
		dev_stats_sum.event_enq_count +=
			dev_stats.event_enq_count;
	}

	if (adapter->service_inited)
		*stats = adapter->crypto_stats;

	stats->crypto_deq_count += dev_stats_sum.crypto_deq_count;
	stats->event_enq_count += dev_stats_sum.event_enq_count;

	return 0;
}

int
rte_event_crypto_adapter_stats_reset(uint8_t id)
{
	struct event_crypto_adapter *adapter;
	struct crypto_device_info *dev_info;
	struct rte_eventdev *dev;
	uint32_t i;

	EVENT_CRYPTO_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);

	adapter = eca_id_to_adapter(id);
	if (adapter == NULL)
		return -EINVAL;

	dev = &rte_eventdevs[adapter->eventdev_id];
	for (i = 0; i < rte_cryptodev_count(); i++) {
		dev_info = &adapter->cdevs[i];
		if (dev_info->internal_event_port == 0 ||
			dev->dev_ops->crypto_adapter_stats_reset == NULL)
			continue;
		(*dev->dev_ops->crypto_adapter_stats_reset)(dev,
						dev_info->dev);
	}

	memset(&adapter->crypto_stats, 0, sizeof(adapter->crypto_stats));
	return 0;
}

int
rte_event_crypto_adapter_service_id_get(uint8_t id, uint32_t *service_id)
{
	struct event_crypto_adapter *adapter;

	EVENT_CRYPTO_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);

	adapter = eca_id_to_adapter(id);
	if (adapter == NULL || service_id == NULL)
		return -EINVAL;

	if (adapter->service_inited)
		*service_id = adapter->service_id;

	return adapter->service_inited ? 0 : -ESRCH;
}

int
rte_event_crypto_adapter_event_port_get(uint8_t id, uint8_t *event_port_id)
{
	struct event_crypto_adapter *adapter;

	EVENT_CRYPTO_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);

	adapter = eca_id_to_adapter(id);
	if (adapter == NULL || event_port_id == NULL)
		return -EINVAL;

	*event_port_id = adapter->event_port_id;

	return 0;
}