/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2018 Intel Corporation.
 * All rights reserved.
 */

#include <string.h>
#include <stdbool.h>
#include <rte_common.h>
#include <rte_dev.h>
#include <rte_errno.h>
#include <rte_cryptodev.h>
#include <cryptodev_pmd.h>
#include <rte_log.h>
#include <rte_malloc.h>
#include <rte_service_component.h>

#include "rte_eventdev.h"
#include "eventdev_pmd.h"
#include "eventdev_trace.h"
#include "rte_event_crypto_adapter.h"

#define BATCH_SIZE 32
#define DEFAULT_MAX_NB 128
#define CRYPTO_ADAPTER_NAME_LEN 32
#define CRYPTO_ADAPTER_MEM_NAME_LEN 32
#define CRYPTO_ADAPTER_MAX_EV_ENQ_RETRIES 100

/* Flush an instance's enqueue buffers every CRYPTO_ENQ_FLUSH_THRESHOLD
 * iterations of eca_crypto_adapter_enq_run()
 */
#define CRYPTO_ENQ_FLUSH_THRESHOLD 1024

struct event_crypto_adapter {
        /* Event device identifier */
        uint8_t eventdev_id;
        /* Event port identifier */
        uint8_t event_port_id;
        /* Store event device's implicit release capability */
        uint8_t implicit_release_disabled;
        /* Max crypto ops processed in any service function invocation */
        uint32_t max_nb;
        /* Lock to serialize config updates with service function */
        rte_spinlock_t lock;
        /* Next crypto device to be processed */
        uint16_t next_cdev_id;
        /* Per crypto device structure */
        struct crypto_device_info *cdevs;
        /* Loop counter to flush crypto ops */
        uint16_t transmit_loop_count;
        /* Per instance stats structure */
        struct rte_event_crypto_adapter_stats crypto_stats;
        /* Configuration callback for rte_service configuration */
        rte_event_crypto_adapter_conf_cb conf_cb;
        /* Configuration callback argument */
        void *conf_arg;
        /* Set if default_cb is being used */
        int default_cb_arg;
        /* Service initialization state */
        uint8_t service_inited;
        /* Memory allocation name */
        char mem_name[CRYPTO_ADAPTER_MEM_NAME_LEN];
        /* Socket identifier cached from eventdev */
        int socket_id;
        /* Per adapter EAL service */
        uint32_t service_id;
        /* No. of queue pairs configured */
        uint16_t nb_qps;
        /* Adapter mode */
        enum rte_event_crypto_adapter_mode mode;
} __rte_cache_aligned;

/* Per crypto device information */
struct crypto_device_info {
        /* Pointer to cryptodev */
        struct rte_cryptodev *dev;
        /* Pointer to queue pair info */
        struct crypto_queue_pair_info *qpairs;
        /* Next queue pair to be processed */
        uint16_t next_queue_pair_id;
        /* Set to indicate cryptodev->eventdev packet
         * transfer uses a hardware mechanism
         */
        uint8_t internal_event_port;
        /* Set to indicate processing has been started */
        uint8_t dev_started;
        /* No. of queue pairs added to the adapter; devices with no
         * queue pairs added are skipped when the adapter is started
         */
        uint16_t num_qpairs;
} __rte_cache_aligned;

/* Per queue pair information */
struct crypto_queue_pair_info {
        /* Set to indicate queue pair is enabled */
        bool qp_enabled;
        /* Pointer to hold rte_crypto_ops for batching */
        struct rte_crypto_op **op_buffer;
        /* No. of crypto ops accumulated */
        uint8_t len;
} __rte_cache_aligned;

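/* Adapter instance table, indexed by adapter ID. It is backed by a named
 * memzone (see eca_init()) so that primary and secondary processes resolve
 * the same array.
 */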
static struct event_crypto_adapter **event_crypto_adapter;

/* Macro to check for a valid adapter id */
#define EVENT_CRYPTO_ADAPTER_ID_VALID_OR_ERR_RET(id, retval) do { \
        if (!eca_valid_id(id)) { \
                RTE_EDEV_LOG_ERR("Invalid crypto adapter id = %d", id); \
                return retval; \
        } \
} while (0)

static inline int
eca_valid_id(uint8_t id)
{
        return id < RTE_EVENT_CRYPTO_ADAPTER_MAX_INSTANCE;
}

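/* Reserve (or look up, if it already exists) the memzone that backs the
 * adapter instance table. Looking the zone up by name first keeps the call
 * idempotent and lets secondary processes attach to the primary's array.
 */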
static int
eca_init(void)
{
        const char *name = "crypto_adapter_array";
        const struct rte_memzone *mz;
        unsigned int sz;

        sz = sizeof(*event_crypto_adapter) *
            RTE_EVENT_CRYPTO_ADAPTER_MAX_INSTANCE;
        sz = RTE_ALIGN(sz, RTE_CACHE_LINE_SIZE);

        mz = rte_memzone_lookup(name);
        if (mz == NULL) {
                mz = rte_memzone_reserve_aligned(name, sz, rte_socket_id(), 0,
                                                 RTE_CACHE_LINE_SIZE);
                if (mz == NULL) {
                        RTE_EDEV_LOG_ERR("failed to reserve memzone err = %"
                                        PRId32, rte_errno);
                        return -rte_errno;
                }
        }

        event_crypto_adapter = mz->addr;
        return 0;
}

static inline struct event_crypto_adapter *
eca_id_to_adapter(uint8_t id)
{
        return event_crypto_adapter ?
                event_crypto_adapter[id] : NULL;
}

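/* Default configuration callback, used when the adapter is created with
 * rte_event_crypto_adapter_create(). It grows the event device by one event
 * port for the adapter's own use, which requires stopping and reconfiguring
 * the device; the port configuration is the one captured at create time.
 */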
static int
eca_default_config_cb(uint8_t id, uint8_t dev_id,
                        struct rte_event_crypto_adapter_conf *conf, void *arg)
{
        struct rte_event_dev_config dev_conf;
        struct rte_eventdev *dev;
        uint8_t port_id;
        int started;
        int ret;
        struct rte_event_port_conf *port_conf = arg;
        struct event_crypto_adapter *adapter = eca_id_to_adapter(id);

        if (adapter == NULL)
                return -EINVAL;

        dev = &rte_eventdevs[adapter->eventdev_id];
        dev_conf = dev->data->dev_conf;

        started = dev->data->dev_started;
        if (started)
                rte_event_dev_stop(dev_id);
        port_id = dev_conf.nb_event_ports;
        dev_conf.nb_event_ports += 1;
        ret = rte_event_dev_configure(dev_id, &dev_conf);
        if (ret) {
                RTE_EDEV_LOG_ERR("failed to configure event dev %u", dev_id);
                if (started) {
                        if (rte_event_dev_start(dev_id))
                                return -EIO;
                }
                return ret;
        }

        ret = rte_event_port_setup(dev_id, port_id, port_conf);
        if (ret) {
                RTE_EDEV_LOG_ERR("failed to setup event port %u", port_id);
                return ret;
        }

        conf->event_port_id = port_id;
        conf->max_nb = DEFAULT_MAX_NB;
        if (started)
                ret = rte_event_dev_start(dev_id);

        adapter->default_cb_arg = 1;
        return ret;
}

int
rte_event_crypto_adapter_create_ext(uint8_t id, uint8_t dev_id,
                                rte_event_crypto_adapter_conf_cb conf_cb,
                                enum rte_event_crypto_adapter_mode mode,
                                void *conf_arg)
{
        struct event_crypto_adapter *adapter;
        char mem_name[CRYPTO_ADAPTER_NAME_LEN];
        struct rte_event_dev_info dev_info;
        int socket_id;
        uint8_t i;
        int ret;

        EVENT_CRYPTO_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);
        RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
        if (conf_cb == NULL)
                return -EINVAL;

        if (event_crypto_adapter == NULL) {
                ret = eca_init();
                if (ret)
                        return ret;
        }

        adapter = eca_id_to_adapter(id);
        if (adapter != NULL) {
                RTE_EDEV_LOG_ERR("Crypto adapter id %u already exists!", id);
                return -EEXIST;
        }

        socket_id = rte_event_dev_socket_id(dev_id);
        snprintf(mem_name, sizeof(mem_name),
                 "rte_event_crypto_adapter_%d", id);

        adapter = rte_zmalloc_socket(mem_name, sizeof(*adapter),
                        RTE_CACHE_LINE_SIZE, socket_id);
        if (adapter == NULL) {
                RTE_EDEV_LOG_ERR("Failed to get mem for event crypto adapter!");
                return -ENOMEM;
        }

        ret = rte_event_dev_info_get(dev_id, &dev_info);
        if (ret < 0) {
                RTE_EDEV_LOG_ERR("Failed to get info for eventdev %d: %s!",
                                 dev_id, dev_info.driver_name);
                rte_free(adapter);
                return ret;
        }

        adapter->implicit_release_disabled = (dev_info.event_dev_cap &
                        RTE_EVENT_DEV_CAP_IMPLICIT_RELEASE_DISABLE);
        adapter->eventdev_id = dev_id;
        adapter->socket_id = socket_id;
        adapter->conf_cb = conf_cb;
        adapter->conf_arg = conf_arg;
        adapter->mode = mode;
        strcpy(adapter->mem_name, mem_name);
        adapter->cdevs = rte_zmalloc_socket(adapter->mem_name,
                                        rte_cryptodev_count() *
                                        sizeof(struct crypto_device_info), 0,
                                        socket_id);
        if (adapter->cdevs == NULL) {
                RTE_EDEV_LOG_ERR("Failed to get mem for crypto devices");
                rte_free(adapter);
                return -ENOMEM;
        }

        rte_spinlock_init(&adapter->lock);
        for (i = 0; i < rte_cryptodev_count(); i++)
                adapter->cdevs[i].dev = rte_cryptodev_pmd_get_dev(i);

        event_crypto_adapter[id] = adapter;

        rte_eventdev_trace_crypto_adapter_create(id, dev_id, adapter, conf_arg,
                mode);
        return 0;
}

int
rte_event_crypto_adapter_create(uint8_t id, uint8_t dev_id,
                                struct rte_event_port_conf *port_config,
                                enum rte_event_crypto_adapter_mode mode)
{
        struct rte_event_port_conf *pc;
        int ret;

        if (port_config == NULL)
                return -EINVAL;
        EVENT_CRYPTO_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);

        pc = rte_malloc(NULL, sizeof(*pc), 0);
        if (pc == NULL)
                return -ENOMEM;
        *pc = *port_config;
        ret = rte_event_crypto_adapter_create_ext(id, dev_id,
                                                  eca_default_config_cb,
                                                  mode,
                                                  pc);
        if (ret)
                rte_free(pc);

        return ret;
}

int
rte_event_crypto_adapter_free(uint8_t id)
{
        struct event_crypto_adapter *adapter;

        EVENT_CRYPTO_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);

        adapter = eca_id_to_adapter(id);
        if (adapter == NULL)
                return -EINVAL;

        if (adapter->nb_qps) {
                RTE_EDEV_LOG_ERR("%" PRIu16 " queue pairs not deleted",
                                adapter->nb_qps);
                return -EBUSY;
        }

        rte_eventdev_trace_crypto_adapter_free(id, adapter);
        if (adapter->default_cb_arg)
                rte_free(adapter->conf_arg);
        rte_free(adapter->cdevs);
        rte_free(adapter);
        event_crypto_adapter[id] = NULL;

        return 0;
}

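/* Move events dequeued from the adapter's event port to the crypto devices
 * they are destined for. Each op is routed using the request metadata
 * attached to its session (or carried in the op itself for sessionless ops)
 * and buffered per queue pair; a buffer is enqueued to the cryptodev once it
 * reaches BATCH_SIZE. Ops without usable metadata, or targeting a disabled
 * queue pair, are freed.
 */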
static inline unsigned int
eca_enq_to_cryptodev(struct event_crypto_adapter *adapter, struct rte_event *ev,
                     unsigned int cnt)
{
        struct rte_event_crypto_adapter_stats *stats = &adapter->crypto_stats;
        union rte_event_crypto_metadata *m_data = NULL;
        struct crypto_queue_pair_info *qp_info = NULL;
        struct rte_crypto_op *crypto_op;
        unsigned int i, n;
        uint16_t qp_id, len, ret;
        uint8_t cdev_id;

        len = 0;
        ret = 0;
        n = 0;
        stats->event_deq_count += cnt;

        for (i = 0; i < cnt; i++) {
                crypto_op = ev[i].event_ptr;
                if (crypto_op == NULL)
                        continue;
                if (crypto_op->sess_type == RTE_CRYPTO_OP_WITH_SESSION) {
                        m_data = rte_cryptodev_sym_session_get_user_data(
                                        crypto_op->sym->session);
                        if (m_data == NULL) {
                                rte_pktmbuf_free(crypto_op->sym->m_src);
                                rte_crypto_op_free(crypto_op);
                                continue;
                        }

                        cdev_id = m_data->request_info.cdev_id;
                        qp_id = m_data->request_info.queue_pair_id;
                        qp_info = &adapter->cdevs[cdev_id].qpairs[qp_id];
                        if (!qp_info->qp_enabled) {
                                rte_pktmbuf_free(crypto_op->sym->m_src);
                                rte_crypto_op_free(crypto_op);
                                continue;
                        }
                        len = qp_info->len;
                        qp_info->op_buffer[len] = crypto_op;
                        len++;
                } else if (crypto_op->sess_type == RTE_CRYPTO_OP_SESSIONLESS &&
                                crypto_op->private_data_offset) {
                        m_data = (union rte_event_crypto_metadata *)
                                 ((uint8_t *)crypto_op +
                                        crypto_op->private_data_offset);
                        cdev_id = m_data->request_info.cdev_id;
                        qp_id = m_data->request_info.queue_pair_id;
                        qp_info = &adapter->cdevs[cdev_id].qpairs[qp_id];
                        if (!qp_info->qp_enabled) {
                                rte_pktmbuf_free(crypto_op->sym->m_src);
                                rte_crypto_op_free(crypto_op);
                                continue;
                        }
                        len = qp_info->len;
                        qp_info->op_buffer[len] = crypto_op;
                        len++;
                } else {
                        rte_pktmbuf_free(crypto_op->sym->m_src);
                        rte_crypto_op_free(crypto_op);
                        continue;
                }

                if (len == BATCH_SIZE) {
                        struct rte_crypto_op **op_buffer = qp_info->op_buffer;
                        ret = rte_cryptodev_enqueue_burst(cdev_id,
                                                          qp_id,
                                                          op_buffer,
                                                          BATCH_SIZE);

                        stats->crypto_enq_count += ret;

                        while (ret < len) {
                                struct rte_crypto_op *op;
                                op = op_buffer[ret++];
                                stats->crypto_enq_fail++;
                                rte_pktmbuf_free(op->sym->m_src);
                                rte_crypto_op_free(op);
                        }

                        len = 0;
                }

                if (qp_info)
                        qp_info->len = len;
                n += ret;
        }

        return n;
}

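/* Flush the partially filled buffer of every enabled queue pair, so that
 * buffered ops do not sit indefinitely below the BATCH_SIZE threshold.
 */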
static unsigned int
eca_crypto_enq_flush(struct event_crypto_adapter *adapter)
{
        struct rte_event_crypto_adapter_stats *stats = &adapter->crypto_stats;
        struct crypto_device_info *curr_dev;
        struct crypto_queue_pair_info *curr_queue;
        struct rte_crypto_op **op_buffer;
        struct rte_cryptodev *dev;
        uint8_t cdev_id;
        uint16_t qp;
        uint16_t ret;
        uint16_t num_cdev = rte_cryptodev_count();
        unsigned int nb_enqueued = 0;

        for (cdev_id = 0; cdev_id < num_cdev; cdev_id++) {
                curr_dev = &adapter->cdevs[cdev_id];
                dev = curr_dev->dev;
                if (dev == NULL)
                        continue;
                for (qp = 0; qp < dev->data->nb_queue_pairs; qp++) {

                        curr_queue = &curr_dev->qpairs[qp];
                        if (!curr_queue->qp_enabled)
                                continue;

                        op_buffer = curr_queue->op_buffer;
                        ret = rte_cryptodev_enqueue_burst(cdev_id,
                                                          qp,
                                                          op_buffer,
                                                          curr_queue->len);
                        stats->crypto_enq_count += ret;
                        nb_enqueued += ret;

                        while (ret < curr_queue->len) {
                                struct rte_crypto_op *op;
                                op = op_buffer[ret++];
                                stats->crypto_enq_fail++;
                                rte_pktmbuf_free(op->sym->m_src);
                                rte_crypto_op_free(op);
                        }
                        curr_queue->len = 0;
                }
        }

        /* Return the total enqueued across all queue pairs, not just the
         * count of the last burst.
         */
        return nb_enqueued;
}

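/* Enqueue-side half of the service loop: dequeue events from the adapter's
 * event port and pass them to the crypto devices. In OP_NEW mode the
 * application enqueues to the cryptodev itself, so there is nothing to do
 * here. Buffers are flushed every CRYPTO_ENQ_FLUSH_THRESHOLD iterations.
 */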
static int
eca_crypto_adapter_enq_run(struct event_crypto_adapter *adapter,
                           unsigned int max_enq)
{
        struct rte_event_crypto_adapter_stats *stats = &adapter->crypto_stats;
        struct rte_event ev[BATCH_SIZE];
        unsigned int nb_enq, nb_enqueued;
        uint16_t n;
        uint8_t event_dev_id = adapter->eventdev_id;
        uint8_t event_port_id = adapter->event_port_id;

        nb_enqueued = 0;
        if (adapter->mode == RTE_EVENT_CRYPTO_ADAPTER_OP_NEW)
                return 0;

        for (nb_enq = 0; nb_enq < max_enq; nb_enq += n) {
                stats->event_poll_count++;
                n = rte_event_dequeue_burst(event_dev_id,
                                            event_port_id, ev, BATCH_SIZE, 0);

                if (!n)
                        break;

                nb_enqueued += eca_enq_to_cryptodev(adapter, ev, n);
        }

        if ((++adapter->transmit_loop_count &
                (CRYPTO_ENQ_FLUSH_THRESHOLD - 1)) == 0) {
                nb_enqueued += eca_crypto_enq_flush(adapter);
        }

        return nb_enqueued;
}

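/* Convert completed crypto ops back into events, using the response metadata
 * supplied by the application, and enqueue them to the event device. Events
 * that cannot be enqueued after CRYPTO_ADAPTER_MAX_EV_ENQ_RETRIES attempts
 * are dropped and their mbufs and ops freed.
 */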
static inline void
eca_ops_enqueue_burst(struct event_crypto_adapter *adapter,
                      struct rte_crypto_op **ops, uint16_t num)
{
        struct rte_event_crypto_adapter_stats *stats = &adapter->crypto_stats;
        union rte_event_crypto_metadata *m_data;
        uint8_t event_dev_id = adapter->eventdev_id;
        uint8_t event_port_id = adapter->event_port_id;
        struct rte_event events[BATCH_SIZE];
        uint16_t nb_enqueued, nb_ev;
        uint8_t retry;
        uint8_t i;

        nb_ev = 0;
        retry = 0;
        nb_enqueued = 0;
        num = RTE_MIN(num, BATCH_SIZE);
        for (i = 0; i < num; i++) {
                struct rte_event *ev;

                /* Reset for each op so stale metadata from a previous
                 * iteration is never reused.
                 */
                m_data = NULL;
                if (ops[i]->sess_type == RTE_CRYPTO_OP_WITH_SESSION) {
                        m_data = rte_cryptodev_sym_session_get_user_data(
                                        ops[i]->sym->session);
                } else if (ops[i]->sess_type == RTE_CRYPTO_OP_SESSIONLESS &&
                                ops[i]->private_data_offset) {
                        m_data = (union rte_event_crypto_metadata *)
                                 ((uint8_t *)ops[i] +
                                  ops[i]->private_data_offset);
                }

                if (unlikely(m_data == NULL)) {
                        rte_pktmbuf_free(ops[i]->sym->m_src);
                        rte_crypto_op_free(ops[i]);
                        continue;
                }

                /* Claim an event slot only once the op is known to carry
                 * usable response metadata.
                 */
                ev = &events[nb_ev++];
                rte_memcpy(ev, &m_data->response_info, sizeof(*ev));
                ev->event_ptr = ops[i];
                ev->event_type = RTE_EVENT_TYPE_CRYPTODEV;
                if (adapter->implicit_release_disabled)
                        ev->op = RTE_EVENT_OP_FORWARD;
                else
                        ev->op = RTE_EVENT_OP_NEW;
        }

        do {
                nb_enqueued += rte_event_enqueue_burst(event_dev_id,
                                                  event_port_id,
                                                  &events[nb_enqueued],
                                                  nb_ev - nb_enqueued);
        } while (retry++ < CRYPTO_ADAPTER_MAX_EV_ENQ_RETRIES &&
                 nb_enqueued < nb_ev);

        /* Free mbufs and rte_crypto_ops for failed events */
        for (i = nb_enqueued; i < nb_ev; i++) {
                struct rte_crypto_op *op = events[i].event_ptr;
                rte_pktmbuf_free(op->sym->m_src);
                rte_crypto_op_free(op);
        }

        stats->event_enq_fail_count += nb_ev - nb_enqueued;
        stats->event_enq_count += nb_enqueued;
        stats->event_enq_retry_count += retry - 1;
}

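/* Dequeue-side half of the service loop: poll the crypto device queue pairs
 * in round-robin order and forward completed ops as events. The current
 * cdev/queue pair position is saved when max_deq is reached so the next
 * invocation resumes where this one left off.
 */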
static inline unsigned int
eca_crypto_adapter_deq_run(struct event_crypto_adapter *adapter,
                           unsigned int max_deq)
{
        struct rte_event_crypto_adapter_stats *stats = &adapter->crypto_stats;
        struct crypto_device_info *curr_dev;
        struct crypto_queue_pair_info *curr_queue;
        struct rte_crypto_op *ops[BATCH_SIZE];
        uint16_t n, nb_deq;
        struct rte_cryptodev *dev;
        uint8_t cdev_id;
        uint16_t qp, dev_qps;
        bool done;
        uint16_t num_cdev = rte_cryptodev_count();

        nb_deq = 0;
        do {
                uint16_t queues = 0;
                done = true;

                for (cdev_id = adapter->next_cdev_id;
                        cdev_id < num_cdev; cdev_id++) {
                        curr_dev = &adapter->cdevs[cdev_id];
                        dev = curr_dev->dev;
                        if (dev == NULL)
                                continue;
                        dev_qps = dev->data->nb_queue_pairs;

                        for (qp = curr_dev->next_queue_pair_id;
                                queues < dev_qps; qp = (qp + 1) % dev_qps,
                                queues++) {

                                curr_queue = &curr_dev->qpairs[qp];
                                if (!curr_queue->qp_enabled)
                                        continue;

                                n = rte_cryptodev_dequeue_burst(cdev_id, qp,
                                        ops, BATCH_SIZE);
                                if (!n)
                                        continue;

                                done = false;
                                stats->crypto_deq_count += n;
                                eca_ops_enqueue_burst(adapter, ops, n);
                                nb_deq += n;

                                if (nb_deq > max_deq) {
                                        if ((qp + 1) == dev_qps) {
                                                adapter->next_cdev_id =
                                                        (cdev_id + 1)
                                                        % num_cdev;
                                        }
                                        curr_dev->next_queue_pair_id = (qp + 1)
                                                % dev->data->nb_queue_pairs;

                                        return nb_deq;
                                }
                        }
                }
        } while (done == false);

        return nb_deq;
}

static void
eca_crypto_adapter_run(struct event_crypto_adapter *adapter,
                       unsigned int max_ops)
{
        unsigned int ops_left = max_ops;

        while (ops_left > 0) {
                unsigned int e_cnt, d_cnt;

                d_cnt = eca_crypto_adapter_deq_run(adapter, ops_left);
                ops_left -= RTE_MIN(ops_left, d_cnt);

                e_cnt = eca_crypto_adapter_enq_run(adapter, ops_left);
                ops_left -= RTE_MIN(ops_left, e_cnt);

                if (e_cnt == 0 && d_cnt == 0)
                        break;
        }

        if (ops_left == max_ops)
                rte_event_maintain(adapter->eventdev_id,
                                   adapter->event_port_id, 0);
}

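/* Service function run by the service core. The trylock keeps an iteration
 * from racing with control-path updates (queue pair add/del) that take the
 * same lock; if the lock is contended the iteration is simply skipped.
 */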
static int
eca_service_func(void *args)
{
        struct event_crypto_adapter *adapter = args;

        if (rte_spinlock_trylock(&adapter->lock) == 0)
                return 0;
        eca_crypto_adapter_run(adapter, adapter->max_nb);
        rte_spinlock_unlock(&adapter->lock);

        return 0;
}

static int
eca_init_service(struct event_crypto_adapter *adapter, uint8_t id)
{
        struct rte_event_crypto_adapter_conf adapter_conf;
        struct rte_service_spec service;
        int ret;

        if (adapter->service_inited)
                return 0;

        memset(&service, 0, sizeof(service));
        snprintf(service.name, CRYPTO_ADAPTER_NAME_LEN,
                "rte_event_crypto_adapter_%d", id);
        service.socket_id = adapter->socket_id;
        service.callback = eca_service_func;
        service.callback_userdata = adapter;
        /* Service function handles locking for queue add/del updates */
        service.capabilities = RTE_SERVICE_CAP_MT_SAFE;
        ret = rte_service_component_register(&service, &adapter->service_id);
        if (ret) {
                RTE_EDEV_LOG_ERR("failed to register service %s err = %" PRId32,
                        service.name, ret);
                return ret;
        }

        ret = adapter->conf_cb(id, adapter->eventdev_id,
                &adapter_conf, adapter->conf_arg);
        if (ret) {
                RTE_EDEV_LOG_ERR("configuration callback failed err = %" PRId32,
                        ret);
                return ret;
        }

        adapter->max_nb = adapter_conf.max_nb;
        adapter->event_port_id = adapter_conf.event_port_id;
        adapter->service_inited = 1;

        return ret;
}

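/* Enable or disable a queue pair (or all of them, when queue_pair_id is -1)
 * and keep the per device and per adapter queue pair counts consistent; the
 * counts only change when the enabled state actually flips.
 */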
static void
eca_update_qp_info(struct event_crypto_adapter *adapter,
                   struct crypto_device_info *dev_info, int32_t queue_pair_id,
                   uint8_t add)
{
        struct crypto_queue_pair_info *qp_info;
        int enabled;
        uint16_t i;

        if (dev_info->qpairs == NULL)
                return;

        if (queue_pair_id == -1) {
                for (i = 0; i < dev_info->dev->data->nb_queue_pairs; i++)
                        eca_update_qp_info(adapter, dev_info, i, add);
        } else {
                qp_info = &dev_info->qpairs[queue_pair_id];
                enabled = qp_info->qp_enabled;
                if (add) {
                        adapter->nb_qps += !enabled;
                        dev_info->num_qpairs += !enabled;
                } else {
                        adapter->nb_qps -= enabled;
                        dev_info->num_qpairs -= enabled;
                }
                qp_info->qp_enabled = !!add;
        }
}

static int
eca_add_queue_pair(struct event_crypto_adapter *adapter, uint8_t cdev_id,
                   int queue_pair_id)
{
        struct crypto_device_info *dev_info = &adapter->cdevs[cdev_id];
        struct crypto_queue_pair_info *qpair;
        uint16_t nb_qps = dev_info->dev->data->nb_queue_pairs;
        uint32_t i;

        if (dev_info->qpairs == NULL) {
                dev_info->qpairs =
                    rte_zmalloc_socket(adapter->mem_name,
                                        nb_qps *
                                        sizeof(struct crypto_queue_pair_info),
                                        0, adapter->socket_id);
                if (dev_info->qpairs == NULL)
                        return -ENOMEM;

                /* Ops are buffered per queue pair in eca_enq_to_cryptodev(),
                 * so every queue pair needs its own batching buffer, not
                 * just the first one.
                 */
                for (i = 0; i < nb_qps; i++) {
                        qpair = &dev_info->qpairs[i];
                        qpair->op_buffer = rte_zmalloc_socket(
                                        adapter->mem_name,
                                        BATCH_SIZE *
                                        sizeof(struct rte_crypto_op *),
                                        0, adapter->socket_id);
                        if (qpair->op_buffer == NULL) {
                                while (i--)
                                        rte_free(
                                            dev_info->qpairs[i].op_buffer);
                                rte_free(dev_info->qpairs);
                                dev_info->qpairs = NULL;
                                return -ENOMEM;
                        }
                }
        }

        if (queue_pair_id == -1) {
                for (i = 0; i < nb_qps; i++)
                        eca_update_qp_info(adapter, dev_info, i, 1);
        } else
                eca_update_qp_info(adapter, dev_info,
                                        (uint16_t)queue_pair_id, 1);

        return 0;
}

int
rte_event_crypto_adapter_queue_pair_add(uint8_t id,
                        uint8_t cdev_id,
                        int32_t queue_pair_id,
                        const struct rte_event *event)
{
        struct event_crypto_adapter *adapter;
        struct rte_eventdev *dev;
        struct crypto_device_info *dev_info;
        uint32_t cap;
        int ret;

        EVENT_CRYPTO_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);

        if (!rte_cryptodev_is_valid_dev(cdev_id)) {
                RTE_EDEV_LOG_ERR("Invalid dev_id=%" PRIu8, cdev_id);
                return -EINVAL;
        }

        adapter = eca_id_to_adapter(id);
        if (adapter == NULL)
                return -EINVAL;

        dev = &rte_eventdevs[adapter->eventdev_id];
        ret = rte_event_crypto_adapter_caps_get(adapter->eventdev_id,
                                                cdev_id,
                                                &cap);
        if (ret) {
                RTE_EDEV_LOG_ERR("Failed to get adapter caps dev %" PRIu8
                        " cdev %" PRIu8, id, cdev_id);
                return ret;
        }

        if ((cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_QP_EV_BIND) &&
            (event == NULL)) {
                RTE_EDEV_LOG_ERR("Conf value cannot be NULL for dev_id=%u",
                                  cdev_id);
                return -EINVAL;
        }

        dev_info = &adapter->cdevs[cdev_id];

        if (queue_pair_id != -1 &&
            (uint16_t)queue_pair_id >= dev_info->dev->data->nb_queue_pairs) {
                RTE_EDEV_LOG_ERR("Invalid queue_pair_id %" PRIu16,
                                 (uint16_t)queue_pair_id);
                return -EINVAL;
        }

        /* In case HW cap is RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_FWD,
         * no service core is needed because the HW itself supports forwarding
         * events.
         */
        if ((cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_FWD) ||
            (cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_QP_EV_BIND &&
             adapter->mode == RTE_EVENT_CRYPTO_ADAPTER_OP_NEW) ||
            (cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_NEW &&
             adapter->mode == RTE_EVENT_CRYPTO_ADAPTER_OP_NEW)) {
                RTE_FUNC_PTR_OR_ERR_RET(
                        *dev->dev_ops->crypto_adapter_queue_pair_add,
                        -ENOTSUP);
                if (dev_info->qpairs == NULL) {
                        dev_info->qpairs =
                            rte_zmalloc_socket(adapter->mem_name,
                                        dev_info->dev->data->nb_queue_pairs *
                                        sizeof(struct crypto_queue_pair_info),
                                        0, adapter->socket_id);
                        if (dev_info->qpairs == NULL)
                                return -ENOMEM;
                }

                ret = (*dev->dev_ops->crypto_adapter_queue_pair_add)(dev,
                                dev_info->dev,
                                queue_pair_id,
                                event);
                if (ret)
                        return ret;

                eca_update_qp_info(adapter, &adapter->cdevs[cdev_id],
                                   queue_pair_id, 1);
        }

        /* In case HW cap is RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_NEW,
         * or this is a SW adapter, initialize the service so the application
         * can choose whichever way it wants to use the adapter.
         * Case 1: RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_NEW
         *         The application may want to use one of the two modes below:
         *          a. OP_FORWARD mode -> HW dequeue + SW enqueue
         *          b. OP_NEW mode -> HW dequeue
         * Case 2: No HW caps, use the SW adapter:
         *          a. OP_FORWARD mode -> SW enqueue & dequeue
         *          b. OP_NEW mode -> SW dequeue
         */
        if ((cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_NEW &&
             !(cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_FWD) &&
             adapter->mode == RTE_EVENT_CRYPTO_ADAPTER_OP_FORWARD) ||
             (!(cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_NEW) &&
              !(cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_FWD) &&
              !(cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_QP_EV_BIND) &&
               (cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_SESSION_PRIVATE_DATA))) {
                rte_spinlock_lock(&adapter->lock);
                ret = eca_init_service(adapter, id);
                if (ret == 0)
                        ret = eca_add_queue_pair(adapter, cdev_id,
                                                 queue_pair_id);
                rte_spinlock_unlock(&adapter->lock);

                if (ret)
                        return ret;

                rte_service_component_runstate_set(adapter->service_id, 1);
        }

        rte_eventdev_trace_crypto_adapter_queue_pair_add(id, cdev_id, event,
                queue_pair_id);
        return 0;
}

int
rte_event_crypto_adapter_queue_pair_del(uint8_t id, uint8_t cdev_id,
                                        int32_t queue_pair_id)
{
        struct event_crypto_adapter *adapter;
        struct crypto_device_info *dev_info;
        struct rte_eventdev *dev;
        int ret;
        uint32_t cap;
        uint16_t i;

        EVENT_CRYPTO_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);

        if (!rte_cryptodev_is_valid_dev(cdev_id)) {
                RTE_EDEV_LOG_ERR("Invalid dev_id=%" PRIu8, cdev_id);
                return -EINVAL;
        }

        adapter = eca_id_to_adapter(id);
        if (adapter == NULL)
                return -EINVAL;

        dev = &rte_eventdevs[adapter->eventdev_id];
        ret = rte_event_crypto_adapter_caps_get(adapter->eventdev_id,
                                                cdev_id,
                                                &cap);
        if (ret)
                return ret;

        dev_info = &adapter->cdevs[cdev_id];

        if (queue_pair_id != -1 &&
            (uint16_t)queue_pair_id >= dev_info->dev->data->nb_queue_pairs) {
                RTE_EDEV_LOG_ERR("Invalid queue_pair_id %" PRIu16,
                                 (uint16_t)queue_pair_id);
                return -EINVAL;
        }

        if ((cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_FWD) ||
            (cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_NEW &&
             adapter->mode == RTE_EVENT_CRYPTO_ADAPTER_OP_NEW)) {
                RTE_FUNC_PTR_OR_ERR_RET(
                        *dev->dev_ops->crypto_adapter_queue_pair_del,
                        -ENOTSUP);
                ret = (*dev->dev_ops->crypto_adapter_queue_pair_del)(dev,
                                                dev_info->dev,
                                                queue_pair_id);
                if (ret == 0) {
                        eca_update_qp_info(adapter,
                                        &adapter->cdevs[cdev_id],
                                        queue_pair_id,
                                        0);
                        if (dev_info->num_qpairs == 0) {
                                rte_free(dev_info->qpairs);
                                dev_info->qpairs = NULL;
                        }
                }
        } else {
                if (adapter->nb_qps == 0)
                        return 0;

                rte_spinlock_lock(&adapter->lock);
                if (queue_pair_id == -1) {
                        for (i = 0; i < dev_info->dev->data->nb_queue_pairs;
                                i++)
                                eca_update_qp_info(adapter, dev_info, i, 0);
                } else {
                        eca_update_qp_info(adapter, dev_info,
                                                (uint16_t)queue_pair_id, 0);
                }

                if (dev_info->num_qpairs == 0) {
                        /* Release the per queue pair batching buffers
                         * allocated in eca_add_queue_pair().
                         */
                        for (i = 0; i < dev_info->dev->data->nb_queue_pairs;
                                i++)
                                rte_free(dev_info->qpairs[i].op_buffer);
                        rte_free(dev_info->qpairs);
                        dev_info->qpairs = NULL;
                }

                rte_spinlock_unlock(&adapter->lock);
                rte_service_component_runstate_set(adapter->service_id,
                                adapter->nb_qps);
        }

        rte_eventdev_trace_crypto_adapter_queue_pair_del(id, cdev_id,
                queue_pair_id, ret);
        return ret;
}

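/* Common start/stop handler. Devices with an internal event port are started
 * or stopped through the eventdev PMD; for all others the adapter's EAL
 * service is toggled instead.
 */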
static int
eca_adapter_ctrl(uint8_t id, int start)
{
        struct event_crypto_adapter *adapter;
        struct crypto_device_info *dev_info;
        struct rte_eventdev *dev;
        uint32_t i;
        int use_service;
        int stop = !start;

        use_service = 0;
        EVENT_CRYPTO_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);
        adapter = eca_id_to_adapter(id);
        if (adapter == NULL)
                return -EINVAL;

        dev = &rte_eventdevs[adapter->eventdev_id];

        for (i = 0; i < rte_cryptodev_count(); i++) {
                dev_info = &adapter->cdevs[i];
                /* on start, skip devices with no queue pairs added */
                if (start && !dev_info->num_qpairs)
                        continue;
                /* on stop, skip devices that were never started */
                if (stop && !dev_info->dev_started)
                        continue;
                use_service |= !dev_info->internal_event_port;
                dev_info->dev_started = start;
                if (dev_info->internal_event_port == 0)
                        continue;
                if (start)
                        (*dev->dev_ops->crypto_adapter_start)(dev,
                                                        dev_info->dev);
                else
                        (*dev->dev_ops->crypto_adapter_stop)(dev,
                                                        dev_info->dev);
        }

        if (use_service)
                rte_service_runstate_set(adapter->service_id, start);

        return 0;
}

int
rte_event_crypto_adapter_start(uint8_t id)
{
        struct event_crypto_adapter *adapter;

        EVENT_CRYPTO_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);
        adapter = eca_id_to_adapter(id);
        if (adapter == NULL)
                return -EINVAL;

        rte_eventdev_trace_crypto_adapter_start(id, adapter);
        return eca_adapter_ctrl(id, 1);
}

int
rte_event_crypto_adapter_stop(uint8_t id)
{
        rte_eventdev_trace_crypto_adapter_stop(id);
        return eca_adapter_ctrl(id, 0);
}

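/* For reference, a typical bring-up sequence for this API is sketched below
 * (illustrative only; the adapter ID, device IDs and port configuration
 * values are assumptions, not defined in this file):
 *
 *	struct rte_event_port_conf conf = {
 *		.new_event_threshold = 4096,
 *		.dequeue_depth = 16,
 *		.enqueue_depth = 16,
 *	};
 *	ret = rte_event_crypto_adapter_create(0, evdev_id, &conf,
 *			RTE_EVENT_CRYPTO_ADAPTER_OP_NEW);
 *	if (ret == 0)
 *		ret = rte_event_crypto_adapter_queue_pair_add(0, cdev_id,
 *				-1, NULL);
 *	if (ret == 0)
 *		ret = rte_event_crypto_adapter_start(0);
 */
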
int
rte_event_crypto_adapter_stats_get(uint8_t id,
                                struct rte_event_crypto_adapter_stats *stats)
{
        struct event_crypto_adapter *adapter;
        struct rte_event_crypto_adapter_stats dev_stats_sum = { 0 };
        struct rte_event_crypto_adapter_stats dev_stats;
        struct rte_eventdev *dev;
        struct crypto_device_info *dev_info;
        uint32_t i;
        int ret;

        EVENT_CRYPTO_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);

        adapter = eca_id_to_adapter(id);
        if (adapter == NULL || stats == NULL)
                return -EINVAL;

        dev = &rte_eventdevs[adapter->eventdev_id];
        memset(stats, 0, sizeof(*stats));
        for (i = 0; i < rte_cryptodev_count(); i++) {
                dev_info = &adapter->cdevs[i];
                if (dev_info->internal_event_port == 0 ||
                        dev->dev_ops->crypto_adapter_stats_get == NULL)
                        continue;
                ret = (*dev->dev_ops->crypto_adapter_stats_get)(dev,
                                                dev_info->dev,
                                                &dev_stats);
                if (ret)
                        continue;

                dev_stats_sum.crypto_deq_count += dev_stats.crypto_deq_count;
                dev_stats_sum.event_enq_count +=
                        dev_stats.event_enq_count;
        }

        if (adapter->service_inited)
                *stats = adapter->crypto_stats;

        stats->crypto_deq_count += dev_stats_sum.crypto_deq_count;
        stats->event_enq_count += dev_stats_sum.event_enq_count;

        return 0;
}

int
rte_event_crypto_adapter_stats_reset(uint8_t id)
{
        struct event_crypto_adapter *adapter;
        struct crypto_device_info *dev_info;
        struct rte_eventdev *dev;
        uint32_t i;

        EVENT_CRYPTO_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);

        adapter = eca_id_to_adapter(id);
        if (adapter == NULL)
                return -EINVAL;

        dev = &rte_eventdevs[adapter->eventdev_id];
        for (i = 0; i < rte_cryptodev_count(); i++) {
                dev_info = &adapter->cdevs[i];
                if (dev_info->internal_event_port == 0 ||
                        dev->dev_ops->crypto_adapter_stats_reset == NULL)
                        continue;
                (*dev->dev_ops->crypto_adapter_stats_reset)(dev,
                                                dev_info->dev);
        }

        memset(&adapter->crypto_stats, 0, sizeof(adapter->crypto_stats));
        return 0;
}

int
rte_event_crypto_adapter_service_id_get(uint8_t id, uint32_t *service_id)
{
        struct event_crypto_adapter *adapter;

        EVENT_CRYPTO_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);

        adapter = eca_id_to_adapter(id);
        if (adapter == NULL || service_id == NULL)
                return -EINVAL;

        if (adapter->service_inited)
                *service_id = adapter->service_id;

        return adapter->service_inited ? 0 : -ESRCH;
}

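/* When the adapter runs in service mode, the service returned above must be
 * mapped to a service core before it makes progress. A minimal sketch
 * (illustrative only; SERVICE_LCORE is an assumed lcore ID, not defined
 * here):
 *
 *	uint32_t service_id;
 *	if (rte_event_crypto_adapter_service_id_get(0, &service_id) == 0) {
 *		rte_service_lcore_add(SERVICE_LCORE);
 *		rte_service_map_lcore_set(service_id, SERVICE_LCORE, 1);
 *		rte_service_runstate_set(service_id, 1);
 *		rte_service_lcore_start(SERVICE_LCORE);
 *	}
 */
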
int
rte_event_crypto_adapter_event_port_get(uint8_t id, uint8_t *event_port_id)
{
        struct event_crypto_adapter *adapter;

        EVENT_CRYPTO_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);

        adapter = eca_id_to_adapter(id);
        if (adapter == NULL || event_port_id == NULL)
                return -EINVAL;

        *event_port_id = adapter->event_port_id;

        return 0;
}