1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2018 Intel Corporation.
8 #include <rte_common.h>
10 #include <rte_errno.h>
11 #include <rte_cryptodev.h>
12 #include <cryptodev_pmd.h>
14 #include <rte_malloc.h>
15 #include <rte_service_component.h>
17 #include "rte_eventdev.h"
18 #include "eventdev_pmd.h"
19 #include "eventdev_trace.h"
20 #include "rte_event_crypto_adapter.h"
/* Default cap on ops handled per service invocation when the configuration
 * callback does not supply one (see eca_default_config_cb).
 */
23 #define DEFAULT_MAX_NB 128
/* Buffer length for adapter/service name strings (includes NUL). */
24 #define CRYPTO_ADAPTER_NAME_LEN 32
/* Buffer length for the per-adapter rte_malloc allocation name. */
25 #define CRYPTO_ADAPTER_MEM_NAME_LEN 32
/* Upper bound on retries when rte_event_enqueue_burst() backpressures. */
26 #define CRYPTO_ADAPTER_MAX_EV_ENQ_RETRIES 100
/* Per-queue-pair cdev-side circular buffer: two BATCH_SIZE batches.
 * NOTE(review): BATCH_SIZE is not defined in this listing — presumably it
 * comes from eventdev_pmd.h; confirm.
 */
28 #define CRYPTO_ADAPTER_OPS_BUFFER_SZ (BATCH_SIZE + BATCH_SIZE)
/* Eventdev-side circular buffer depth (ops awaiting event enqueue). */
29 #define CRYPTO_ADAPTER_BUFFER_SZ 1024
31 /* Flush an instance's enqueue buffers every CRYPTO_ENQ_FLUSH_THRESHOLD
32 * iterations of eca_crypto_adapter_enq_run()
 */
34 #define CRYPTO_ENQ_FLUSH_THRESHOLD 1024
/* Fixed-size ring of rte_crypto_op pointers used to batch ops either
 * towards a cryptodev queue pair or back towards the eventdev.
 * head/tail are indices into op_buffer; count tracks occupancy so that
 * "full" and "empty" are unambiguous.
 *
 * NOTE(review): the member declarations were missing from the corrupted
 * listing (only their comments survived); restored to match the field
 * comments and the uses elsewhere in this file (bufp->head/tail/count/size).
 */
struct crypto_ops_circular_buffer {
	/* index of head element in circular buffer */
	uint16_t head;
	/* index of tail element in circular buffer */
	uint16_t tail;
	/* number of elements in buffer */
	uint16_t count;
	/* size of circular buffer */
	uint16_t size;
	/* Pointer to hold rte_crypto_ops for batching */
	struct rte_crypto_op **op_buffer;
} __rte_cache_aligned;
49 struct event_crypto_adapter {
50 /* Event device identifier */
52 /* Event port identifier */
53 uint8_t event_port_id;
54 /* Store event device's implicit release capability */
55 uint8_t implicit_release_disabled;
56 /* Flag to indicate backpressure at cryptodev
57 * Stop further dequeuing events from eventdev
59 bool stop_enq_to_cryptodev;
60 /* Max crypto ops processed in any service function invocation */
62 /* Lock to serialize config updates with service function */
64 /* Next crypto device to be processed */
65 uint16_t next_cdev_id;
66 /* Per crypto device structure */
67 struct crypto_device_info *cdevs;
68 /* Loop counter to flush crypto ops */
69 uint16_t transmit_loop_count;
70 /* Circular buffer for batching crypto ops to eventdev */
71 struct crypto_ops_circular_buffer ebuf;
72 /* Per instance stats structure */
73 struct rte_event_crypto_adapter_stats crypto_stats;
74 /* Configuration callback for rte_service configuration */
75 rte_event_crypto_adapter_conf_cb conf_cb;
76 /* Configuration callback argument */
78 /* Set if default_cb is being used */
80 /* Service initialization state */
81 uint8_t service_inited;
82 /* Memory allocation name */
83 char mem_name[CRYPTO_ADAPTER_MEM_NAME_LEN];
84 /* Socket identifier cached from eventdev */
86 /* Per adapter EAL service */
88 /* No. of queue pairs configured */
91 enum rte_event_crypto_adapter_mode mode;
92 } __rte_cache_aligned;
/* Per crypto device information */
struct crypto_device_info {
	/* Pointer to cryptodev */
	struct rte_cryptodev *dev;
	/* Pointer to queue pair info */
	struct crypto_queue_pair_info *qpairs;
	/* Next queue pair to be processed */
	uint16_t next_queue_pair_id;
	/* Set to indicate cryptodev->eventdev packet
	 * transfer uses a hardware mechanism
	 */
	uint8_t internal_event_port;
	/* Set to indicate processing has been started */
	uint8_t dev_started;
	/* If num_qpairs > 0, the start callback will
	 * be invoked if not already invoked
	 */
	uint16_t num_qpairs;
} __rte_cache_aligned;
114 /* Per queue pair information */
115 struct crypto_queue_pair_info {
116 /* Set to indicate queue pair is enabled */
118 /* Circular buffer for batching crypto ops to cdev */
119 struct crypto_ops_circular_buffer cbuf;
120 } __rte_cache_aligned;
/* Process-wide table of adapter instances, indexed by adapter id.
 * Backed by the "crypto_adapter_array" memzone (this pointer is set to
 * mz->addr in the initialisation fragment below).
 */
122 static struct event_crypto_adapter **event_crypto_adapter;
124 /* Macros to check for valid adapter */
125 #define EVENT_CRYPTO_ADAPTER_ID_VALID_OR_ERR_RET(id, retval) do { \
126 if (!eca_valid_id(id)) { \
127 RTE_EDEV_LOG_ERR("Invalid crypto adapter id = %d\n", id); \
133 eca_valid_id(uint8_t id)
135 return id < RTE_EVENT_CRYPTO_ADAPTER_MAX_INSTANCE;
/* Fragment of the one-time instance-array initialisation (eca_init()
 * upstream): computes a cache-aligned size for the pointer array, looks up
 * or reserves the "crypto_adapter_array" memzone, and points
 * event_crypto_adapter at it.
 * NOTE(review): the function signature, NULL checks, braces and return
 * paths are missing from this corrupted listing — restore from the
 * original source before building.
 */
141 const char *name = "crypto_adapter_array";
142 const struct rte_memzone *mz;
145 sz = sizeof(*event_crypto_adapter) *
146 RTE_EVENT_CRYPTO_ADAPTER_MAX_INSTANCE;
147 sz = RTE_ALIGN(sz, RTE_CACHE_LINE_SIZE);
149 mz = rte_memzone_lookup(name);
151 mz = rte_memzone_reserve_aligned(name, sz, rte_socket_id(), 0,
152 RTE_CACHE_LINE_SIZE);
154 RTE_EDEV_LOG_ERR("failed to reserve memzone err = %"
160 event_crypto_adapter = mz->addr;
165 eca_circular_buffer_batch_ready(struct crypto_ops_circular_buffer *bufp)
167 return bufp->count >= BATCH_SIZE;
171 eca_circular_buffer_space_for_batch(struct crypto_ops_circular_buffer *bufp)
173 return (bufp->size - bufp->count) >= BATCH_SIZE;
177 eca_circular_buffer_free(struct crypto_ops_circular_buffer *bufp)
179 rte_free(bufp->op_buffer);
183 eca_circular_buffer_init(const char *name,
184 struct crypto_ops_circular_buffer *bufp,
187 bufp->op_buffer = rte_zmalloc(name,
188 sizeof(struct rte_crypto_op *) * sz,
190 if (bufp->op_buffer == NULL)
198 eca_circular_buffer_add(struct crypto_ops_circular_buffer *bufp,
199 struct rte_crypto_op *op)
201 uint16_t *tailp = &bufp->tail;
203 bufp->op_buffer[*tailp] = op;
204 /* circular buffer, go round */
205 *tailp = (*tailp + 1) % bufp->size;
212 eca_circular_buffer_flush_to_cdev(struct crypto_ops_circular_buffer *bufp,
213 uint8_t cdev_id, uint16_t qp_id,
214 uint16_t *nb_ops_flushed)
217 uint16_t *headp = &bufp->head;
218 uint16_t *tailp = &bufp->tail;
219 struct rte_crypto_op **ops = bufp->op_buffer;
223 else if (*tailp < *headp)
224 n = bufp->size - *headp;
227 return 0; /* buffer empty */
230 *nb_ops_flushed = rte_cryptodev_enqueue_burst(cdev_id, qp_id,
232 bufp->count -= *nb_ops_flushed;
237 *headp = (*headp + *nb_ops_flushed) % bufp->size;
239 return *nb_ops_flushed == n ? 0 : -1;
242 static inline struct event_crypto_adapter *
243 eca_id_to_adapter(uint8_t id)
245 return event_crypto_adapter ?
246 event_crypto_adapter[id] : NULL;
/* Default configuration callback used by rte_event_crypto_adapter_create():
 * stops the eventdev if it was running, reconfigures it with one extra
 * event port, sets that port up with the caller-supplied port conf (arg),
 * reports the new port id and DEFAULT_MAX_NB via *conf, then restarts the
 * device.  NOTE(review): declarations (port_id, started, ret), braces and
 * the early-return/error paths are missing from this corrupted listing.
 * NOTE(review): RTE_EDEV_LOG_ERR appends a newline; the trailing "\n" in
 * the messages below is redundant.
 */
250 eca_default_config_cb(uint8_t id, uint8_t dev_id,
251 struct rte_event_crypto_adapter_conf *conf, void *arg)
253 struct rte_event_dev_config dev_conf;
254 struct rte_eventdev *dev;
258 struct rte_event_port_conf *port_conf = arg;
259 struct event_crypto_adapter *adapter = eca_id_to_adapter(id);
264 dev = &rte_eventdevs[adapter->eventdev_id];
265 dev_conf = dev->data->dev_conf;
267 started = dev->data->dev_started;
269 rte_event_dev_stop(dev_id);
/* grow the port count by one and claim the new (last) port id */
270 port_id = dev_conf.nb_event_ports;
271 dev_conf.nb_event_ports += 1;
272 ret = rte_event_dev_configure(dev_id, &dev_conf);
274 RTE_EDEV_LOG_ERR("failed to configure event dev %u\n", dev_id);
276 if (rte_event_dev_start(dev_id))
282 ret = rte_event_port_setup(dev_id, port_id, port_conf);
284 RTE_EDEV_LOG_ERR("failed to setup event port %u\n", port_id);
288 conf->event_port_id = port_id;
289 conf->max_nb = DEFAULT_MAX_NB;
291 ret = rte_event_dev_start(dev_id);
/* mark conf_arg as adapter-owned so _free() releases it */
293 adapter->default_cb_arg = 1;
/* Create an adapter instance with a caller-supplied configuration callback.
 * Allocates the instance on the eventdev's socket, initialises the
 * eventdev-side circular buffer, caches device capabilities, and records a
 * per-cryptodev info table sized by rte_cryptodev_count().
 * NOTE(review): braces, error returns and several declarations
 * (socket_id, i, ret, conf_arg parameter) are missing from this corrupted
 * listing.
 * NOTE(review): mem_name is declared with CRYPTO_ADAPTER_NAME_LEN but the
 * snprintf below bounds it with CRYPTO_ADAPTER_MEM_NAME_LEN — both are 32
 * today, but the mismatch is fragile; prefer sizeof(mem_name).
 */
298 rte_event_crypto_adapter_create_ext(uint8_t id, uint8_t dev_id,
299 rte_event_crypto_adapter_conf_cb conf_cb,
300 enum rte_event_crypto_adapter_mode mode,
303 struct event_crypto_adapter *adapter;
304 char mem_name[CRYPTO_ADAPTER_NAME_LEN];
305 struct rte_event_dev_info dev_info;
310 EVENT_CRYPTO_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);
311 RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
/* lazily create the shared instance array on first use */
315 if (event_crypto_adapter == NULL) {
321 adapter = eca_id_to_adapter(id);
322 if (adapter != NULL) {
323 RTE_EDEV_LOG_ERR("Crypto adapter id %u already exists!", id);
327 socket_id = rte_event_dev_socket_id(dev_id);
328 snprintf(mem_name, CRYPTO_ADAPTER_MEM_NAME_LEN,
329 "rte_event_crypto_adapter_%d", id);
331 adapter = rte_zmalloc_socket(mem_name, sizeof(*adapter),
332 RTE_CACHE_LINE_SIZE, socket_id);
333 if (adapter == NULL) {
334 RTE_EDEV_LOG_ERR("Failed to get mem for event crypto adapter!");
338 if (eca_circular_buffer_init("eca_edev_circular_buffer",
340 CRYPTO_ADAPTER_BUFFER_SZ)) {
341 RTE_EDEV_LOG_ERR("Failed to get memory for eventdev buffer");
346 ret = rte_event_dev_info_get(dev_id, &dev_info);
348 RTE_EDEV_LOG_ERR("Failed to get info for eventdev %d: %s!",
349 dev_id, dev_info.driver_name);
350 eca_circular_buffer_free(&adapter->ebuf);
/* cache whether the eventdev disables implicit release */
355 adapter->implicit_release_disabled = (dev_info.event_dev_cap &
356 RTE_EVENT_DEV_CAP_IMPLICIT_RELEASE_DISABLE);
357 adapter->eventdev_id = dev_id;
358 adapter->socket_id = socket_id;
359 adapter->conf_cb = conf_cb;
360 adapter->conf_arg = conf_arg;
361 adapter->mode = mode;
362 strcpy(adapter->mem_name, mem_name);
363 adapter->cdevs = rte_zmalloc_socket(adapter->mem_name,
364 rte_cryptodev_count() *
365 sizeof(struct crypto_device_info), 0,
367 if (adapter->cdevs == NULL) {
368 RTE_EDEV_LOG_ERR("Failed to get mem for crypto devices\n");
369 eca_circular_buffer_free(&adapter->ebuf);
374 rte_spinlock_init(&adapter->lock);
375 for (i = 0; i < rte_cryptodev_count(); i++)
376 adapter->cdevs[i].dev = rte_cryptodev_pmd_get_dev(i);
378 event_crypto_adapter[id] = adapter;
380 rte_eventdev_trace_crypto_adapter_create(id, dev_id, adapter, conf_arg,
/* Convenience wrapper around _create_ext(): copies the caller's port conf
 * to the heap (freed by _free() via default_cb_arg) and installs
 * eca_default_config_cb.  NOTE(review): NULL/ENOMEM returns, the conf-arg
 * pass-through and the failure cleanup (rte_free(pc)) are missing from
 * this corrupted listing.
 */
387 rte_event_crypto_adapter_create(uint8_t id, uint8_t dev_id,
388 struct rte_event_port_conf *port_config,
389 enum rte_event_crypto_adapter_mode mode)
391 struct rte_event_port_conf *pc;
394 if (port_config == NULL)
396 EVENT_CRYPTO_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);
398 pc = rte_malloc(NULL, sizeof(*pc), 0);
402 ret = rte_event_crypto_adapter_create_ext(id, dev_id,
403 eca_default_config_cb,
/* Destroy an adapter instance.  Refuses (-EBUSY upstream) while queue
 * pairs are still added; releases the default conf_arg only when this
 * adapter owns it (default_cb_arg set by eca_default_config_cb).
 * NOTE(review): braces, early returns and the final rte_free(adapter) are
 * missing from this corrupted listing.
 * NOTE(review): the log format "%" PRIu16 "Queue pairs not deleted" lacks
 * a space after the count — cosmetic message bug, left as-is here.
 */
413 rte_event_crypto_adapter_free(uint8_t id)
415 struct event_crypto_adapter *adapter;
417 EVENT_CRYPTO_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);
419 adapter = eca_id_to_adapter(id);
423 if (adapter->nb_qps) {
424 RTE_EDEV_LOG_ERR("%" PRIu16 "Queue pairs not deleted",
429 rte_eventdev_trace_crypto_adapter_free(id, adapter);
430 if (adapter->default_cb_arg)
431 rte_free(adapter->conf_arg);
432 rte_free(adapter->cdevs);
434 event_crypto_adapter[id] = NULL;
439 static inline unsigned int
440 eca_enq_to_cryptodev(struct event_crypto_adapter *adapter, struct rte_event *ev,
443 struct rte_event_crypto_adapter_stats *stats = &adapter->crypto_stats;
444 union rte_event_crypto_metadata *m_data = NULL;
445 struct crypto_queue_pair_info *qp_info = NULL;
446 struct rte_crypto_op *crypto_op;
448 uint16_t qp_id, nb_enqueued = 0;
454 stats->event_deq_count += cnt;
456 for (i = 0; i < cnt; i++) {
457 crypto_op = ev[i].event_ptr;
458 if (crypto_op == NULL)
460 if (crypto_op->sess_type == RTE_CRYPTO_OP_WITH_SESSION) {
461 m_data = rte_cryptodev_sym_session_get_user_data(
462 crypto_op->sym->session);
463 if (m_data == NULL) {
464 rte_pktmbuf_free(crypto_op->sym->m_src);
465 rte_crypto_op_free(crypto_op);
469 cdev_id = m_data->request_info.cdev_id;
470 qp_id = m_data->request_info.queue_pair_id;
471 qp_info = &adapter->cdevs[cdev_id].qpairs[qp_id];
472 if (!qp_info->qp_enabled) {
473 rte_pktmbuf_free(crypto_op->sym->m_src);
474 rte_crypto_op_free(crypto_op);
477 eca_circular_buffer_add(&qp_info->cbuf, crypto_op);
478 } else if (crypto_op->sess_type == RTE_CRYPTO_OP_SESSIONLESS &&
479 crypto_op->private_data_offset) {
480 m_data = (union rte_event_crypto_metadata *)
481 ((uint8_t *)crypto_op +
482 crypto_op->private_data_offset);
483 cdev_id = m_data->request_info.cdev_id;
484 qp_id = m_data->request_info.queue_pair_id;
485 qp_info = &adapter->cdevs[cdev_id].qpairs[qp_id];
486 if (!qp_info->qp_enabled) {
487 rte_pktmbuf_free(crypto_op->sym->m_src);
488 rte_crypto_op_free(crypto_op);
491 eca_circular_buffer_add(&qp_info->cbuf, crypto_op);
493 rte_pktmbuf_free(crypto_op->sym->m_src);
494 rte_crypto_op_free(crypto_op);
498 if (eca_circular_buffer_batch_ready(&qp_info->cbuf)) {
499 ret = eca_circular_buffer_flush_to_cdev(&qp_info->cbuf,
504 * If some crypto ops failed to flush to cdev and
505 * space for another batch is not available, stop
506 * dequeue from eventdev momentarily
508 if (unlikely(ret < 0 &&
509 !eca_circular_buffer_space_for_batch(
511 adapter->stop_enq_to_cryptodev = true;
514 stats->crypto_enq_count += nb_enqueued;
522 eca_crypto_cdev_flush(struct event_crypto_adapter *adapter,
523 uint8_t cdev_id, uint16_t *nb_ops_flushed)
525 struct crypto_device_info *curr_dev;
526 struct crypto_queue_pair_info *curr_queue;
527 struct rte_cryptodev *dev;
528 uint16_t nb = 0, nb_enqueued = 0;
531 curr_dev = &adapter->cdevs[cdev_id];
532 dev = rte_cryptodev_pmd_get_dev(cdev_id);
534 for (qp = 0; qp < dev->data->nb_queue_pairs; qp++) {
536 curr_queue = &curr_dev->qpairs[qp];
537 if (unlikely(curr_queue == NULL || !curr_queue->qp_enabled))
540 eca_circular_buffer_flush_to_cdev(&curr_queue->cbuf,
544 *nb_ops_flushed += curr_queue->cbuf.count;
552 eca_crypto_enq_flush(struct event_crypto_adapter *adapter)
554 struct rte_event_crypto_adapter_stats *stats = &adapter->crypto_stats;
556 uint16_t nb_enqueued = 0;
557 uint16_t nb_ops_flushed = 0;
558 uint16_t num_cdev = rte_cryptodev_count();
560 for (cdev_id = 0; cdev_id < num_cdev; cdev_id++)
561 nb_enqueued += eca_crypto_cdev_flush(adapter,
565 * Enable dequeue from eventdev if all ops from circular
566 * buffer flushed to cdev
569 adapter->stop_enq_to_cryptodev = false;
571 stats->crypto_enq_count += nb_enqueued;
577 eca_crypto_adapter_enq_run(struct event_crypto_adapter *adapter,
578 unsigned int max_enq)
580 struct rte_event_crypto_adapter_stats *stats = &adapter->crypto_stats;
581 struct rte_event ev[BATCH_SIZE];
582 unsigned int nb_enq, nb_enqueued;
584 uint8_t event_dev_id = adapter->eventdev_id;
585 uint8_t event_port_id = adapter->event_port_id;
588 if (adapter->mode == RTE_EVENT_CRYPTO_ADAPTER_OP_NEW)
591 if (unlikely(adapter->stop_enq_to_cryptodev)) {
592 nb_enqueued += eca_crypto_enq_flush(adapter);
594 if (unlikely(adapter->stop_enq_to_cryptodev))
595 goto skip_event_dequeue_burst;
598 for (nb_enq = 0; nb_enq < max_enq; nb_enq += n) {
599 stats->event_poll_count++;
600 n = rte_event_dequeue_burst(event_dev_id,
601 event_port_id, ev, BATCH_SIZE, 0);
606 nb_enqueued += eca_enq_to_cryptodev(adapter, ev, n);
609 skip_event_dequeue_burst:
611 if ((++adapter->transmit_loop_count &
612 (CRYPTO_ENQ_FLUSH_THRESHOLD - 1)) == 0) {
613 nb_enqueued += eca_crypto_enq_flush(adapter);
/* Convert up to BATCH_SIZE completed crypto ops into events (using the
 * response metadata stored with each op) and enqueue them to the adapter's
 * event port, retrying up to CRYPTO_ADAPTER_MAX_EV_ENQ_RETRIES times on
 * partial enqueue.  Ops with no response metadata are dropped (mbuf + op
 * freed).  Event op type is FORWARD when the eventdev disables implicit
 * release, NEW otherwise.  Updates enq/fail/retry stats; returns the number
 * of events enqueued.  NOTE(review): declarations (retry, i), the
 * m_data reset, braces and the final return are missing from this
 * corrupted listing.
 */
619 static inline uint16_t
620 eca_ops_enqueue_burst(struct event_crypto_adapter *adapter,
621 struct rte_crypto_op **ops, uint16_t num)
623 struct rte_event_crypto_adapter_stats *stats = &adapter->crypto_stats;
624 union rte_event_crypto_metadata *m_data = NULL;
625 uint8_t event_dev_id = adapter->eventdev_id;
626 uint8_t event_port_id = adapter->event_port_id;
627 struct rte_event events[BATCH_SIZE];
628 uint16_t nb_enqueued, nb_ev;
635 num = RTE_MIN(num, BATCH_SIZE);
636 for (i = 0; i < num; i++) {
637 struct rte_event *ev = &events[nb_ev++];
640 if (ops[i]->sess_type == RTE_CRYPTO_OP_WITH_SESSION) {
641 m_data = rte_cryptodev_sym_session_get_user_data(
642 ops[i]->sym->session);
643 } else if (ops[i]->sess_type == RTE_CRYPTO_OP_SESSIONLESS &&
644 ops[i]->private_data_offset) {
645 m_data = (union rte_event_crypto_metadata *)
647 ops[i]->private_data_offset);
650 if (unlikely(m_data == NULL)) {
651 rte_pktmbuf_free(ops[i]->sym->m_src);
652 rte_crypto_op_free(ops[i]);
/* response event template comes from the op's metadata */
656 rte_memcpy(ev, &m_data->response_info, sizeof(*ev));
657 ev->event_ptr = ops[i];
658 ev->event_type = RTE_EVENT_TYPE_CRYPTODEV;
659 if (adapter->implicit_release_disabled)
660 ev->op = RTE_EVENT_OP_FORWARD;
662 ev->op = RTE_EVENT_OP_NEW;
666 nb_enqueued += rte_event_enqueue_burst(event_dev_id,
668 &events[nb_enqueued],
669 nb_ev - nb_enqueued);
671 } while (retry++ < CRYPTO_ADAPTER_MAX_EV_ENQ_RETRIES &&
672 nb_enqueued < nb_ev);
674 stats->event_enq_fail_count += nb_ev - nb_enqueued;
675 stats->event_enq_count += nb_enqueued;
676 stats->event_enq_retry_count += retry - 1;
682 eca_circular_buffer_flush_to_evdev(struct event_crypto_adapter *adapter,
683 struct crypto_ops_circular_buffer *bufp)
685 uint16_t n = 0, nb_ops_flushed;
686 uint16_t *headp = &bufp->head;
687 uint16_t *tailp = &bufp->tail;
688 struct rte_crypto_op **ops = bufp->op_buffer;
692 else if (*tailp < *headp)
693 n = bufp->size - *headp;
695 return 0; /* buffer empty */
697 nb_ops_flushed = eca_ops_enqueue_burst(adapter, ops, n);
698 bufp->count -= nb_ops_flushed;
702 return 0; /* buffer empty */
705 *headp = (*headp + nb_ops_flushed) % bufp->size;
711 eca_ops_buffer_flush(struct event_crypto_adapter *adapter)
713 if (likely(adapter->ebuf.count == 0))
716 while (eca_circular_buffer_flush_to_evdev(adapter,
/* Dequeue half of the service loop: first drain any backlog to the
 * eventdev, then round-robin over cryptodevs/queue pairs (resuming at
 * next_cdev_id / next_queue_pair_id) dequeuing completed ops and enqueuing
 * them as events; ops that fail event enqueue are parked in the
 * eventdev-side circular buffer.  Stops after @max_deq ops, saving the
 * round-robin position for the next invocation.  NOTE(review): the loop
 * heads/tails, `done` handling, several assignments (dev = ...,
 * nb_deq accounting) and braces are missing from this corrupted listing.
 */
720 static inline unsigned int
721 eca_crypto_adapter_deq_run(struct event_crypto_adapter *adapter,
722 unsigned int max_deq)
724 struct rte_event_crypto_adapter_stats *stats = &adapter->crypto_stats;
725 struct crypto_device_info *curr_dev;
726 struct crypto_queue_pair_info *curr_queue;
727 struct rte_crypto_op *ops[BATCH_SIZE];
728 uint16_t n, nb_deq, nb_enqueued, i;
729 struct rte_cryptodev *dev;
731 uint16_t qp, dev_qps;
733 uint16_t num_cdev = rte_cryptodev_count();
/* drain the parked backlog before pulling new completions */
736 eca_ops_buffer_flush(adapter);
741 for (cdev_id = adapter->next_cdev_id;
742 cdev_id < num_cdev; cdev_id++) {
745 curr_dev = &adapter->cdevs[cdev_id];
747 if (unlikely(dev == NULL))
750 dev_qps = dev->data->nb_queue_pairs;
752 for (qp = curr_dev->next_queue_pair_id;
753 queues < dev_qps; qp = (qp + 1) % dev_qps,
756 curr_queue = &curr_dev->qpairs[qp];
757 if (unlikely(curr_queue == NULL ||
758 !curr_queue->qp_enabled))
761 n = rte_cryptodev_dequeue_burst(cdev_id, qp,
769 stats->crypto_deq_count += n;
/* only try direct event enqueue while no backlog exists */
771 if (unlikely(!adapter->ebuf.count))
772 nb_enqueued = eca_ops_enqueue_burst(
775 if (likely(nb_enqueued == n))
778 /* Failed to enqueue events case */
779 for (i = nb_enqueued; i < n; i++)
780 eca_circular_buffer_add(
/* budget exhausted: remember where to resume next call */
787 if (nb_deq >= max_deq) {
788 if ((qp + 1) == dev_qps) {
789 adapter->next_cdev_id =
793 curr_dev->next_queue_pair_id = (qp + 1)
794 % dev->data->nb_queue_pairs;
800 adapter->next_cdev_id = 0;
801 } while (done == false);
806 eca_crypto_adapter_run(struct event_crypto_adapter *adapter,
807 unsigned int max_ops)
809 unsigned int ops_left = max_ops;
811 while (ops_left > 0) {
812 unsigned int e_cnt, d_cnt;
814 e_cnt = eca_crypto_adapter_deq_run(adapter, ops_left);
815 ops_left -= RTE_MIN(ops_left, e_cnt);
817 d_cnt = eca_crypto_adapter_enq_run(adapter, ops_left);
818 ops_left -= RTE_MIN(ops_left, d_cnt);
820 if (e_cnt == 0 && d_cnt == 0)
825 if (ops_left == max_ops)
826 rte_event_maintain(adapter->eventdev_id,
827 adapter->event_port_id, 0);
831 eca_service_func(void *args)
833 struct event_crypto_adapter *adapter = args;
835 if (rte_spinlock_trylock(&adapter->lock) == 0)
837 eca_crypto_adapter_run(adapter, adapter->max_nb);
838 rte_spinlock_unlock(&adapter->lock);
/* Lazily register the adapter's EAL service component (idempotent via
 * service_inited), invoke the configuration callback to obtain the event
 * port and max_nb, and cache both on the adapter.  NOTE(review): braces,
 * error returns and the service-unregister-on-conf-failure path are
 * missing from this corrupted listing.
 */
844 eca_init_service(struct event_crypto_adapter *adapter, uint8_t id)
846 struct rte_event_crypto_adapter_conf adapter_conf;
847 struct rte_service_spec service;
850 if (adapter->service_inited)
853 memset(&service, 0, sizeof(service));
854 snprintf(service.name, CRYPTO_ADAPTER_NAME_LEN,
855 "rte_event_crypto_adapter_%d", id);
856 service.socket_id = adapter->socket_id;
857 service.callback = eca_service_func;
858 service.callback_userdata = adapter;
859 /* Service function handles locking for queue add/del updates */
860 service.capabilities = RTE_SERVICE_CAP_MT_SAFE;
861 ret = rte_service_component_register(&service, &adapter->service_id);
863 RTE_EDEV_LOG_ERR("failed to register service %s err = %" PRId32,
868 ret = adapter->conf_cb(id, adapter->eventdev_id,
869 &adapter_conf, adapter->conf_arg);
871 RTE_EDEV_LOG_ERR("configuration callback failed err = %" PRId32,
876 adapter->max_nb = adapter_conf.max_nb;
877 adapter->event_port_id = adapter_conf.event_port_id;
878 adapter->service_inited = 1;
/* Enable (@add != 0) or disable a queue pair, keeping the adapter-wide
 * nb_qps and the per-device num_qpairs counters consistent (only counting
 * actual state transitions).  queue_pair_id == -1 recurses over every
 * queue pair of the device.  NOTE(review): braces, the early return when
 * qpairs is NULL, the add/del branch structure and the post-recursion
 * return are missing from this corrupted listing.
 */
884 eca_update_qp_info(struct event_crypto_adapter *adapter,
885 struct crypto_device_info *dev_info, int32_t queue_pair_id,
888 struct crypto_queue_pair_info *qp_info;
892 if (dev_info->qpairs == NULL)
895 if (queue_pair_id == -1) {
896 for (i = 0; i < dev_info->dev->data->nb_queue_pairs; i++)
897 eca_update_qp_info(adapter, dev_info, i, add);
899 qp_info = &dev_info->qpairs[queue_pair_id];
900 enabled = qp_info->qp_enabled;
/* count only 0->1 / 1->0 transitions */
902 adapter->nb_qps += !enabled;
903 dev_info->num_qpairs += !enabled;
905 adapter->nb_qps -= enabled;
906 dev_info->num_qpairs -= enabled;
908 qp_info->qp_enabled = !!add;
/* SW-path queue pair add: lazily allocate the per-device qpairs table and
 * each queue pair's cdev-side circular buffer, then mark the requested
 * queue pair(s) enabled (-1 means all).  NOTE(review): braces, -ENOMEM
 * returns, the per-qp buffer-init loop head and the cleanup on buffer-init
 * failure are missing from this corrupted listing.
 */
913 eca_add_queue_pair(struct event_crypto_adapter *adapter, uint8_t cdev_id,
916 struct crypto_device_info *dev_info = &adapter->cdevs[cdev_id];
917 struct crypto_queue_pair_info *qpairs;
920 if (dev_info->qpairs == NULL) {
922 rte_zmalloc_socket(adapter->mem_name,
923 dev_info->dev->data->nb_queue_pairs *
924 sizeof(struct crypto_queue_pair_info),
925 0, adapter->socket_id);
926 if (dev_info->qpairs == NULL)
929 qpairs = dev_info->qpairs;
931 if (eca_circular_buffer_init("eca_cdev_circular_buffer",
933 CRYPTO_ADAPTER_OPS_BUFFER_SZ)) {
934 RTE_EDEV_LOG_ERR("Failed to get memory for cryptodev "
941 if (queue_pair_id == -1) {
942 for (i = 0; i < dev_info->dev->data->nb_queue_pairs; i++)
943 eca_update_qp_info(adapter, dev_info, i, 1);
945 eca_update_qp_info(adapter, dev_info,
946 (uint16_t)queue_pair_id, 1);
/* Public API: bind a cryptodev queue pair (or all, with -1) to the
 * adapter.  Depending on the eventdev's capabilities, either delegates to
 * the PMD's internal-port add op, or initialises the SW service path
 * (eca_init_service + eca_add_queue_pair) and enables its run state.
 * NOTE(review): many braces, early returns (-EINVAL/-ENOMEM), the
 * internal_event_port assignment and parts of the cap checks are missing
 * from this corrupted listing.
 */
952 rte_event_crypto_adapter_queue_pair_add(uint8_t id,
954 int32_t queue_pair_id,
955 const struct rte_event *event)
957 struct event_crypto_adapter *adapter;
958 struct rte_eventdev *dev;
959 struct crypto_device_info *dev_info;
963 EVENT_CRYPTO_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);
965 if (!rte_cryptodev_is_valid_dev(cdev_id)) {
966 RTE_EDEV_LOG_ERR("Invalid dev_id=%" PRIu8, cdev_id);
970 adapter = eca_id_to_adapter(id);
974 dev = &rte_eventdevs[adapter->eventdev_id];
975 ret = rte_event_crypto_adapter_caps_get(adapter->eventdev_id,
979 RTE_EDEV_LOG_ERR("Failed to get adapter caps dev %" PRIu8
980 " cdev %" PRIu8, id, cdev_id);
/* QP-event-bind capability requires a conf event from the caller */
984 if ((cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_QP_EV_BIND) &&
986 RTE_EDEV_LOG_ERR("Conf value can not be NULL for dev_id=%u",
991 dev_info = &adapter->cdevs[cdev_id];
993 if (queue_pair_id != -1 &&
994 (uint16_t)queue_pair_id >= dev_info->dev->data->nb_queue_pairs) {
995 RTE_EDEV_LOG_ERR("Invalid queue_pair_id %" PRIu16,
996 (uint16_t)queue_pair_id);
1000 /* In case HW cap is RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_FWD,
1001 * no need of service core as HW supports event forward capability.
1003 if ((cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_FWD) ||
1004 (cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_QP_EV_BIND &&
1005 adapter->mode == RTE_EVENT_CRYPTO_ADAPTER_OP_NEW) ||
1006 (cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_NEW &&
1007 adapter->mode == RTE_EVENT_CRYPTO_ADAPTER_OP_NEW)) {
1008 RTE_FUNC_PTR_OR_ERR_RET(
1009 *dev->dev_ops->crypto_adapter_queue_pair_add,
1011 if (dev_info->qpairs == NULL) {
1013 rte_zmalloc_socket(adapter->mem_name,
1014 dev_info->dev->data->nb_queue_pairs *
1015 sizeof(struct crypto_queue_pair_info),
1016 0, adapter->socket_id);
1017 if (dev_info->qpairs == NULL)
1021 ret = (*dev->dev_ops->crypto_adapter_queue_pair_add)(dev,
1029 eca_update_qp_info(adapter, &adapter->cdevs[cdev_id],
1033 /* In case HW cap is RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_NEW,
1034 * or SW adapter, initiate services so the application can choose
1035 * which ever way it wants to use the adapter.
1036 * Case 1: RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_NEW
1037 * Application may wants to use one of below two mode
1038 * a. OP_FORWARD mode -> HW Dequeue + SW enqueue
1039 * b. OP_NEW mode -> HW Dequeue
1040 * Case 2: No HW caps, use SW adapter
1041 * a. OP_FORWARD mode -> SW enqueue & dequeue
1042 * b. OP_NEW mode -> SW Dequeue
1044 if ((cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_NEW &&
1045 !(cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_FWD) &&
1046 adapter->mode == RTE_EVENT_CRYPTO_ADAPTER_OP_FORWARD) ||
1047 (!(cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_NEW) &&
1048 !(cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_FWD) &&
1049 !(cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_QP_EV_BIND) &&
1050 (cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_SESSION_PRIVATE_DATA))) {
1051 rte_spinlock_lock(&adapter->lock);
1052 ret = eca_init_service(adapter, id);
1054 ret = eca_add_queue_pair(adapter, cdev_id,
1056 rte_spinlock_unlock(&adapter->lock);
1061 rte_service_component_runstate_set(adapter->service_id, 1);
1064 rte_eventdev_trace_crypto_adapter_queue_pair_add(id, cdev_id, event,
/* Public API: unbind a cryptodev queue pair (or all, with -1) from the
 * adapter.  Uses the PMD's internal-port del op when capabilities allow,
 * otherwise updates the SW-path bookkeeping under the adapter lock and
 * lowers the service run state once no queue pairs remain; the per-device
 * qpairs table is freed when its last queue pair goes.  NOTE(review):
 * braces, early returns, loop bodies and the internal_event_port reset are
 * missing from this corrupted listing.
 */
1070 rte_event_crypto_adapter_queue_pair_del(uint8_t id, uint8_t cdev_id,
1071 int32_t queue_pair_id)
1073 struct event_crypto_adapter *adapter;
1074 struct crypto_device_info *dev_info;
1075 struct rte_eventdev *dev;
1080 EVENT_CRYPTO_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);
1082 if (!rte_cryptodev_is_valid_dev(cdev_id)) {
1083 RTE_EDEV_LOG_ERR("Invalid dev_id=%" PRIu8, cdev_id);
1087 adapter = eca_id_to_adapter(id);
1088 if (adapter == NULL)
1091 dev = &rte_eventdevs[adapter->eventdev_id];
1092 ret = rte_event_crypto_adapter_caps_get(adapter->eventdev_id,
1098 dev_info = &adapter->cdevs[cdev_id];
1100 if (queue_pair_id != -1 &&
1101 (uint16_t)queue_pair_id >= dev_info->dev->data->nb_queue_pairs) {
1102 RTE_EDEV_LOG_ERR("Invalid queue_pair_id %" PRIu16,
1103 (uint16_t)queue_pair_id);
/* internal-port path: delegate removal to the PMD */
1107 if ((cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_FWD) ||
1108 (cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_NEW &&
1109 adapter->mode == RTE_EVENT_CRYPTO_ADAPTER_OP_NEW)) {
1110 RTE_FUNC_PTR_OR_ERR_RET(
1111 *dev->dev_ops->crypto_adapter_queue_pair_del,
1113 ret = (*dev->dev_ops->crypto_adapter_queue_pair_del)(dev,
1117 eca_update_qp_info(adapter,
1118 &adapter->cdevs[cdev_id],
1121 if (dev_info->num_qpairs == 0) {
1122 rte_free(dev_info->qpairs);
1123 dev_info->qpairs = NULL;
1127 if (adapter->nb_qps == 0)
/* SW path: serialize against the running service function */
1130 rte_spinlock_lock(&adapter->lock);
1131 if (queue_pair_id == -1) {
1132 for (i = 0; i < dev_info->dev->data->nb_queue_pairs;
1134 eca_update_qp_info(adapter, dev_info,
1137 eca_update_qp_info(adapter, dev_info,
1138 (uint16_t)queue_pair_id, 0);
1141 if (dev_info->num_qpairs == 0) {
1142 rte_free(dev_info->qpairs);
1143 dev_info->qpairs = NULL;
1146 rte_spinlock_unlock(&adapter->lock);
1147 rte_service_component_runstate_set(adapter->service_id,
1151 rte_eventdev_trace_crypto_adapter_queue_pair_del(id, cdev_id,
1152 queue_pair_id, ret);
/* Common start/stop control: walk every cryptodev, skip devices with no
 * bound queue pairs (on start) or never started (on stop), invoke the
 * PMD start/stop op for internal-event-port devices, and toggle the EAL
 * service run state when any device uses the SW service path.
 * NOTE(review): declarations (i, stop, use_service), braces, `continue`s
 * and the final return are missing from this corrupted listing.
 * NOTE(review): `&dev_info->dev[i]` below indexes i entries past the
 * per-device cryptodev pointer, which looks wrong — dev_info already
 * selects device i; presumably `dev_info->dev` was intended. Verify
 * against the upstream source.
 */
1157 eca_adapter_ctrl(uint8_t id, int start)
1159 struct event_crypto_adapter *adapter;
1160 struct crypto_device_info *dev_info;
1161 struct rte_eventdev *dev;
1167 EVENT_CRYPTO_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);
1168 adapter = eca_id_to_adapter(id);
1169 if (adapter == NULL)
1172 dev = &rte_eventdevs[adapter->eventdev_id];
1174 for (i = 0; i < rte_cryptodev_count(); i++) {
1175 dev_info = &adapter->cdevs[i];
1176 /* if start check for num queue pairs */
1177 if (start && !dev_info->num_qpairs)
1179 /* if stop check if dev has been started */
1180 if (stop && !dev_info->dev_started)
1182 use_service |= !dev_info->internal_event_port;
1183 dev_info->dev_started = start;
1184 if (dev_info->internal_event_port == 0)
1186 start ? (*dev->dev_ops->crypto_adapter_start)(dev,
1187 &dev_info->dev[i]) :
1188 (*dev->dev_ops->crypto_adapter_stop)(dev,
1193 rte_service_runstate_set(adapter->service_id, start);
/* Public API: start the adapter (validates the id, emits the trace point,
 * then delegates to eca_adapter_ctrl with start=1).  NOTE(review): the
 * return type line, braces and the -EINVAL return for a NULL adapter are
 * missing from this corrupted listing.
 */
1199 rte_event_crypto_adapter_start(uint8_t id)
1201 struct event_crypto_adapter *adapter;
1203 EVENT_CRYPTO_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);
1204 adapter = eca_id_to_adapter(id);
1205 if (adapter == NULL)
1208 rte_eventdev_trace_crypto_adapter_start(id, adapter);
1209 return eca_adapter_ctrl(id, 1);
/* Public API: stop the adapter — emit the trace point, then delegate to
 * the common control helper with start=0.
 */
int
rte_event_crypto_adapter_stop(uint8_t id)
{
	rte_eventdev_trace_crypto_adapter_stop(id);
	return eca_adapter_ctrl(id, 0);
}
/* Public API: aggregate statistics — PMD-reported stats from every
 * internal-event-port cryptodev are summed on top of the SW-path stats
 * kept on the adapter (copied only when the service was initialised).
 * NOTE(review): braces, the dev_stats arguments to the PMD op, `continue`s
 * and the final return are missing from this corrupted listing.
 */
1220 rte_event_crypto_adapter_stats_get(uint8_t id,
1221 struct rte_event_crypto_adapter_stats *stats)
1223 struct event_crypto_adapter *adapter;
1224 struct rte_event_crypto_adapter_stats dev_stats_sum = { 0 };
1225 struct rte_event_crypto_adapter_stats dev_stats;
1226 struct rte_eventdev *dev;
1227 struct crypto_device_info *dev_info;
1231 EVENT_CRYPTO_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);
1233 adapter = eca_id_to_adapter(id);
1234 if (adapter == NULL || stats == NULL)
1237 dev = &rte_eventdevs[adapter->eventdev_id];
1238 memset(stats, 0, sizeof(*stats));
1239 for (i = 0; i < rte_cryptodev_count(); i++) {
1240 dev_info = &adapter->cdevs[i];
/* only internal-port devices report PMD-side stats */
1241 if (dev_info->internal_event_port == 0 ||
1242 dev->dev_ops->crypto_adapter_stats_get == NULL)
1244 ret = (*dev->dev_ops->crypto_adapter_stats_get)(dev,
1250 dev_stats_sum.crypto_deq_count += dev_stats.crypto_deq_count;
1251 dev_stats_sum.event_enq_count +=
1252 dev_stats.event_enq_count;
1255 if (adapter->service_inited)
1256 *stats = adapter->crypto_stats;
1258 stats->crypto_deq_count += dev_stats_sum.crypto_deq_count;
1259 stats->event_enq_count += dev_stats_sum.event_enq_count;
/* Public API: reset statistics — invoke the PMD reset op for every
 * internal-event-port cryptodev, then zero the adapter's SW-path stats.
 * NOTE(review): braces, the PMD-op argument and the final return are
 * missing from this corrupted listing.
 */
1265 rte_event_crypto_adapter_stats_reset(uint8_t id)
1267 struct event_crypto_adapter *adapter;
1268 struct crypto_device_info *dev_info;
1269 struct rte_eventdev *dev;
1272 EVENT_CRYPTO_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);
1274 adapter = eca_id_to_adapter(id);
1275 if (adapter == NULL)
1278 dev = &rte_eventdevs[adapter->eventdev_id];
1279 for (i = 0; i < rte_cryptodev_count(); i++) {
1280 dev_info = &adapter->cdevs[i];
1281 if (dev_info->internal_event_port == 0 ||
1282 dev->dev_ops->crypto_adapter_stats_reset == NULL)
1284 (*dev->dev_ops->crypto_adapter_stats_reset)(dev,
1288 memset(&adapter->crypto_stats, 0, sizeof(adapter->crypto_stats));
1293 rte_event_crypto_adapter_service_id_get(uint8_t id, uint32_t *service_id)
1295 struct event_crypto_adapter *adapter;
1297 EVENT_CRYPTO_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);
1299 adapter = eca_id_to_adapter(id);
1300 if (adapter == NULL || service_id == NULL)
1303 if (adapter->service_inited)
1304 *service_id = adapter->service_id;
1306 return adapter->service_inited ? 0 : -ESRCH;
1310 rte_event_crypto_adapter_event_port_get(uint8_t id, uint8_t *event_port_id)
1312 struct event_crypto_adapter *adapter;
1314 EVENT_CRYPTO_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);
1316 adapter = eca_id_to_adapter(id);
1317 if (adapter == NULL || event_port_id == NULL)
1320 *event_port_id = adapter->event_port_id;