1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2018 Intel Corporation.
8 #include <rte_common.h>
10 #include <rte_errno.h>
11 #include <rte_cryptodev.h>
12 #include <rte_cryptodev_pmd.h>
14 #include <rte_malloc.h>
15 #include <rte_service_component.h>
17 #include "rte_eventdev.h"
18 #include "rte_eventdev_pmd.h"
19 #include "rte_eventdev_trace.h"
20 #include "rte_event_crypto_adapter.h"
/* Default max crypto ops processed per service-function invocation */
#define DEFAULT_MAX_NB 128
/* Max length of the adapter's rte_service name */
#define CRYPTO_ADAPTER_NAME_LEN 32
/* Max length of names used for adapter memory allocations */
#define CRYPTO_ADAPTER_MEM_NAME_LEN 32
/* Max retries when enqueuing response events back to the event device */
#define CRYPTO_ADAPTER_MAX_EV_ENQ_RETRIES 100

/* Flush an instance's enqueue buffers every CRYPTO_ENQ_FLUSH_THRESHOLD
 * iterations of eca_crypto_adapter_enq_run()
 */
#define CRYPTO_ENQ_FLUSH_THRESHOLD 1024
/* Per-adapter instance state.
 * NOTE(review): several member declarations are not visible in this
 * excerpt; fields such as eventdev_id, max_nb, lock, conf_arg,
 * default_cb_arg, socket_id, service_id and nb_qps are referenced by
 * other functions in this file -- confirm against the full source.
 */
struct rte_event_crypto_adapter {
	/* Event device identifier */
	/* Event port identifier */
	uint8_t event_port_id;
	/* Store event device's implicit release capability */
	uint8_t implicit_release_disabled;
	/* Max crypto ops processed in any service function invocation */
	/* Lock to serialize config updates with service function */
	/* Next crypto device to be processed */
	uint16_t next_cdev_id;
	/* Per crypto device structure */
	struct crypto_device_info *cdevs;
	/* Loop counter to flush crypto ops */
	uint16_t transmit_loop_count;
	/* Per instance stats structure */
	struct rte_event_crypto_adapter_stats crypto_stats;
	/* Configuration callback for rte_service configuration */
	rte_event_crypto_adapter_conf_cb conf_cb;
	/* Configuration callback argument */
	/* Set if default_cb is being used */
	/* Service initialization state */
	uint8_t service_inited;
	/* Memory allocation name */
	char mem_name[CRYPTO_ADAPTER_MEM_NAME_LEN];
	/* Socket identifier cached from eventdev */
	/* Per adapter EAL service */
	/* No. of queue pairs configured */
	/* Adapter mode: OP_NEW vs OP_FORWARD (see public header) */
	enum rte_event_crypto_adapter_mode mode;
} __rte_cache_aligned;
/* Per crypto device information.
 * NOTE(review): some member declarations are not visible in this
 * excerpt; dev_started and num_qpairs are referenced by other
 * functions in this file -- confirm against the full source.
 */
struct crypto_device_info {
	/* Pointer to cryptodev */
	struct rte_cryptodev *dev;
	/* Pointer to queue pair info */
	struct crypto_queue_pair_info *qpairs;
	/* Next queue pair to be processed */
	uint16_t next_queue_pair_id;
	/* Set to indicate cryptodev->eventdev packet
	 * transfer uses a hardware mechanism
	 */
	uint8_t internal_event_port;
	/* Set to indicate processing has been started */
	/* If num_qpairs > 0, the start callback will
	 * be invoked if not already invoked
	 */
} __rte_cache_aligned;
/* Per queue pair information.
 * NOTE(review): the qp_enabled and len member declarations are not
 * visible in this excerpt; both are referenced by the enqueue path.
 */
struct crypto_queue_pair_info {
	/* Set to indicate queue pair is enabled */
	/* Pointer to hold rte_crypto_ops for batching */
	struct rte_crypto_op **op_buffer;
	/* No of crypto ops accumulated */
} __rte_cache_aligned;
/* Shared array of adapter instance pointers, indexed by adapter id;
 * allocated in a memzone (see init below) so it is process-shared.
 */
static struct rte_event_crypto_adapter **event_crypto_adapter;

/* Macros to check for valid adapter */
/* NOTE(review): the macro body is truncated in this excerpt; the
 * "return retval" and closing "} while (0)" lines are not visible.
 */
#define EVENT_CRYPTO_ADAPTER_ID_VALID_OR_ERR_RET(id, retval) do { \
	if (!eca_valid_id(id)) { \
		RTE_EDEV_LOG_ERR("Invalid crypto adapter id = %d\n", id); \

/* Return true when the adapter id is within the supported range.
 * NOTE(review): the return-type line and braces are not visible here.
 */
eca_valid_id(uint8_t id)
	return id < RTE_EVENT_CRYPTO_ADAPTER_MAX_INSTANCE;
/* One-time lookup/reservation of the memzone holding the shared
 * adapter-pointer array.
 * NOTE(review): the function signature and several control-flow lines
 * (error checks, returns) are not visible in this excerpt.
 */
	const char *name = "crypto_adapter_array";
	const struct rte_memzone *mz;

	sz = sizeof(*event_crypto_adapter) *
			RTE_EVENT_CRYPTO_ADAPTER_MAX_INSTANCE;
	sz = RTE_ALIGN(sz, RTE_CACHE_LINE_SIZE);

	/* Reserve only when the memzone does not already exist */
	mz = rte_memzone_lookup(name);
	mz = rte_memzone_reserve_aligned(name, sz, rte_socket_id(), 0,
					 RTE_CACHE_LINE_SIZE);
	RTE_EDEV_LOG_ERR("failed to reserve memzone err = %"
	event_crypto_adapter = mz->addr;
/* Translate an adapter id to its instance pointer; NULL when the
 * shared array has not been initialized (or the slot is empty).
 */
static inline struct rte_event_crypto_adapter *
eca_id_to_adapter(uint8_t id)
	return event_crypto_adapter ?
			event_crypto_adapter[id] : NULL;
/* Default configuration callback: creates a dedicated event port for
 * the adapter by stopping the event device, growing nb_event_ports by
 * one, reconfiguring, setting up the new port and restarting.
 * NOTE(review): local declarations and error-handling lines are not
 * visible in this excerpt.
 */
eca_default_config_cb(uint8_t id, uint8_t dev_id,
		struct rte_event_crypto_adapter_conf *conf, void *arg)
	struct rte_event_dev_config dev_conf;
	struct rte_eventdev *dev;
	struct rte_event_port_conf *port_conf = arg;
	struct rte_event_crypto_adapter *adapter = eca_id_to_adapter(id);

	dev = &rte_eventdevs[adapter->eventdev_id];
	dev_conf = dev->data->dev_conf;
	/* Remember run state so the device can be restarted afterwards */
	started = dev->data->dev_started;
	rte_event_dev_stop(dev_id);
	/* The adapter's port is appended after the existing ports */
	port_id = dev_conf.nb_event_ports;
	dev_conf.nb_event_ports += 1;
	ret = rte_event_dev_configure(dev_id, &dev_conf);
	RTE_EDEV_LOG_ERR("failed to configure event dev %u\n", dev_id);
	if (rte_event_dev_start(dev_id))

	ret = rte_event_port_setup(dev_id, port_id, port_conf);
	RTE_EDEV_LOG_ERR("failed to setup event port %u\n", port_id);

	conf->event_port_id = port_id;
	conf->max_nb = DEFAULT_MAX_NB;
	ret = rte_event_dev_start(dev_id);
	/* Mark conf_arg as adapter-owned so _free() releases it */
	adapter->default_cb_arg = 1;
/* Create an adapter instance with a caller-supplied configuration
 * callback. Allocates the per-instance state and the per-cryptodev
 * array, caches device pointers, and publishes the instance in the
 * shared array.
 * NOTE(review): some parameter, declaration and error-path lines are
 * not visible in this excerpt.
 */
rte_event_crypto_adapter_create_ext(uint8_t id, uint8_t dev_id,
				rte_event_crypto_adapter_conf_cb conf_cb,
				enum rte_event_crypto_adapter_mode mode,
	struct rte_event_crypto_adapter *adapter;
	char mem_name[CRYPTO_ADAPTER_NAME_LEN];
	struct rte_event_dev_info dev_info;

	EVENT_CRYPTO_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);
	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);

	/* Lazily initialize the shared instance array */
	if (event_crypto_adapter == NULL) {

	/* Reject duplicate adapter ids */
	adapter = eca_id_to_adapter(id);
	if (adapter != NULL) {
		RTE_EDEV_LOG_ERR("Crypto adapter id %u already exists!", id);

	socket_id = rte_event_dev_socket_id(dev_id);
	snprintf(mem_name, CRYPTO_ADAPTER_MEM_NAME_LEN,
		"rte_event_crypto_adapter_%d", id);

	adapter = rte_zmalloc_socket(mem_name, sizeof(*adapter),
			RTE_CACHE_LINE_SIZE, socket_id);
	if (adapter == NULL) {
		RTE_EDEV_LOG_ERR("Failed to get mem for event crypto adapter!");

	ret = rte_event_dev_info_get(dev_id, &dev_info);
		RTE_EDEV_LOG_ERR("Failed to get info for eventdev %d: %s!",
				dev_id, dev_info.driver_name);

	/* Cache the implicit-release capability for the response path */
	adapter->implicit_release_disabled = (dev_info.event_dev_cap &
			RTE_EVENT_DEV_CAP_IMPLICIT_RELEASE_DISABLE);
	adapter->eventdev_id = dev_id;
	adapter->socket_id = socket_id;
	adapter->conf_cb = conf_cb;
	adapter->conf_arg = conf_arg;
	adapter->mode = mode;
	strcpy(adapter->mem_name, mem_name);
	adapter->cdevs = rte_zmalloc_socket(adapter->mem_name,
					rte_cryptodev_count() *
					sizeof(struct crypto_device_info), 0,
	if (adapter->cdevs == NULL) {
		RTE_EDEV_LOG_ERR("Failed to get mem for crypto devices\n");

	rte_spinlock_init(&adapter->lock);
	/* Cache the device pointer for every crypto device */
	for (i = 0; i < rte_cryptodev_count(); i++)
		adapter->cdevs[i].dev = rte_cryptodev_pmd_get_dev(i);

	event_crypto_adapter[id] = adapter;

	rte_eventdev_trace_crypto_adapter_create(id, dev_id, adapter, conf_arg,
/* Convenience create: copies the given port config and delegates to
 * _create_ext() with the default configuration callback.
 * NOTE(review): validation/cleanup lines are not visible in this
 * excerpt.
 */
rte_event_crypto_adapter_create(uint8_t id, uint8_t dev_id,
				struct rte_event_port_conf *port_config,
				enum rte_event_crypto_adapter_mode mode)
	struct rte_event_port_conf *pc;

	if (port_config == NULL)
	EVENT_CRYPTO_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);

	/* Copy the port config; freed by _free() via default_cb_arg */
	pc = rte_malloc(NULL, sizeof(*pc), 0);
	ret = rte_event_crypto_adapter_create_ext(id, dev_id,
						eca_default_config_cb,
/* Destroy an adapter instance. Refuses to free while queue pairs are
 * still added; releases the conf_arg copy only when the default
 * callback owned it.
 */
rte_event_crypto_adapter_free(uint8_t id)
	struct rte_event_crypto_adapter *adapter;

	EVENT_CRYPTO_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);

	adapter = eca_id_to_adapter(id);
	if (adapter->nb_qps) {
		RTE_EDEV_LOG_ERR("%" PRIu16 "Queue pairs not deleted",
	rte_eventdev_trace_crypto_adapter_free(id, adapter);
	/* conf_arg is adapter-owned only when the default cb was used */
	if (adapter->default_cb_arg)
		rte_free(adapter->conf_arg);
	rte_free(adapter->cdevs);
	event_crypto_adapter[id] = NULL;
/* Route dequeued events to the crypto devices: extract the request
 * metadata from each op (session user data, or private data area for
 * sessionless ops), buffer the op on the target queue pair, and flush
 * a full batch with rte_cryptodev_enqueue_burst(). Ops with missing
 * metadata or a disabled queue pair are dropped (mbuf + op freed).
 * NOTE(review): several declarations, continue/brace and length-update
 * lines are not visible in this excerpt.
 */
static inline unsigned int
eca_enq_to_cryptodev(struct rte_event_crypto_adapter *adapter,
		struct rte_event *ev, unsigned int cnt)
	struct rte_event_crypto_adapter_stats *stats = &adapter->crypto_stats;
	union rte_event_crypto_metadata *m_data = NULL;
	struct crypto_queue_pair_info *qp_info = NULL;
	struct rte_crypto_op *crypto_op;
	uint16_t qp_id, len, ret;

	stats->event_deq_count += cnt;

	for (i = 0; i < cnt; i++) {
		crypto_op = ev[i].event_ptr;
		if (crypto_op == NULL)
		if (crypto_op->sess_type == RTE_CRYPTO_OP_WITH_SESSION) {
			/* Request metadata stored as session user data */
			m_data = rte_cryptodev_sym_session_get_user_data(
					crypto_op->sym->session);
			if (m_data == NULL) {
				rte_pktmbuf_free(crypto_op->sym->m_src);
				rte_crypto_op_free(crypto_op);
			cdev_id = m_data->request_info.cdev_id;
			qp_id = m_data->request_info.queue_pair_id;
			qp_info = &adapter->cdevs[cdev_id].qpairs[qp_id];
			if (!qp_info->qp_enabled) {
				rte_pktmbuf_free(crypto_op->sym->m_src);
				rte_crypto_op_free(crypto_op);
			qp_info->op_buffer[len] = crypto_op;
		} else if (crypto_op->sess_type == RTE_CRYPTO_OP_SESSIONLESS &&
				crypto_op->private_data_offset) {
			/* Sessionless: metadata lives in the op's private
			 * data area at private_data_offset.
			 */
			m_data = (union rte_event_crypto_metadata *)
					((uint8_t *)crypto_op +
					crypto_op->private_data_offset);
			cdev_id = m_data->request_info.cdev_id;
			qp_id = m_data->request_info.queue_pair_id;
			qp_info = &adapter->cdevs[cdev_id].qpairs[qp_id];
			if (!qp_info->qp_enabled) {
				rte_pktmbuf_free(crypto_op->sym->m_src);
				rte_crypto_op_free(crypto_op);
			qp_info->op_buffer[len] = crypto_op;
			/* No usable metadata: drop the op */
			rte_pktmbuf_free(crypto_op->sym->m_src);
			rte_crypto_op_free(crypto_op);
		/* Flush the queue pair's buffer when a batch is full */
		if (len == BATCH_SIZE) {
			struct rte_crypto_op **op_buffer = qp_info->op_buffer;
			ret = rte_cryptodev_enqueue_burst(cdev_id,
			stats->crypto_enq_count += ret;
				/* Free ops the cryptodev did not accept */
				struct rte_crypto_op *op;
				op = op_buffer[ret++];
				stats->crypto_enq_fail++;
				rte_pktmbuf_free(op->sym->m_src);
				rte_crypto_op_free(op);
/* Flush every enabled queue pair's buffered crypto ops to its device;
 * ops the device refuses are dropped and counted as enqueue failures.
 * NOTE(review): declarations (cdev_id, qp, ret, ...) and some
 * control-flow lines are not visible in this excerpt.
 */
eca_crypto_enq_flush(struct rte_event_crypto_adapter *adapter)
	struct rte_event_crypto_adapter_stats *stats = &adapter->crypto_stats;
	struct crypto_device_info *curr_dev;
	struct crypto_queue_pair_info *curr_queue;
	struct rte_crypto_op **op_buffer;
	struct rte_cryptodev *dev;
	uint16_t num_cdev = rte_cryptodev_count();

	for (cdev_id = 0; cdev_id < num_cdev; cdev_id++) {
		curr_dev = &adapter->cdevs[cdev_id];
		for (qp = 0; qp < dev->data->nb_queue_pairs; qp++) {
			curr_queue = &curr_dev->qpairs[qp];
			if (!curr_queue->qp_enabled)

			op_buffer = curr_queue->op_buffer;
			ret = rte_cryptodev_enqueue_burst(cdev_id,
			stats->crypto_enq_count += ret;

			/* Free ops beyond what the device accepted */
			while (ret < curr_queue->len) {
				struct rte_crypto_op *op;
				op = op_buffer[ret++];
				stats->crypto_enq_fail++;
				rte_pktmbuf_free(op->sym->m_src);
				rte_crypto_op_free(op);
/* SW enqueue path: dequeue up to max_enq events from the adapter's
 * event port and hand them to the crypto devices; periodically flush
 * partially filled buffers. Skipped entirely in OP_NEW mode (the app
 * enqueues to cryptodev itself in that mode).
 * NOTE(review): declarations of n and the return statements are not
 * visible in this excerpt.
 */
eca_crypto_adapter_enq_run(struct rte_event_crypto_adapter *adapter,
			unsigned int max_enq)
	struct rte_event_crypto_adapter_stats *stats = &adapter->crypto_stats;
	struct rte_event ev[BATCH_SIZE];
	unsigned int nb_enq, nb_enqueued;
	uint8_t event_dev_id = adapter->eventdev_id;
	uint8_t event_port_id = adapter->event_port_id;

	if (adapter->mode == RTE_EVENT_CRYPTO_ADAPTER_OP_NEW)

	for (nb_enq = 0; nb_enq < max_enq; nb_enq += n) {
		stats->event_poll_count++;
		n = rte_event_dequeue_burst(event_dev_id,
				event_port_id, ev, BATCH_SIZE, 0);

		nb_enqueued += eca_enq_to_cryptodev(adapter, ev, n);

	/* Flush buffered ops every CRYPTO_ENQ_FLUSH_THRESHOLD loops
	 * (threshold is a power of two, hence the mask).
	 */
	if ((++adapter->transmit_loop_count &
		(CRYPTO_ENQ_FLUSH_THRESHOLD - 1)) == 0) {
		nb_enqueued += eca_crypto_enq_flush(adapter);
/* Convert completed crypto ops into events using each op's response
 * metadata and enqueue them to the event device, retrying up to
 * CRYPTO_ADAPTER_MAX_EV_ENQ_RETRIES times; ops whose events could not
 * be enqueued (or that lack metadata) are freed.
 * NOTE(review): several declarations, init statements and the do-loop
 * opening are not visible in this excerpt.
 */
eca_ops_enqueue_burst(struct rte_event_crypto_adapter *adapter,
		  struct rte_crypto_op **ops, uint16_t num)
	struct rte_event_crypto_adapter_stats *stats = &adapter->crypto_stats;
	union rte_event_crypto_metadata *m_data = NULL;
	uint8_t event_dev_id = adapter->eventdev_id;
	uint8_t event_port_id = adapter->event_port_id;
	struct rte_event events[BATCH_SIZE];
	uint16_t nb_enqueued, nb_ev;

	num = RTE_MIN(num, BATCH_SIZE);
	for (i = 0; i < num; i++) {
		struct rte_event *ev = &events[nb_ev++];
		if (ops[i]->sess_type == RTE_CRYPTO_OP_WITH_SESSION) {
			/* Response metadata from session user data */
			m_data = rte_cryptodev_sym_session_get_user_data(
					ops[i]->sym->session);
		} else if (ops[i]->sess_type == RTE_CRYPTO_OP_SESSIONLESS &&
				ops[i]->private_data_offset) {
			/* Sessionless: metadata in the op's private area */
			m_data = (union rte_event_crypto_metadata *)
					ops[i]->private_data_offset);

		if (unlikely(m_data == NULL)) {
			rte_pktmbuf_free(ops[i]->sym->m_src);
			rte_crypto_op_free(ops[i]);

		rte_memcpy(ev, &m_data->response_info, sizeof(*ev));
		ev->event_ptr = ops[i];
		ev->event_type = RTE_EVENT_TYPE_CRYPTODEV;
		/* FORWARD when the device disables implicit release,
		 * otherwise NEW.
		 */
		if (adapter->implicit_release_disabled)
			ev->op = RTE_EVENT_OP_FORWARD;
			ev->op = RTE_EVENT_OP_NEW;

		nb_enqueued += rte_event_enqueue_burst(event_dev_id,
						&events[nb_enqueued],
						nb_ev - nb_enqueued);
	} while (retry++ < CRYPTO_ADAPTER_MAX_EV_ENQ_RETRIES &&
		 nb_enqueued < nb_ev);

	/* Free mbufs and rte_crypto_ops for failed events */
	for (i = nb_enqueued; i < nb_ev; i++) {
		struct rte_crypto_op *op = events[i].event_ptr;
		rte_pktmbuf_free(op->sym->m_src);
		rte_crypto_op_free(op);

	stats->event_enq_fail_count += nb_ev - nb_enqueued;
	stats->event_enq_count += nb_enqueued;
	stats->event_enq_retry_count += retry - 1;
/* SW dequeue path: round-robin over crypto devices and their queue
 * pairs starting at the saved next_cdev_id/next_queue_pair_id,
 * dequeue completed ops and forward them to the event device via
 * eca_ops_enqueue_burst(), until max_deq ops have been processed.
 * NOTE(review): several declarations (cdev_id, n, nb_deq, queues,
 * done, ...) and control-flow lines are not visible in this excerpt.
 */
static inline unsigned int
eca_crypto_adapter_deq_run(struct rte_event_crypto_adapter *adapter,
			unsigned int max_deq)
	struct rte_event_crypto_adapter_stats *stats = &adapter->crypto_stats;
	struct crypto_device_info *curr_dev;
	struct crypto_queue_pair_info *curr_queue;
	struct rte_crypto_op *ops[BATCH_SIZE];
	struct rte_cryptodev *dev;
	uint16_t qp, dev_qps;
	uint16_t num_cdev = rte_cryptodev_count();

	for (cdev_id = adapter->next_cdev_id;
		cdev_id < num_cdev; cdev_id++) {
		curr_dev = &adapter->cdevs[cdev_id];
		dev_qps = dev->data->nb_queue_pairs;

		/* Resume from the queue pair saved on the last run */
		for (qp = curr_dev->next_queue_pair_id;
			queues < dev_qps; qp = (qp + 1) % dev_qps,
			curr_queue = &curr_dev->qpairs[qp];
			if (!curr_queue->qp_enabled)

			n = rte_cryptodev_dequeue_burst(cdev_id, qp,
			stats->crypto_deq_count += n;
			eca_ops_enqueue_burst(adapter, ops, n);
			/* Budget exhausted: remember where to resume */
			if (nb_deq > max_deq) {
				if ((qp + 1) == dev_qps) {
					adapter->next_cdev_id =
				curr_dev->next_queue_pair_id = (qp + 1)
					% dev->data->nb_queue_pairs;
	} while (done == false);
/* One adapter iteration: run the dequeue path first, then spend the
 * remaining op budget on the enqueue path.
 * NOTE(review): the signature line, braces and return statements are
 * not visible in this excerpt.
 */
eca_crypto_adapter_run(struct rte_event_crypto_adapter *adapter,
			unsigned int max_ops)
	unsigned int e_cnt, d_cnt;

	e_cnt = eca_crypto_adapter_deq_run(adapter, max_ops);
	max_ops -= RTE_MIN(max_ops, e_cnt);

	d_cnt = eca_crypto_adapter_enq_run(adapter, max_ops);
	max_ops -= RTE_MIN(max_ops, d_cnt);

	/* Report idle when neither direction moved any ops */
	if (e_cnt == 0 && d_cnt == 0)
/* EAL service callback: run one adapter iteration under the adapter
 * lock, skipping the iteration entirely when the lock is contended
 * (a config update is in progress).
 */
eca_service_func(void *args)
	struct rte_event_crypto_adapter *adapter = args;

	/* Give up rather than block on a concurrent config update */
	if (rte_spinlock_trylock(&adapter->lock) == 0)
	eca_crypto_adapter_run(adapter, adapter->max_nb);
	rte_spinlock_unlock(&adapter->lock);
/* Lazily register the adapter's EAL service component and invoke the
 * configuration callback to obtain the event port and max_nb; no-op
 * when the service is already initialized.
 * NOTE(review): error-path and return lines are not visible in this
 * excerpt.
 */
eca_init_service(struct rte_event_crypto_adapter *adapter, uint8_t id)
	struct rte_event_crypto_adapter_conf adapter_conf;
	struct rte_service_spec service;

	if (adapter->service_inited)

	memset(&service, 0, sizeof(service));
	snprintf(service.name, CRYPTO_ADAPTER_NAME_LEN,
		"rte_event_crypto_adapter_%d", id);
	service.socket_id = adapter->socket_id;
	service.callback = eca_service_func;
	service.callback_userdata = adapter;
	/* Service function handles locking for queue add/del updates */
	service.capabilities = RTE_SERVICE_CAP_MT_SAFE;
	ret = rte_service_component_register(&service, &adapter->service_id);
		RTE_EDEV_LOG_ERR("failed to register service %s err = %" PRId32,

	ret = adapter->conf_cb(id, adapter->eventdev_id,
			&adapter_conf, adapter->conf_arg);
		RTE_EDEV_LOG_ERR("configuration callback failed err = %" PRId32,

	adapter->max_nb = adapter_conf.max_nb;
	adapter->event_port_id = adapter_conf.event_port_id;
	adapter->service_inited = 1;
/* Enable/disable bookkeeping for one queue pair (or all pairs when
 * queue_pair_id == -1, handled recursively); keeps adapter->nb_qps and
 * dev_info->num_qpairs in sync with the qp_enabled flags.
 * NOTE(review): the "add" parameter line, declarations of i/enabled
 * and the branch structure are not fully visible in this excerpt.
 */
eca_update_qp_info(struct rte_event_crypto_adapter *adapter,
			struct crypto_device_info *dev_info,
			int32_t queue_pair_id,
	struct crypto_queue_pair_info *qp_info;

	if (dev_info->qpairs == NULL)

	if (queue_pair_id == -1) {
		/* Apply to every queue pair of the device */
		for (i = 0; i < dev_info->dev->data->nb_queue_pairs; i++)
			eca_update_qp_info(adapter, dev_info, i, add);
		qp_info = &dev_info->qpairs[queue_pair_id];
		enabled = qp_info->qp_enabled;
		/* Counters change only on an actual state transition */
		adapter->nb_qps += !enabled;
		dev_info->num_qpairs += !enabled;
		adapter->nb_qps -= enabled;
		dev_info->num_qpairs -= enabled;
		qp_info->qp_enabled = !!add;
/* SW-path queue pair add: allocate the per-device qpair array and op
 * buffer on first use, then mark the requested pair (or all pairs for
 * queue_pair_id == -1) enabled via eca_update_qp_info().
 * NOTE(review): parameter lines, error returns and some braces are not
 * visible in this excerpt.
 */
eca_add_queue_pair(struct rte_event_crypto_adapter *adapter,
	struct crypto_device_info *dev_info = &adapter->cdevs[cdev_id];
	struct crypto_queue_pair_info *qpairs;

	if (dev_info->qpairs == NULL) {
			rte_zmalloc_socket(adapter->mem_name,
					dev_info->dev->data->nb_queue_pairs *
					sizeof(struct crypto_queue_pair_info),
					0, adapter->socket_id);
		if (dev_info->qpairs == NULL)

		qpairs = dev_info->qpairs;
		qpairs->op_buffer = rte_zmalloc_socket(adapter->mem_name,
					sizeof(struct rte_crypto_op *),
					0, adapter->socket_id);
		if (!qpairs->op_buffer) {

	if (queue_pair_id == -1) {
		for (i = 0; i < dev_info->dev->data->nb_queue_pairs; i++)
			eca_update_qp_info(adapter, dev_info, i, 1);
		eca_update_qp_info(adapter, dev_info,
					(uint16_t)queue_pair_id, 1);
/* Public queue pair add: validates ids, queries adapter capabilities,
 * then either delegates to the eventdev PMD op (HW internal port
 * cases) or initializes the EAL service and uses the SW path.
 * NOTE(review): declarations (cap, ret, ...) and several error-path /
 * return lines are not visible in this excerpt.
 */
rte_event_crypto_adapter_queue_pair_add(uint8_t id,
			int32_t queue_pair_id,
			const struct rte_event *event)
	struct rte_event_crypto_adapter *adapter;
	struct rte_eventdev *dev;
	struct crypto_device_info *dev_info;

	EVENT_CRYPTO_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);

	if (!rte_cryptodev_pmd_is_valid_dev(cdev_id)) {
		RTE_EDEV_LOG_ERR("Invalid dev_id=%" PRIu8, cdev_id);

	adapter = eca_id_to_adapter(id);

	dev = &rte_eventdevs[adapter->eventdev_id];
	ret = rte_event_crypto_adapter_caps_get(adapter->eventdev_id,
		RTE_EDEV_LOG_ERR("Failed to get adapter caps dev %" PRIu8
			" cdev %" PRIu8, id, cdev_id);

	/* QP_EV_BIND requires per-queue event info from the caller */
	if ((cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_QP_EV_BIND) &&
		RTE_EDEV_LOG_ERR("Conf value can not be NULL for dev_id=%u",

	dev_info = &adapter->cdevs[cdev_id];

	if (queue_pair_id != -1 &&
	    (uint16_t)queue_pair_id >= dev_info->dev->data->nb_queue_pairs) {
		RTE_EDEV_LOG_ERR("Invalid queue_pair_id %" PRIu16,
				 (uint16_t)queue_pair_id);

	/* In case HW cap is RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_FWD,
	 * no need of service core as HW supports event forward capability.
	 */
	if ((cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_FWD) ||
	    (cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_QP_EV_BIND &&
	     adapter->mode == RTE_EVENT_CRYPTO_ADAPTER_OP_NEW) ||
	    (cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_NEW &&
	     adapter->mode == RTE_EVENT_CRYPTO_ADAPTER_OP_NEW)) {
		RTE_FUNC_PTR_OR_ERR_RET(
			*dev->dev_ops->crypto_adapter_queue_pair_add,
		if (dev_info->qpairs == NULL) {
				rte_zmalloc_socket(adapter->mem_name,
					dev_info->dev->data->nb_queue_pairs *
					sizeof(struct crypto_queue_pair_info),
					0, adapter->socket_id);
			if (dev_info->qpairs == NULL)

		ret = (*dev->dev_ops->crypto_adapter_queue_pair_add)(dev,
			eca_update_qp_info(adapter, &adapter->cdevs[cdev_id],

	/* In case HW cap is RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_NEW,
	 * or SW adapter, initiate services so the application can choose
	 * which ever way it wants to use the adapter.
	 * Case 1: RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_NEW
	 * Application may wants to use one of below two mode
	 * a. OP_FORWARD mode -> HW Dequeue + SW enqueue
	 * b. OP_NEW mode -> HW Dequeue
	 * Case 2: No HW caps, use SW adapter
	 * a. OP_FORWARD mode -> SW enqueue & dequeue
	 * b. OP_NEW mode -> SW Dequeue
	 */
	if ((cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_NEW &&
	     adapter->mode == RTE_EVENT_CRYPTO_ADAPTER_OP_FORWARD) ||
	    (!(cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_NEW) &&
	     !(cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_FWD) &&
	     !(cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_QP_EV_BIND) &&
	     (cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_SESSION_PRIVATE_DATA))) {
		rte_spinlock_lock(&adapter->lock);
		ret = eca_init_service(adapter, id);
			ret = eca_add_queue_pair(adapter, cdev_id,
		rte_spinlock_unlock(&adapter->lock);
			rte_service_component_runstate_set(adapter->service_id, 1);

	rte_eventdev_trace_crypto_adapter_queue_pair_add(id, cdev_id, event,
/* Public queue pair delete: mirrors _queue_pair_add(); delegates to
 * the PMD op for internal-port configurations, otherwise disables the
 * pair(s) under the adapter lock and frees the qpair array when the
 * device has no pairs left.
 * NOTE(review): declarations (cap, ret, i) and several error-path /
 * return lines are not visible in this excerpt.
 */
rte_event_crypto_adapter_queue_pair_del(uint8_t id, uint8_t cdev_id,
					int32_t queue_pair_id)
	struct rte_event_crypto_adapter *adapter;
	struct crypto_device_info *dev_info;
	struct rte_eventdev *dev;

	EVENT_CRYPTO_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);

	if (!rte_cryptodev_pmd_is_valid_dev(cdev_id)) {
		RTE_EDEV_LOG_ERR("Invalid dev_id=%" PRIu8, cdev_id);

	adapter = eca_id_to_adapter(id);

	dev = &rte_eventdevs[adapter->eventdev_id];
	ret = rte_event_crypto_adapter_caps_get(adapter->eventdev_id,

	dev_info = &adapter->cdevs[cdev_id];

	if (queue_pair_id != -1 &&
	    (uint16_t)queue_pair_id >= dev_info->dev->data->nb_queue_pairs) {
		RTE_EDEV_LOG_ERR("Invalid queue_pair_id %" PRIu16,
				 (uint16_t)queue_pair_id);

	/* Internal-port cases are handled by the eventdev PMD */
	if ((cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_FWD) ||
	    (cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_NEW &&
	     adapter->mode == RTE_EVENT_CRYPTO_ADAPTER_OP_NEW)) {
		RTE_FUNC_PTR_OR_ERR_RET(
			*dev->dev_ops->crypto_adapter_queue_pair_del,
		ret = (*dev->dev_ops->crypto_adapter_queue_pair_del)(dev,
			eca_update_qp_info(adapter,
					&adapter->cdevs[cdev_id],
			if (dev_info->num_qpairs == 0) {
				rte_free(dev_info->qpairs);
				dev_info->qpairs = NULL;
		if (adapter->nb_qps == 0)

		rte_spinlock_lock(&adapter->lock);
		if (queue_pair_id == -1) {
			for (i = 0; i < dev_info->dev->data->nb_queue_pairs;
				eca_update_qp_info(adapter, dev_info,
			eca_update_qp_info(adapter, dev_info,
					(uint16_t)queue_pair_id, 0);

		/* Release the qpair array once the device is empty */
		if (dev_info->num_qpairs == 0) {
			rte_free(dev_info->qpairs);
			dev_info->qpairs = NULL;

		rte_spinlock_unlock(&adapter->lock);
		rte_service_component_runstate_set(adapter->service_id,

	rte_eventdev_trace_crypto_adapter_queue_pair_del(id, cdev_id,
974 eca_adapter_ctrl(uint8_t id, int start)
976 struct rte_event_crypto_adapter *adapter;
977 struct crypto_device_info *dev_info;
978 struct rte_eventdev *dev;
984 EVENT_CRYPTO_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);
985 adapter = eca_id_to_adapter(id);
989 dev = &rte_eventdevs[adapter->eventdev_id];
991 for (i = 0; i < rte_cryptodev_count(); i++) {
992 dev_info = &adapter->cdevs[i];
993 /* if start check for num queue pairs */
994 if (start && !dev_info->num_qpairs)
996 /* if stop check if dev has been started */
997 if (stop && !dev_info->dev_started)
999 use_service |= !dev_info->internal_event_port;
1000 dev_info->dev_started = start;
1001 if (dev_info->internal_event_port == 0)
1003 start ? (*dev->dev_ops->crypto_adapter_start)(dev,
1004 &dev_info->dev[i]) :
1005 (*dev->dev_ops->crypto_adapter_stop)(dev,
1010 rte_service_runstate_set(adapter->service_id, start);
/* Start the adapter: validates the id, emits the trace point and
 * delegates to eca_adapter_ctrl(id, 1).
 */
rte_event_crypto_adapter_start(uint8_t id)
	struct rte_event_crypto_adapter *adapter;

	EVENT_CRYPTO_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);
	adapter = eca_id_to_adapter(id);
	if (adapter == NULL)

	rte_eventdev_trace_crypto_adapter_start(id, adapter);
	return eca_adapter_ctrl(id, 1);
/* Stop the adapter: emits the trace point and delegates to
 * eca_adapter_ctrl(id, 0), which performs its own id validation.
 */
rte_event_crypto_adapter_stop(uint8_t id)
	rte_eventdev_trace_crypto_adapter_stop(id);
	return eca_adapter_ctrl(id, 0);
/* Retrieve adapter statistics: the SW-path counters kept by the
 * service function plus, for devices with an internal event port,
 * the counters reported by the PMD's crypto_adapter_stats_get op.
 * NOTE(review): declarations (i, ret), continue statements and some
 * call-argument lines are not visible in this excerpt.
 */
rte_event_crypto_adapter_stats_get(uint8_t id,
				struct rte_event_crypto_adapter_stats *stats)
	struct rte_event_crypto_adapter *adapter;
	struct rte_event_crypto_adapter_stats dev_stats_sum = { 0 };
	struct rte_event_crypto_adapter_stats dev_stats;
	struct rte_eventdev *dev;
	struct crypto_device_info *dev_info;

	EVENT_CRYPTO_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);

	adapter = eca_id_to_adapter(id);
	if (adapter == NULL || stats == NULL)

	dev = &rte_eventdevs[adapter->eventdev_id];
	memset(stats, 0, sizeof(*stats));
	for (i = 0; i < rte_cryptodev_count(); i++) {
		dev_info = &adapter->cdevs[i];
		/* Skip devices without an internal port or PMD stats op */
		if (dev_info->internal_event_port == 0 ||
			dev->dev_ops->crypto_adapter_stats_get == NULL)
		ret = (*dev->dev_ops->crypto_adapter_stats_get)(dev,
		dev_stats_sum.crypto_deq_count += dev_stats.crypto_deq_count;
		dev_stats_sum.event_enq_count +=
			dev_stats.event_enq_count;

	/* SW-path stats only exist once the service is initialized */
	if (adapter->service_inited)
		*stats = adapter->crypto_stats;

	stats->crypto_deq_count += dev_stats_sum.crypto_deq_count;
	stats->event_enq_count += dev_stats_sum.event_enq_count;
/* Reset adapter statistics: invoke the PMD reset op for internal-port
 * devices and zero the SW-path counters.
 * NOTE(review): declarations (i), continue statements and some
 * call-argument lines are not visible in this excerpt.
 */
rte_event_crypto_adapter_stats_reset(uint8_t id)
	struct rte_event_crypto_adapter *adapter;
	struct crypto_device_info *dev_info;
	struct rte_eventdev *dev;

	EVENT_CRYPTO_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);

	adapter = eca_id_to_adapter(id);
	if (adapter == NULL)

	dev = &rte_eventdevs[adapter->eventdev_id];
	for (i = 0; i < rte_cryptodev_count(); i++) {
		dev_info = &adapter->cdevs[i];
		/* Skip devices without an internal port or PMD reset op */
		if (dev_info->internal_event_port == 0 ||
			dev->dev_ops->crypto_adapter_stats_reset == NULL)
		(*dev->dev_ops->crypto_adapter_stats_reset)(dev,

	memset(&adapter->crypto_stats, 0, sizeof(adapter->crypto_stats));
/* Report the adapter's EAL service id; -ESRCH when the adapter does
 * not use a service (service never initialized).
 */
rte_event_crypto_adapter_service_id_get(uint8_t id, uint32_t *service_id)
	struct rte_event_crypto_adapter *adapter;

	EVENT_CRYPTO_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);

	adapter = eca_id_to_adapter(id);
	if (adapter == NULL || service_id == NULL)

	if (adapter->service_inited)
		*service_id = adapter->service_id;

	return adapter->service_inited ? 0 : -ESRCH;
/* Report the event port used by the adapter (as configured by the
 * configuration callback).
 * NOTE(review): the trailing return statement is not visible in this
 * excerpt.
 */
rte_event_crypto_adapter_event_port_get(uint8_t id, uint8_t *event_port_id)
	struct rte_event_crypto_adapter *adapter;

	EVENT_CRYPTO_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);

	adapter = eca_id_to_adapter(id);
	if (adapter == NULL || event_port_id == NULL)

	*event_port_id = adapter->event_port_id;