/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2018 Intel Corporation.
 * All rights reserved.
 */

#include <string.h>
#include <stdbool.h>
#include <rte_common.h>
#include <rte_dev.h>
#include <rte_errno.h>
#include <rte_cryptodev.h>
#include <rte_cryptodev_pmd.h>
#include <rte_log.h>
#include <rte_malloc.h>
#include <rte_service_component.h>

#include "rte_eventdev.h"
#include "eventdev_pmd.h"
#include "rte_eventdev_trace.h"
#include "rte_event_crypto_adapter.h"

#define BATCH_SIZE 32
#define DEFAULT_MAX_NB 128
#define CRYPTO_ADAPTER_NAME_LEN 32
#define CRYPTO_ADAPTER_MEM_NAME_LEN 32
#define CRYPTO_ADAPTER_MAX_EV_ENQ_RETRIES 100

/* Flush an instance's enqueue buffers every CRYPTO_ENQ_FLUSH_THRESHOLD
 * iterations of eca_crypto_adapter_enq_run()
 */
#define CRYPTO_ENQ_FLUSH_THRESHOLD 1024

struct rte_event_crypto_adapter {
	/* Event device identifier */
	uint8_t eventdev_id;
	/* Event port identifier */
	uint8_t event_port_id;
	/* Store event device's implicit release capability */
	uint8_t implicit_release_disabled;
	/* Max crypto ops processed in any service function invocation */
	uint32_t max_nb;
	/* Lock to serialize config updates with service function */
	rte_spinlock_t lock;
	/* Next crypto device to be processed */
	uint16_t next_cdev_id;
	/* Per crypto device structure */
	struct crypto_device_info *cdevs;
	/* Loop counter to flush crypto ops */
	uint16_t transmit_loop_count;
	/* Per instance stats structure */
	struct rte_event_crypto_adapter_stats crypto_stats;
	/* Configuration callback for rte_service configuration */
	rte_event_crypto_adapter_conf_cb conf_cb;
	/* Configuration callback argument */
	void *conf_arg;
	/* Set if default_cb is being used */
	int default_cb_arg;
	/* Service initialization state */
	uint8_t service_inited;
	/* Memory allocation name */
	char mem_name[CRYPTO_ADAPTER_MEM_NAME_LEN];
	/* Socket identifier cached from eventdev */
	int socket_id;
	/* Per adapter EAL service */
	uint32_t service_id;
	/* No. of queue pairs configured */
	uint16_t nb_qps;
	/* Adapter mode */
	enum rte_event_crypto_adapter_mode mode;
} __rte_cache_aligned;

/* Per crypto device information */
struct crypto_device_info {
	/* Pointer to cryptodev */
	struct rte_cryptodev *dev;
	/* Pointer to queue pair info */
	struct crypto_queue_pair_info *qpairs;
	/* Next queue pair to be processed */
	uint16_t next_queue_pair_id;
	/* Set to indicate cryptodev->eventdev packet
	 * transfer uses a hardware mechanism
	 */
	uint8_t internal_event_port;
	/* Set to indicate processing has been started */
	uint8_t dev_started;
	/* If num_qpairs > 0, the start callback will
	 * be invoked if not already invoked
	 */
	uint16_t num_qpairs;
} __rte_cache_aligned;

/* Per queue pair information */
struct crypto_queue_pair_info {
	/* Set to indicate queue pair is enabled */
	bool qp_enabled;
	/* Pointer to hold rte_crypto_ops for batching */
	struct rte_crypto_op **op_buffer;
	/* No of crypto ops accumulated */
	uint16_t len;
} __rte_cache_aligned;

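/* Data model: the adapter keeps one crypto_device_info per cryptodev and one
 * crypto_queue_pair_info per queue pair. Crypto ops carried in dequeued
 * events are buffered per queue pair, up to BATCH_SIZE, so that cryptodev
 * enqueues happen in bursts rather than one op at a time.
 */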
static struct rte_event_crypto_adapter **event_crypto_adapter;

/* Macro to check for a valid adapter id */
#define EVENT_CRYPTO_ADAPTER_ID_VALID_OR_ERR_RET(id, retval) do { \
	if (!eca_valid_id(id)) { \
		RTE_EDEV_LOG_ERR("Invalid crypto adapter id = %d\n", id); \
		return retval; \
	} \
} while (0)

static inline int
eca_valid_id(uint8_t id)
{
	return id < RTE_EVENT_CRYPTO_ADAPTER_MAX_INSTANCE;
}

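/* Allocate or attach to the shared array of adapter pointers. The memzone
 * lookup-before-reserve pattern lets the array be found again later, e.g.
 * from a secondary process.
 */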
static int
eca_init(void)
{
	const char *name = "crypto_adapter_array";
	const struct rte_memzone *mz;
	unsigned int sz;

	sz = sizeof(*event_crypto_adapter) *
	    RTE_EVENT_CRYPTO_ADAPTER_MAX_INSTANCE;
	sz = RTE_ALIGN(sz, RTE_CACHE_LINE_SIZE);

	mz = rte_memzone_lookup(name);
	if (mz == NULL) {
		mz = rte_memzone_reserve_aligned(name, sz, rte_socket_id(), 0,
						 RTE_CACHE_LINE_SIZE);
		if (mz == NULL) {
			RTE_EDEV_LOG_ERR("failed to reserve memzone err = %"
					 PRId32, rte_errno);
			return -rte_errno;
		}
	}

	event_crypto_adapter = mz->addr;
	return 0;
}

static inline struct rte_event_crypto_adapter *
eca_id_to_adapter(uint8_t id)
{
	return event_crypto_adapter ?
		event_crypto_adapter[id] : NULL;
}

static int
eca_default_config_cb(uint8_t id, uint8_t dev_id,
			struct rte_event_crypto_adapter_conf *conf, void *arg)
{
	struct rte_event_dev_config dev_conf;
	struct rte_eventdev *dev;
	int started;
	int ret;
	uint8_t port_id;
	struct rte_event_port_conf *port_conf = arg;
	struct rte_event_crypto_adapter *adapter = eca_id_to_adapter(id);

	if (adapter == NULL)
		return -EINVAL;

	dev = &rte_eventdevs[adapter->eventdev_id];
	dev_conf = dev->data->dev_conf;

	/* Stop the device, grow it by one event port and reconfigure */
	started = dev->data->dev_started;
	if (started)
		rte_event_dev_stop(dev_id);
	port_id = dev_conf.nb_event_ports;
	dev_conf.nb_event_ports += 1;
	ret = rte_event_dev_configure(dev_id, &dev_conf);
	if (ret) {
		RTE_EDEV_LOG_ERR("failed to configure event dev %u\n", dev_id);
		if (started && rte_event_dev_start(dev_id))
			return -EIO;
		return ret;
	}

	ret = rte_event_port_setup(dev_id, port_id, port_conf);
	if (ret) {
		RTE_EDEV_LOG_ERR("failed to setup event port %u\n", port_id);
		return ret;
	}

	conf->event_port_id = port_id;
	conf->max_nb = DEFAULT_MAX_NB;
	if (started)
		ret = rte_event_dev_start(dev_id);

	adapter->default_cb_arg = 1;
	return ret;
}

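/* Create an adapter instance with a caller supplied configuration callback.
 * The callback is invoked lazily, from the first queue pair add that needs
 * the software service, to obtain the event port and max_nb settings (see
 * eca_init_service()).
 */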
int
rte_event_crypto_adapter_create_ext(uint8_t id, uint8_t dev_id,
				rte_event_crypto_adapter_conf_cb conf_cb,
				enum rte_event_crypto_adapter_mode mode,
				void *conf_arg)
{
	struct rte_event_crypto_adapter *adapter;
	char mem_name[CRYPTO_ADAPTER_NAME_LEN];
	struct rte_event_dev_info dev_info;
	int socket_id;
	uint8_t i;
	int ret;

	EVENT_CRYPTO_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);
	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
	if (conf_cb == NULL)
		return -EINVAL;

	if (event_crypto_adapter == NULL) {
		ret = eca_init();
		if (ret)
			return ret;
	}

	adapter = eca_id_to_adapter(id);
	if (adapter != NULL) {
		RTE_EDEV_LOG_ERR("Crypto adapter id %u already exists!", id);
		return -EEXIST;
	}

	socket_id = rte_event_dev_socket_id(dev_id);
	snprintf(mem_name, CRYPTO_ADAPTER_MEM_NAME_LEN,
		 "rte_event_crypto_adapter_%d", id);

	adapter = rte_zmalloc_socket(mem_name, sizeof(*adapter),
			RTE_CACHE_LINE_SIZE, socket_id);
	if (adapter == NULL) {
		RTE_EDEV_LOG_ERR("Failed to get mem for event crypto adapter!");
		return -ENOMEM;
	}

	ret = rte_event_dev_info_get(dev_id, &dev_info);
	if (ret < 0) {
		RTE_EDEV_LOG_ERR("Failed to get info for eventdev %d: %s!",
				 dev_id, dev_info.driver_name);
		rte_free(adapter);
		return ret;
	}

	adapter->implicit_release_disabled = (dev_info.event_dev_cap &
			RTE_EVENT_DEV_CAP_IMPLICIT_RELEASE_DISABLE);
	adapter->eventdev_id = dev_id;
	adapter->socket_id = socket_id;
	adapter->conf_cb = conf_cb;
	adapter->conf_arg = conf_arg;
	adapter->mode = mode;
	strcpy(adapter->mem_name, mem_name);
	adapter->cdevs = rte_zmalloc_socket(adapter->mem_name,
					rte_cryptodev_count() *
					sizeof(struct crypto_device_info), 0,
					socket_id);
	if (adapter->cdevs == NULL) {
		RTE_EDEV_LOG_ERR("Failed to get mem for crypto devices\n");
		rte_free(adapter);
		return -ENOMEM;
	}

	rte_spinlock_init(&adapter->lock);
	for (i = 0; i < rte_cryptodev_count(); i++)
		adapter->cdevs[i].dev = rte_cryptodev_pmd_get_dev(i);

	event_crypto_adapter[id] = adapter;

	rte_eventdev_trace_crypto_adapter_create(id, dev_id, adapter, conf_arg,
		mode);
	return 0;
}

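/* Variant of create_ext() that installs the default configuration callback.
 * A minimal application-side usage sketch (values and identifiers are
 * illustrative only):
 *
 *	struct rte_event_port_conf conf = {
 *		.new_event_threshold = 4096,
 *		.dequeue_depth = 8,
 *		.enqueue_depth = 8,
 *	};
 *	ret = rte_event_crypto_adapter_create(0, evdev_id, &conf,
 *				RTE_EVENT_CRYPTO_ADAPTER_OP_FORWARD);
 */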
int
rte_event_crypto_adapter_create(uint8_t id, uint8_t dev_id,
				struct rte_event_port_conf *port_config,
				enum rte_event_crypto_adapter_mode mode)
{
	struct rte_event_port_conf *pc;
	int ret;

	if (port_config == NULL)
		return -EINVAL;
	EVENT_CRYPTO_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);

	/* Copy the port config; it is freed again in adapter free */
	pc = rte_malloc(NULL, sizeof(*pc), 0);
	if (pc == NULL)
		return -ENOMEM;
	*pc = *port_config;
	ret = rte_event_crypto_adapter_create_ext(id, dev_id,
						  eca_default_config_cb,
						  mode,
						  pc);
	if (ret)
		rte_free(pc);

	return ret;
}

int
rte_event_crypto_adapter_free(uint8_t id)
{
	struct rte_event_crypto_adapter *adapter;

	EVENT_CRYPTO_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);

	adapter = eca_id_to_adapter(id);
	if (adapter == NULL)
		return -EINVAL;

	if (adapter->nb_qps) {
		RTE_EDEV_LOG_ERR("%" PRIu16 " queue pairs not deleted",
				 adapter->nb_qps);
		return -EBUSY;
	}

	rte_eventdev_trace_crypto_adapter_free(id, adapter);
	if (adapter->default_cb_arg)
		rte_free(adapter->conf_arg);
	rte_free(adapter->cdevs);
	rte_free(adapter);
	event_crypto_adapter[id] = NULL;

	return 0;
}

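/* Move crypto ops carried in dequeued events to the per queue pair buffers.
 * The target cdev_id/queue_pair_id are read from rte_event_crypto_metadata:
 * for session based ops it is stored as session user data, for sessionless
 * ops it lives in the op's private data area (private_data_offset). A full
 * buffer (BATCH_SIZE ops) is enqueued to the cryptodev immediately.
 */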
static inline unsigned int
eca_enq_to_cryptodev(struct rte_event_crypto_adapter *adapter,
		     struct rte_event *ev, unsigned int cnt)
{
	struct rte_event_crypto_adapter_stats *stats = &adapter->crypto_stats;
	union rte_event_crypto_metadata *m_data = NULL;
	struct crypto_queue_pair_info *qp_info = NULL;
	struct rte_crypto_op *crypto_op;
	unsigned int i, n;
	uint16_t qp_id, len, ret;
	uint8_t cdev_id;

	len = 0;
	ret = 0;
	n = 0;
	stats->event_deq_count += cnt;

	for (i = 0; i < cnt; i++) {
		crypto_op = ev[i].event_ptr;
		if (crypto_op == NULL)
			continue;
		if (crypto_op->sess_type == RTE_CRYPTO_OP_WITH_SESSION) {
			m_data = rte_cryptodev_sym_session_get_user_data(
					crypto_op->sym->session);
			if (m_data == NULL) {
				rte_pktmbuf_free(crypto_op->sym->m_src);
				rte_crypto_op_free(crypto_op);
				continue;
			}

			cdev_id = m_data->request_info.cdev_id;
			qp_id = m_data->request_info.queue_pair_id;
			qp_info = &adapter->cdevs[cdev_id].qpairs[qp_id];
			if (!qp_info->qp_enabled) {
				rte_pktmbuf_free(crypto_op->sym->m_src);
				rte_crypto_op_free(crypto_op);
				continue;
			}
			len = qp_info->len;
			qp_info->op_buffer[len] = crypto_op;
			len++;
		} else if (crypto_op->sess_type == RTE_CRYPTO_OP_SESSIONLESS &&
			   crypto_op->private_data_offset) {
			m_data = (union rte_event_crypto_metadata *)
				 ((uint8_t *)crypto_op +
				  crypto_op->private_data_offset);
			cdev_id = m_data->request_info.cdev_id;
			qp_id = m_data->request_info.queue_pair_id;
			qp_info = &adapter->cdevs[cdev_id].qpairs[qp_id];
			if (!qp_info->qp_enabled) {
				rte_pktmbuf_free(crypto_op->sym->m_src);
				rte_crypto_op_free(crypto_op);
				continue;
			}
			len = qp_info->len;
			qp_info->op_buffer[len] = crypto_op;
			len++;
		} else {
			/* No usable metadata: drop the op */
			rte_pktmbuf_free(crypto_op->sym->m_src);
			rte_crypto_op_free(crypto_op);
			continue;
		}

		if (len == BATCH_SIZE) {
			struct rte_crypto_op **op_buffer = qp_info->op_buffer;
			ret = rte_cryptodev_enqueue_burst(cdev_id,
							  qp_id,
							  op_buffer,
							  BATCH_SIZE);

			stats->crypto_enq_count += ret;

			while (ret < len) {
				struct rte_crypto_op *op;
				op = op_buffer[ret++];
				stats->crypto_enq_fail++;
				rte_pktmbuf_free(op->sym->m_src);
				rte_crypto_op_free(op);
			}

			len = 0;
		}

		if (qp_info)
			qp_info->len = len;
		n += ret;
		ret = 0;
	}

	return n;
}

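/* Enqueue any partially filled per queue pair buffers to their cryptodevs.
 * Called periodically from the enqueue path so buffered ops are not held
 * back indefinitely when traffic is slow.
 */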
static unsigned int
eca_crypto_enq_flush(struct rte_event_crypto_adapter *adapter)
{
	struct rte_event_crypto_adapter_stats *stats = &adapter->crypto_stats;
	struct crypto_device_info *curr_dev;
	struct crypto_queue_pair_info *curr_queue;
	struct rte_crypto_op **op_buffer;
	struct rte_cryptodev *dev;
	uint8_t cdev_id;
	uint16_t qp;
	uint16_t ret;
	uint16_t num_cdev = rte_cryptodev_count();

	ret = 0;
	for (cdev_id = 0; cdev_id < num_cdev; cdev_id++) {
		curr_dev = &adapter->cdevs[cdev_id];
		dev = curr_dev->dev;
		if (dev == NULL)
			continue;
		for (qp = 0; qp < dev->data->nb_queue_pairs; qp++) {

			curr_queue = &curr_dev->qpairs[qp];
			if (!curr_queue->qp_enabled)
				continue;

			op_buffer = curr_queue->op_buffer;
			ret = rte_cryptodev_enqueue_burst(cdev_id,
							  qp,
							  op_buffer,
							  curr_queue->len);
			stats->crypto_enq_count += ret;

			while (ret < curr_queue->len) {
				struct rte_crypto_op *op;
				op = op_buffer[ret++];
				stats->crypto_enq_fail++;
				rte_pktmbuf_free(op->sym->m_src);
				rte_crypto_op_free(op);
			}
			curr_queue->len = 0;
		}
	}

	return ret;
}

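/* Service enqueue path: dequeue events from the adapter's event port and
 * hand them to eca_enq_to_cryptodev(). In OP_NEW mode the application
 * enqueues ops to the cryptodev itself, so this path does nothing there.
 */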
static unsigned int
eca_crypto_adapter_enq_run(struct rte_event_crypto_adapter *adapter,
			   unsigned int max_enq)
{
	struct rte_event_crypto_adapter_stats *stats = &adapter->crypto_stats;
	struct rte_event ev[BATCH_SIZE];
	unsigned int nb_enq, nb_enqueued;
	uint16_t n;
	uint8_t event_dev_id = adapter->eventdev_id;
	uint8_t event_port_id = adapter->event_port_id;

	nb_enqueued = 0;
	if (adapter->mode == RTE_EVENT_CRYPTO_ADAPTER_OP_NEW)
		return 0;

	for (nb_enq = 0; nb_enq < max_enq; nb_enq += n) {
		stats->event_poll_count++;
		n = rte_event_dequeue_burst(event_dev_id,
					    event_port_id, ev, BATCH_SIZE, 0);

		if (!n)
			break;

		nb_enqueued += eca_enq_to_cryptodev(adapter, ev, n);
	}

	if ((++adapter->transmit_loop_count &
	     (CRYPTO_ENQ_FLUSH_THRESHOLD - 1)) == 0) {
		nb_enqueued += eca_crypto_enq_flush(adapter);
	}

	return nb_enqueued;
}

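/* Convert completed crypto ops back into events. The response event is
 * taken from the op's rte_event_crypto_metadata response_info; the op type
 * is FORWARD when the event device supports disabling implicit release,
 * otherwise NEW. Enqueue is retried a bounded number of times and any ops
 * that still cannot be enqueued are freed.
 */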
static inline void
eca_ops_enqueue_burst(struct rte_event_crypto_adapter *adapter,
		      struct rte_crypto_op **ops, uint16_t num)
{
	struct rte_event_crypto_adapter_stats *stats = &adapter->crypto_stats;
	union rte_event_crypto_metadata *m_data = NULL;
	uint8_t event_dev_id = adapter->eventdev_id;
	uint8_t event_port_id = adapter->event_port_id;
	struct rte_event events[BATCH_SIZE];
	uint16_t nb_enqueued, nb_ev;
	uint8_t retry;
	uint8_t i;

	nb_ev = 0;
	retry = 0;
	nb_enqueued = 0;
	num = RTE_MIN(num, BATCH_SIZE);
	for (i = 0; i < num; i++) {
		struct rte_event *ev;

		if (ops[i]->sess_type == RTE_CRYPTO_OP_WITH_SESSION) {
			m_data = rte_cryptodev_sym_session_get_user_data(
					ops[i]->sym->session);
		} else if (ops[i]->sess_type == RTE_CRYPTO_OP_SESSIONLESS &&
			   ops[i]->private_data_offset) {
			m_data = (union rte_event_crypto_metadata *)
				 ((uint8_t *)ops[i] +
				  ops[i]->private_data_offset);
		}

		if (unlikely(m_data == NULL)) {
			rte_pktmbuf_free(ops[i]->sym->m_src);
			rte_crypto_op_free(ops[i]);
			continue;
		}

		/* Only claim an event slot once metadata is known to exist */
		ev = &events[nb_ev++];
		rte_memcpy(ev, &m_data->response_info, sizeof(*ev));
		ev->event_ptr = ops[i];
		ev->event_type = RTE_EVENT_TYPE_CRYPTODEV;
		if (adapter->implicit_release_disabled)
			ev->op = RTE_EVENT_OP_FORWARD;
		else
			ev->op = RTE_EVENT_OP_NEW;
	}

	do {
		nb_enqueued += rte_event_enqueue_burst(event_dev_id,
						       event_port_id,
						       &events[nb_enqueued],
						       nb_ev - nb_enqueued);
	} while (retry++ < CRYPTO_ADAPTER_MAX_EV_ENQ_RETRIES &&
		 nb_enqueued < nb_ev);

	/* Free mbufs and rte_crypto_ops for failed events */
	for (i = nb_enqueued; i < nb_ev; i++) {
		struct rte_crypto_op *op = events[i].event_ptr;
		rte_pktmbuf_free(op->sym->m_src);
		rte_crypto_op_free(op);
	}

	stats->event_enq_fail_count += nb_ev - nb_enqueued;
	stats->event_enq_count += nb_enqueued;
	stats->event_enq_retry_count += retry - 1;
}

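/* Service dequeue path: poll cryptodev queue pairs in round robin order,
 * resuming from next_cdev_id/next_queue_pair_id so that no queue pair is
 * starved when max_deq is reached mid scan.
 */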
static inline unsigned int
eca_crypto_adapter_deq_run(struct rte_event_crypto_adapter *adapter,
			   unsigned int max_deq)
{
	struct rte_event_crypto_adapter_stats *stats = &adapter->crypto_stats;
	struct crypto_device_info *curr_dev;
	struct crypto_queue_pair_info *curr_queue;
	struct rte_crypto_op *ops[BATCH_SIZE];
	uint16_t n, nb_deq;
	struct rte_cryptodev *dev;
	uint8_t cdev_id;
	uint16_t qp, dev_qps;
	bool done;
	uint16_t num_cdev = rte_cryptodev_count();

	nb_deq = 0;
	do {
		uint16_t queues = 0;
		done = true;

		for (cdev_id = adapter->next_cdev_id;
		     cdev_id < num_cdev; cdev_id++) {
			curr_dev = &adapter->cdevs[cdev_id];
			dev = curr_dev->dev;
			if (dev == NULL)
				continue;
			dev_qps = dev->data->nb_queue_pairs;

			for (qp = curr_dev->next_queue_pair_id;
			     queues < dev_qps; qp = (qp + 1) % dev_qps,
			     queues++) {

				curr_queue = &curr_dev->qpairs[qp];
				if (!curr_queue->qp_enabled)
					continue;

				n = rte_cryptodev_dequeue_burst(cdev_id, qp,
					ops, BATCH_SIZE);
				if (!n)
					continue;

				done = false;
				stats->crypto_deq_count += n;
				eca_ops_enqueue_burst(adapter, ops, n);
				nb_deq += n;

				if (nb_deq > max_deq) {
					if ((qp + 1) == dev_qps) {
						adapter->next_cdev_id =
							(cdev_id + 1)
							% num_cdev;
					}
					curr_dev->next_queue_pair_id = (qp + 1)
						% dev->data->nb_queue_pairs;

					return nb_deq;
				}
			}
		}
	} while (done == false);

	return nb_deq;
}

static void
eca_crypto_adapter_run(struct rte_event_crypto_adapter *adapter,
		       unsigned int max_ops)
{
	while (max_ops) {
		unsigned int e_cnt, d_cnt;

		e_cnt = eca_crypto_adapter_deq_run(adapter, max_ops);
		max_ops -= RTE_MIN(max_ops, e_cnt);

		d_cnt = eca_crypto_adapter_enq_run(adapter, max_ops);
		max_ops -= RTE_MIN(max_ops, d_cnt);

		if (e_cnt == 0 && d_cnt == 0)
			break;
	}
}

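/* Adapter service callback. The trylock keeps the data path from blocking
 * behind control path updates (queue pair add/del), which take the same
 * lock; if the lock is busy the service simply runs again later.
 */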
static int
eca_service_func(void *args)
{
	struct rte_event_crypto_adapter *adapter = args;

	if (rte_spinlock_trylock(&adapter->lock) == 0)
		return 0;
	eca_crypto_adapter_run(adapter, adapter->max_nb);
	rte_spinlock_unlock(&adapter->lock);

	return 0;
}

static int
eca_init_service(struct rte_event_crypto_adapter *adapter, uint8_t id)
{
	struct rte_event_crypto_adapter_conf adapter_conf;
	struct rte_service_spec service;
	int ret;

	if (adapter->service_inited)
		return 0;

	memset(&service, 0, sizeof(service));
	snprintf(service.name, CRYPTO_ADAPTER_NAME_LEN,
		 "rte_event_crypto_adapter_%d", id);
	service.socket_id = adapter->socket_id;
	service.callback = eca_service_func;
	service.callback_userdata = adapter;
	/* Service function handles locking for queue add/del updates */
	service.capabilities = RTE_SERVICE_CAP_MT_SAFE;
	ret = rte_service_component_register(&service, &adapter->service_id);
	if (ret) {
		RTE_EDEV_LOG_ERR("failed to register service %s err = %" PRId32,
				 service.name, ret);
		return ret;
	}

	ret = adapter->conf_cb(id, adapter->eventdev_id,
			       &adapter_conf, adapter->conf_arg);
	if (ret) {
		RTE_EDEV_LOG_ERR("configuration callback failed err = %" PRId32,
				 ret);
		return ret;
	}

	adapter->max_nb = adapter_conf.max_nb;
	adapter->event_port_id = adapter_conf.event_port_id;
	adapter->service_inited = 1;

	return ret;
}

static void
eca_update_qp_info(struct rte_event_crypto_adapter *adapter,
		   struct crypto_device_info *dev_info,
		   int32_t queue_pair_id,
		   uint8_t add)
{
	struct crypto_queue_pair_info *qp_info;
	int enabled;
	uint16_t i;

	if (dev_info->qpairs == NULL)
		return;

	if (queue_pair_id == -1) {
		for (i = 0; i < dev_info->dev->data->nb_queue_pairs; i++)
			eca_update_qp_info(adapter, dev_info, i, add);
	} else {
		qp_info = &dev_info->qpairs[queue_pair_id];
		enabled = qp_info->qp_enabled;
		if (add) {
			adapter->nb_qps += !enabled;
			dev_info->num_qpairs += !enabled;
		} else {
			adapter->nb_qps -= enabled;
			dev_info->num_qpairs -= enabled;
		}
		qp_info->qp_enabled = !!add;
	}
}

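/* Allocate per queue pair state for a cryptodev on first use and mark the
 * requested queue pair(s) enabled. A queue_pair_id of -1 selects all queue
 * pairs of the device.
 */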
static int
eca_add_queue_pair(struct rte_event_crypto_adapter *adapter,
		   uint8_t cdev_id,
		   int queue_pair_id)
{
	struct crypto_device_info *dev_info = &adapter->cdevs[cdev_id];
	struct crypto_queue_pair_info *qpairs;
	uint32_t i;

	if (dev_info->qpairs == NULL) {
		dev_info->qpairs =
		    rte_zmalloc_socket(adapter->mem_name,
				       dev_info->dev->data->nb_queue_pairs *
				       sizeof(struct crypto_queue_pair_info),
				       0, adapter->socket_id);
		if (dev_info->qpairs == NULL)
			return -ENOMEM;

		qpairs = dev_info->qpairs;
		qpairs->op_buffer = rte_zmalloc_socket(adapter->mem_name,
					BATCH_SIZE *
					sizeof(struct rte_crypto_op *),
					0, adapter->socket_id);
		if (!qpairs->op_buffer) {
			rte_free(qpairs);
			dev_info->qpairs = NULL;
			return -ENOMEM;
		}
	}

	if (queue_pair_id == -1) {
		for (i = 0; i < dev_info->dev->data->nb_queue_pairs; i++)
			eca_update_qp_info(adapter, dev_info, i, 1);
	} else
		eca_update_qp_info(adapter, dev_info,
				   (uint16_t)queue_pair_id, 1);

	return 0;
}

int
rte_event_crypto_adapter_queue_pair_add(uint8_t id,
			uint8_t cdev_id,
			int32_t queue_pair_id,
			const struct rte_event *event)
{
	struct rte_event_crypto_adapter *adapter;
	struct rte_eventdev *dev;
	struct crypto_device_info *dev_info;
	uint32_t cap;
	int ret;

	EVENT_CRYPTO_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);

	if (!rte_cryptodev_pmd_is_valid_dev(cdev_id)) {
		RTE_EDEV_LOG_ERR("Invalid dev_id=%" PRIu8, cdev_id);
		return -EINVAL;
	}

	adapter = eca_id_to_adapter(id);
	if (adapter == NULL)
		return -EINVAL;

	dev = &rte_eventdevs[adapter->eventdev_id];
	ret = rte_event_crypto_adapter_caps_get(adapter->eventdev_id,
						cdev_id,
						&cap);
	if (ret) {
		RTE_EDEV_LOG_ERR("Failed to get adapter caps dev %" PRIu8
				 " cdev %" PRIu8, id, cdev_id);
		return ret;
	}

	if ((cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_QP_EV_BIND) &&
	    (event == NULL)) {
		RTE_EDEV_LOG_ERR("Conf value cannot be NULL for dev_id=%u",
				 cdev_id);
		return -EINVAL;
	}

	dev_info = &adapter->cdevs[cdev_id];

	if (queue_pair_id != -1 &&
	    (uint16_t)queue_pair_id >= dev_info->dev->data->nb_queue_pairs) {
		RTE_EDEV_LOG_ERR("Invalid queue_pair_id %" PRIu16,
				 (uint16_t)queue_pair_id);
		return -EINVAL;
	}

	/* In case HW cap is RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_FWD,
	 * no service core is needed as the HW supports event forwarding.
	 */
	if ((cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_FWD) ||
	    (cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_QP_EV_BIND &&
	     adapter->mode == RTE_EVENT_CRYPTO_ADAPTER_OP_NEW) ||
	    (cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_NEW &&
	     adapter->mode == RTE_EVENT_CRYPTO_ADAPTER_OP_NEW)) {
		RTE_FUNC_PTR_OR_ERR_RET(
			*dev->dev_ops->crypto_adapter_queue_pair_add,
			-ENOTSUP);
		if (dev_info->qpairs == NULL) {
			dev_info->qpairs =
			    rte_zmalloc_socket(adapter->mem_name,
					dev_info->dev->data->nb_queue_pairs *
					sizeof(struct crypto_queue_pair_info),
					0, adapter->socket_id);
			if (dev_info->qpairs == NULL)
				return -ENOMEM;
		}

		ret = (*dev->dev_ops->crypto_adapter_queue_pair_add)(dev,
				dev_info->dev,
				queue_pair_id,
				event);
		if (ret)
			return ret;

		eca_update_qp_info(adapter, &adapter->cdevs[cdev_id],
				   queue_pair_id, 1);
	}

	/* In case HW cap is RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_NEW,
	 * or this is a SW adapter, initiate the service so the application
	 * can choose whichever way it wants to use the adapter.
	 * Case 1: RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_NEW
	 * The application may want to use one of the two modes below:
	 * a. OP_FORWARD mode -> HW dequeue + SW enqueue
	 * b. OP_NEW mode -> HW dequeue
	 * Case 2: No HW caps, use the SW adapter
	 * a. OP_FORWARD mode -> SW enqueue & dequeue
	 * b. OP_NEW mode -> SW dequeue
	 */
	if ((cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_NEW &&
	     !(cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_FWD) &&
	     adapter->mode == RTE_EVENT_CRYPTO_ADAPTER_OP_FORWARD) ||
	    (!(cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_NEW) &&
	     !(cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_FWD) &&
	     !(cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_QP_EV_BIND) &&
	     (cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_SESSION_PRIVATE_DATA))) {
		rte_spinlock_lock(&adapter->lock);
		ret = eca_init_service(adapter, id);
		if (ret == 0)
			ret = eca_add_queue_pair(adapter, cdev_id,
						 queue_pair_id);
		rte_spinlock_unlock(&adapter->lock);

		if (ret)
			return ret;

		rte_service_component_runstate_set(adapter->service_id, 1);
	}

	rte_eventdev_trace_crypto_adapter_queue_pair_add(id, cdev_id, event,
		queue_pair_id);
	return 0;
}

int
rte_event_crypto_adapter_queue_pair_del(uint8_t id, uint8_t cdev_id,
					int32_t queue_pair_id)
{
	struct rte_event_crypto_adapter *adapter;
	struct crypto_device_info *dev_info;
	struct rte_eventdev *dev;
	int ret;
	uint32_t cap;
	uint16_t i;

	EVENT_CRYPTO_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);

	if (!rte_cryptodev_pmd_is_valid_dev(cdev_id)) {
		RTE_EDEV_LOG_ERR("Invalid dev_id=%" PRIu8, cdev_id);
		return -EINVAL;
	}

	adapter = eca_id_to_adapter(id);
	if (adapter == NULL)
		return -EINVAL;

	dev = &rte_eventdevs[adapter->eventdev_id];
	ret = rte_event_crypto_adapter_caps_get(adapter->eventdev_id,
						cdev_id,
						&cap);
	if (ret)
		return ret;

	dev_info = &adapter->cdevs[cdev_id];

	if (queue_pair_id != -1 &&
	    (uint16_t)queue_pair_id >= dev_info->dev->data->nb_queue_pairs) {
		RTE_EDEV_LOG_ERR("Invalid queue_pair_id %" PRIu16,
				 (uint16_t)queue_pair_id);
		return -EINVAL;
	}

	if ((cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_FWD) ||
	    (cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_NEW &&
	     adapter->mode == RTE_EVENT_CRYPTO_ADAPTER_OP_NEW)) {
		RTE_FUNC_PTR_OR_ERR_RET(
			*dev->dev_ops->crypto_adapter_queue_pair_del,
			-ENOTSUP);
		ret = (*dev->dev_ops->crypto_adapter_queue_pair_del)(dev,
						dev_info->dev,
						queue_pair_id);
		if (ret == 0) {
			eca_update_qp_info(adapter,
					   &adapter->cdevs[cdev_id],
					   queue_pair_id,
					   0);
			if (dev_info->num_qpairs == 0) {
				rte_free(dev_info->qpairs);
				dev_info->qpairs = NULL;
			}
		}
	} else {
		if (adapter->nb_qps == 0)
			return 0;

		rte_spinlock_lock(&adapter->lock);
		if (queue_pair_id == -1) {
			for (i = 0; i < dev_info->dev->data->nb_queue_pairs;
			     i++)
				eca_update_qp_info(adapter, dev_info, i, 0);
		} else {
			eca_update_qp_info(adapter, dev_info,
					   (uint16_t)queue_pair_id, 0);
		}

		if (dev_info->num_qpairs == 0) {
			rte_free(dev_info->qpairs);
			dev_info->qpairs = NULL;
		}

		rte_spinlock_unlock(&adapter->lock);
		rte_service_component_runstate_set(adapter->service_id,
						   adapter->nb_qps);
	}

	rte_eventdev_trace_crypto_adapter_queue_pair_del(id, cdev_id,
		queue_pair_id, ret);
	return ret;
}

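/* Common start/stop handler. Propagates the state change to cryptodevs that
 * use an internal event port via their PMD callbacks and, when at least one
 * device relies on the software service, toggles the service run state.
 */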
static int
eca_adapter_ctrl(uint8_t id, int start)
{
	struct rte_event_crypto_adapter *adapter;
	struct crypto_device_info *dev_info;
	struct rte_eventdev *dev;
	uint32_t i;
	int use_service;
	int stop = !start;

	use_service = 0;
	EVENT_CRYPTO_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);
	adapter = eca_id_to_adapter(id);
	if (adapter == NULL)
		return -EINVAL;

	dev = &rte_eventdevs[adapter->eventdev_id];

	for (i = 0; i < rte_cryptodev_count(); i++) {
		dev_info = &adapter->cdevs[i];
		/* On start, skip devices with no queue pairs added */
		if (start && !dev_info->num_qpairs)
			continue;
		/* On stop, skip devices that have not been started */
		if (stop && !dev_info->dev_started)
			continue;
		use_service |= !dev_info->internal_event_port;
		dev_info->dev_started = start;
		if (dev_info->internal_event_port == 0)
			continue;
		start ? (*dev->dev_ops->crypto_adapter_start)(dev,
					dev_info->dev) :
			(*dev->dev_ops->crypto_adapter_stop)(dev,
					dev_info->dev);
	}

	if (use_service)
		rte_service_runstate_set(adapter->service_id, start);

	return 0;
}

int
rte_event_crypto_adapter_start(uint8_t id)
{
	struct rte_event_crypto_adapter *adapter;

	EVENT_CRYPTO_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);
	adapter = eca_id_to_adapter(id);
	if (adapter == NULL)
		return -EINVAL;

	rte_eventdev_trace_crypto_adapter_start(id, adapter);
	return eca_adapter_ctrl(id, 1);
}

int
rte_event_crypto_adapter_stop(uint8_t id)
{
	rte_eventdev_trace_crypto_adapter_stop(id);
	return eca_adapter_ctrl(id, 0);
}

int
rte_event_crypto_adapter_stats_get(uint8_t id,
				struct rte_event_crypto_adapter_stats *stats)
{
	struct rte_event_crypto_adapter *adapter;
	struct rte_event_crypto_adapter_stats dev_stats_sum = { 0 };
	struct rte_event_crypto_adapter_stats dev_stats;
	struct rte_eventdev *dev;
	struct crypto_device_info *dev_info;
	uint32_t i;
	int ret;

	EVENT_CRYPTO_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);

	adapter = eca_id_to_adapter(id);
	if (adapter == NULL || stats == NULL)
		return -EINVAL;

	dev = &rte_eventdevs[adapter->eventdev_id];
	memset(stats, 0, sizeof(*stats));
	for (i = 0; i < rte_cryptodev_count(); i++) {
		dev_info = &adapter->cdevs[i];
		if (dev_info->internal_event_port == 0 ||
		    dev->dev_ops->crypto_adapter_stats_get == NULL)
			continue;
		ret = (*dev->dev_ops->crypto_adapter_stats_get)(dev,
						dev_info->dev,
						&dev_stats);
		if (ret)
			continue;

		dev_stats_sum.crypto_deq_count += dev_stats.crypto_deq_count;
		dev_stats_sum.event_enq_count +=
			dev_stats.event_enq_count;
	}

	if (adapter->service_inited)
		*stats = adapter->crypto_stats;

	stats->crypto_deq_count += dev_stats_sum.crypto_deq_count;
	stats->event_enq_count += dev_stats_sum.event_enq_count;

	return 0;
}

int
rte_event_crypto_adapter_stats_reset(uint8_t id)
{
	struct rte_event_crypto_adapter *adapter;
	struct crypto_device_info *dev_info;
	struct rte_eventdev *dev;
	uint32_t i;

	EVENT_CRYPTO_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);

	adapter = eca_id_to_adapter(id);
	if (adapter == NULL)
		return -EINVAL;

	dev = &rte_eventdevs[adapter->eventdev_id];
	for (i = 0; i < rte_cryptodev_count(); i++) {
		dev_info = &adapter->cdevs[i];
		if (dev_info->internal_event_port == 0 ||
		    dev->dev_ops->crypto_adapter_stats_reset == NULL)
			continue;
		(*dev->dev_ops->crypto_adapter_stats_reset)(dev,
						dev_info->dev);
	}

	memset(&adapter->crypto_stats, 0, sizeof(adapter->crypto_stats));
	return 0;
}

int
rte_event_crypto_adapter_service_id_get(uint8_t id, uint32_t *service_id)
{
	struct rte_event_crypto_adapter *adapter;

	EVENT_CRYPTO_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);

	adapter = eca_id_to_adapter(id);
	if (adapter == NULL || service_id == NULL)
		return -EINVAL;

	if (adapter->service_inited)
		*service_id = adapter->service_id;

	return adapter->service_inited ? 0 : -ESRCH;
}

int
rte_event_crypto_adapter_event_port_get(uint8_t id, uint8_t *event_port_id)
{
	struct rte_event_crypto_adapter *adapter;

	EVENT_CRYPTO_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);

	adapter = eca_id_to_adapter(id);
	if (adapter == NULL || event_port_id == NULL)
		return -EINVAL;

	*event_port_id = adapter->event_port_id;

	return 0;
}