1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2017 Intel Corporation
7 #include <rte_string_fns.h>
8 #include <rte_malloc.h>
10 #include "cryptodev_pmd.h"
13 * Parse name from argument
/*
 * rte_kvargs handler for the "name" device argument: copies the value
 * string into params->name. extra_args must point to a
 * struct rte_cryptodev_pmd_init_params (cast below).
 * NOTE(review): the error return value is on a line not visible in this
 * view — presumably -EINVAL on truncation; confirm against full source.
 */
16 rte_cryptodev_pmd_parse_name_arg(const char *key __rte_unused,
17 const char *value, void *extra_args)
19 struct rte_cryptodev_pmd_init_params *params = extra_args;
/* strlcpy returns the length of the source string; a value >= the
 * buffer size means the name did not fit and was truncated. */
22 n = strlcpy(params->name, value, RTE_CRYPTODEV_NAME_MAX_LEN);
23 if (n >= RTE_CRYPTODEV_NAME_MAX_LEN)
30 * Parse unsigned integer from argument
/*
 * rte_kvargs handler for unsigned-integer device arguments
 * (max queue pairs, socket id). Parses the decimal value string and
 * stores it through extra_args, which must point to a uint32_t.
 */
33 rte_cryptodev_pmd_parse_uint_arg(const char *key __rte_unused,
34 const char *value, void *extra_args)
/* NOTE(review): errno is tested below but no `errno = 0;` is visible in
 * this view — presumably it is cleared on an elided line before the
 * strtol call; verify against the full source. */
40 i = strtol(value, &end, 10);
/* Reject trailing non-digit characters, out-of-range values (ERANGE)
 * and negative numbers. */
41 if (*end != 0 || errno != 0 || i < 0)
/* Store the parsed value as uint32_t for the caller. */
44 *((uint32_t *)extra_args) = i;
/*
 * Parse a devargs string into crypto PMD init params.
 * Processes, in order: max queue pairs, socket id, and device name,
 * each via its dedicated kvargs handler above. The kvlist is freed on
 * exit (visible below); the intermediate error paths are on lines not
 * visible in this view — presumably they jump to the common free/return.
 */
49 rte_cryptodev_pmd_parse_input_args(
50 struct rte_cryptodev_pmd_init_params *params,
53 struct rte_kvargs *kvlist = NULL;
/* Tokenize the argument string against the table of valid keys. */
60 kvlist = rte_kvargs_parse(args, cryptodev_pmd_valid_params);
/* Maximum number of queue pairs -> params->max_nb_queue_pairs. */
64 ret = rte_kvargs_process(kvlist,
65 RTE_CRYPTODEV_PMD_MAX_NB_QP_ARG,
66 &rte_cryptodev_pmd_parse_uint_arg,
67 &params->max_nb_queue_pairs,
/* Socket id (destination argument elided in this view — presumably
 * &params->socket_id; confirm against full source). */
71 ret = rte_kvargs_process(kvlist,
72 RTE_CRYPTODEV_PMD_SOCKET_ID_ARG,
73 &rte_cryptodev_pmd_parse_uint_arg,
/* User-supplied device name (destination elided — presumably params). */
78 ret = rte_kvargs_process(kvlist,
79 RTE_CRYPTODEV_PMD_NAME_ARG,
80 &rte_cryptodev_pmd_parse_name_arg,
/* Common exit: release the parsed kvargs list. */
87 rte_kvargs_free(kvlist);
/*
 * Allocate and initialise a new crypto device.
 * A user-supplied name from devargs overrides the caller's name
 * (the override assignment itself is on a line not visible here).
 * Private device data is allocated only in the primary process;
 * secondaries attach to the primary's shared data.
 * Returns the new device, or NULL on allocation failure (the NULL
 * returns are on elided lines — confirm against full source).
 */
91 struct rte_cryptodev *
92 rte_cryptodev_pmd_create(const char *name,
93 struct rte_device *device,
94 struct rte_cryptodev_pmd_init_params *params)
96 struct rte_cryptodev *cryptodev;
98 if (params->name[0] != '\0') {
/* NOTE(review): the explicit "\n" here and below likely produces a
 * double newline if CDEV_LOG_INFO appends one itself — upstream style
 * elsewhere in this block (e.g. the CDEV_LOG_ERR calls) omits it. */
99 CDEV_LOG_INFO("User specified device name = %s\n", params->name);
103 CDEV_LOG_INFO("Creating cryptodev %s\n", name);
105 CDEV_LOG_INFO("Initialisation parameters - name: %s,"
106 "socket id: %d, max queue pairs: %u",
107 name, params->socket_id, params->max_nb_queue_pairs);
109 /* allocate device structure */
110 cryptodev = rte_cryptodev_pmd_allocate(name, params->socket_id);
111 if (cryptodev == NULL) {
112 CDEV_LOG_ERR("Failed to allocate crypto device for %s", name);
116 /* allocate private device structure */
117 if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
/* Zeroed, socket-local allocation of the PMD's private data area;
 * remaining rte_zmalloc_socket arguments (alignment, socket) are on
 * elided lines. */
118 cryptodev->data->dev_private =
119 rte_zmalloc_socket("cryptodev device private",
120 params->private_data_size,
124 if (cryptodev->data->dev_private == NULL) {
125 CDEV_LOG_ERR("Cannot allocate memory for cryptodev %s"
126 " private data", name);
/* Roll back the device allocation on private-data failure. */
128 rte_cryptodev_pmd_release_device(cryptodev);
133 cryptodev->device = device;
135 /* initialise user call-back tail queue */
136 TAILQ_INIT(&(cryptodev->link_intr_cbs));
/*
 * Tear down a crypto device created by rte_cryptodev_pmd_create().
 * Releases the device slot, frees the private data (primary process
 * only — the free call itself is on an elided line, presumably
 * rte_free(dev_priv)), and clears the device back-pointers.
 */
142 rte_cryptodev_pmd_destroy(struct rte_cryptodev *cryptodev)
/* Save the private-data pointer before the device is released. */
145 void *dev_priv = cryptodev->data->dev_private;
147 CDEV_LOG_INFO("Closing crypto device %s", cryptodev->device->name);
149 /* free crypto device */
150 retval = rte_cryptodev_pmd_release_device(cryptodev);
/* Only the primary process owns (and may free) the private data. */
154 if (rte_eal_process_type() == RTE_PROC_PRIMARY)
/* Detach the shared data so stale pointers are not reused. */
158 cryptodev->device = NULL;
159 cryptodev->data = NULL;
/*
 * Hook called when a PMD finishes probing a device.
 * In a secondary process the device is expected to be fully usable at
 * this point, so its fast-path ops are published into the per-device
 * rte_crypto_fp_ops[] slot; in the primary this is a no-op here
 * (publication happens elsewhere, e.g. at device start).
 */
165 rte_cryptodev_pmd_probing_finish(struct rte_cryptodev *cryptodev)
/* Tolerate a NULL device: nothing to publish. */
167 if (cryptodev == NULL)
170 * for secondary process, at that point we expect device
171 * to be already 'usable', so shared data and all function
172 * pointers for fast-path devops have to be setup properly
173 * inside rte_cryptodev.
175 if (rte_eal_process_type() == RTE_PROC_SECONDARY)
176 cryptodev_fp_ops_set(rte_crypto_fp_ops +
177 cryptodev->data->dev_id, cryptodev);
/*
 * Placeholder enqueue callback installed while a device is not
 * configured: logs an error (the log macro and return value are on
 * elided lines — presumably returns 0, i.e. nothing enqueued).
 */
181 dummy_crypto_enqueue_burst(__rte_unused void *qp,
182 __rte_unused struct rte_crypto_op **ops,
183 __rte_unused uint16_t nb_ops)
186 "crypto enqueue burst requested for unconfigured device");
/*
 * Placeholder dequeue callback installed while a device is not
 * configured: logs an error (the log macro and return value are on
 * elided lines — presumably returns 0, i.e. nothing dequeued).
 */
192 dummy_crypto_dequeue_burst(__rte_unused void *qp,
193 __rte_unused struct rte_crypto_op **ops,
194 __rte_unused uint16_t nb_ops)
197 "crypto dequeue burst requested for unconfigured device");
/*
 * Reset a device's fast-path ops slot to safe dummies so that a
 * data-path call on an unconfigured device fails loudly instead of
 * jumping through stale pointers. The static dummy arrays provide
 * valid (if inert) qp data/callback storage; the final assignment of
 * `dummy` into *fp_ops is on lines not visible in this view.
 */
203 cryptodev_fp_ops_reset(struct rte_crypto_fp_ops *fp_ops)
205 static struct rte_cryptodev_cb_rcu dummy_cb[RTE_MAX_QUEUES_PER_PORT];
206 static void *dummy_data[RTE_MAX_QUEUES_PER_PORT];
207 static const struct rte_crypto_fp_ops dummy = {
208 .enqueue_burst = dummy_crypto_enqueue_burst,
209 .dequeue_burst = dummy_crypto_dequeue_burst,
/*
 * Publish a configured device's real fast-path entry points into its
 * rte_crypto_fp_ops[] slot: burst function pointers, the queue-pair
 * data array, and the per-qp enqueue/dequeue callback lists.
 * Inverse of cryptodev_fp_ops_reset().
 */
221 cryptodev_fp_ops_set(struct rte_crypto_fp_ops *fp_ops,
222 const struct rte_cryptodev *dev)
224 fp_ops->enqueue_burst = dev->enqueue_burst;
225 fp_ops->dequeue_burst = dev->dequeue_burst;
226 fp_ops->qp.data = dev->data->queue_pairs;
227 fp_ops->qp.enq_cb = dev->enq_cbs;
228 fp_ops->qp.deq_cb = dev->deq_cbs;
/*
 * Retrieve the event metadata attached to a crypto op, whose location
 * depends on the op/session type:
 *   - symmetric op with session: the session's user data area;
 *   - asymmetric op with session: the session's event_mdata field;
 *   - sessionless op: inline in the op at private_data_offset
 *     (only if the offset is non-zero).
 * The final fallthrough return for unmatched cases is on lines past
 * this view — presumably NULL; confirm against full source.
 */
232 rte_cryptodev_session_event_mdata_get(struct rte_crypto_op *op)
234 if (op->type == RTE_CRYPTO_OP_TYPE_SYMMETRIC &&
235 op->sess_type == RTE_CRYPTO_OP_WITH_SESSION)
236 return rte_cryptodev_sym_session_get_user_data(op->sym->session);
237 else if (op->type == RTE_CRYPTO_OP_TYPE_ASYMMETRIC &&
238 op->sess_type == RTE_CRYPTO_OP_WITH_SESSION)
239 return op->asym->session->event_mdata;
240 else if (op->sess_type == RTE_CRYPTO_OP_SESSIONLESS &&
241 op->private_data_offset)
242 return ((uint8_t *)op + op->private_data_offset);