/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2016-2017 Intel Corporation. All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <rte_common.h>
#include <rte_config.h>
#include <rte_cryptodev_pmd.h>
#include <rte_cryptodev_vdev.h>
#include <rte_malloc.h>
#include <rte_vdev.h>

#include "null_crypto_pmd_private.h"
42 /** verify and set session parameters */
44 null_crypto_set_session_parameters(
45 struct null_crypto_session *sess __rte_unused,
46 const struct rte_crypto_sym_xform *xform)
50 } else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
51 xform->next == NULL) {
52 /* Authentication Only */
53 if (xform->auth.algo == RTE_CRYPTO_AUTH_NULL)
55 } else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
56 xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
57 /* Authentication then Cipher */
58 if (xform->auth.algo == RTE_CRYPTO_AUTH_NULL &&
59 xform->next->cipher.algo == RTE_CRYPTO_CIPHER_NULL)
61 } else if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
62 xform->next == NULL) {
64 if (xform->cipher.algo == RTE_CRYPTO_CIPHER_NULL)
66 } else if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
67 xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
68 /* Cipher then Authentication */
69 if (xform->cipher.algo == RTE_CRYPTO_CIPHER_NULL &&
70 xform->next->auth.algo == RTE_CRYPTO_AUTH_NULL)
77 /** Process crypto operation for mbuf */
79 process_op(const struct null_crypto_qp *qp, struct rte_crypto_op *op,
80 struct null_crypto_session *sess __rte_unused)
82 /* set status as successful by default */
83 op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
86 * if crypto session and operation are valid just enqueue the packet
87 * in the processed ring
89 return rte_ring_enqueue(qp->processed_pkts, (void *)op);
92 static struct null_crypto_session *
93 get_session(struct null_crypto_qp *qp, struct rte_crypto_op *op)
95 struct null_crypto_session *sess;
96 struct rte_crypto_sym_op *sym_op = op->sym;
98 if (op->sess_type == RTE_CRYPTO_OP_WITH_SESSION) {
99 if (unlikely(sym_op->session == NULL ||
100 sym_op->session->dev_type != RTE_CRYPTODEV_NULL_PMD))
103 sess = (struct null_crypto_session *)sym_op->session->_private;
105 struct rte_cryptodev_session *c_sess = NULL;
107 if (rte_mempool_get(qp->sess_mp, (void **)&c_sess))
110 sess = (struct null_crypto_session *)c_sess->_private;
112 if (null_crypto_set_session_parameters(sess, sym_op->xform) != 0)
121 null_crypto_pmd_enqueue_burst(void *queue_pair, struct rte_crypto_op **ops,
124 struct null_crypto_session *sess;
125 struct null_crypto_qp *qp = queue_pair;
129 for (i = 0; i < nb_ops; i++) {
130 sess = get_session(qp, ops[i]);
131 if (unlikely(sess == NULL))
134 retval = process_op(qp, ops[i], sess);
135 if (unlikely(retval < 0))
139 qp->qp_stats.enqueued_count += i;
144 ops[i]->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
146 qp->qp_stats.enqueue_err_count++;
152 null_crypto_pmd_dequeue_burst(void *queue_pair, struct rte_crypto_op **ops,
155 struct null_crypto_qp *qp = queue_pair;
157 unsigned nb_dequeued;
159 nb_dequeued = rte_ring_dequeue_burst(qp->processed_pkts,
160 (void **)ops, nb_ops, NULL);
161 qp->qp_stats.dequeued_count += nb_dequeued;
166 static int cryptodev_null_remove(const char *name);
168 /** Create crypto device */
170 cryptodev_null_create(const char *name,
171 struct rte_vdev_device *vdev,
172 struct rte_crypto_vdev_init_params *init_params)
174 struct rte_cryptodev *dev;
175 struct null_crypto_private *internals;
177 if (init_params->name[0] == '\0')
178 snprintf(init_params->name, sizeof(init_params->name),
181 dev = rte_cryptodev_vdev_pmd_init(init_params->name,
182 sizeof(struct null_crypto_private),
183 init_params->socket_id,
186 NULL_CRYPTO_LOG_ERR("failed to create cryptodev vdev");
190 dev->dev_type = RTE_CRYPTODEV_NULL_PMD;
191 dev->dev_ops = null_crypto_pmd_ops;
193 /* register rx/tx burst functions for data path */
194 dev->dequeue_burst = null_crypto_pmd_dequeue_burst;
195 dev->enqueue_burst = null_crypto_pmd_enqueue_burst;
197 dev->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO |
198 RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING |
199 RTE_CRYPTODEV_FF_MBUF_SCATTER_GATHER;
201 internals = dev->data->dev_private;
203 internals->max_nb_qpairs = init_params->max_nb_queue_pairs;
204 internals->max_nb_sessions = init_params->max_nb_sessions;
209 NULL_CRYPTO_LOG_ERR("driver %s: cryptodev_null_create failed",
211 cryptodev_null_remove(init_params->name);
216 /** Initialise null crypto device */
218 cryptodev_null_probe(struct rte_vdev_device *dev)
220 struct rte_crypto_vdev_init_params init_params = {
221 RTE_CRYPTODEV_VDEV_DEFAULT_MAX_NB_QUEUE_PAIRS,
222 RTE_CRYPTODEV_VDEV_DEFAULT_MAX_NB_SESSIONS,
228 name = rte_vdev_device_name(dev);
232 RTE_LOG(INFO, PMD, "Initialising %s on NUMA node %d\n",
233 name, init_params.socket_id);
234 if (init_params.name[0] != '\0')
235 RTE_LOG(INFO, PMD, " User defined name = %s\n",
237 RTE_LOG(INFO, PMD, " Max number of queue pairs = %d\n",
238 init_params.max_nb_queue_pairs);
239 RTE_LOG(INFO, PMD, " Max number of sessions = %d\n",
240 init_params.max_nb_sessions);
242 return cryptodev_null_create(name, dev, &init_params);
245 /** Uninitialise null crypto device */
247 cryptodev_null_remove(const char *name)
252 RTE_LOG(INFO, PMD, "Closing null crypto device %s on numa socket %u\n",
253 name, rte_socket_id());
/** vdev remove entry point - resolves the vdev name and removes it. */
static int
cryptodev_null_remove_dev(struct rte_vdev_device *dev)
{
	return cryptodev_null_remove(rte_vdev_device_name(dev));
}
264 static struct rte_vdev_driver cryptodev_null_pmd_drv = {
265 .probe = cryptodev_null_probe,
266 .remove = cryptodev_null_remove_dev,
269 RTE_PMD_REGISTER_VDEV(CRYPTODEV_NAME_NULL_PMD, cryptodev_null_pmd_drv);
270 RTE_PMD_REGISTER_ALIAS(CRYPTODEV_NAME_NULL_PMD, cryptodev_null_pmd);
271 RTE_PMD_REGISTER_PARAM_STRING(CRYPTODEV_NAME_NULL_PMD,
272 "max_nb_queue_pairs=<int> "
273 "max_nb_sessions=<int> "