F: drivers/crypto/snow3g/
F: doc/guides/cryptodevs/snow3g.rst
+Null Crypto PMD
+M: Declan Doherty <declan.doherty@intel.com>
+F: drivers/crypto/null/
+F: doc/guides/cryptodevs/null.rst
+
Packet processing
-----------------
}
}
+ /* Create 2 NULL devices if required */
+ if (gbl_cryptodev_type == RTE_CRYPTODEV_NULL_PMD) {
+ nb_devs = rte_cryptodev_count_devtype(
+ RTE_CRYPTODEV_NULL_PMD);
+ if (nb_devs < 2) {
+ for (i = nb_devs; i < 2; i++) {
+ int dev_id = rte_eal_vdev_init(
+ CRYPTODEV_NAME_NULL_PMD, NULL);
+
+ TEST_ASSERT(dev_id >= 0,
+ "Failed to create instance %u of"
+ " pmd : %s",
+ i, CRYPTODEV_NAME_NULL_PMD);
+ }
+ }
+ }
+
nb_devs = rte_cryptodev_count();
if (nb_devs < 1) {
RTE_LOG(ERR, USER1, "No crypto devices found?");
QUOTE_512_BYTES,
"Plaintext data not as expected");
+ return TEST_SUCCESS;
+}
+
+static int
+test_null_cipher_only_operation(void)
+{
+ struct crypto_testsuite_params *ts_params = &testsuite_params;
+ struct crypto_unittest_params *ut_params = &unittest_params;
+
+ /* Generate test mbuf data and space for digest */
+ ut_params->ibuf = setup_test_string(ts_params->mbuf_pool,
+ catch_22_quote, QUOTE_512_BYTES, 0);
+
+ /* Setup Cipher Parameters */
+ ut_params->cipher_xform.type = RTE_CRYPTO_SYM_XFORM_CIPHER;
+ ut_params->cipher_xform.next = NULL;
+
+ ut_params->cipher_xform.cipher.algo = RTE_CRYPTO_CIPHER_NULL;
+ ut_params->cipher_xform.cipher.op = RTE_CRYPTO_CIPHER_OP_ENCRYPT;
+
+ /* Create Crypto session*/
+ ut_params->sess = rte_cryptodev_sym_session_create(
+ ts_params->valid_devs[0], &ut_params->cipher_xform);
+ TEST_ASSERT_NOT_NULL(ut_params->sess, "Session creation failed");
+
+ /* Generate Crypto op data structure */
+ ut_params->op = rte_crypto_op_alloc(ts_params->op_mpool,
+ RTE_CRYPTO_OP_TYPE_SYMMETRIC);
+ TEST_ASSERT_NOT_NULL(ut_params->op,
+ "Failed to allocate symmetric crypto operation struct");
+
+ /* Set crypto operation data parameters */
+ rte_crypto_op_attach_sym_session(ut_params->op, ut_params->sess);
+
+ struct rte_crypto_sym_op *sym_op = ut_params->op->sym;
+
+ /* set crypto operation source mbuf */
+ sym_op->m_src = ut_params->ibuf;
+
+ sym_op->cipher.data.offset = 0;
+ sym_op->cipher.data.length = QUOTE_512_BYTES;
+
+ /* Process crypto operation */
+ ut_params->op = process_crypto_request(ts_params->valid_devs[0],
+ ut_params->op);
+ TEST_ASSERT_NOT_NULL(ut_params->op, "no crypto operation returned");
+
+ TEST_ASSERT_EQUAL(ut_params->op->status, RTE_CRYPTO_OP_STATUS_SUCCESS,
+ "crypto operation processing failed");
+
/* Validate obuf */
+ TEST_ASSERT_BUFFERS_ARE_EQUAL(
+ rte_pktmbuf_mtod(ut_params->op->sym->m_src, uint8_t *),
+ catch_22_quote,
+ QUOTE_512_BYTES,
+ "Ciphertext data not as expected");
+
+ return TEST_SUCCESS;
+}
+
+static int
+test_null_auth_only_operation(void)
+{
+ struct crypto_testsuite_params *ts_params = &testsuite_params;
+ struct crypto_unittest_params *ut_params = &unittest_params;
+
+ /* Generate test mbuf data and space for digest */
+ ut_params->ibuf = setup_test_string(ts_params->mbuf_pool,
+ catch_22_quote, QUOTE_512_BYTES, 0);
+
+ /* Setup HMAC Parameters */
+ ut_params->auth_xform.type = RTE_CRYPTO_SYM_XFORM_AUTH;
+ ut_params->auth_xform.next = NULL;
+
+ ut_params->auth_xform.auth.algo = RTE_CRYPTO_AUTH_NULL;
+ ut_params->auth_xform.auth.op = RTE_CRYPTO_AUTH_OP_GENERATE;
+
+ /* Create Crypto session*/
+ ut_params->sess = rte_cryptodev_sym_session_create(
+ ts_params->valid_devs[0], &ut_params->auth_xform);
+ TEST_ASSERT_NOT_NULL(ut_params->sess, "Session creation failed");
+
+ /* Generate Crypto op data structure */
+ ut_params->op = rte_crypto_op_alloc(ts_params->op_mpool,
+ RTE_CRYPTO_OP_TYPE_SYMMETRIC);
+ TEST_ASSERT_NOT_NULL(ut_params->op,
+ "Failed to allocate symmetric crypto operation struct");
+
+ /* Set crypto operation data parameters */
+ rte_crypto_op_attach_sym_session(ut_params->op, ut_params->sess);
+
+ struct rte_crypto_sym_op *sym_op = ut_params->op->sym;
+
+ sym_op->m_src = ut_params->ibuf;
+
+ sym_op->auth.data.offset = 0;
+ sym_op->auth.data.length = QUOTE_512_BYTES;
+
+ /* Process crypto operation */
+ ut_params->op = process_crypto_request(ts_params->valid_devs[0],
+ ut_params->op);
+ TEST_ASSERT_NOT_NULL(ut_params->op, "no crypto operation returned");
TEST_ASSERT_EQUAL(ut_params->op->status, RTE_CRYPTO_OP_STATUS_SUCCESS,
- "Digest verification failed");
+ "crypto operation processing failed");
+
+ return TEST_SUCCESS;
+}
+
+static int
+test_null_cipher_auth_operation(void)
+{
+ struct crypto_testsuite_params *ts_params = &testsuite_params;
+ struct crypto_unittest_params *ut_params = &unittest_params;
+
+ /* Generate test mbuf data and space for digest */
+ ut_params->ibuf = setup_test_string(ts_params->mbuf_pool,
+ catch_22_quote, QUOTE_512_BYTES, 0);
+
+ /* Setup Cipher Parameters */
+ ut_params->cipher_xform.type = RTE_CRYPTO_SYM_XFORM_CIPHER;
+ ut_params->cipher_xform.next = &ut_params->auth_xform;
+
+ ut_params->cipher_xform.cipher.algo = RTE_CRYPTO_CIPHER_NULL;
+ ut_params->cipher_xform.cipher.op = RTE_CRYPTO_CIPHER_OP_ENCRYPT;
+
+ /* Setup HMAC Parameters */
+ ut_params->auth_xform.type = RTE_CRYPTO_SYM_XFORM_AUTH;
+ ut_params->auth_xform.next = NULL;
+
+ ut_params->auth_xform.auth.algo = RTE_CRYPTO_AUTH_NULL;
+ ut_params->auth_xform.auth.op = RTE_CRYPTO_AUTH_OP_GENERATE;
+
+ /* Create Crypto session*/
+ ut_params->sess = rte_cryptodev_sym_session_create(
+ ts_params->valid_devs[0], &ut_params->cipher_xform);
+ TEST_ASSERT_NOT_NULL(ut_params->sess, "Session creation failed");
+
+ /* Generate Crypto op data structure */
+ ut_params->op = rte_crypto_op_alloc(ts_params->op_mpool,
+ RTE_CRYPTO_OP_TYPE_SYMMETRIC);
+ TEST_ASSERT_NOT_NULL(ut_params->op,
+ "Failed to allocate symmetric crypto operation struct");
+
+ /* Set crypto operation data parameters */
+ rte_crypto_op_attach_sym_session(ut_params->op, ut_params->sess);
+
+ struct rte_crypto_sym_op *sym_op = ut_params->op->sym;
+
+ sym_op->m_src = ut_params->ibuf;
+
+ sym_op->cipher.data.offset = 0;
+ sym_op->cipher.data.length = QUOTE_512_BYTES;
+
+ sym_op->auth.data.offset = 0;
+ sym_op->auth.data.length = QUOTE_512_BYTES;
+
+ /* Process crypto operation */
+ ut_params->op = process_crypto_request(ts_params->valid_devs[0],
+ ut_params->op);
+ TEST_ASSERT_NOT_NULL(ut_params->op, "no crypto operation returned");
+
+ TEST_ASSERT_EQUAL(ut_params->op->status, RTE_CRYPTO_OP_STATUS_SUCCESS,
+ "crypto operation processing failed");
+
+ /* Validate obuf */
+ TEST_ASSERT_BUFFERS_ARE_EQUAL(
+ rte_pktmbuf_mtod(ut_params->op->sym->m_src, uint8_t *),
+ catch_22_quote,
+ QUOTE_512_BYTES,
+ "Ciphertext data not as expected");
+
+ return TEST_SUCCESS;
+}
+
+static int
+test_null_auth_cipher_operation(void)
+{
+ struct crypto_testsuite_params *ts_params = &testsuite_params;
+ struct crypto_unittest_params *ut_params = &unittest_params;
+
+ /* Generate test mbuf data and space for digest */
+ ut_params->ibuf = setup_test_string(ts_params->mbuf_pool,
+ catch_22_quote, QUOTE_512_BYTES, 0);
+
+ /* Setup Cipher Parameters */
+ ut_params->cipher_xform.type = RTE_CRYPTO_SYM_XFORM_CIPHER;
+ ut_params->cipher_xform.next = NULL;
+
+ ut_params->cipher_xform.cipher.algo = RTE_CRYPTO_CIPHER_NULL;
+ ut_params->cipher_xform.cipher.op = RTE_CRYPTO_CIPHER_OP_ENCRYPT;
+
+ /* Setup HMAC Parameters */
+ ut_params->auth_xform.type = RTE_CRYPTO_SYM_XFORM_AUTH;
+ ut_params->auth_xform.next = &ut_params->cipher_xform;
+
+ ut_params->auth_xform.auth.algo = RTE_CRYPTO_AUTH_NULL;
+ ut_params->auth_xform.auth.op = RTE_CRYPTO_AUTH_OP_GENERATE;
+
+ /* Create Crypto session*/
+ ut_params->sess = rte_cryptodev_sym_session_create(
+ ts_params->valid_devs[0], &ut_params->cipher_xform);
+ TEST_ASSERT_NOT_NULL(ut_params->sess, "Session creation failed");
+
+ /* Generate Crypto op data structure */
+ ut_params->op = rte_crypto_op_alloc(ts_params->op_mpool,
+ RTE_CRYPTO_OP_TYPE_SYMMETRIC);
+ TEST_ASSERT_NOT_NULL(ut_params->op,
+ "Failed to allocate symmetric crypto operation struct");
+
+ /* Set crypto operation data parameters */
+ rte_crypto_op_attach_sym_session(ut_params->op, ut_params->sess);
+
+ struct rte_crypto_sym_op *sym_op = ut_params->op->sym;
+
+ sym_op->m_src = ut_params->ibuf;
+
+ sym_op->cipher.data.offset = 0;
+ sym_op->cipher.data.length = QUOTE_512_BYTES;
+
+ sym_op->auth.data.offset = 0;
+ sym_op->auth.data.length = QUOTE_512_BYTES;
+
+ /* Process crypto operation */
+ ut_params->op = process_crypto_request(ts_params->valid_devs[0],
+ ut_params->op);
+ TEST_ASSERT_NOT_NULL(ut_params->op, "no crypto operation returned");
+
+ TEST_ASSERT_EQUAL(ut_params->op->status, RTE_CRYPTO_OP_STATUS_SUCCESS,
+ "crypto operation processing failed");
+
+ /* Validate obuf */
+ TEST_ASSERT_BUFFERS_ARE_EQUAL(
+ rte_pktmbuf_mtod(ut_params->op->sym->m_src, uint8_t *),
+ catch_22_quote,
+ QUOTE_512_BYTES,
+ "Ciphertext data not as expected");
+
+ return TEST_SUCCESS;
+}
+
+
+static int
+test_null_invalid_operation(void)
+{
+ struct crypto_testsuite_params *ts_params = &testsuite_params;
+ struct crypto_unittest_params *ut_params = &unittest_params;
+
+ /* Setup Cipher Parameters */
+ ut_params->cipher_xform.type = RTE_CRYPTO_SYM_XFORM_CIPHER;
+ ut_params->cipher_xform.next = NULL;
+
+ ut_params->cipher_xform.cipher.algo = RTE_CRYPTO_CIPHER_AES_CBC;
+ ut_params->cipher_xform.cipher.op = RTE_CRYPTO_CIPHER_OP_ENCRYPT;
+
+ /* Create Crypto session*/
+ ut_params->sess = rte_cryptodev_sym_session_create(
+ ts_params->valid_devs[0], &ut_params->cipher_xform);
+ TEST_ASSERT_NULL(ut_params->sess,
+ "Session creation succeeded unexpectedly");
+
+
+ /* Setup HMAC Parameters */
+ ut_params->auth_xform.type = RTE_CRYPTO_SYM_XFORM_AUTH;
+ ut_params->auth_xform.next = NULL;
+
+ ut_params->auth_xform.auth.algo = RTE_CRYPTO_AUTH_SHA1_HMAC;
+ ut_params->auth_xform.auth.op = RTE_CRYPTO_AUTH_OP_GENERATE;
+
+ /* Create Crypto session*/
+ ut_params->sess = rte_cryptodev_sym_session_create(
+ ts_params->valid_devs[0], &ut_params->auth_xform);
+ TEST_ASSERT_NULL(ut_params->sess,
+ "Session creation succeeded unexpectedly");
return TEST_SUCCESS;
}
+#define NULL_BURST_LENGTH (32)
+
+static int
+test_null_burst_operation(void)
+{
+ struct crypto_testsuite_params *ts_params = &testsuite_params;
+ struct crypto_unittest_params *ut_params = &unittest_params;
+
+ unsigned i, burst_len = NULL_BURST_LENGTH;
+
+ struct rte_crypto_op *burst[NULL_BURST_LENGTH] = { NULL };
+ struct rte_crypto_op *burst_dequeued[NULL_BURST_LENGTH] = { NULL };
+
+ /* Setup Cipher Parameters */
+ ut_params->cipher_xform.type = RTE_CRYPTO_SYM_XFORM_CIPHER;
+ ut_params->cipher_xform.next = &ut_params->auth_xform;
+
+ ut_params->cipher_xform.cipher.algo = RTE_CRYPTO_CIPHER_NULL;
+ ut_params->cipher_xform.cipher.op = RTE_CRYPTO_CIPHER_OP_ENCRYPT;
+
+ /* Setup HMAC Parameters */
+ ut_params->auth_xform.type = RTE_CRYPTO_SYM_XFORM_AUTH;
+ ut_params->auth_xform.next = NULL;
+
+ ut_params->auth_xform.auth.algo = RTE_CRYPTO_AUTH_NULL;
+ ut_params->auth_xform.auth.op = RTE_CRYPTO_AUTH_OP_GENERATE;
+
+ /* Create Crypto session*/
+ ut_params->sess = rte_cryptodev_sym_session_create(
+ ts_params->valid_devs[0], &ut_params->cipher_xform);
+ TEST_ASSERT_NOT_NULL(ut_params->sess, "Session creation failed");
+
+ TEST_ASSERT_EQUAL(rte_crypto_op_bulk_alloc(ts_params->op_mpool,
+ RTE_CRYPTO_OP_TYPE_SYMMETRIC, burst, burst_len),
+ burst_len, "failed to generate burst of crypto ops");
+
+ /* Generate an operation for each mbuf in burst */
+ for (i = 0; i < burst_len; i++) {
+ struct rte_mbuf *m = rte_pktmbuf_alloc(ts_params->mbuf_pool);
+
+ TEST_ASSERT_NOT_NULL(m, "Failed to allocate mbuf");
+
+ unsigned *data = (unsigned *)rte_pktmbuf_append(m,
+ sizeof(unsigned));
+
+ TEST_ASSERT_NOT_NULL(data, "Failed to append data to mbuf");
+ *data = i;
+
+ rte_crypto_op_attach_sym_session(burst[i], ut_params->sess);
+
+ burst[i]->sym->m_src = m;
+ }
+
+ /* Process crypto operation */
+ TEST_ASSERT_EQUAL(rte_cryptodev_enqueue_burst(ts_params->valid_devs[0],
+ 0, burst, burst_len),
+ burst_len,
+ "Error enqueuing burst");
+
+ TEST_ASSERT_EQUAL(rte_cryptodev_dequeue_burst(ts_params->valid_devs[0],
+ 0, burst_dequeued, burst_len),
+ burst_len,
+ "Error dequeuing burst");
+
+
+ for (i = 0; i < burst_len; i++) {
+ TEST_ASSERT_EQUAL(
+ *rte_pktmbuf_mtod(burst[i]->sym->m_src, uint32_t *),
+ *rte_pktmbuf_mtod(burst_dequeued[i]->sym->m_src,
+ uint32_t *),
+ "data not as expected");
+
+ rte_pktmbuf_free(burst[i]->sym->m_src);
+ rte_crypto_op_free(burst[i]);
+ }
+
+ return TEST_SUCCESS;
+}
+
static struct unit_test_suite cryptodev_qat_testsuite = {
.suite_name = "Crypto QAT Unit Test Suite",
.setup = testsuite_setup,
}
};
+static struct unit_test_suite cryptodev_null_testsuite = {
+ .suite_name = "Crypto Device NULL Unit Test Suite",
+ .setup = testsuite_setup,
+ .teardown = testsuite_teardown,
+ .unit_test_cases = {
+ TEST_CASE_ST(ut_setup, ut_teardown,
+ test_null_auth_only_operation),
+ TEST_CASE_ST(ut_setup, ut_teardown,
+ test_null_cipher_only_operation),
+ TEST_CASE_ST(ut_setup, ut_teardown,
+ test_null_cipher_auth_operation),
+ TEST_CASE_ST(ut_setup, ut_teardown,
+ test_null_auth_cipher_operation),
+ TEST_CASE_ST(ut_setup, ut_teardown,
+ test_null_invalid_operation),
+ TEST_CASE_ST(ut_setup, ut_teardown,
+ test_null_burst_operation),
+
+ TEST_CASES_END() /**< NULL terminate unit test array */
+ }
+};
+
static int
test_cryptodev_qat(void /*argv __rte_unused, int argc __rte_unused*/)
{
.callback = test_cryptodev_aesni_gcm,
};
+static int
+test_cryptodev_null(void)
+{
+ gbl_cryptodev_type = RTE_CRYPTODEV_NULL_PMD;
+
+ return unit_test_suite_runner(&cryptodev_null_testsuite);
+}
+
+static struct test_command cryptodev_null_cmd = {
+ .command = "cryptodev_null_autotest",
+ .callback = test_cryptodev_null,
+};
+
static int
test_cryptodev_sw_snow3g(void /*argv __rte_unused, int argc __rte_unused*/)
{
REGISTER_TEST_COMMAND(cryptodev_qat_cmd);
REGISTER_TEST_COMMAND(cryptodev_aesni_mb_cmd);
REGISTER_TEST_COMMAND(cryptodev_aesni_gcm_cmd);
+REGISTER_TEST_COMMAND(cryptodev_null_cmd);
REGISTER_TEST_COMMAND(cryptodev_sw_snow3g_cmd);
CONFIG_RTE_LIBRTE_PMD_SNOW3G=n
CONFIG_RTE_LIBRTE_PMD_SNOW3G_DEBUG=n
+#
+# Compile PMD for NULL Crypto device
+#
+CONFIG_RTE_LIBRTE_PMD_NULL_CRYPTO=y
+
#
# Compile librte_ring
#
.. BSD LICENSE
- Copyright(c) 2015 Intel Corporation. All rights reserved.
+ Copyright(c) 2015 - 2016 Intel Corporation. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
aesni_mb
aesni_gcm
+ null
snow3g
qat
--- /dev/null
+.. BSD LICENSE
+ Copyright(c) 2016 Intel Corporation. All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+
+ * Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in
+ the documentation and/or other materials provided with the
+ distribution.
+ * Neither the name of Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived
+ from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+Null Crypto Poll Mode Driver
+============================
+
+The Null Crypto PMD (**librte_pmd_null_crypto**) is a minimal software
+implementation of a crypto poll mode driver. As a null device it does not
+modify the data in the mbuf on which the crypto operation is to operate, and
+it supports only a single cipher algorithm and a single authentication
+algorithm.
+
+When a burst of crypto operations is submitted to a Null Crypto PMD for
+processing, each operation in the burst will be enqueued in an internal buffer
+for collection on a subsequent dequeue call, as long as the operation carries
+a valid rte_cryptodev_session or a valid rte_crypto_xform chain of operations.
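+
+As a rough sketch of this flow (using the symmetric cryptodev API exercised by
+the unit tests added in this patch; ``BURST_SIZE``, ``dev_id``, the null
+session ``sess``, the operation mempool ``op_mpool`` and the ``mbufs`` array
+are assumed to have been set up already), a burst pushed through the null PMD
+comes back with its payload untouched:
+
+.. code-block:: c
+
+   struct rte_crypto_op *ops[BURST_SIZE], *deq[BURST_SIZE];
+   uint16_t i;
+
+   /* allocate one symmetric crypto operation per mbuf in the burst */
+   rte_crypto_op_bulk_alloc(op_mpool, RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+           ops, BURST_SIZE);
+
+   for (i = 0; i < BURST_SIZE; i++) {
+           rte_crypto_op_attach_sym_session(ops[i], sess);
+           ops[i]->sym->m_src = mbufs[i]; /* payload is passed through as-is */
+   }
+
+   /* enqueue on queue pair 0 and collect the same operations on dequeue */
+   rte_cryptodev_enqueue_burst(dev_id, 0, ops, BURST_SIZE);
+   rte_cryptodev_dequeue_burst(dev_id, 0, deq, BURST_SIZE);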
+
+Features
+--------
+
+Modes:
+
+* RTE_CRYPTO_SYM_XFORM_CIPHER ONLY
+* RTE_CRYPTO_SYM_XFORM_AUTH ONLY
+* RTE_CRYPTO_SYM_XFORM_CIPHER THEN RTE_CRYPTO_SYM_XFORM_AUTH
+* RTE_CRYPTO_SYM_XFORM_AUTH THEN RTE_CRYPTO_SYM_XFORM_CIPHER
+
+Cipher algorithms:
+
+* RTE_CRYPTO_CIPHER_NULL
+
+Authentication algorithms:
+
+* RTE_CRYPTO_AUTH_NULL
+
+Limitations
+-----------
+
+* Only in-place is currently supported (destination address is the same as
+ source address).
+
+Installation
+------------
+
+The Null Crypto PMD is enabled and built by default in both the Linux and
+FreeBSD builds.
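+
+Once built, an instance of the device can be created at runtime. The minimal
+sketch below mirrors what the cryptodev unit test setup added in this patch
+does (the device may equally be created with the EAL ``--vdev`` option; error
+handling is reduced to a bare minimum here):
+
+.. code-block:: c
+
+   #include <stdio.h>
+
+   #include <rte_dev.h>
+   #include <rte_cryptodev.h>
+
+   /* instantiate one null crypto vdev; a negative return value
+    * indicates that device creation failed */
+   if (rte_eal_vdev_init(CRYPTODEV_NAME_NULL_PMD, NULL) < 0)
+           printf("Failed to create a null crypto vdev\n");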
Added new Crypto PMD to support AES-GCM authenticated encryption and
authenticated decryption in SW.
+* **Added NULL Crypto PMD**
+
+ Added new Crypto PMD to support null crypto operations in SW.
Resolved Issues
---------------
DIRS-$(CONFIG_RTE_LIBRTE_PMD_AESNI_MB) += aesni_mb
DIRS-$(CONFIG_RTE_LIBRTE_PMD_QAT) += qat
DIRS-$(CONFIG_RTE_LIBRTE_PMD_SNOW3G) += snow3g
+DIRS-$(CONFIG_RTE_LIBRTE_PMD_NULL_CRYPTO) += null
include $(RTE_SDK)/mk/rte.subdir.mk
--- /dev/null
+# BSD LICENSE
+#
+# Copyright(c) 2016 Intel Corporation. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+
+# library name
+LIB = librte_pmd_null_crypto.a
+
+# build flags
+CFLAGS += -O3
+CFLAGS += $(WERROR_FLAGS)
+
+# library version
+LIBABIVER := 1
+
+# versioning export map
+EXPORT_MAP := rte_pmd_null_crypto_version.map
+
+# library source files
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_NULL_CRYPTO) += null_crypto_pmd.c
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_NULL_CRYPTO) += null_crypto_pmd_ops.c
+
+# export include files
+SYMLINK-y-include +=
+
+# library dependencies
+DEPDIRS-$(CONFIG_RTE_LIBRTE_PMD_NULL_CRYPTO) += lib/librte_eal
+DEPDIRS-$(CONFIG_RTE_LIBRTE_PMD_NULL_CRYPTO) += lib/librte_mbuf
+DEPDIRS-$(CONFIG_RTE_LIBRTE_PMD_NULL_CRYPTO) += lib/librte_cryptodev
+
+include $(RTE_SDK)/mk/rte.lib.mk
--- /dev/null
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2016 Intel Corporation. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <rte_common.h>
+#include <rte_config.h>
+#include <rte_cryptodev_pmd.h>
+#include <rte_dev.h>
+#include <rte_malloc.h>
+
+#include "null_crypto_pmd_private.h"
+
+/**
+ * Global static parameter used to create a unique name for each crypto device.
+ */
+static unsigned unique_name_id;
+
+static inline int
+create_unique_device_name(char *name, size_t size)
+{
+ int ret;
+
+ if (name == NULL)
+ return -EINVAL;
+
+ ret = snprintf(name, size, "%s_%u", CRYPTODEV_NAME_NULL_PMD,
+ unique_name_id++);
+ if (ret < 0)
+ return ret;
+ return 0;
+}
+
+
+/** verify and set session parameters */
+int
+null_crypto_set_session_parameters(
+ struct null_crypto_session *sess __rte_unused,
+ const struct rte_crypto_sym_xform *xform)
+{
+ if (xform == NULL) {
+ return -1;
+ } else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
+ xform->next == NULL) {
+ /* Authentication Only */
+ if (xform->auth.algo == RTE_CRYPTO_AUTH_NULL)
+ return 0;
+ } else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
+ xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
+ /* Authentication then Cipher */
+ if (xform->auth.algo == RTE_CRYPTO_AUTH_NULL &&
+ xform->next->cipher.algo == RTE_CRYPTO_CIPHER_NULL)
+ return 0;
+ } else if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
+ xform->next == NULL) {
+ /* Cipher Only */
+ if (xform->cipher.algo == RTE_CRYPTO_CIPHER_NULL)
+ return 0;
+ } else if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
+ xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
+ /* Cipher then Authentication */
+ if (xform->cipher.algo == RTE_CRYPTO_CIPHER_NULL &&
+ xform->next->auth.algo == RTE_CRYPTO_AUTH_NULL)
+ return 0;
+ }
+
+ return -1;
+}
+
+/** Process crypto operation for mbuf */
+static int
+process_op(const struct null_crypto_qp *qp, struct rte_crypto_op *op,
+ struct null_crypto_session *sess __rte_unused)
+{
+ /* set status as successful by default */
+ op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
+
+ /*
+ * if crypto session and operation are valid just enqueue the packet
+ * in the processed ring
+ */
+ return rte_ring_enqueue(qp->processed_pkts, (void *)op);
+}
+
+static struct null_crypto_session *
+get_session(struct null_crypto_qp *qp, struct rte_crypto_sym_op *op)
+{
+ struct null_crypto_session *sess;
+
+ if (op->type == RTE_CRYPTO_SYM_OP_WITH_SESSION) {
+ if (unlikely(op->session == NULL ||
+ op->session->type != RTE_CRYPTODEV_NULL_PMD))
+ return NULL;
+
+ sess = (struct null_crypto_session *)op->session->_private;
+ } else {
+ struct rte_cryptodev_session *c_sess = NULL;
+
+ if (rte_mempool_get(qp->sess_mp, (void **)&c_sess))
+ return NULL;
+
+ sess = (struct null_crypto_session *)c_sess->_private;
+
+ if (null_crypto_set_session_parameters(sess, op->xform) != 0)
+ return NULL;
+ }
+
+ return sess;
+}
+
+/** Enqueue burst */
+static uint16_t
+null_crypto_pmd_enqueue_burst(void *queue_pair, struct rte_crypto_op **ops,
+ uint16_t nb_ops)
+{
+ struct null_crypto_session *sess;
+ struct null_crypto_qp *qp = queue_pair;
+
+ int i, retval;
+
+ for (i = 0; i < nb_ops; i++) {
+ sess = get_session(qp, ops[i]->sym);
+ if (unlikely(sess == NULL))
+ goto enqueue_err;
+
+ retval = process_op(qp, ops[i], sess);
+ if (unlikely(retval < 0))
+ goto enqueue_err;
+ }
+
+ qp->qp_stats.enqueued_count += i;
+ return i;
+
+enqueue_err:
+ if (ops[i])
+ ops[i]->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+
+ qp->qp_stats.enqueue_err_count++;
+ return i;
+}
+
+/** Dequeue burst */
+static uint16_t
+null_crypto_pmd_dequeue_burst(void *queue_pair, struct rte_crypto_op **ops,
+ uint16_t nb_ops)
+{
+ struct null_crypto_qp *qp = queue_pair;
+
+ unsigned nb_dequeued;
+
+ nb_dequeued = rte_ring_dequeue_burst(qp->processed_pkts,
+ (void **)ops, nb_ops);
+ qp->qp_stats.dequeued_count += nb_dequeued;
+
+ return nb_dequeued;
+}
+
+static int cryptodev_null_uninit(const char *name);
+
+/** Create crypto device */
+static int
+cryptodev_null_create(const char *name,
+ struct rte_crypto_vdev_init_params *init_params)
+{
+ struct rte_cryptodev *dev;
+ char crypto_dev_name[RTE_CRYPTODEV_NAME_MAX_LEN];
+ struct null_crypto_private *internals;
+
+ /* create a unique device name */
+ if (create_unique_device_name(crypto_dev_name,
+ RTE_CRYPTODEV_NAME_MAX_LEN) != 0) {
+ NULL_CRYPTO_LOG_ERR("failed to create unique cryptodev name");
+ return -EINVAL;
+ }
+
+ dev = rte_cryptodev_pmd_virtual_dev_init(crypto_dev_name,
+ sizeof(struct null_crypto_private),
+ init_params->socket_id);
+ if (dev == NULL) {
+ NULL_CRYPTO_LOG_ERR("failed to create cryptodev vdev");
+ goto init_error;
+ }
+
+ dev->dev_type = RTE_CRYPTODEV_NULL_PMD;
+ dev->dev_ops = null_crypto_pmd_ops;
+
+ /* register rx/tx burst functions for data path */
+ dev->dequeue_burst = null_crypto_pmd_dequeue_burst;
+ dev->enqueue_burst = null_crypto_pmd_enqueue_burst;
+
+ internals = dev->data->dev_private;
+
+ internals->max_nb_qpairs = init_params->max_nb_queue_pairs;
+ internals->max_nb_sessions = init_params->max_nb_sessions;
+
+ return 0;
+
+init_error:
+ NULL_CRYPTO_LOG_ERR("driver %s: cryptodev_null_create failed", name);
+ cryptodev_null_uninit(crypto_dev_name);
+
+ return -EFAULT;
+}
+
+/** Initialise null crypto device */
+static int
+cryptodev_null_init(const char *name,
+ const char *input_args)
+{
+ struct rte_crypto_vdev_init_params init_params = {
+ RTE_CRYPTODEV_VDEV_DEFAULT_MAX_NB_QUEUE_PAIRS,
+ RTE_CRYPTODEV_VDEV_DEFAULT_MAX_NB_SESSIONS,
+ rte_socket_id()
+ };
+
+ rte_cryptodev_parse_vdev_init_params(&init_params, input_args);
+
+ RTE_LOG(INFO, PMD, "Initialising %s on NUMA node %d\n", name,
+ init_params.socket_id);
+ RTE_LOG(INFO, PMD, " Max number of queue pairs = %d\n",
+ init_params.max_nb_queue_pairs);
+ RTE_LOG(INFO, PMD, " Max number of sessions = %d\n",
+ init_params.max_nb_sessions);
+
+ return cryptodev_null_create(name, &init_params);
+}
+
+/** Uninitialise null crypto device */
+static int
+cryptodev_null_uninit(const char *name)
+{
+ if (name == NULL)
+ return -EINVAL;
+
+ RTE_LOG(INFO, PMD, "Closing null crypto device %s on numa socket %u\n",
+ name, rte_socket_id());
+
+ return 0;
+}
+
+static struct rte_driver cryptodev_null_pmd_drv = {
+ .name = CRYPTODEV_NAME_NULL_PMD,
+ .type = PMD_VDEV,
+ .init = cryptodev_null_init,
+ .uninit = cryptodev_null_uninit
+};
+
+PMD_REGISTER_DRIVER(cryptodev_null_pmd_drv);
--- /dev/null
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2016 Intel Corporation. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <string.h>
+
+#include <rte_common.h>
+#include <rte_malloc.h>
+#include <rte_cryptodev_pmd.h>
+
+#include "null_crypto_pmd_private.h"
+
+/** Configure device */
+static int
+null_crypto_pmd_config(__rte_unused struct rte_cryptodev *dev)
+{
+ return 0;
+}
+
+/** Start device */
+static int
+null_crypto_pmd_start(__rte_unused struct rte_cryptodev *dev)
+{
+ return 0;
+}
+
+/** Stop device */
+static void
+null_crypto_pmd_stop(__rte_unused struct rte_cryptodev *dev)
+{
+}
+
+/** Close device */
+static int
+null_crypto_pmd_close(__rte_unused struct rte_cryptodev *dev)
+{
+ return 0;
+}
+
+/** Get device statistics */
+static void
+null_crypto_pmd_stats_get(struct rte_cryptodev *dev,
+ struct rte_cryptodev_stats *stats)
+{
+ int qp_id;
+
+ for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) {
+ struct null_crypto_qp *qp = dev->data->queue_pairs[qp_id];
+
+ stats->enqueued_count += qp->qp_stats.enqueued_count;
+ stats->dequeued_count += qp->qp_stats.dequeued_count;
+
+ stats->enqueue_err_count += qp->qp_stats.enqueue_err_count;
+ stats->dequeue_err_count += qp->qp_stats.dequeue_err_count;
+ }
+}
+
+/** Reset device statistics */
+static void
+null_crypto_pmd_stats_reset(struct rte_cryptodev *dev)
+{
+ int qp_id;
+
+ for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) {
+ struct null_crypto_qp *qp = dev->data->queue_pairs[qp_id];
+
+ memset(&qp->qp_stats, 0, sizeof(qp->qp_stats));
+ }
+}
+
+
+/** Get device info */
+static void
+null_crypto_pmd_info_get(struct rte_cryptodev *dev,
+ struct rte_cryptodev_info *dev_info)
+{
+ struct null_crypto_private *internals = dev->data->dev_private;
+
+ if (dev_info != NULL) {
+ dev_info->dev_type = dev->dev_type;
+ dev_info->max_nb_queue_pairs = internals->max_nb_qpairs;
+ dev_info->sym.max_nb_sessions = internals->max_nb_sessions;
+ }
+}
+
+/** Release queue pair */
+static int
+null_crypto_pmd_qp_release(struct rte_cryptodev *dev, uint16_t qp_id)
+{
+ if (dev->data->queue_pairs[qp_id] != NULL) {
+ rte_free(dev->data->queue_pairs[qp_id]);
+ dev->data->queue_pairs[qp_id] = NULL;
+ }
+ return 0;
+}
+
+/** Set a unique name for the queue pair based on its dev_id and qp_id */
+static int
+null_crypto_pmd_qp_set_unique_name(struct rte_cryptodev *dev,
+ struct null_crypto_qp *qp)
+{
+ unsigned n = snprintf(qp->name, sizeof(qp->name),
+ "null_crypto_pmd_%u_qp_%u",
+ dev->data->dev_id, qp->id);
+
+ if (n >= sizeof(qp->name))
+ return -1;
+
+ return 0;
+}
+
+/** Create a ring to place processed packets on */
+static struct rte_ring *
+null_crypto_pmd_qp_create_processed_pkts_ring(struct null_crypto_qp *qp,
+ unsigned ring_size, int socket_id)
+{
+ struct rte_ring *r;
+
+ r = rte_ring_lookup(qp->name);
+ if (r) {
+ if (r->prod.size >= ring_size) {
+ NULL_CRYPTO_LOG_INFO(
+ "Reusing existing ring %s for processed packets",
+ qp->name);
+ return r;
+ }
+
+ NULL_CRYPTO_LOG_INFO(
+ "Unable to reuse existing ring %s for processed packets",
+ qp->name);
+ return NULL;
+ }
+
+ return rte_ring_create(qp->name, ring_size, socket_id,
+ RING_F_SP_ENQ | RING_F_SC_DEQ);
+}
+
+/** Setup a queue pair */
+static int
+null_crypto_pmd_qp_setup(struct rte_cryptodev *dev, uint16_t qp_id,
+ const struct rte_cryptodev_qp_conf *qp_conf,
+ int socket_id)
+{
+ struct null_crypto_private *internals = dev->data->dev_private;
+ struct null_crypto_qp *qp;
+ int retval;
+
+ if (qp_id >= internals->max_nb_qpairs) {
+ NULL_CRYPTO_LOG_ERR("Invalid qp_id %u, greater than maximum "
+ "number of queue pairs supported (%u).",
+ qp_id, internals->max_nb_qpairs);
+ return (-EINVAL);
+ }
+
+ /* Free memory prior to re-allocation if needed. */
+ if (dev->data->queue_pairs[qp_id] != NULL)
+ null_crypto_pmd_qp_release(dev, qp_id);
+
+ /* Allocate the queue pair data structure. */
+ qp = rte_zmalloc_socket("Null Crypto PMD Queue Pair", sizeof(*qp),
+ RTE_CACHE_LINE_SIZE, socket_id);
+ if (qp == NULL) {
+ NULL_CRYPTO_LOG_ERR("Failed to allocate queue pair memory");
+ return (-ENOMEM);
+ }
+
+ qp->id = qp_id;
+ dev->data->queue_pairs[qp_id] = qp;
+
+ retval = null_crypto_pmd_qp_set_unique_name(dev, qp);
+ if (retval) {
+ NULL_CRYPTO_LOG_ERR("Failed to create unique name for null "
+ "crypto device");
+ goto qp_setup_cleanup;
+ }
+
+ qp->processed_pkts = null_crypto_pmd_qp_create_processed_pkts_ring(qp,
+ qp_conf->nb_descriptors, socket_id);
+ if (qp->processed_pkts == NULL) {
+ NULL_CRYPTO_LOG_ERR("Failed to create unique name for null "
+ "crypto device");
+ goto qp_setup_cleanup;
+ }
+
+ qp->sess_mp = dev->data->session_pool;
+
+ memset(&qp->qp_stats, 0, sizeof(qp->qp_stats));
+
+ return 0;
+
+qp_setup_cleanup:
+ if (qp)
+ rte_free(qp);
+
+ return -1;
+}
+
+/** Start queue pair */
+static int
+null_crypto_pmd_qp_start(__rte_unused struct rte_cryptodev *dev,
+ __rte_unused uint16_t queue_pair_id)
+{
+ return -ENOTSUP;
+}
+
+/** Stop queue pair */
+static int
+null_crypto_pmd_qp_stop(__rte_unused struct rte_cryptodev *dev,
+ __rte_unused uint16_t queue_pair_id)
+{
+ return -ENOTSUP;
+}
+
+/** Return the number of allocated queue pairs */
+static uint32_t
+null_crypto_pmd_qp_count(struct rte_cryptodev *dev)
+{
+ return dev->data->nb_queue_pairs;
+}
+
+/** Returns the size of the NULL crypto session structure */
+static unsigned
+null_crypto_pmd_session_get_size(struct rte_cryptodev *dev __rte_unused)
+{
+ return sizeof(struct null_crypto_session);
+}
+
+/** Configure a null crypto session from a crypto xform chain */
+static void *
+null_crypto_pmd_session_configure(struct rte_cryptodev *dev __rte_unused,
+ struct rte_crypto_sym_xform *xform, void *sess)
+{
+ int retval;
+
+ if (unlikely(sess == NULL)) {
+ NULL_CRYPTO_LOG_ERR("invalid session struct");
+ return NULL;
+ }
+ retval = null_crypto_set_session_parameters(
+ (struct null_crypto_session *)sess, xform);
+ if (retval != 0) {
+ NULL_CRYPTO_LOG_ERR("failed configure session parameters");
+ return NULL;
+ }
+
+ return sess;
+}
+
+/** Clear the memory of session so it doesn't leave key material behind */
+static void
+null_crypto_pmd_session_clear(struct rte_cryptodev *dev __rte_unused,
+ void *sess)
+{
+ if (sess)
+ memset(sess, 0, sizeof(struct null_crypto_session));
+}
+
+struct rte_cryptodev_ops pmd_ops = {
+ .dev_configure = null_crypto_pmd_config,
+ .dev_start = null_crypto_pmd_start,
+ .dev_stop = null_crypto_pmd_stop,
+ .dev_close = null_crypto_pmd_close,
+
+ .stats_get = null_crypto_pmd_stats_get,
+ .stats_reset = null_crypto_pmd_stats_reset,
+
+ .dev_infos_get = null_crypto_pmd_info_get,
+
+ .queue_pair_setup = null_crypto_pmd_qp_setup,
+ .queue_pair_release = null_crypto_pmd_qp_release,
+ .queue_pair_start = null_crypto_pmd_qp_start,
+ .queue_pair_stop = null_crypto_pmd_qp_stop,
+ .queue_pair_count = null_crypto_pmd_qp_count,
+
+ .session_get_size = null_crypto_pmd_session_get_size,
+ .session_configure = null_crypto_pmd_session_configure,
+ .session_clear = null_crypto_pmd_session_clear
+};
+
+struct rte_cryptodev_ops *null_crypto_pmd_ops = &pmd_ops;
--- /dev/null
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2016 Intel Corporation. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _NULL_CRYPTO_PMD_PRIVATE_H_
+#define _NULL_CRYPTO_PMD_PRIVATE_H_
+
+#include "rte_config.h"
+
+#define NULL_CRYPTO_LOG_ERR(fmt, args...) \
+ RTE_LOG(ERR, CRYPTODEV, "[%s] %s() line %u: " fmt "\n", \
+ CRYPTODEV_NAME_NULL_PMD, \
+ __func__, __LINE__, ## args)
+
+#ifdef RTE_LIBRTE_NULL_CRYPTO_DEBUG
+#define NULL_CRYPTO_LOG_INFO(fmt, args...) \
+ RTE_LOG(INFO, CRYPTODEV, "[%s] %s() line %u: " fmt "\n", \
+ CRYPTODEV_NAME_NULL_PMD, \
+ __func__, __LINE__, ## args)
+
+#define NULL_CRYPTO_LOG_DBG(fmt, args...) \
+ RTE_LOG(DEBUG, CRYPTODEV, "[%s] %s() line %u: " fmt "\n", \
+ CRYPTODEV_NAME_NULL_PMD, \
+ __func__, __LINE__, ## args)
+#else
+#define NULL_CRYPTO_LOG_INFO(fmt, args...)
+#define NULL_CRYPTO_LOG_DBG(fmt, args...)
+#endif
+
+
+/** private data structure for each NULL crypto device */
+struct null_crypto_private {
+ unsigned max_nb_qpairs; /**< Max number of queue pairs */
+ unsigned max_nb_sessions; /**< Max number of sessions */
+};
+
+/** NULL crypto queue pair */
+struct null_crypto_qp {
+ uint16_t id;
+ /**< Queue Pair Identifier */
+ char name[RTE_CRYPTODEV_NAME_LEN];
+ /**< Unique Queue Pair Name */
+ struct rte_ring *processed_pkts;
+ /**< Ring for placing processed packets */
+ struct rte_mempool *sess_mp;
+ /**< Session Mempool */
+ struct rte_cryptodev_stats qp_stats;
+ /**< Queue pair statistics */
+} __rte_cache_aligned;
+
+
+/** NULL crypto private session structure */
+struct null_crypto_session {
+ uint32_t reserved;
+} __rte_cache_aligned;
+
+/** Set and validate NULL crypto session parameters */
+extern int
+null_crypto_set_session_parameters(struct null_crypto_session *sess,
+ const struct rte_crypto_sym_xform *xform);
+
+/** device specific operations function pointer structure */
+extern struct rte_cryptodev_ops *null_crypto_pmd_ops;
+
+#endif /* _NULL_CRYPTO_PMD_PRIVATE_H_ */
--- /dev/null
+DPDK_16.04 {
+ local: *;
+};
_LDLIBS-$(CONFIG_RTE_LIBRTE_PMD_AESNI_MB) += -lrte_pmd_aesni_mb
_LDLIBS-$(CONFIG_RTE_LIBRTE_PMD_AESNI_GCM) += -lrte_pmd_aesni_gcm
+_LDLIBS-$(CONFIG_RTE_LIBRTE_PMD_NULL_CRYPTO) += -lrte_pmd_null_crypto
# AESNI MULTI BUFFER / GCM PMDs are dependent on the IPSec_MB library
ifeq ($(CONFIG_RTE_LIBRTE_PMD_AESNI_MB),y)