Symmetric crypto = Y
Sym operation chaining = Y
HW Accelerated = Y
+Protocol offload = Y
In Place SGL = Y
OOP SGL In SGL Out = Y
OOP SGL In LB Out = Y
* ``RTE_CRYPTO_AEAD_AES_GCM``
* ``RTE_CRYPTO_AEAD_AES_CCM``
+Protocol offloads:
+
+* ``RTE_SECURITY_PROTOCOL_DOCSIS``
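The DOCSIS offload is driven through the ``rte_security`` session API rather
than a plain cryptodev session: the application supplies a single
``RTE_CRYPTO_CIPHER_AES_DOCSISBPI`` transform (128- or 256-bit key, 16-byte
IV) together with a DOCSIS xform giving the direction. A minimal sketch of
creating a downlink session follows; the helper name, device id, key buffer,
session mempool and ``IV_OFFSET`` convention are illustrative only, and the
three-argument ``rte_security_session_create()`` signature is the one in use
at the time of this change::

    #include <rte_cryptodev.h>
    #include <rte_security.h>

    /* Common convention: carry the IV in the op private area */
    #define IV_OFFSET (sizeof(struct rte_crypto_op) + \
            sizeof(struct rte_crypto_sym_op))

    static struct rte_security_session *
    create_docsis_dl_session(uint8_t dev_id, const uint8_t *key,
            uint16_t key_len, struct rte_mempool *sess_mp)
    {
        struct rte_security_ctx *sec_ctx = rte_cryptodev_get_sec_ctx(dev_id);

        /* Single AES-DOCSISBPI cipher transform, encrypt direction */
        struct rte_crypto_sym_xform cipher_xform = {
            .type = RTE_CRYPTO_SYM_XFORM_CIPHER,
            .cipher = {
                .op = RTE_CRYPTO_CIPHER_OP_ENCRYPT,
                .algo = RTE_CRYPTO_CIPHER_AES_DOCSISBPI,
                .key = { .data = key, .length = key_len },
                .iv = { .offset = IV_OFFSET, .length = 16 },
            },
            .next = NULL,
        };

        /* Lookaside DOCSIS protocol session, downlink direction */
        struct rte_security_session_conf conf = {
            .action_type = RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL,
            .protocol = RTE_SECURITY_PROTOCOL_DOCSIS,
            .docsis = { .direction = RTE_SECURITY_DOCSIS_DOWNLINK },
            .crypto_xform = &cipher_xform,
        };

        return rte_security_session_create(sec_ctx, &conf, sess_mp);
    }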
Supported Chains
~~~~~~~~~~~~~~~~
generations in the same process if planning to use for GCM.
* The mixed algo feature on GEN2 is not supported by all kernel drivers. Check
the notes under the Available Kernel Drivers table below for specific details.
+* Out-of-place operation is not supported for the combined Crypto-CRC
+  DOCSIS security protocol.
+* ``RTE_CRYPTO_CIPHER_DES_DOCSISBPI`` is not supported for the combined
+  Crypto-CRC DOCSIS security protocol.
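Because the combined Crypto-CRC case is in-place only, the op carries a
single source mbuf with two regions: the ``auth`` fields select the bytes
covered by the Ethernet CRC (the PMD writes or checks the 4-byte CRC
immediately after that region), and the ``cipher`` fields select the bytes
to encrypt or decrypt. A minimal sketch, with placeholder offsets and
lengths to be derived from the actual DOCSIS framing::

    #include <rte_crypto.h>
    #include <rte_mbuf.h>

    static void
    prepare_docsis_op(struct rte_crypto_op *op, struct rte_mbuf *m,
            uint32_t crc_data_ofs, uint32_t crc_data_len,
            uint32_t cipher_ofs, uint32_t cipher_len)
    {
        op->sym->m_src = m;
        op->sym->m_dst = NULL;  /* in-place only for combined Crypto-CRC */

        /* Bytes covered by the CRC; the CRC itself sits right after them */
        op->sym->auth.data.offset = crc_data_ofs;
        op->sym->auth.data.length = crc_data_len;

        /* Bytes handled by AES-DOCSISBPI (typically payload plus CRC) */
        op->sym->cipher.data.offset = cipher_ofs;
        op->sym->cipher.data.length = cipher_len;
    }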
Extra notes on KASUMI F9
~~~~~~~~~~~~~~~~~~~~~~~~
Added support for lookaside protocol offload for DOCSIS through the
``rte_security`` API.
+* **Updated the QuickAssist Technology (QAT) PMD.**
+
+ * Added support for lookaside protocol offload for DOCSIS through the
+ ``rte_security`` API.
+
* **Added support for BPF_ABS/BPF_IND load instructions.**
Added support for two BPF non-generic instructions:
ifeq ($(CONFIG_RTE_LIBRTE_PMD_QAT_SYM),y)
LDLIBS += -lrte_cryptodev
LDLIBS += -lcrypto
+ifeq ($(CONFIG_RTE_LIBRTE_SECURITY),y)
+ LDLIBS += -lrte_net
+endif
CFLAGS += -DBUILD_QAT_SYM
SRCS-y += qat_sym.c
SRCS-y += qat_sym_session.c
dep = dependency('libcrypto', required: false)
qat_includes += include_directories('.')
qat_deps += 'cryptodev'
+qat_deps += 'net'
+qat_deps += 'security'
if dep.found()
# Add our source files to the list
qat_sources += files('qat_sym_pmd.c',
#include <rte_crypto_sym.h>
#include <rte_bus_pci.h>
#include <rte_byteorder.h>
+#ifdef RTE_LIBRTE_SECURITY
+#include <rte_net_crc.h>
+#endif
#include "qat_sym.h"
return sym_op->cipher.data.length - last_block_len;
}
+#ifdef RTE_LIBRTE_SECURITY
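+/*
+ * Encrypt (downlink) direction only: compute an Ethernet CRC-32 over the
+ * op's auth region in m_src and write it immediately after that region,
+ * before the payload is encrypted.
+ */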
+static inline void
+qat_crc_generate(struct qat_sym_session *ctx,
+ struct rte_crypto_op *op)
+{
+ struct rte_crypto_sym_op *sym_op = op->sym;
+ uint32_t *crc, crc_length;
+ uint8_t *crc_data;
+
+ if (ctx->qat_dir == ICP_QAT_HW_CIPHER_ENCRYPT &&
+ sym_op->auth.data.length != 0) {
+
+ crc_length = sym_op->auth.data.length;
+ crc_data = rte_pktmbuf_mtod_offset(sym_op->m_src, uint8_t *,
+ sym_op->auth.data.offset);
+ crc = (uint32_t *)(crc_data + crc_length);
+
+ *crc = rte_net_crc_calc(crc_data, crc_length,
+ RTE_NET_CRC32_ETH);
+ }
+}
+#endif
+
static inline void
set_cipher_iv(uint16_t iv_length, uint16_t iv_offset,
struct icp_qat_fw_la_cipher_req_params *cipher_param,
void *op_cookie, enum qat_device_gen qat_dev_gen)
{
int ret = 0;
- struct qat_sym_session *ctx;
+ struct qat_sym_session *ctx = NULL;
struct icp_qat_fw_la_cipher_req_params *cipher_param;
struct icp_qat_fw_la_auth_req_params *auth_param;
register struct icp_qat_fw_la_bulk_req *qat_req;
uint64_t auth_data_end = 0;
uint8_t do_sgl = 0;
uint8_t in_place = 1;
+ uint8_t is_docsis_sec = 0;
int alignment_adjustment = 0;
struct rte_crypto_op *op = (struct rte_crypto_op *)in_op;
struct qat_sym_op_cookie *cookie =
QAT_DP_LOG(ERR, "QAT PMD only supports session oriented"
" requests, op (%p) is sessionless.", op);
return -EINVAL;
+ } else if (op->sess_type == RTE_CRYPTO_OP_WITH_SESSION) {
+ ctx = (struct qat_sym_session *)get_sym_session_private_data(
+ op->sym->session, cryptodev_qat_driver_id);
+#ifdef RTE_LIBRTE_SECURITY
+ } else {
+ ctx = (struct qat_sym_session *)get_sec_session_private_data(
+ op->sym->sec_session);
+ if (ctx && ctx->bpi_ctx == NULL) {
+ QAT_DP_LOG(ERR, "QAT PMD only supports security"
+ " operation requests for DOCSIS, op"
+ " (%p) is not for DOCSIS.", op);
+ return -EINVAL;
+ }
+ is_docsis_sec = 1;
+#endif
}
- ctx = (struct qat_sym_session *)get_sym_session_private_data(
- op->sym->session, cryptodev_qat_driver_id);
-
if (unlikely(ctx == NULL)) {
QAT_DP_LOG(ERR, "Session was not created for this device");
return -EINVAL;
cipher_ofs = op->sym->cipher.data.offset >> 3;
} else if (ctx->bpi_ctx) {
- /* DOCSIS - only send complete blocks to device
+ /* DOCSIS processing */
+#ifdef RTE_LIBRTE_SECURITY
+ if (is_docsis_sec) {
+ /* Check for OOP */
+ if (unlikely((op->sym->m_dst != NULL) &&
+ (op->sym->m_dst !=
+ op->sym->m_src))) {
+ QAT_DP_LOG(ERR,
+ "OOP not supported for DOCSIS "
+ "security");
+ op->status =
+ RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+ return -EINVAL;
+ }
+
+ /* Calculate CRC */
+ qat_crc_generate(ctx, op);
+ }
+#else
+ RTE_SET_USED(is_docsis_sec);
+#endif
+
+ /* Only send complete blocks to device.
* Process any partial block using CFB mode.
* Even if 0 complete blocks, still send this to device
* to get into rx queue for post-process and dequeuing
#define _QAT_SYM_H_
#include <rte_cryptodev_pmd.h>
+#ifdef RTE_LIBRTE_SECURITY
+#include <rte_net_crc.h>
+#endif
#ifdef BUILD_QAT_SYM
#include <openssl/evp.h>
return sym_op->cipher.data.length - last_block_len;
}
+#ifdef RTE_LIBRTE_SECURITY
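+/*
+ * Decrypt (uplink) direction only: after decryption, recompute the Ethernet
+ * CRC-32 over the op's auth region and compare it against the received CRC
+ * that follows the region; on mismatch mark the op as AUTH_FAILED.
+ */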
static inline void
-qat_sym_process_response(void **op, uint8_t *resp)
+qat_crc_verify(struct qat_sym_session *ctx, struct rte_crypto_op *op)
{
+ struct rte_crypto_sym_op *sym_op = op->sym;
+ uint32_t crc_offset, crc_length, crc;
+ uint8_t *crc_data;
+
+ if (ctx->qat_dir == ICP_QAT_HW_CIPHER_DECRYPT &&
+ sym_op->auth.data.length != 0) {
+
+ crc_offset = sym_op->auth.data.offset;
+ crc_length = sym_op->auth.data.length;
+ crc_data = rte_pktmbuf_mtod_offset(sym_op->m_src, uint8_t *,
+ crc_offset);
+ crc = rte_net_crc_calc(crc_data, crc_length, RTE_NET_CRC32_ETH);
+
+ if (crc != *(uint32_t *)(crc_data + crc_length))
+ op->status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
+ }
+}
+#endif
+
+static inline void
+qat_sym_process_response(void **op, uint8_t *resp)
+{
struct icp_qat_fw_comn_resp *resp_msg =
(struct icp_qat_fw_comn_resp *)resp;
struct rte_crypto_op *rx_op = (struct rte_crypto_op *)(uintptr_t)
(resp_msg->opaque_data);
+ struct qat_sym_session *sess;
#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
QAT_DP_HEXDUMP_LOG(DEBUG, "qat_response:", (uint8_t *)resp_msg,
rx_op->status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
} else {
- struct qat_sym_session *sess = (struct qat_sym_session *)
- get_sym_session_private_data(
- rx_op->sym->session,
- cryptodev_qat_driver_id);
+#ifdef RTE_LIBRTE_SECURITY
+ uint8_t is_docsis_sec = 0;
+
+ if (rx_op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) {
+ /*
+ * At this point any security op is assumed to be a DOCSIS op,
+ * as DOCSIS is the only security protocol this PMD supports
+ */
+ sess = (struct qat_sym_session *)
+ get_sec_session_private_data(
+ rx_op->sym->sec_session);
+ is_docsis_sec = 1;
+ } else
+#endif
+ {
+ sess = (struct qat_sym_session *)
+ get_sym_session_private_data(
+ rx_op->sym->session,
+ cryptodev_qat_driver_id);
+ }
+ rx_op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
- if (sess->bpi_ctx)
+ if (sess->bpi_ctx) {
qat_bpicipher_postprocess(sess, rx_op);
- rx_op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
+#ifdef RTE_LIBRTE_SECURITY
+ if (is_docsis_sec)
+ qat_crc_verify(sess, rx_op);
+#endif
+ }
}
*op = (void *)rx_op;
}
}, } \
}
+#ifdef RTE_LIBRTE_SECURITY
+#define QAT_SECURITY_SYM_CAPABILITIES \
+ { /* AES DOCSIS BPI */ \
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \
+ {.sym = { \
+ .xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER, \
+ {.cipher = { \
+ .algo = RTE_CRYPTO_CIPHER_AES_DOCSISBPI,\
+ .block_size = 16, \
+ .key_size = { \
+ .min = 16, \
+ .max = 32, \
+ .increment = 16 \
+ }, \
+ .iv_size = { \
+ .min = 16, \
+ .max = 16, \
+ .increment = 0 \
+ } \
+ }, } \
+ }, } \
+ }
+
+#define QAT_SECURITY_CAPABILITIES(sym) \
+ [0] = { /* DOCSIS Uplink */ \
+ .action = RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL, \
+ .protocol = RTE_SECURITY_PROTOCOL_DOCSIS, \
+ .docsis = { \
+ .direction = RTE_SECURITY_DOCSIS_UPLINK \
+ }, \
+ .crypto_capabilities = (sym) \
+ }, \
+ [1] = { /* DOCSIS Downlink */ \
+ .action = RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL, \
+ .protocol = RTE_SECURITY_PROTOCOL_DOCSIS, \
+ .docsis = { \
+ .direction = RTE_SECURITY_DOCSIS_DOWNLINK \
+ }, \
+ .crypto_capabilities = (sym) \
+ }
+#endif
+
#endif /* _QAT_SYM_CAPABILITIES_H_ */
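The capability macros above feed the array returned by the PMD's
``capabilities_get`` security op, so an application can probe for DOCSIS
support before creating sessions. A minimal sketch (helper name and device
id are placeholders):

    #include <stdbool.h>
    #include <rte_cryptodev.h>
    #include <rte_security.h>

    /* Check whether a device advertises lookaside DOCSIS offload for the
     * requested direction.
     */
    static bool
    docsis_offload_supported(uint8_t dev_id,
            enum rte_security_docsis_direction dir)
    {
        struct rte_security_ctx *ctx = rte_cryptodev_get_sec_ctx(dev_id);
        const struct rte_security_capability *cap;

        if (ctx == NULL)
            return false;

        cap = rte_security_capabilities_get(ctx);
        if (cap == NULL)
            return false;

        for (; cap->action != RTE_SECURITY_ACTION_TYPE_NONE; cap++) {
            if (cap->action == RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL &&
                    cap->protocol == RTE_SECURITY_PROTOCOL_DOCSIS &&
                    cap->docsis.direction == dir)
                return true;
        }
        return false;
    }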
#include <rte_malloc.h>
#include <rte_pci.h>
#include <rte_cryptodev_pmd.h>
+#ifdef RTE_LIBRTE_SECURITY
+#include <rte_security_driver.h>
+#endif
#include "qat_logs.h"
#include "qat_sym.h"
RTE_CRYPTODEV_END_OF_CAPABILITIES_LIST()
};
+#ifdef RTE_LIBRTE_SECURITY
+static const struct rte_cryptodev_capabilities
+ qat_security_sym_capabilities[] = {
+ QAT_SECURITY_SYM_CAPABILITIES,
+ RTE_CRYPTODEV_END_OF_CAPABILITIES_LIST()
+};
+
+static const struct rte_security_capability qat_security_capabilities[] = {
+ QAT_SECURITY_CAPABILITIES(qat_security_sym_capabilities),
+ {
+ .action = RTE_SECURITY_ACTION_TYPE_NONE
+ }
+};
+#endif
+
static int qat_sym_qp_release(struct rte_cryptodev *dev,
uint16_t queue_pair_id);
.sym_session_clear = qat_sym_session_clear
};
+#ifdef RTE_LIBRTE_SECURITY
+static const struct rte_security_capability *
+qat_security_cap_get(void *device __rte_unused)
+{
+ return qat_security_capabilities;
+}
+
+static struct rte_security_ops security_qat_ops = {
+ .session_create = qat_security_session_create,
+ .session_update = NULL,
+ .session_stats_get = NULL,
+ .session_destroy = qat_security_session_destroy,
+ .set_pkt_metadata = NULL,
+ .capabilities_get = qat_security_cap_get
+};
+#endif
+
static uint16_t
qat_sym_pmd_enqueue_op_burst(void *qp, struct rte_crypto_op **ops,
uint16_t nb_ops)
char name[RTE_CRYPTODEV_NAME_MAX_LEN];
struct rte_cryptodev *cryptodev;
struct qat_sym_dev_private *internals;
+#ifdef RTE_LIBRTE_SECURITY
+ struct rte_security_ctx *security_instance;
+#endif
snprintf(name, RTE_CRYPTODEV_NAME_MAX_LEN, "%s_%s",
qat_pci_dev->name, "sym");
RTE_CRYPTODEV_FF_OOP_SGL_IN_LB_OUT |
RTE_CRYPTODEV_FF_OOP_LB_IN_SGL_OUT |
RTE_CRYPTODEV_FF_OOP_LB_IN_LB_OUT |
RTE_CRYPTODEV_FF_DIGEST_ENCRYPTED;
+
+#ifdef RTE_LIBRTE_SECURITY
+ security_instance = rte_malloc("qat_sec",
+ sizeof(struct rte_security_ctx),
+ RTE_CACHE_LINE_SIZE);
+ if (security_instance == NULL) {
+ QAT_LOG(ERR, "rte_security_ctx memory alloc failed");
+ rte_cryptodev_pmd_destroy(cryptodev);
+ return -ENOMEM;
+ }
+
+ security_instance->device = (void *)cryptodev;
+ security_instance->ops = &security_qat_ops;
+ security_instance->sess_cnt = 0;
+ cryptodev->security_ctx = security_instance;
+ cryptodev->feature_flags |= RTE_CRYPTODEV_FF_SECURITY;
+#endif
internals = cryptodev->data->dev_private;
internals->qat_dev = qat_pci_dev;
/* free crypto device */
cryptodev = rte_cryptodev_pmd_get_dev(qat_pci_dev->sym_dev->sym_dev_id);
+#ifdef RTE_LIBRTE_SECURITY
+ rte_free(cryptodev->security_ctx);
+#endif
rte_cryptodev_pmd_destroy(cryptodev);
qat_pci_dev->sym_rte_dev.name = NULL;
qat_pci_dev->sym_dev = NULL;
#ifdef BUILD_QAT_SYM
+#include <rte_ether.h>
#include <rte_cryptodev.h>
+#ifdef RTE_LIBRTE_SECURITY
+#include <rte_security.h>
+#endif
#include "qat_sym_capabilities.h"
#include "qat_device.h"
#include <rte_log.h>
#include <rte_malloc.h>
#include <rte_crypto_sym.h>
+#ifdef RTE_LIBRTE_SECURITY
+#include <rte_security.h>
+#endif
#include "qat_logs.h"
#include "qat_sym_session.h"
}
return 0;
}
+
+#ifdef RTE_LIBRTE_SECURITY
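+/*
+ * A valid DOCSIS configuration is a single AES-DOCSISBPI cipher xform with
+ * a 128- or 256-bit key and a 16-byte IV, whose operation matches the
+ * DOCSIS direction: encrypt for downlink (CRC generate -> cipher encrypt),
+ * decrypt for uplink (cipher decrypt -> CRC verify).
+ */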
+static int
+qat_sec_session_check_docsis(struct rte_security_session_conf *conf)
+{
+ struct rte_crypto_sym_xform *crypto_sym = conf->crypto_xform;
+ struct rte_security_docsis_xform *docsis = &conf->docsis;
+
+ /* CRC generate -> Cipher encrypt */
+ if (docsis->direction == RTE_SECURITY_DOCSIS_DOWNLINK) {
+
+ if (crypto_sym != NULL &&
+ crypto_sym->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
+ crypto_sym->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT &&
+ crypto_sym->cipher.algo ==
+ RTE_CRYPTO_CIPHER_AES_DOCSISBPI &&
+ (crypto_sym->cipher.key.length ==
+ ICP_QAT_HW_AES_128_KEY_SZ ||
+ crypto_sym->cipher.key.length ==
+ ICP_QAT_HW_AES_256_KEY_SZ) &&
+ crypto_sym->cipher.iv.length == ICP_QAT_HW_AES_BLK_SZ &&
+ crypto_sym->next == NULL) {
+ return 0;
+ }
+ /* Cipher decrypt -> CRC verify */
+ } else if (docsis->direction == RTE_SECURITY_DOCSIS_UPLINK) {
+
+ if (crypto_sym != NULL &&
+ crypto_sym->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
+ crypto_sym->cipher.op == RTE_CRYPTO_CIPHER_OP_DECRYPT &&
+ crypto_sym->cipher.algo ==
+ RTE_CRYPTO_CIPHER_AES_DOCSISBPI &&
+ (crypto_sym->cipher.key.length ==
+ ICP_QAT_HW_AES_128_KEY_SZ ||
+ crypto_sym->cipher.key.length ==
+ ICP_QAT_HW_AES_256_KEY_SZ) &&
+ crypto_sym->cipher.iv.length == ICP_QAT_HW_AES_BLK_SZ &&
+ crypto_sym->next == NULL) {
+ return 0;
+ }
+ }
+
+ return -EINVAL;
+}
+
+static int
+qat_sec_session_set_docsis_parameters(struct rte_cryptodev *dev,
+ struct rte_security_session_conf *conf, void *session_private)
+{
+ int ret;
+ int qat_cmd_id;
+ struct rte_crypto_sym_xform *xform = NULL;
+ struct qat_sym_session *session = session_private;
+
+ ret = qat_sec_session_check_docsis(conf);
+ if (ret) {
+ QAT_LOG(ERR, "Unsupported DOCSIS security configuration");
+ return ret;
+ }
+
+ xform = conf->crypto_xform;
+
+ /* Verify the session physical address is known */
+ rte_iova_t session_paddr = rte_mempool_virt2iova(session);
+ if (session_paddr == 0 || session_paddr == RTE_BAD_IOVA) {
+ QAT_LOG(ERR,
+ "Session physical address unknown. Bad memory pool.");
+ return -EINVAL;
+ }
+
+ /* Set context descriptor physical address */
+ session->cd_paddr = session_paddr +
+ offsetof(struct qat_sym_session, cd);
+
+ session->min_qat_dev_gen = QAT_GEN1;
+
+ /* Get requested QAT command id */
+ qat_cmd_id = qat_get_cmd_id(xform);
+ if (qat_cmd_id < 0 || qat_cmd_id >= ICP_QAT_FW_LA_CMD_DELIMITER) {
+ QAT_LOG(ERR, "Unsupported xform chain requested");
+ return -ENOTSUP;
+ }
+ session->qat_cmd = (enum icp_qat_fw_la_cmd_id)qat_cmd_id;
+ switch (session->qat_cmd) {
+ case ICP_QAT_FW_LA_CMD_CIPHER:
+ ret = qat_sym_session_configure_cipher(dev, xform, session);
+ if (ret < 0)
+ return ret;
+ break;
+ default:
+ QAT_LOG(ERR, "Unsupported Service %u", session->qat_cmd);
+ return -ENOTSUP;
+ }
+
+ return 0;
+}
+
+int
+qat_security_session_create(void *dev,
+ struct rte_security_session_conf *conf,
+ struct rte_security_session *sess,
+ struct rte_mempool *mempool)
+{
+ void *sess_private_data;
+ struct rte_cryptodev *cdev = (struct rte_cryptodev *)dev;
+ int ret;
+
+ if (conf->protocol != RTE_SECURITY_PROTOCOL_DOCSIS) {
+ QAT_LOG(ERR, "Invalid security protocol");
+ return -EINVAL;
+ }
+
+ if (rte_mempool_get(mempool, &sess_private_data)) {
+ QAT_LOG(ERR, "Couldn't get object from session mempool");
+ return -ENOMEM;
+ }
+
+ ret = qat_sec_session_set_docsis_parameters(cdev, conf,
+ sess_private_data);
+ if (ret != 0) {
+ QAT_LOG(ERR, "Failed to configure session parameters");
+ /* Return session to mempool */
+ rte_mempool_put(mempool, sess_private_data);
+ return ret;
+ }
+
+ set_sec_session_private_data(sess, sess_private_data);
+
+ return ret;
+}
+
+int
+qat_security_session_destroy(void *dev,
+ struct rte_security_session *sess)
+{
+ void *sess_priv = get_sec_session_private_data(sess);
+ struct qat_sym_session *s = (struct qat_sym_session *)sess_priv;
+
+ if (sess_priv) {
+ struct rte_mempool *sess_mp = rte_mempool_from_obj(sess_priv);
+
+ if (s->bpi_ctx)
+ bpi_cipher_ctx_free(s->bpi_ctx);
+ memset(s, 0, qat_sym_session_get_private_size(dev));
+ set_sec_session_private_data(sess, NULL);
+ rte_mempool_put(sess_mp, sess_priv);
+ }
+ return 0;
+}
+#endif
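On the datapath the op is attached to the security session rather than a
cryptodev session before it is enqueued. A minimal sketch, with placeholder
names and no error handling; an uplink op whose received CRC does not match
comes back with ``RTE_CRYPTO_OP_STATUS_AUTH_FAILED``:

    #include <rte_crypto.h>
    #include <rte_cryptodev.h>
    #include <rte_security.h>

    static int
    run_docsis_op(uint8_t dev_id, uint16_t qp_id, struct rte_crypto_op *op,
            struct rte_security_session *sess)
    {
        struct rte_crypto_op *deq_op = NULL;

        /* Mark the op as a security-session op and bind the DOCSIS session */
        rte_security_attach_session(op, sess);

        if (rte_cryptodev_enqueue_burst(dev_id, qp_id, &op, 1) != 1)
            return -1;

        /* Busy-poll until the op completes */
        while (rte_cryptodev_dequeue_burst(dev_id, qp_id, &deq_op, 1) == 0)
            ;

        return deq_op->status == RTE_CRYPTO_OP_STATUS_SUCCESS ? 0 : -1;
    }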
#include <rte_crypto.h>
#include <rte_cryptodev_pmd.h>
+#ifdef RTE_LIBRTE_SECURITY
+#include <rte_security.h>
+#endif
#include "qat_common.h"
#include "icp_qat_hw.h"
int
qat_sym_validate_zuc_key(int key_len, enum icp_qat_hw_cipher_algo *alg);
+#ifdef RTE_LIBRTE_SECURITY
+int
+qat_security_session_create(void *dev, struct rte_security_session_conf *conf,
+ struct rte_security_session *sess, struct rte_mempool *mempool);
+int
+qat_security_session_destroy(void *dev, struct rte_security_session *sess);
+#endif
+
#endif /* _QAT_SYM_SESSION_H_ */