crypto/qat: support DOCSIS protocol
[dpdk.git] / drivers / crypto / qat / qat_sym.h
index e387317..02e7239 100644 (file)
  * Copyright(c) 2015-2018 Intel Corporation
  */
 
-#ifndef _QAT_CRYPTO_H_
-#define _QAT_CRYPTO_H_
+#ifndef _QAT_SYM_H_
+#define _QAT_SYM_H_
 
 #include <rte_cryptodev_pmd.h>
-#include <rte_memzone.h>
+#ifdef RTE_LIBRTE_SECURITY
+#include <rte_net_crc.h>
+#endif
+
+#ifdef BUILD_QAT_SYM
+#include <openssl/evp.h>
 
 #include "qat_common.h"
-#include "qat_device.h"
-#include "qat_crypto_capabilities.h"
+#include "qat_sym_session.h"
+#include "qat_sym_pmd.h"
+#include "qat_logs.h"
+
+#define BYTE_LENGTH    8
+/* bpi is only used for partial blocks of DES and AES
+ * so AES block len can be assumed as max len for iv, src and dst
+ */
+#define BPI_MAX_ENCR_IV_LEN ICP_QAT_HW_AES_BLK_SZ
 
 /*
- * This macro rounds up a number to a be a multiple of
- * the alignment when the alignment is a power of 2
+ * Maximum number of SGL entries
  */
-#define ALIGN_POW2_ROUNDUP(num, align) \
-       (((num) + (align) - 1) & ~((align) - 1))
-#define QAT_64_BTYE_ALIGN_MASK (~0x3f)
+#define QAT_SYM_SGL_MAX_NUMBER 16
 
-#define QAT_CSR_HEAD_WRITE_THRESH 32U
-/* number of requests to accumulate before writing head CSR */
-#define QAT_CSR_TAIL_WRITE_THRESH 32U
-/* number of requests to accumulate before writing tail CSR */
-#define QAT_CSR_TAIL_FORCE_WRITE_THRESH 256U
-/* number of inflights below which no tail write coalescing should occur */
+struct qat_sym_session;
 
-struct qat_session;
+/* Scatter-gather list handed to the QAT device for one symmetric op.
+ * NOTE(review): qat_sgl_hdr is presumably a macro supplying the common
+ * SGL header fields (likely from qat_common.h) - confirm there.
+ */
+struct qat_sym_sgl {
+       qat_sgl_hdr;
+       struct qat_flat_buf buffers[QAT_SYM_SGL_MAX_NUMBER];
+} __rte_packed __rte_cache_aligned;
 
-/**
- * Structure associated with each queue.
- */
-struct qat_queue {
-       char            memz_name[RTE_MEMZONE_NAMESIZE];
-       void            *base_addr;             /* Base address */
-       rte_iova_t      base_phys_addr;         /* Queue physical address */
-       uint32_t        head;                   /* Shadow copy of the head */
-       uint32_t        tail;                   /* Shadow copy of the tail */
-       uint32_t        modulo;
-       uint32_t        msg_size;
-       uint16_t        max_inflights;
-       uint32_t        queue_size;
-       uint8_t         hw_bundle_number;
-       uint8_t         hw_queue_number;
-       /* HW queue aka ring offset on bundle */
-       uint32_t        csr_head;               /* last written head value */
-       uint32_t        csr_tail;               /* last written tail value */
-       uint16_t        nb_processed_responses;
-       /* number of responses processed since last CSR head write */
-       uint16_t        nb_pending_requests;
-       /* number of requests pending since last CSR tail write */
+/* Per-operation cookie: preallocated SGL storage for source and
+ * destination buffers, plus their physical addresses as programmed
+ * into the hardware descriptor.
+ */
+struct qat_sym_op_cookie {
+       struct qat_sym_sgl qat_sgl_src;
+       struct qat_sym_sgl qat_sgl_dst;
+       phys_addr_t qat_sgl_src_phys_addr;
+       phys_addr_t qat_sgl_dst_phys_addr;
 };
 
-struct qat_qp {
-       void                    *mmap_bar_addr;
-       uint16_t                inflights16;
-       struct  qat_queue       tx_q;
-       struct  qat_queue       rx_q;
-       struct  rte_cryptodev_stats stats;
-       struct rte_mempool *op_cookie_pool;
-       void **op_cookies;
-       uint32_t nb_descriptors;
-       enum qat_device_gen qat_dev_gen;
-} __rte_cache_aligned;
-
-void qat_crypto_sym_stats_get(struct rte_cryptodev *dev,
-       struct rte_cryptodev_stats *stats);
-void qat_crypto_sym_stats_reset(struct rte_cryptodev *dev);
-
-int qat_crypto_sym_qp_setup(struct rte_cryptodev *dev, uint16_t queue_pair_id,
-       const struct rte_cryptodev_qp_conf *rx_conf, int socket_id,
-       struct rte_mempool *session_pool);
-int qat_crypto_sym_qp_release(struct rte_cryptodev *dev,
-       uint16_t queue_pair_id);
-
-
-extern uint16_t
-qat_pmd_enqueue_op_burst(void *qp, struct rte_crypto_op **ops,
-               uint16_t nb_ops);
-
-extern uint16_t
-qat_pmd_dequeue_op_burst(void *qp, struct rte_crypto_op **ops,
-               uint16_t nb_ops);
-
-#endif /* _QAT_CRYPTO_H_ */
+int
+qat_sym_build_request(void *in_op, uint8_t *out_msg,
+               void *op_cookie, enum qat_device_gen qat_dev_gen);
+
+
+/** Encrypt a single partial block
+ *  Depends on openssl libcrypto
+ *  Uses ECB+XOR to do CFB encryption, same result, more performant
+ */
+static inline int
+bpi_cipher_encrypt(uint8_t *src, uint8_t *dst,
+               uint8_t *iv, int ivlen, int srclen,
+               void *bpi_ctx)
+{
+       EVP_CIPHER_CTX *ctx = (EVP_CIPHER_CTX *)bpi_ctx;
+       int encrypted_ivlen;
+       uint8_t encrypted_iv[BPI_MAX_ENCR_IV_LEN];
+       uint8_t *encr = encrypted_iv;
+
+       /* ECB method: encrypt the IV, then XOR this with plaintext */
+       if (EVP_EncryptUpdate(ctx, encrypted_iv, &encrypted_ivlen, iv, ivlen)
+                                                               <= 0)
+               goto cipher_encrypt_err;
+
+       /* NOTE(review): the XOR walks encrypted_iv with no bounds check,
+        * so this assumes srclen <= ivlen (one partial block at most,
+        * per BPI_MAX_ENCR_IV_LEN) - confirm callers never pass more.
+        */
+       for (; srclen != 0; --srclen, ++dst, ++src, ++encr)
+               *dst = *src ^ *encr;
+
+       /* Success: dst now holds the CFB ciphertext of the partial block */
+       return 0;
+
+cipher_encrypt_err:
+       QAT_DP_LOG(ERR, "libcrypto ECB cipher encrypt failed");
+       return -EINVAL;
+}
+
+/** BPI post-processing for DOCSIS encryption.
+ *
+ *  The QAT device processes whole cipher blocks only; for BPI any
+ *  trailing partial block must be CFB-encrypted in software. This is
+ *  a no-op for decryption or when the length is block-aligned.
+ *
+ *  @param ctx  session holding cipher algorithm, direction and bpi_ctx
+ *  @param op   completed crypto op whose last partial block is fixed up
+ *  @return     cipher data length minus the last (partial) block, i.e.
+ *              the number of bytes the hardware handled
+ */
+static inline uint32_t
+qat_bpicipher_postprocess(struct qat_sym_session *ctx,
+                               struct rte_crypto_op *op)
+{
+       int block_len = qat_cipher_get_block_size(ctx->qat_cipher_alg);
+       struct rte_crypto_sym_op *sym_op = op->sym;
+       /* Length of the trailing partial block (0 if block-aligned) */
+       uint8_t last_block_len = block_len > 0 ?
+                       sym_op->cipher.data.length % block_len : 0;
+
+       if (last_block_len > 0 &&
+                       ctx->qat_dir == ICP_QAT_HW_CIPHER_ENCRYPT) {
+
+               /* Encrypt last block */
+               uint8_t *last_block, *dst, *iv;
+               uint32_t last_block_offset;
+
+               last_block_offset = sym_op->cipher.data.offset +
+                               sym_op->cipher.data.length - last_block_len;
+               /* NOTE(review): assumes the partial block lies within the
+                * first mbuf segment - confirm for multi-segment mbufs.
+                */
+               last_block = (uint8_t *) rte_pktmbuf_mtod_offset(sym_op->m_src,
+                               uint8_t *, last_block_offset);
+
+               if (unlikely(sym_op->m_dst != NULL))
+                       /* out-of-place operation (OOP) */
+                       dst = (uint8_t *) rte_pktmbuf_mtod_offset(sym_op->m_dst,
+                                               uint8_t *, last_block_offset);
+               else
+                       dst = last_block;
+
+               if (last_block_len < sym_op->cipher.data.length)
+                       /* use previous block ciphertext as IV */
+                       iv = dst - block_len;
+               else
+                       /* runt block, i.e. less than one full block */
+                       iv = rte_crypto_op_ctod_offset(op, uint8_t *,
+                                       ctx->cipher_iv.offset);
+
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+               QAT_DP_HEXDUMP_LOG(DEBUG, "BPI: src before post-process:",
+                       last_block, last_block_len);
+               if (sym_op->m_dst != NULL)
+                       QAT_DP_HEXDUMP_LOG(DEBUG,
+                               "BPI: dst before post-process:",
+                               dst, last_block_len);
+#endif
+               /* NOTE(review): return value ignored; on libcrypto failure
+                * the partial block is left unencrypted - confirm intended.
+                */
+               bpi_cipher_encrypt(last_block, dst, iv, block_len,
+                               last_block_len, ctx->bpi_ctx);
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+               QAT_DP_HEXDUMP_LOG(DEBUG, "BPI: src after post-process:",
+                               last_block, last_block_len);
+               if (sym_op->m_dst != NULL)
+                       QAT_DP_HEXDUMP_LOG(DEBUG,
+                               "BPI: dst after post-process:",
+                               dst, last_block_len);
+#endif
+       }
+       return sym_op->cipher.data.length - last_block_len;
+}
+
+#ifdef RTE_LIBRTE_SECURITY
+/** Verify the CRC32 (Ethernet polynomial) appended after the auth
+ *  region of a decrypted DOCSIS frame; on mismatch the op status is
+ *  set to AUTH_FAILED. Runs only for decrypt direction with a
+ *  non-zero auth length.
+ */
+static inline void
+qat_crc_verify(struct qat_sym_session *ctx, struct rte_crypto_op *op)
+{
+       struct rte_crypto_sym_op *sym_op = op->sym;
+       uint32_t crc_offset, crc_length, crc;
+       uint8_t *crc_data;
+
+       if (ctx->qat_dir == ICP_QAT_HW_CIPHER_DECRYPT &&
+                       sym_op->auth.data.length != 0) {
+
+               crc_offset = sym_op->auth.data.offset;
+               crc_length = sym_op->auth.data.length;
+               /* NOTE(review): assumes auth region and trailing CRC are in
+                * the first mbuf segment - confirm for chained mbufs.
+                */
+               crc_data = rte_pktmbuf_mtod_offset(sym_op->m_src, uint8_t *,
+                               crc_offset);
+
+               crc = rte_net_crc_calc(crc_data, crc_length, RTE_NET_CRC32_ETH);
+
+               /* NOTE(review): 32-bit read at crc_data + crc_length may be
+                * unaligned on strict-alignment targets - verify.
+                */
+               if (crc != *(uint32_t *)(crc_data + crc_length))
+                       op->status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
+       }
+}
+#endif
+
+/** Translate a raw QAT firmware response into the completed crypto op.
+ *
+ *  Recovers the rte_crypto_op pointer from the response's opaque_data,
+ *  sets its status from the firmware status flags, and for BPI
+ *  sessions runs partial-block post-processing (plus CRC verification
+ *  for DOCSIS security sessions).
+ *
+ *  @param op    output: receives the recovered rte_crypto_op pointer
+ *  @param resp  raw firmware response message
+ */
+static inline void
+qat_sym_process_response(void **op, uint8_t *resp)
+{
+       struct icp_qat_fw_comn_resp *resp_msg =
+                       (struct icp_qat_fw_comn_resp *)resp;
+       struct rte_crypto_op *rx_op = (struct rte_crypto_op *)(uintptr_t)
+                       (resp_msg->opaque_data);
+       struct qat_sym_session *sess;
+
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+       QAT_DP_HEXDUMP_LOG(DEBUG, "qat_response:", (uint8_t *)resp_msg,
+                       sizeof(struct icp_qat_fw_comn_resp));
+#endif
+
+       if (ICP_QAT_FW_COMN_STATUS_FLAG_OK !=
+                       ICP_QAT_FW_COMN_RESP_CRYPTO_STAT_GET(
+                       resp_msg->comn_hdr.comn_status)) {
+
+               /* NOTE(review): every firmware error maps to AUTH_FAILED,
+                * including cipher-only ops - confirm this coarse mapping
+                * is intended.
+                */
+               rx_op->status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
+       } else {
+#ifdef RTE_LIBRTE_SECURITY
+               uint8_t is_docsis_sec = 0;
+
+               if (rx_op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) {
+                       /*
+                        * Assuming at this point that if it's a security
+                        * op, that this is for DOCSIS
+                        */
+                       sess = (struct qat_sym_session *)
+                                       get_sec_session_private_data(
+                                       rx_op->sym->sec_session);
+                       is_docsis_sec = 1;
+               } else
+#endif
+               {
+                       sess = (struct qat_sym_session *)
+                                       get_sym_session_private_data(
+                                       rx_op->sym->session,
+                                       cryptodev_qat_driver_id);
+               }
+
+               rx_op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
+
+               /* NOTE(review): sess is dereferenced without a NULL check;
+                * assumes a valid session was attached at enqueue - verify.
+                */
+               if (sess->bpi_ctx) {
+                       /* BPI session: finish the trailing partial block */
+                       qat_bpicipher_postprocess(sess, rx_op);
+#ifdef RTE_LIBRTE_SECURITY
+                       if (is_docsis_sec)
+                               qat_crc_verify(sess, rx_op);
+#endif
+               }
+       }
+       *op = (void *)rx_op;
+}
+#else
+
+/* Stub used when BUILD_QAT_SYM is not defined: symmetric crypto is
+ * compiled out, so response processing is a no-op.
+ */
+static inline void
+qat_sym_process_response(void **op __rte_unused, uint8_t *resp __rte_unused)
+{
+}
+#endif
+#endif /* _QAT_SYM_H_ */