diff --git a/drivers/crypto/qat/qat_sym.h b/drivers/crypto/qat/qat_sym.h
index e3ec7f0..074612c 100644
--- a/drivers/crypto/qat/qat_sym.h
+++ b/drivers/crypto/qat/qat_sym.h
@@ -1,5 +1,5 @@
 /* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2015-2018 Intel Corporation
+ * Copyright(c) 2015-2022 Intel Corporation
  */
 
 #ifndef _QAT_SYM_H_
@@ -15,7 +15,7 @@
 
 #include "qat_common.h"
 #include "qat_sym_session.h"
-#include "qat_sym_pmd.h"
+#include "qat_crypto.h"
 #include "qat_logs.h"
 
 #define BYTE_LENGTH    8
 /* bpi is only used for partial blocks of DES and AES
  * so AES block len can be assumed as max len for iv, src and dst
  */
 #define BPI_MAX_ENCR_IV_LEN ICP_QAT_HW_AES_BLK_SZ
 
+/** Intel(R) QAT Symmetric Crypto PMD name */
+#define CRYPTODEV_NAME_QAT_SYM_PMD     crypto_qat
+
+/* Internal capabilities */
+#define QAT_SYM_CAP_MIXED_CRYPTO       (1 << 0)
+#define QAT_SYM_CAP_VALID              (1 << 31)
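(A minimal sketch of how such an internal flag would gate a feature follows; every name except the flag itself is an assumption for illustration.)

	#include <errno.h>

	/* Sketch: reject a mixed auth+cipher session when the device
	 * generation did not report QAT_SYM_CAP_MIXED_CRYPTO.
	 */
	static int
	check_mixed_crypto(uint32_t internal_capa)
	{
		if (!(internal_capa & QAT_SYM_CAP_MIXED_CRYPTO))
			return -ENOTSUP;
		return 0;
	}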
+
+/**
+ * Macros to add a symmetric crypto capability:
+ * <n: name> <b: block size> <k: key size> <d: digest size>
+ * <a: aad size> <i: iv size>
+ */
+#define QAT_SYM_PLAIN_AUTH_CAP(n, b, d)                                        \
+       {                                                               \
+               .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,                     \
+               {.sym = {                                               \
+                       .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,        \
+                       {.auth = {                                      \
+                               .algo = RTE_CRYPTO_AUTH_##n,            \
+                               b, d                                    \
+                       }, }                                            \
+               }, }                                                    \
+       }
+
+#define QAT_SYM_AUTH_CAP(n, b, k, d, a, i)                             \
+       {                                                               \
+               .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,                     \
+               {.sym = {                                               \
+                       .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,        \
+                       {.auth = {                                      \
+                               .algo = RTE_CRYPTO_AUTH_##n,            \
+                               b, k, d, a, i                           \
+                       }, }                                            \
+               }, }                                                    \
+       }
+
+#define QAT_SYM_AEAD_CAP(n, b, k, d, a, i)                             \
+       {                                                               \
+               .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,                     \
+               {.sym = {                                               \
+                       .xform_type = RTE_CRYPTO_SYM_XFORM_AEAD,        \
+                       {.aead = {                                      \
+                               .algo = RTE_CRYPTO_AEAD_##n,            \
+                               b, k, d, a, i                           \
+                       }, }                                            \
+               }, }                                                    \
+       }
+
+#define QAT_SYM_CIPHER_CAP(n, b, k, i)                                 \
+       {                                                               \
+               .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,                     \
+               {.sym = {                                               \
+                       .xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,      \
+                       {.cipher = {                                    \
+                               .algo = RTE_CRYPTO_CIPHER_##n,          \
+                               b, k, i                                 \
+                       }, }                                            \
+               }, }                                                    \
+       }
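A usage sketch (annotation, not part of this patch): a capability table entry built from the macros above. The CAP_SET()/CAP_RNG() helpers shown here are assumptions modelled on the driver's per-generation headers; their parentheses keep the commas inside the brace initializers from splitting the outer macro's argument list.

	#include <rte_cryptodev.h>

	#define CAP_SET(n, v)		.n = v
	#define CAP_RNG(n, l, u, i)	.n = {.min = l, .max = u, .increment = i}

	static const struct rte_cryptodev_capabilities qat_caps_sketch[] = {
		QAT_SYM_CIPHER_CAP(AES_CBC,
			CAP_SET(block_size, 16),
			CAP_RNG(key_size, 16, 32, 8),
			CAP_RNG(iv_size, 16, 16, 0)),
		RTE_CRYPTODEV_END_OF_CAPABILITIES_LIST()
	};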
+
 /*
  * Maximum number of SGL entries
  */
@@ -54,10 +115,21 @@ struct qat_sym_op_cookie {
        } opt;
 };
 
-int
-qat_sym_build_request(void *in_op, uint8_t *out_msg,
-               void *op_cookie, enum qat_device_gen qat_dev_gen);
+struct qat_sym_dp_ctx {
+       struct qat_sym_session *session;
+       uint32_t tail;
+       uint32_t head;
+       uint16_t cached_enqueue;
+       uint16_t cached_dequeue;
+};
 
+uint16_t
+qat_sym_enqueue_burst(void *qp, struct rte_crypto_op **ops,
+               uint16_t nb_ops);
+
+uint16_t
+qat_sym_dequeue_burst(void *qp, struct rte_crypto_op **ops,
+               uint16_t nb_ops);
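How these would be exercised, sketched from the standard cryptodev PMD pattern (the wiring below is assumed, not shown in this patch): the PMD installs the two functions as the device's burst entry points, and applications reach them through the public burst API. One reading of struct qat_sym_dp_ctx above, following the raw data-path API semantics, is shadow head/tail ring indexes plus counts of ops cached before the matching done call.

	/* Device setup inside the PMD (assumed wiring): */
	cryptodev->enqueue_burst = qat_sym_enqueue_burst;
	cryptodev->dequeue_burst = qat_sym_dequeue_burst;

	/* Application data path, reached via the public API
	 * (dev_id, qp_id, ops, nb_ops are assumptions):
	 */
	uint16_t n_enq = rte_cryptodev_enqueue_burst(dev_id, qp_id,
			ops, nb_ops);
	uint16_t n_deq = 0;

	while (n_deq < n_enq)
		n_deq += rte_cryptodev_dequeue_burst(dev_id, qp_id,
				ops + n_deq, n_enq - n_deq);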
 
 /** Encrypt a single partial block
  *  Depends on openssl libcrypto
@@ -213,17 +285,11 @@ qat_sym_preprocess_requests(void **ops, uint16_t nb_ops)
                }
        }
 }
-#else
-
-static inline void
-qat_sym_preprocess_requests(void **ops __rte_unused,
-                               uint16_t nb_ops __rte_unused)
-{
-}
 #endif
 
-static inline void
-qat_sym_process_response(void **op, uint8_t *resp, void *op_cookie)
+static __rte_always_inline int
+qat_sym_process_response(void **op, uint8_t *resp, void *op_cookie,
+               uint64_t *dequeue_err_count __rte_unused)
 {
        struct icp_qat_fw_comn_resp *resp_msg =
                        (struct icp_qat_fw_comn_resp *)resp;
@@ -282,6 +348,12 @@ qat_sym_process_response(void **op, uint8_t *resp, void *op_cookie)
        }
 
        *op = (void *)rx_op;
+
+	/*
+	 * Return 1: the dequeue loop only moves on to the next op
+	 * when one was ready to return to the API.
+	 */
+       return 1;
 }
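An illustrative consumer (hypothetical; every name except qat_sym_process_response() is an assumption) showing why the return value matters: the response ring head advances per message, while the output array index advances only by the number of ops actually handed back.

	static uint16_t
	drain_responses(uint8_t *ring, uint32_t head, uint32_t modulo_mask,
			uint32_t msg_size, void **cookies,
			struct rte_crypto_op **ops, uint16_t nb_ops,
			uint64_t *err_count)
	{
		uint16_t nb_deq = 0;

		while (nb_deq < nb_ops) {
			nb_deq += qat_sym_process_response(
					(void **)&ops[nb_deq], ring + head,
					cookies[head / msg_size], err_count);
			head = (head + msg_size) & modulo_mask;
		}
		return nb_deq;
	}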
 
 int
@@ -293,6 +365,52 @@ qat_sym_configure_dp_ctx(struct rte_cryptodev *dev, uint16_t qp_id,
 int
 qat_sym_get_dp_ctx_size(struct rte_cryptodev *dev);
 
+void
+qat_sym_init_op_cookie(void *cookie);
+
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+static __rte_always_inline void
+qat_sym_debug_log_dump(struct icp_qat_fw_la_bulk_req *qat_req,
+               struct qat_sym_session *ctx,
+               struct rte_crypto_vec *vec, uint32_t vec_len,
+               struct rte_crypto_va_iova_ptr *cipher_iv,
+               struct rte_crypto_va_iova_ptr *auth_iv,
+               struct rte_crypto_va_iova_ptr *aad,
+               struct rte_crypto_va_iova_ptr *digest)
+{
+       uint32_t i;
+
+       QAT_DP_HEXDUMP_LOG(DEBUG, "qat_req:", qat_req,
+                       sizeof(struct icp_qat_fw_la_bulk_req));
+       for (i = 0; i < vec_len; i++)
+               QAT_DP_HEXDUMP_LOG(DEBUG, "src_data:", vec[i].base, vec[i].len);
+       if (cipher_iv && ctx->cipher_iv.length > 0)
+               QAT_DP_HEXDUMP_LOG(DEBUG, "cipher iv:", cipher_iv->va,
+                               ctx->cipher_iv.length);
+       if (auth_iv && ctx->auth_iv.length > 0)
+               QAT_DP_HEXDUMP_LOG(DEBUG, "auth iv:", auth_iv->va,
+                               ctx->auth_iv.length);
+       if (aad && ctx->aad_len > 0)
+               QAT_DP_HEXDUMP_LOG(DEBUG, "aad:", aad->va,
+                               ctx->aad_len);
+       if (digest && ctx->digest_length > 0)
+               QAT_DP_HEXDUMP_LOG(DEBUG, "digest:", digest->va,
+                               ctx->digest_length);
+}
+#else
+static __rte_always_inline void
+qat_sym_debug_log_dump(struct icp_qat_fw_la_bulk_req *qat_req __rte_unused,
+               struct qat_sym_session *ctx __rte_unused,
+               struct rte_crypto_vec *vec __rte_unused,
+               uint32_t vec_len __rte_unused,
+               struct rte_crypto_va_iova_ptr *cipher_iv __rte_unused,
+               struct rte_crypto_va_iova_ptr *auth_iv __rte_unused,
+               struct rte_crypto_va_iova_ptr *aad __rte_unused,
+               struct rte_crypto_va_iova_ptr *digest __rte_unused)
+{
+}
+#endif
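A hypothetical call site for the dump helper (the surrounding request-builder variables are assumptions); with RTE_LOG_DP_LEVEL below RTE_LOG_DEBUG the whole call compiles away through the stub above.

	qat_sym_debug_log_dump(&qat_req, session, data_vec, n_data_vecs,
			&cipher_iv, &auth_iv, &aad, &digest);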
+
 #else
 
 static inline void
@@ -307,5 +425,5 @@ qat_sym_process_response(void **op __rte_unused, uint8_t *resp __rte_unused,
 {
 }
 
-#endif
+#endif /* BUILD_QAT_SYM */
 #endif /* _QAT_SYM_H_ */