diff --git a/drivers/crypto/qat/qat_sym.c b/drivers/crypto/qat/qat_sym.c
index 8273968..5ff4aa1 100644
--- a/drivers/crypto/qat/qat_sym.c
+++ b/drivers/crypto/qat/qat_sym.c
@@ -1,5 +1,5 @@
 /* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2015-2018 Intel Corporation
+ * Copyright(c) 2015-2019 Intel Corporation
  */
 
 #include <openssl/evp.h>
@@ -12,6 +12,7 @@
 
 #include "qat_sym.h"
 
+
 /** Decrypt a single partial block
  *  Depends on openssl libcrypto
  *  Uses ECB+XOR to do CFB encryption, same result, more performant
@@ -156,7 +157,10 @@ qat_sym_build_request(void *in_op, uint8_t *out_msg,
        uint32_t auth_len = 0, auth_ofs = 0;
        uint32_t min_ofs = 0;
        uint64_t src_buf_start = 0, dst_buf_start = 0;
+       uint64_t auth_data_end = 0;
        uint8_t do_sgl = 0;
+       uint8_t in_place = 1;
+       int alignment_adjustment = 0;
        struct rte_crypto_op *op = (struct rte_crypto_op *)in_op;
        struct qat_sym_op_cookie *cookie =
                                (struct qat_sym_op_cookie *)op_cookie;
@@ -192,7 +196,8 @@ qat_sym_build_request(void *in_op, uint8_t *out_msg,
        rte_mov128((uint8_t *)qat_req, (const uint8_t *)&(ctx->fw_req));
        qat_req->comn_mid.opaque_data = (uint64_t)(uintptr_t)op;
        cipher_param = (void *)&qat_req->serv_specif_rqpars;
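+       /* hash request parameters live at a firmware-defined offset;
+        * sizeof(*cipher_param) can exceed that offset once the
+        * single-pass fields used below are included
+        */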
-       auth_param = (void *)((uint8_t *)cipher_param + sizeof(*cipher_param));
+       auth_param = (void *)((uint8_t *)cipher_param +
+                       ICP_QAT_FW_HASH_REQUEST_PARAMETERS_OFFSET);
 
        if (ctx->qat_cmd == ICP_QAT_FW_LA_CMD_HASH_CIPHER ||
                        ctx->qat_cmd == ICP_QAT_FW_LA_CMD_CIPHER_HASH) {
@@ -438,6 +443,7 @@ qat_sym_build_request(void *in_op, uint8_t *out_msg,
                 * Don't align DMA start. DMA the minimum data-set
                 * so as not to overwrite data in dest buffer
                 */
+               in_place = 0;
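+               /* out-of-place: the digest-end check below must use m_dst */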
                src_buf_start =
                        rte_pktmbuf_iova_offset(op->sym->m_src, min_ofs);
                dst_buf_start =
@@ -462,6 +468,10 @@ qat_sym_build_request(void *in_op, uint8_t *out_msg,
                                                                min_ofs);
                }
                dst_buf_start = src_buf_start;
+
+               /* remember the alignment adjustment for later; can be +/- */
+               alignment_adjustment = src_buf_start -
+                       rte_pktmbuf_iova_offset(op->sym->m_src, min_ofs);
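+               /* e.g. aligning the DMA start 2 bytes before the data
+                * yields an adjustment of -2, which the digest-end walk
+                * below folds back in (illustrative values)
+                */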
        }
 
        if (do_cipher || do_aead) {
@@ -490,14 +500,66 @@ qat_sym_build_request(void *in_op, uint8_t *out_msg,
                (cipher_param->cipher_offset + cipher_param->cipher_length)
                : (auth_param->auth_off + auth_param->auth_len);
 
+       if (do_auth && do_cipher) {
+               /* Handle digest-encrypted cases, i.e.
+                * auth-gen-then-cipher-encrypt and
+                * cipher-decrypt-then-auth-verify
+                */
+               /* First find the end of the data */
+               if (do_sgl) {
+                       uint32_t remaining_off = auth_param->auth_off +
+                               auth_param->auth_len + alignment_adjustment;
+                       struct rte_mbuf *sgl_buf =
+                               (in_place ?
+                                       op->sym->m_src : op->sym->m_dst);
+
+                       while (remaining_off >= rte_pktmbuf_data_len(sgl_buf)
+                                       && sgl_buf->next != NULL) {
+                               remaining_off -= rte_pktmbuf_data_len(sgl_buf);
+                               sgl_buf = sgl_buf->next;
+                       }
+
+                       auth_data_end = (uint64_t)rte_pktmbuf_iova_offset(
+                               sgl_buf, remaining_off);
+               } else {
+                       auth_data_end = (in_place ?
+                               src_buf_start : dst_buf_start) +
+                               auth_param->auth_off + auth_param->auth_len;
+               }
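+               /* Digest-encrypted layout, with illustrative lengths:
+                *
+                *   |<---- auth 0..99 ---->|<- 16B digest ->|
+                *   |<------- cipher 0..111 ------->|
+                *
+                * auth ends inside the cipher region and the digest
+                * starts exactly at auth_data_end, so the ICV itself is
+                * ciphered; when cipher ends before auth + digest ends
+                * (111 < 115 here), the job length is stretched below.
+                */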
+               /* Then check if digest-encrypted conditions are met */
+               if ((auth_param->auth_off + auth_param->auth_len <
+                                       cipher_param->cipher_offset +
+                                       cipher_param->cipher_length) &&
+                               (op->sym->auth.digest.phys_addr ==
+                                       auth_data_end)) {
+                       /* Handle partial digest encryption */
+                       if (cipher_param->cipher_offset +
+                                       cipher_param->cipher_length <
+                                       auth_param->auth_off +
+                                       auth_param->auth_len +
+                                       ctx->digest_length)
+                               qat_req->comn_mid.dst_length =
+                                       qat_req->comn_mid.src_length =
+                                       auth_param->auth_off +
+                                       auth_param->auth_len +
+                                       ctx->digest_length;
+                       struct icp_qat_fw_comn_req_hdr *header =
+                               &qat_req->comn_hdr;
+                       ICP_QAT_FW_LA_DIGEST_IN_BUFFER_SET(
+                               header->serv_specif_flags,
+                               ICP_QAT_FW_LA_DIGEST_IN_BUFFER);
+               }
+       }
+
        if (do_sgl) {
 
                ICP_QAT_FW_COMN_PTR_TYPE_SET(qat_req->comn_hdr.comn_req_flags,
                                QAT_COMN_PTR_TYPE_SGL);
-               ret = qat_sgl_fill_array(op->sym->m_src, src_buf_start,
-                                       &cookie->qat_sgl_src,
-                                       qat_req->comn_mid.src_length,
-                                       QAT_SYM_SGL_MAX_NUMBER);
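+               /* the offset is relative to the mbuf IOVA and can be
+                * negative after alignment, hence the signed cast
+                */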
+               ret = qat_sgl_fill_array(op->sym->m_src,
+                  (int64_t)(src_buf_start - rte_pktmbuf_iova(op->sym->m_src)),
+                  &cookie->qat_sgl_src,
+                  qat_req->comn_mid.src_length,
+                  QAT_SYM_SGL_MAX_NUMBER);
 
                if (unlikely(ret)) {
                        QAT_DP_LOG(ERR, "QAT PMD Cannot fill sgl array");
@@ -510,10 +572,11 @@ qat_sym_build_request(void *in_op, uint8_t *out_msg,
                                cookie->qat_sgl_src_phys_addr;
                else {
                        ret = qat_sgl_fill_array(op->sym->m_dst,
-                                                dst_buf_start,
-                                                &cookie->qat_sgl_dst,
-                                                qat_req->comn_mid.dst_length,
-                                                QAT_SYM_SGL_MAX_NUMBER);
+                               (int64_t)(dst_buf_start -
+                                       rte_pktmbuf_iova(op->sym->m_dst)),
+                               &cookie->qat_sgl_dst,
+                               qat_req->comn_mid.dst_length,
+                               QAT_SYM_SGL_MAX_NUMBER);
 
                        if (unlikely(ret)) {
                                QAT_DP_LOG(ERR, "QAT PMD can't fill sgl array");
@@ -525,11 +588,20 @@ qat_sym_build_request(void *in_op, uint8_t *out_msg,
                        qat_req->comn_mid.dest_data_addr =
                                        cookie->qat_sgl_dst_phys_addr;
                }
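+               /* with SGL the per-segment lengths travel in the SGL
+                * entries themselves, so zero the flat length fields
+                */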
+               qat_req->comn_mid.src_length = 0;
+               qat_req->comn_mid.dst_length = 0;
        } else {
                qat_req->comn_mid.src_data_addr = src_buf_start;
                qat_req->comn_mid.dest_data_addr = dst_buf_start;
        }
 
+       /* Handle Single-Pass GCM */
+       if (ctx->is_single_pass) {
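+               /* AAD and digest pointers ride in the cipher request
+                * parameters; no separate auth phase is issued
+                */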
+               cipher_param->spc_aad_addr = op->sym->aead.aad.phys_addr;
+               cipher_param->spc_auth_res_addr =
+                               op->sym->aead.digest.phys_addr;
+       }
+
 #if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
        QAT_DP_HEXDUMP_LOG(DEBUG, "qat_req:", qat_req,
                        sizeof(struct icp_qat_fw_la_bulk_req));