crypto/qat: extend support for digest-encrypted auth-cipher
author Damian Nowak <damianx.nowak@intel.com>
Wed, 3 Jul 2019 11:15:53 +0000 (13:15 +0200)
committer Akhil Goyal <akhil.goyal@nxp.com>
Fri, 5 Jul 2019 13:28:14 +0000 (15:28 +0200)
This patch adds a condition to be met when using
out-of-place auth-cipher operations. It checks
whether the digest location overlaps with the data
to be encrypted or decrypted and, if so, treats it
as a digest-encrypted case.

The patch also checks whether the digest is only
partially encrypted or decrypted and extends the
PMD buffers accordingly.

It also adds a feature flag for the QuickAssist
Technology PMD to advertise its support for
digest-appended auth-cipher operations.
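
A minimal standalone sketch (not part of the patch) of the two
checks described above; digest_phys is the digest's physical
address, auth_end and cipher_end are the physical addresses one
byte past the authenticated and ciphered regions, and digest_len
is the digest size:

    #include <stdint.h>

    /* Digest-encrypted case: the digest sits immediately after the
     * authenticated region and that position lies inside the region
     * being ciphered.
     */
    static int
    digest_is_encrypted(uint64_t digest_phys, uint64_t auth_end,
                    uint64_t cipher_end)
    {
            return auth_end < cipher_end && digest_phys == auth_end;
    }

    /* Partial digest encryption: the ciphered region ends inside the
     * appended digest, so the request buffers must be extended to
     * cover auth_end + digest_len.
     */
    static int
    digest_partially_encrypted(uint64_t auth_end, uint64_t cipher_end,
                    uint32_t digest_len)
    {
            return cipher_end < auth_end + digest_len;
    }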

Signed-off-by: Damian Nowak <damianx.nowak@intel.com>
Acked-by: Fiona Trahe <fiona.trahe@intel.com>
doc/guides/cryptodevs/features/qat.ini
doc/guides/rel_notes/release_19_08.rst
drivers/crypto/qat/qat_sym.c
drivers/crypto/qat/qat_sym_pmd.c

diff --git a/doc/guides/cryptodevs/features/qat.ini b/doc/guides/cryptodevs/features/qat.ini
index e8f9060..0832e59 100644
--- a/doc/guides/cryptodevs/features/qat.ini
+++ b/doc/guides/cryptodevs/features/qat.ini
@@ -12,6 +12,7 @@ OOP SGL In SGL Out     = Y
 OOP SGL In LB  Out     = Y
 OOP LB  In SGL Out     = Y
 OOP LB  In LB  Out     = Y
+Digest encrypted       = Y
 
 ;
 ; Supported crypto algorithms of the 'qat' crypto driver.
diff --git a/doc/guides/rel_notes/release_19_08.rst b/doc/guides/rel_notes/release_19_08.rst
index fd3b4b6..53aead7 100644
--- a/doc/guides/rel_notes/release_19_08.rst
+++ b/doc/guides/rel_notes/release_19_08.rst
@@ -139,6 +139,11 @@ New Features
   (Programmable  Acceleration Card) N3000.  See the
   :doc:`../bbdevs/fpga_lte_fec` BBDEV guide for more details on this new driver.
 
+* **Updated the QuickAssist Technology (QAT) symmetric crypto PMD.**
+
+  Added support for digest-encrypted cases where digest is appended
+  to the data.
+
 * **Added Intel QuickData Technology PMD**
 
   The PMD for Intel\ |reg|  QuickData Technology, part of
diff --git a/drivers/crypto/qat/qat_sym.c b/drivers/crypto/qat/qat_sym.c
index 7515a55..2dc0614 100644
--- a/drivers/crypto/qat/qat_sym.c
+++ b/drivers/crypto/qat/qat_sym.c
@@ -156,8 +156,9 @@ qat_sym_build_request(void *in_op, uint8_t *out_msg,
        uint32_t auth_len = 0, auth_ofs = 0;
        uint32_t min_ofs = 0;
        uint64_t src_buf_start = 0, dst_buf_start = 0;
+       uint64_t digest_start = 0;
        uint8_t do_sgl = 0;
-       uint8_t wireless_auth = 0, in_place = 1;
+       uint8_t in_place = 1;
        struct rte_crypto_op *op = (struct rte_crypto_op *)in_op;
        struct qat_sym_op_cookie *cookie =
                                (struct qat_sym_op_cookie *)op_cookie;
@@ -270,7 +271,6 @@ qat_sym_build_request(void *in_op, uint8_t *out_msg,
                        }
                        auth_ofs = op->sym->auth.data.offset >> 3;
                        auth_len = op->sym->auth.data.length >> 3;
-                       wireless_auth = 1;
 
                        auth_param->u1.aad_adr =
                                        rte_crypto_op_ctophys_offset(op,
@@ -493,6 +493,53 @@ qat_sym_build_request(void *in_op, uint8_t *out_msg,
                (cipher_param->cipher_offset + cipher_param->cipher_length)
                : (auth_param->auth_off + auth_param->auth_len);
 
+       if (do_auth && do_cipher) {
+               if (do_sgl) {
+                       uint32_t remaining_off = auth_param->auth_off +
+                               auth_param->auth_len;
+                       struct rte_mbuf *sgl_buf =
+                               (in_place ?
+                               op->sym->m_src : op->sym->m_dst);
+                       while (remaining_off >= rte_pktmbuf_data_len(
+                                       sgl_buf)) {
+                               remaining_off -= rte_pktmbuf_data_len(
+                                               sgl_buf);
+                               sgl_buf = sgl_buf->next;
+                       }
+                       digest_start = (uint64_t)rte_pktmbuf_iova_offset(
+                               sgl_buf, remaining_off);
+               } else {
+                       digest_start = (in_place ?
+                               src_buf_start : dst_buf_start) +
+                               auth_param->auth_off + auth_param->auth_len;
+               }
+               /* Handle cases of auth-gen-then-cipher and
+                * cipher-decrypt-then-auth-verify with digest encrypted
+                */
+               if ((auth_param->auth_off + auth_param->auth_len <
+                                       cipher_param->cipher_offset +
+                                       cipher_param->cipher_length) &&
+                               (op->sym->auth.digest.phys_addr ==
+                                       digest_start)) {
+                       /* Handle partial digest encryption */
+                       if (cipher_param->cipher_offset +
+                                       cipher_param->cipher_length <
+                                       auth_param->auth_off +
+                                       auth_param->auth_len +
+                                       ctx->digest_length)
+                               qat_req->comn_mid.dst_length =
+                                       qat_req->comn_mid.src_length =
+                                       auth_param->auth_off +
+                                       auth_param->auth_len +
+                                       ctx->digest_length;
+                       struct icp_qat_fw_comn_req_hdr *header =
+                               &qat_req->comn_hdr;
+                       ICP_QAT_FW_LA_DIGEST_IN_BUFFER_SET(
+                               header->serv_specif_flags,
+                               ICP_QAT_FW_LA_DIGEST_IN_BUFFER);
+               }
+       }
+
        if (do_sgl) {
 
                ICP_QAT_FW_COMN_PTR_TYPE_SET(qat_req->comn_hdr.comn_req_flags,
@@ -535,18 +582,6 @@ qat_sym_build_request(void *in_op, uint8_t *out_msg,
        } else {
                qat_req->comn_mid.src_data_addr = src_buf_start;
                qat_req->comn_mid.dest_data_addr = dst_buf_start;
-               /* handle case of auth-gen-then-cipher with digest encrypted */
-               if (wireless_auth && in_place &&
-                   (op->sym->auth.digest.phys_addr ==
-                               src_buf_start + auth_ofs + auth_len) &&
-                   (auth_ofs + auth_len + ctx->digest_length <=
-                               cipher_ofs + cipher_len)) {
-                       struct icp_qat_fw_comn_req_hdr *header =
-                                               &qat_req->comn_hdr;
-                       ICP_QAT_FW_LA_DIGEST_IN_BUFFER_SET(
-                               header->serv_specif_flags,
-                               ICP_QAT_FW_LA_DIGEST_IN_BUFFER);
-               }
        }
 
 #if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
diff --git a/drivers/crypto/qat/qat_sym_pmd.c b/drivers/crypto/qat/qat_sym_pmd.c
index af21270..71f21ce 100644
--- a/drivers/crypto/qat/qat_sym_pmd.c
+++ b/drivers/crypto/qat/qat_sym_pmd.c
@@ -278,7 +278,8 @@ qat_sym_dev_create(struct qat_pci_device *qat_pci_dev)
                        RTE_CRYPTODEV_FF_OOP_SGL_IN_SGL_OUT |
                        RTE_CRYPTODEV_FF_OOP_SGL_IN_LB_OUT |
                        RTE_CRYPTODEV_FF_OOP_LB_IN_SGL_OUT |
-                       RTE_CRYPTODEV_FF_OOP_LB_IN_LB_OUT;
+                       RTE_CRYPTODEV_FF_OOP_LB_IN_LB_OUT |
+                       RTE_CRYPTODEV_FF_DIGEST_ENCRYPTED;
 
        internals = cryptodev->data->dev_private;
        internals->qat_dev = qat_pci_dev;
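
A minimal sketch (again not part of the patch) of how an application
could test for the new capability before relying on an appended,
digest-encrypted layout; dev_id is assumed to be a valid crypto
device id:

    #include <rte_cryptodev.h>

    static int
    dev_supports_digest_encrypted(uint8_t dev_id)
    {
            struct rte_cryptodev_info dev_info;

            /* Query the device and test the advertised feature flags. */
            rte_cryptodev_info_get(dev_id, &dev_info);
            return (dev_info.feature_flags &
                            RTE_CRYPTODEV_FF_DIGEST_ENCRYPTED) != 0;
    }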