crypto/virtio: fix IV physical address
author     Fan Zhang <roy.fan.zhang@intel.com>
           Tue, 26 Jun 2018 02:10:48 +0000 (03:10 +0100)
committer  Pablo de Lara <pablo.de.lara.guarch@intel.com>
           Tue, 10 Jul 2018 22:57:51 +0000 (00:57 +0200)
The physical address of the IV required by virtio was computed as the
crypto operation's physical address plus the IV offset. However, not
all crypto ops have the physical address field initialized, and
computing it at runtime is costly. This patch fixes the problem by
adding an iv field to struct virtio_crypto_op_cookie and copying the
IV into it instead.

Fixes: 82adb12a1fce ("crypto/virtio: support burst enqueue/dequeue")
Cc: stable@dpdk.org
Signed-off-by: Fan Zhang <roy.fan.zhang@intel.com>
Reviewed-by: Jay Zhou <jianjay.zhou@huawei.com>
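
A minimal standalone sketch of the fallback idea, for context only and
not part of the patch: when cop->phys_addr is not available, the IV is
copied into the per-op cookie and the vring descriptor points into the
cookie, whose IOVA is already known. The types below are simplified
stand-ins with placeholder sizes, and the helper name is hypothetical;
offsetof() is used here in place of the driver's summed-sizes offset.

  #include <stddef.h>
  #include <stdint.h>
  #include <string.h>

  #define NUM_ENTRY_VIRTIO_CRYPTO_OP 7
  #define VIRTIO_CRYPTO_MAX_IV_SIZE 16

  /* Simplified stand-ins for the driver's types; the real definitions
   * live in virtio_crypto.h and virtio_ring.h. */
  struct virtio_crypto_op_data_req { uint8_t raw[72]; };
  struct virtio_crypto_inhdr { uint8_t status; };
  struct vring_desc {
          uint64_t addr;
          uint32_t len;
          uint16_t flags;
          uint16_t next;
  };

  struct virtio_crypto_op_cookie {
          struct virtio_crypto_op_data_req data_req;
          struct virtio_crypto_inhdr inhdr;
          struct vring_desc desc[NUM_ENTRY_VIRTIO_CRYPTO_OP];
          uint8_t iv[VIRTIO_CRYPTO_MAX_IV_SIZE];
  };

  /* Copy the IV into the cookie and return the device-visible address
   * of that copy. cookie_iova is the IOVA of the cookie itself; since
   * data_req is the first member, it equals what the driver calls
   * indirect_op_data_req_phys_addr. The caller must guarantee
   * iv_len <= VIRTIO_CRYPTO_MAX_IV_SIZE, which the session-creation
   * check added by this patch enforces. */
  static uint64_t
  iv_desc_addr(struct virtio_crypto_op_cookie *cookie, uint64_t cookie_iova,
               const uint8_t *iv, uint16_t iv_len)
  {
          memcpy(cookie->iv, iv, iv_len);
          return cookie_iova + offsetof(struct virtio_crypto_op_cookie, iv);
  }

Compared with summing member sizes, offsetof() also absorbs any padding
the compiler may insert between cookie members; when there is no such
padding, the resulting address is the same.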
drivers/crypto/virtio/virtio_cryptodev.c
drivers/crypto/virtio/virtio_cryptodev.h
drivers/crypto/virtio/virtio_rxtx.c

diff --git a/drivers/crypto/virtio/virtio_cryptodev.c b/drivers/crypto/virtio/virtio_cryptodev.c
index df88953..f9c890b 100644
--- a/drivers/crypto/virtio/virtio_cryptodev.c
+++ b/drivers/crypto/virtio/virtio_cryptodev.c
@@ -1223,6 +1223,12 @@ virtio_crypto_sym_pad_op_ctrl_req(
        /* Get cipher xform from crypto xform chain */
        cipher_xform = virtio_crypto_get_cipher_xform(xform);
        if (cipher_xform) {
+               if (cipher_xform->iv.length > VIRTIO_CRYPTO_MAX_IV_SIZE) {
+                       VIRTIO_CRYPTO_SESSION_LOG_ERR(
+                               "cipher IV size cannot be longer than %u",
+                               VIRTIO_CRYPTO_MAX_IV_SIZE);
+                       return -1;
+               }
                if (is_chainned)
                        ret = virtio_crypto_sym_pad_cipher_param(
                                &ctrl->u.sym_create_session.u.chain.para
diff --git a/drivers/crypto/virtio/virtio_cryptodev.h b/drivers/crypto/virtio/virtio_cryptodev.h
index e402c03..0fd7b72 100644
--- a/drivers/crypto/virtio/virtio_cryptodev.h
+++ b/drivers/crypto/virtio/virtio_cryptodev.h
@@ -16,6 +16,8 @@
 
 #define NUM_ENTRY_VIRTIO_CRYPTO_OP 7
 
+#define VIRTIO_CRYPTO_MAX_IV_SIZE 16
+
 extern uint8_t cryptodev_virtio_driver_id;
 
 enum virtio_crypto_cmd_id {
@@ -29,6 +31,7 @@ struct virtio_crypto_op_cookie {
        struct virtio_crypto_op_data_req data_req;
        struct virtio_crypto_inhdr inhdr;
        struct vring_desc desc[NUM_ENTRY_VIRTIO_CRYPTO_OP];
+       uint8_t iv[VIRTIO_CRYPTO_MAX_IV_SIZE];
 };
 
 /*
diff --git a/drivers/crypto/virtio/virtio_rxtx.c b/drivers/crypto/virtio/virtio_rxtx.c
index 4503928..4f695f3 100644
--- a/drivers/crypto/virtio/virtio_rxtx.c
+++ b/drivers/crypto/virtio/virtio_rxtx.c
@@ -203,6 +203,8 @@ virtqueue_crypto_sym_enqueue_xmit(
        uint16_t req_data_len = sizeof(struct virtio_crypto_op_data_req);
        uint32_t indirect_vring_addr_offset = req_data_len +
                sizeof(struct virtio_crypto_inhdr);
+       uint32_t indirect_iv_addr_offset = indirect_vring_addr_offset +
+                       sizeof(struct vring_desc) * NUM_ENTRY_VIRTIO_CRYPTO_OP;
        struct rte_crypto_sym_op *sym_op = cop->sym;
        struct virtio_crypto_session *session =
                (struct virtio_crypto_session *)get_session_private_data(
@@ -259,7 +261,17 @@ virtqueue_crypto_sym_enqueue_xmit(
 
        /* indirect vring: iv of cipher */
        if (session->iv.length) {
-               desc[idx].addr = cop->phys_addr + session->iv.offset;
+               if (cop->phys_addr)
+                       desc[idx].addr = cop->phys_addr + session->iv.offset;
+               else {
+                       rte_memcpy(crypto_op_cookie->iv,
+                                       rte_crypto_op_ctod_offset(cop,
+                                       uint8_t *, session->iv.offset),
+                                       session->iv.length);
+                       desc[idx].addr = indirect_op_data_req_phys_addr +
+                               indirect_iv_addr_offset;
+               }
+
                desc[idx].len = session->iv.length;
                desc[idx++].flags = VRING_DESC_F_NEXT;
        }