vhost/crypto: fix algorithm transform API, key length checks and return codes
[dpdk.git] / lib / librte_vhost / vhost_crypto.c
index 0edf12d..0f9df40 100644 (file)
                (1 << VIRTIO_RING_F_EVENT_IDX) |                        \
                (1 << VIRTIO_CRYPTO_SERVICE_CIPHER) |                   \
                (1 << VIRTIO_CRYPTO_SERVICE_MAC) |                      \
-               (1 << VIRTIO_NET_F_CTRL_VQ))
+               (1 << VIRTIO_NET_F_CTRL_VQ) |                           \
+               (1 << VHOST_USER_PROTOCOL_F_CONFIG))
 
 #define IOVA_TO_VVA(t, r, a, l, p)                                     \
        ((t)(uintptr_t)vhost_iova_to_vva(r->dev, r->vq, a, l, p))
 
 static int
-cipher_algo_transform(uint32_t virtio_cipher_algo)
+cipher_algo_transform(uint32_t virtio_cipher_algo,
+               enum rte_crypto_cipher_algorithm *algo)
 {
-       int ret;
-
        switch (virtio_cipher_algo) {
        case VIRTIO_CRYPTO_CIPHER_AES_CBC:
-               ret = RTE_CRYPTO_CIPHER_AES_CBC;
+               *algo = RTE_CRYPTO_CIPHER_AES_CBC;
                break;
        case VIRTIO_CRYPTO_CIPHER_AES_CTR:
-               ret = RTE_CRYPTO_CIPHER_AES_CTR;
+               *algo = RTE_CRYPTO_CIPHER_AES_CTR;
                break;
        case VIRTIO_CRYPTO_CIPHER_DES_ECB:
-               ret = -VIRTIO_CRYPTO_NOTSUPP;
+               return -VIRTIO_CRYPTO_NOTSUPP;
                break;
        case VIRTIO_CRYPTO_CIPHER_DES_CBC:
-               ret = RTE_CRYPTO_CIPHER_DES_CBC;
+               *algo = RTE_CRYPTO_CIPHER_DES_CBC;
                break;
        case VIRTIO_CRYPTO_CIPHER_3DES_ECB:
-               ret = RTE_CRYPTO_CIPHER_3DES_ECB;
+               *algo = RTE_CRYPTO_CIPHER_3DES_ECB;
                break;
        case VIRTIO_CRYPTO_CIPHER_3DES_CBC:
-               ret = RTE_CRYPTO_CIPHER_3DES_CBC;
+               *algo = RTE_CRYPTO_CIPHER_3DES_CBC;
                break;
        case VIRTIO_CRYPTO_CIPHER_3DES_CTR:
-               ret = RTE_CRYPTO_CIPHER_3DES_CTR;
+               *algo = RTE_CRYPTO_CIPHER_3DES_CTR;
                break;
        case VIRTIO_CRYPTO_CIPHER_KASUMI_F8:
-               ret = RTE_CRYPTO_CIPHER_KASUMI_F8;
+               *algo = RTE_CRYPTO_CIPHER_KASUMI_F8;
                break;
        case VIRTIO_CRYPTO_CIPHER_SNOW3G_UEA2:
-               ret = RTE_CRYPTO_CIPHER_SNOW3G_UEA2;
+               *algo = RTE_CRYPTO_CIPHER_SNOW3G_UEA2;
                break;
        case VIRTIO_CRYPTO_CIPHER_AES_F8:
-               ret = RTE_CRYPTO_CIPHER_AES_F8;
+               *algo = RTE_CRYPTO_CIPHER_AES_F8;
                break;
        case VIRTIO_CRYPTO_CIPHER_AES_XTS:
-               ret = RTE_CRYPTO_CIPHER_AES_XTS;
+               *algo = RTE_CRYPTO_CIPHER_AES_XTS;
                break;
        case VIRTIO_CRYPTO_CIPHER_ZUC_EEA3:
-               ret = RTE_CRYPTO_CIPHER_ZUC_EEA3;
+               *algo = RTE_CRYPTO_CIPHER_ZUC_EEA3;
                break;
        default:
-               ret = -VIRTIO_CRYPTO_BADMSG;
+               return -VIRTIO_CRYPTO_BADMSG;
                break;
        }
 
-       return ret;
+       return 0;
 }
 
 static int
-auth_algo_transform(uint32_t virtio_auth_algo)
+auth_algo_transform(uint32_t virtio_auth_algo,
+               enum rte_crypto_auth_algorithm *algo)
 {
-       int ret;
-
        switch (virtio_auth_algo) {
-
        case VIRTIO_CRYPTO_NO_MAC:
-               ret = RTE_CRYPTO_AUTH_NULL;
+               *algo = RTE_CRYPTO_AUTH_NULL;
                break;
        case VIRTIO_CRYPTO_MAC_HMAC_MD5:
-               ret = RTE_CRYPTO_AUTH_MD5_HMAC;
+               *algo = RTE_CRYPTO_AUTH_MD5_HMAC;
                break;
        case VIRTIO_CRYPTO_MAC_HMAC_SHA1:
-               ret = RTE_CRYPTO_AUTH_SHA1_HMAC;
+               *algo = RTE_CRYPTO_AUTH_SHA1_HMAC;
                break;
        case VIRTIO_CRYPTO_MAC_HMAC_SHA_224:
-               ret = RTE_CRYPTO_AUTH_SHA224_HMAC;
+               *algo = RTE_CRYPTO_AUTH_SHA224_HMAC;
                break;
        case VIRTIO_CRYPTO_MAC_HMAC_SHA_256:
-               ret = RTE_CRYPTO_AUTH_SHA256_HMAC;
+               *algo = RTE_CRYPTO_AUTH_SHA256_HMAC;
                break;
        case VIRTIO_CRYPTO_MAC_HMAC_SHA_384:
-               ret = RTE_CRYPTO_AUTH_SHA384_HMAC;
+               *algo = RTE_CRYPTO_AUTH_SHA384_HMAC;
                break;
        case VIRTIO_CRYPTO_MAC_HMAC_SHA_512:
-               ret = RTE_CRYPTO_AUTH_SHA512_HMAC;
-               break;
-       case VIRTIO_CRYPTO_MAC_CMAC_3DES:
-               ret = -VIRTIO_CRYPTO_NOTSUPP;
+               *algo = RTE_CRYPTO_AUTH_SHA512_HMAC;
                break;
        case VIRTIO_CRYPTO_MAC_CMAC_AES:
-               ret = RTE_CRYPTO_AUTH_AES_CMAC;
+               *algo = RTE_CRYPTO_AUTH_AES_CMAC;
                break;
        case VIRTIO_CRYPTO_MAC_KASUMI_F9:
-               ret = RTE_CRYPTO_AUTH_KASUMI_F9;
+               *algo = RTE_CRYPTO_AUTH_KASUMI_F9;
                break;
        case VIRTIO_CRYPTO_MAC_SNOW3G_UIA2:
-               ret = RTE_CRYPTO_AUTH_SNOW3G_UIA2;
+               *algo = RTE_CRYPTO_AUTH_SNOW3G_UIA2;
                break;
        case VIRTIO_CRYPTO_MAC_GMAC_AES:
-               ret = RTE_CRYPTO_AUTH_AES_GMAC;
-               break;
-       case VIRTIO_CRYPTO_MAC_GMAC_TWOFISH:
-               ret = -VIRTIO_CRYPTO_NOTSUPP;
+               *algo = RTE_CRYPTO_AUTH_AES_GMAC;
                break;
        case VIRTIO_CRYPTO_MAC_CBCMAC_AES:
-               ret = RTE_CRYPTO_AUTH_AES_CBC_MAC;
-               break;
-       case VIRTIO_CRYPTO_MAC_CBCMAC_KASUMI_F9:
-               ret = -VIRTIO_CRYPTO_NOTSUPP;
+               *algo = RTE_CRYPTO_AUTH_AES_CBC_MAC;
                break;
        case VIRTIO_CRYPTO_MAC_XCBC_AES:
-               ret = RTE_CRYPTO_AUTH_AES_XCBC_MAC;
+               *algo = RTE_CRYPTO_AUTH_AES_XCBC_MAC;
                break;
+       case VIRTIO_CRYPTO_MAC_CMAC_3DES:
+       case VIRTIO_CRYPTO_MAC_GMAC_TWOFISH:
+       case VIRTIO_CRYPTO_MAC_CBCMAC_KASUMI_F9:
+               return -VIRTIO_CRYPTO_NOTSUPP;
        default:
-               ret = -VIRTIO_CRYPTO_BADMSG;
-               break;
+               return -VIRTIO_CRYPTO_BADMSG;
        }
 
-       return ret;
+       return 0;
 }
 
 static int get_iv_len(enum rte_crypto_cipher_algorithm algo)
@@ -242,12 +234,16 @@ transform_cipher_param(struct rte_crypto_sym_xform *xform,
 {
        int ret;
 
-       ret = cipher_algo_transform(param->cipher_algo);
+       ret = cipher_algo_transform(param->cipher_algo, &xform->cipher.algo);
        if (unlikely(ret < 0))
                return ret;
 
+       if (param->cipher_key_len > VHOST_USER_CRYPTO_MAX_CIPHER_KEY_LENGTH) {
+               VC_LOG_DBG("Invalid cipher key length\n");
+               return -VIRTIO_CRYPTO_BADMSG;
+       }
+
        xform->type = RTE_CRYPTO_SYM_XFORM_CIPHER;
-       xform->cipher.algo = (enum rte_crypto_cipher_algorithm)ret;
        xform->cipher.key.length = param->cipher_key_len;
        if (xform->cipher.key.length > 0)
                xform->cipher.key.data = param->cipher_key_buf;
@@ -293,11 +289,17 @@ transform_chain_param(struct rte_crypto_sym_xform *xforms,
        }
 
        /* cipher */
-       ret = cipher_algo_transform(param->cipher_algo);
+       ret = cipher_algo_transform(param->cipher_algo,
+                       &xform_cipher->cipher.algo);
        if (unlikely(ret < 0))
                return ret;
+
+       if (param->cipher_key_len > VHOST_USER_CRYPTO_MAX_CIPHER_KEY_LENGTH) {
+               VC_LOG_DBG("Invalid cipher key length\n");
+               return -VIRTIO_CRYPTO_BADMSG;
+       }
+
        xform_cipher->type = RTE_CRYPTO_SYM_XFORM_CIPHER;
-       xform_cipher->cipher.algo = (enum rte_crypto_cipher_algorithm)ret;
        xform_cipher->cipher.key.length = param->cipher_key_len;
        xform_cipher->cipher.key.data = param->cipher_key_buf;
        ret = get_iv_len(xform_cipher->cipher.algo);
@@ -308,10 +310,15 @@ transform_chain_param(struct rte_crypto_sym_xform *xforms,
 
        /* auth */
        xform_auth->type = RTE_CRYPTO_SYM_XFORM_AUTH;
-       ret = auth_algo_transform(param->hash_algo);
+       ret = auth_algo_transform(param->hash_algo, &xform_auth->auth.algo);
        if (unlikely(ret < 0))
                return ret;
-       xform_auth->auth.algo = (enum rte_crypto_auth_algorithm)ret;
+
+       if (param->auth_key_len > VHOST_USER_CRYPTO_MAX_HMAC_KEY_LENGTH) {
+               VC_LOG_DBG("Invalid auth key length\n");
+               return -VIRTIO_CRYPTO_BADMSG;
+       }
+
        xform_auth->auth.digest_length = param->digest_len;
        xform_auth->auth.key.length = param->auth_key_len;
        xform_auth->auth.key.data = param->auth_key_buf;
@@ -1024,7 +1031,7 @@ prepare_sym_chain_op(struct vhost_crypto *vcrypto, struct rte_crypto_op *op,
                }
                if (unlikely(copy_data(rte_pktmbuf_mtod(m_src, uint8_t *),
                                vc_req, &desc, chain->para.src_data_len,
-                               nb_descs, vq_size)) < 0) {
+                               nb_descs, vq_size) < 0)) {
                        ret = VIRTIO_CRYPTO_BADMSG;
                        goto error_exit;
                }
@@ -1358,7 +1365,7 @@ vhost_crypto_complete_one_vm_requests(struct rte_crypto_op **ops,
        return processed;
 }
 
-int __rte_experimental
+int
 rte_vhost_crypto_create(int vid, uint8_t cryptodev_id,
                struct rte_mempool *sess_pool,
                struct rte_mempool *sess_priv_pool,
@@ -1451,7 +1458,7 @@ error_exit:
        return ret;
 }
 
-int __rte_experimental
+int
 rte_vhost_crypto_free(int vid)
 {
        struct virtio_net *dev = get_device(vid);
@@ -1480,7 +1487,7 @@ rte_vhost_crypto_free(int vid)
        return 0;
 }
 
-int __rte_experimental
+int
 rte_vhost_crypto_set_zero_copy(int vid, enum rte_vhost_crypto_zero_copy option)
 {
        struct virtio_net *dev = get_device(vid);
@@ -1535,7 +1542,7 @@ rte_vhost_crypto_set_zero_copy(int vid, enum rte_vhost_crypto_zero_copy option)
        return 0;
 }
 
-uint16_t __rte_experimental
+uint16_t
 rte_vhost_crypto_fetch_requests(int vid, uint32_t qid,
                struct rte_crypto_op **ops, uint16_t nb_ops)
 {
@@ -1550,18 +1557,18 @@ rte_vhost_crypto_fetch_requests(int vid, uint32_t qid,
 
        if (unlikely(dev == NULL)) {
                VC_LOG_ERR("Invalid vid %i", vid);
-               return -EINVAL;
+               return 0;
        }
 
        if (unlikely(qid >= VHOST_MAX_QUEUE_PAIRS)) {
                VC_LOG_ERR("Invalid qid %u", qid);
-               return -EINVAL;
+               return 0;
        }
 
        vcrypto = (struct vhost_crypto *)dev->extern_data;
        if (unlikely(vcrypto == NULL)) {
                VC_LOG_ERR("Cannot find required data, is it initialized?");
-               return -ENOENT;
+               return 0;
        }
 
        vq = dev->virtqueue[qid];
@@ -1583,7 +1590,7 @@ rte_vhost_crypto_fetch_requests(int vid, uint32_t qid,
                if (unlikely(rte_mempool_get_bulk(vcrypto->mbuf_pool,
                                (void **)mbufs, count * 2) < 0)) {
                        VC_LOG_ERR("Insufficient memory");
-                       return -ENOMEM;
+                       return 0;
                }
 
                for (i = 0; i < count; i++) {
@@ -1598,7 +1605,7 @@ rte_vhost_crypto_fetch_requests(int vid, uint32_t qid,
                        op->sym->m_dst->data_off = 0;
 
                        if (unlikely(vhost_crypto_process_one_req(vcrypto, vq,
-                                       op, head, desc_idx)) < 0)
+                                       op, head, desc_idx) < 0))
                                break;
                }
 
@@ -1613,7 +1620,7 @@ rte_vhost_crypto_fetch_requests(int vid, uint32_t qid,
                if (unlikely(rte_mempool_get_bulk(vcrypto->mbuf_pool,
                                (void **)mbufs, count) < 0)) {
                        VC_LOG_ERR("Insufficient memory");
-                       return -ENOMEM;
+                       return 0;
                }
 
                for (i = 0; i < count; i++) {
@@ -1645,7 +1652,7 @@ rte_vhost_crypto_fetch_requests(int vid, uint32_t qid,
        return i;
 }
 
-uint16_t __rte_experimental
+uint16_t
 rte_vhost_crypto_finalize_requests(struct rte_crypto_op **ops,
                uint16_t nb_ops, int *callfds, uint16_t *nb_callfds)
 {