From 2d03ec6abd2e32b2bf19369bc2cf975214cecc7a Mon Sep 17 00:00:00 2001
From: Tomasz Kulasek
Date: Fri, 13 Jan 2017 16:23:15 +0100
Subject: [PATCH] crypto: support scatter-gather in software drivers

This patch introduces the RTE_CRYPTODEV_FF_MBUF_SCATTER_GATHER feature
flag, which indicates that the selected crypto device supports segmented
mbufs natively, so they do not need to be coalesced before the crypto
operation.

Since using segmented buffers with PMDs that do not support them natively
may have unpredictable results, an additional check is made in debug
builds.

Signed-off-by: Tomasz Kulasek
Acked-by: Declan Doherty
---
 drivers/crypto/aesni_gcm/aesni_gcm_pmd.c   | 14 ++++++++++++++
 drivers/crypto/aesni_mb/rte_aesni_mb_pmd.c | 19 ++++++++++++++++---
 drivers/crypto/kasumi/rte_kasumi_pmd.c     | 13 +++++++++++++
 drivers/crypto/null/null_crypto_pmd.c      |  3 ++-
 drivers/crypto/snow3g/rte_snow3g_pmd.c     | 15 +++++++++++++++
 drivers/crypto/zuc/rte_zuc_pmd.c           | 13 +++++++++++++
 lib/librte_cryptodev/rte_cryptodev.c       |  4 ++--
 lib/librte_cryptodev/rte_cryptodev.h       |  2 ++
 8 files changed, 77 insertions(+), 6 deletions(-)

diff --git a/drivers/crypto/aesni_gcm/aesni_gcm_pmd.c b/drivers/crypto/aesni_gcm/aesni_gcm_pmd.c
index af3d60f0cd..5af22f72da 100644
--- a/drivers/crypto/aesni_gcm/aesni_gcm_pmd.c
+++ b/drivers/crypto/aesni_gcm/aesni_gcm_pmd.c
@@ -377,6 +377,20 @@ aesni_gcm_pmd_enqueue_burst(void *queue_pair,
 			break;
 		}
 
+#ifdef RTE_LIBRTE_PMD_AESNI_GCM_DEBUG
+		if (!rte_pktmbuf_is_contiguous(ops[i]->sym->m_src) ||
+				(ops[i]->sym->m_dst != NULL &&
+				!rte_pktmbuf_is_contiguous(
+						ops[i]->sym->m_dst))) {
+			ops[i]->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+			GCM_LOG_ERR("PMD supports only contiguous mbufs, "
+				"op (%p) provides noncontiguous mbuf as "
+				"source/destination buffer.\n", ops[i]);
+			qp->qp_stats.enqueue_err_count++;
+			break;
+		}
+#endif
+
 		retval = process_gcm_crypto_op(qp, ops[i]->sym, sess);
 		if (retval < 0) {
 			ops[i]->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
diff --git a/drivers/crypto/aesni_mb/rte_aesni_mb_pmd.c b/drivers/crypto/aesni_mb/rte_aesni_mb_pmd.c
index 6d27d75285..25f681be1b 100644
--- a/drivers/crypto/aesni_mb/rte_aesni_mb_pmd.c
+++ b/drivers/crypto/aesni_mb/rte_aesni_mb_pmd.c
@@ -571,15 +571,28 @@ aesni_mb_pmd_enqueue_burst(void *queue_pair, struct rte_crypto_op **ops,
 	int i, processed_jobs = 0;
 
 	for (i = 0; i < nb_ops; i++) {
-#ifdef RTE_LIBRTE_AESNI_MB_DEBUG
-		if (unlikely(op->type != RTE_CRYPTO_OP_TYPE_SYMMETRIC)) {
+#ifdef RTE_LIBRTE_PMD_AESNI_MB_DEBUG
+		if (unlikely(ops[i]->type != RTE_CRYPTO_OP_TYPE_SYMMETRIC)) {
 			MB_LOG_ERR("PMD only supports symmetric crypto "
 				"operation requests, op (%p) is not a "
-				"symmetric operation.", op);
+				"symmetric operation.", ops[i]);
+			qp->stats.enqueue_err_count++;
+			goto flush_jobs;
+		}
+
+		if (!rte_pktmbuf_is_contiguous(ops[i]->sym->m_src) ||
+				(ops[i]->sym->m_dst != NULL &&
+				!rte_pktmbuf_is_contiguous(
+						ops[i]->sym->m_dst))) {
+			MB_LOG_ERR("PMD supports only contiguous mbufs, "
+				"op (%p) provides noncontiguous mbuf as "
+				"source/destination buffer.\n", ops[i]);
+			ops[i]->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
 			qp->stats.enqueue_err_count++;
 			goto flush_jobs;
 		}
 #endif
+
 		sess = get_session(qp, ops[i]);
 		if (unlikely(sess == NULL)) {
 			qp->stats.enqueue_err_count++;
diff --git a/drivers/crypto/kasumi/rte_kasumi_pmd.c b/drivers/crypto/kasumi/rte_kasumi_pmd.c
index b119da28c9..4bdd7bbf71 100644
--- a/drivers/crypto/kasumi/rte_kasumi_pmd.c
+++ b/drivers/crypto/kasumi/rte_kasumi_pmd.c
@@ -455,6 +455,19 @@ kasumi_pmd_enqueue_burst(void *queue_pair, struct rte_crypto_op **ops,
 	for (i = 0; i < nb_ops; i++) {
 		curr_c_op = ops[i];
 
+#ifdef RTE_LIBRTE_PMD_KASUMI_DEBUG
+		if (!rte_pktmbuf_is_contiguous(curr_c_op->sym->m_src) ||
+				(curr_c_op->sym->m_dst != NULL &&
+				!rte_pktmbuf_is_contiguous(
+						curr_c_op->sym->m_dst))) {
+			KASUMI_LOG_ERR("PMD supports only contiguous mbufs, "
+				"op (%p) provides noncontiguous mbuf as "
+				"source/destination buffer.\n", curr_c_op);
+			curr_c_op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+			break;
+		}
+#endif
+
 		/* Set status as enqueued (not processed yet) by default. */
 		curr_c_op->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
 
diff --git a/drivers/crypto/null/null_crypto_pmd.c b/drivers/crypto/null/null_crypto_pmd.c
index c69606b390..c37d3d654a 100644
--- a/drivers/crypto/null/null_crypto_pmd.c
+++ b/drivers/crypto/null/null_crypto_pmd.c
@@ -216,7 +216,8 @@ cryptodev_null_create(const char *name,
 	dev->enqueue_burst = null_crypto_pmd_enqueue_burst;
 
 	dev->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO |
-			RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING;
+			RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING |
+			RTE_CRYPTODEV_FF_MBUF_SCATTER_GATHER;
 
 	internals = dev->data->dev_private;
 
diff --git a/drivers/crypto/snow3g/rte_snow3g_pmd.c b/drivers/crypto/snow3g/rte_snow3g_pmd.c
index 3b4292a698..9a6f16d1b6 100644
--- a/drivers/crypto/snow3g/rte_snow3g_pmd.c
+++ b/drivers/crypto/snow3g/rte_snow3g_pmd.c
@@ -330,6 +330,21 @@ process_ops(struct rte_crypto_op **ops, struct snow3g_session *session,
 	unsigned i;
 	unsigned enqueued_ops, processed_ops;
 
+#ifdef RTE_LIBRTE_PMD_SNOW3G_DEBUG
+	for (i = 0; i < num_ops; i++) {
+		if (!rte_pktmbuf_is_contiguous(ops[i]->sym->m_src) ||
+				(ops[i]->sym->m_dst != NULL &&
+				!rte_pktmbuf_is_contiguous(
+						ops[i]->sym->m_dst))) {
+			SNOW3G_LOG_ERR("PMD supports only contiguous mbufs, "
+				"op (%p) provides noncontiguous mbuf as "
+				"source/destination buffer.\n", ops[i]);
+			ops[i]->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+			return 0;
+		}
+	}
+#endif
+
 	switch (session->op) {
 	case SNOW3G_OP_ONLY_CIPHER:
 		processed_ops = process_snow3g_cipher_op(ops,
diff --git a/drivers/crypto/zuc/rte_zuc_pmd.c b/drivers/crypto/zuc/rte_zuc_pmd.c
index 3849119350..bf53f76381 100644
--- a/drivers/crypto/zuc/rte_zuc_pmd.c
+++ b/drivers/crypto/zuc/rte_zuc_pmd.c
@@ -211,6 +211,19 @@ process_zuc_cipher_op(struct rte_crypto_op **ops,
 			break;
 		}
 
+#ifdef RTE_LIBRTE_PMD_ZUC_DEBUG
+		if (!rte_pktmbuf_is_contiguous(ops[i]->sym->m_src) ||
+				(ops[i]->sym->m_dst != NULL &&
+				!rte_pktmbuf_is_contiguous(
+						ops[i]->sym->m_dst))) {
+			ZUC_LOG_ERR("PMD supports only contiguous mbufs, "
+				"op (%p) provides noncontiguous mbuf as "
+				"source/destination buffer.\n", ops[i]);
+			ops[i]->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+			break;
+		}
+#endif
+
 		src[i] = rte_pktmbuf_mtod(ops[i]->sym->m_src, uint8_t *) +
 				(ops[i]->sym->cipher.data.offset >> 3);
 		dst[i] = ops[i]->sym->m_dst ?
diff --git a/lib/librte_cryptodev/rte_cryptodev.c b/lib/librte_cryptodev/rte_cryptodev.c
index 54e95d5c67..bbab4b30ac 100644
--- a/lib/librte_cryptodev/rte_cryptodev.c
+++ b/lib/librte_cryptodev/rte_cryptodev.c
@@ -211,13 +211,13 @@ rte_cryptodev_get_feature_name(uint64_t flag)
 		return "CPU_AESNI";
 	case RTE_CRYPTODEV_FF_HW_ACCELERATED:
 		return "HW_ACCELERATED";
-
+	case RTE_CRYPTODEV_FF_MBUF_SCATTER_GATHER:
+		return "MBUF_SCATTER_GATHER";
 	default:
 		return NULL;
 	}
 }
 
-
 int
 rte_cryptodev_create_vdev(const char *name, const char *args)
 {
diff --git a/lib/librte_cryptodev/rte_cryptodev.h b/lib/librte_cryptodev/rte_cryptodev.h
index 29d8eece87..fa311a9e5e 100644
--- a/lib/librte_cryptodev/rte_cryptodev.h
+++ b/lib/librte_cryptodev/rte_cryptodev.h
@@ -227,6 +227,8 @@ struct rte_cryptodev_capabilities {
 /**< Operations are off-loaded to an external hardware accelerator */
 #define RTE_CRYPTODEV_FF_CPU_AVX512		(1ULL << 8)
 /**< Utilises CPU SIMD AVX512 instructions */
+#define RTE_CRYPTODEV_FF_MBUF_SCATTER_GATHER	(1ULL << 9)
+/**< Scatter-gather mbufs are supported */
 
 
 /**
-- 
2.20.1
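
For reference, a minimal sketch of how an application might check the new
flag before enqueuing segmented mbufs. It assumes an already configured
crypto device identified by dev_id, and that rte_pktmbuf_linearize() from
librte_mbuf is available as a coalescing fallback; the helper name
prepare_crypto_mbuf() is hypothetical:

#include <rte_cryptodev.h>
#include <rte_mbuf.h>

/*
 * Make 'm' acceptable for crypto device 'dev_id': leave it unchanged if the
 * PMD supports scatter-gather or if it is already a single segment,
 * otherwise coalesce the chain. Returns 0 on success, negative on failure.
 */
static int
prepare_crypto_mbuf(uint8_t dev_id, struct rte_mbuf *m)
{
	struct rte_cryptodev_info info;

	rte_cryptodev_info_get(dev_id, &info);

	/* Device handles segmented mbufs natively: nothing to do. */
	if (info.feature_flags & RTE_CRYPTODEV_FF_MBUF_SCATTER_GATHER)
		return 0;

	/* Already contiguous: nothing to do either. */
	if (rte_pktmbuf_is_contiguous(m))
		return 0;

	/* Coalesce the chain into the first segment. */
	return rte_pktmbuf_linearize(m);
}

If the device reports MBUF_SCATTER_GATHER, segmented mbufs are passed
through unchanged; otherwise the chain is coalesced into the first segment,
which requires enough tailroom there, and a non-zero return means the caller
must copy the data into a sufficiently large contiguous mbuf before building
the crypto operation.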