 			break;
 		}
+#ifdef RTE_LIBRTE_PMD_AESNI_GCM_DEBUG
+		if (!rte_pktmbuf_is_contiguous(ops[i]->sym->m_src) ||
+				(ops[i]->sym->m_dst != NULL &&
+				!rte_pktmbuf_is_contiguous(
+						ops[i]->sym->m_dst))) {
+			ops[i]->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+			GCM_LOG_ERR("PMD supports only contiguous mbufs, "
+				"op (%p) provides noncontiguous mbuf as "
+				"source/destination buffer.\n", ops[i]);
+			qp->qp_stats.enqueue_err_count++;
+			break;
+		}
+#endif
+
 		retval = process_gcm_crypto_op(qp, ops[i]->sym, sess);
 		if (retval < 0) {
 			ops[i]->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
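
The debug-only guard above works because rte_pktmbuf_is_contiguous() reduces to a single-segment test (nb_segs == 1). A minimal sketch of the kind of buffer this path rejects, assuming an already-initialized mempool pointer mbuf_pool (hypothetical name; error checks omitted):

	#include <rte_mbuf.h>

	static int
	chained_mbuf_is_rejected(struct rte_mempool *mbuf_pool)
	{
		struct rte_mbuf *head = rte_pktmbuf_alloc(mbuf_pool);
		struct rte_mbuf *tail = rte_pktmbuf_alloc(mbuf_pool);

		rte_pktmbuf_append(head, 64);
		rte_pktmbuf_append(tail, 64);

		/* Chaining makes head span two segments (nb_segs == 2). */
		rte_pktmbuf_chain(head, tail);

		/* Returns 0 here: head is no longer contiguous, so the
		 * PMD's debug check would mark the op INVALID_ARGS. */
		return rte_pktmbuf_is_contiguous(head);
	}
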
 	int i, processed_jobs = 0;
 	for (i = 0; i < nb_ops; i++) {
-#ifdef RTE_LIBRTE_AESNI_MB_DEBUG
-		if (unlikely(op->type != RTE_CRYPTO_OP_TYPE_SYMMETRIC)) {
+#ifdef RTE_LIBRTE_PMD_AESNI_MB_DEBUG
+		if (unlikely(ops[i]->type != RTE_CRYPTO_OP_TYPE_SYMMETRIC)) {
 			MB_LOG_ERR("PMD only supports symmetric crypto "
 				"operation requests, op (%p) is not a "
-				"symmetric operation.", op);
+				"symmetric operation.", ops[i]);
+			qp->stats.enqueue_err_count++;
+			goto flush_jobs;
+		}
+
+		if (!rte_pktmbuf_is_contiguous(ops[i]->sym->m_src) ||
+				(ops[i]->sym->m_dst != NULL &&
+				!rte_pktmbuf_is_contiguous(
+						ops[i]->sym->m_dst))) {
+			MB_LOG_ERR("PMD supports only contiguous mbufs, "
+				"op (%p) provides noncontiguous mbuf as "
+				"source/destination buffer.\n", ops[i]);
+			ops[i]->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
 			qp->stats.enqueue_err_count++;
 			goto flush_jobs;
 		}
 #endif
+
 		sess = get_session(qp, ops[i]);
 		if (unlikely(sess == NULL)) {
 			qp->stats.enqueue_err_count++;
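
For the symmetric-type check retained above, ops reaching the enqueue callback must have been allocated as RTE_CRYPTO_OP_TYPE_SYMMETRIC. A hedged sketch of the application side, assuming an op mempool op_pool created earlier with rte_crypto_op_pool_create() (pool name hypothetical):

	#include <rte_crypto.h>

	static struct rte_crypto_op *
	alloc_sym_op(struct rte_mempool *op_pool)
	{
		/* An op not allocated as symmetric would trip the debug
		 * type check in the enqueue path above. */
		return rte_crypto_op_alloc(op_pool,
				RTE_CRYPTO_OP_TYPE_SYMMETRIC);
	}
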
 	for (i = 0; i < nb_ops; i++) {
 		curr_c_op = ops[i];
+#ifdef RTE_LIBRTE_PMD_KASUMI_DEBUG
+		if (!rte_pktmbuf_is_contiguous(curr_c_op->sym->m_src) ||
+				(curr_c_op->sym->m_dst != NULL &&
+				!rte_pktmbuf_is_contiguous(
+						curr_c_op->sym->m_dst))) {
+			KASUMI_LOG_ERR("PMD supports only contiguous mbufs, "
+				"op (%p) provides noncontiguous mbuf as "
+				"source/destination buffer.\n", curr_c_op);
+			curr_c_op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+			break;
+		}
+#endif
+
 		/* Set status as enqueued (not processed yet) by default. */
 		curr_c_op->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
 	dev->enqueue_burst = null_crypto_pmd_enqueue_burst;
 	dev->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO |
-			RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING;
+			RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING |
+			RTE_CRYPTODEV_FF_MBUF_SCATTER_GATHER;
 	internals = dev->data->dev_private;
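
Because the null PMD now advertises RTE_CRYPTODEV_FF_MBUF_SCATTER_GATHER, an application can gate its use of segmented mbufs on the capability at runtime instead of hard-coding per-PMD knowledge. A minimal sketch, assuming dev_id refers to an already-configured device:

	#include <rte_cryptodev.h>

	static int
	dev_handles_segmented_mbufs(uint8_t dev_id)
	{
		struct rte_cryptodev_info info;

		rte_cryptodev_info_get(dev_id, &info);

		/* Nonzero if chained (multi-segment) mbufs are accepted. */
		return (info.feature_flags &
				RTE_CRYPTODEV_FF_MBUF_SCATTER_GATHER) != 0;
	}
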
 	unsigned i;
 	unsigned enqueued_ops, processed_ops;
+#ifdef RTE_LIBRTE_PMD_SNOW3G_DEBUG
+	for (i = 0; i < num_ops; i++) {
+		if (!rte_pktmbuf_is_contiguous(ops[i]->sym->m_src) ||
+				(ops[i]->sym->m_dst != NULL &&
+				!rte_pktmbuf_is_contiguous(
+						ops[i]->sym->m_dst))) {
+			SNOW3G_LOG_ERR("PMD supports only contiguous mbufs, "
+				"op (%p) provides noncontiguous mbuf as "
+				"source/destination buffer.\n", ops[i]);
+			ops[i]->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+			return 0;
+		}
+	}
+#endif
+
 	switch (session->op) {
 	case SNOW3G_OP_ONLY_CIPHER:
 		processed_ops = process_snow3g_cipher_op(ops,
 			break;
 		}
+#ifdef RTE_LIBRTE_PMD_ZUC_DEBUG
+		if (!rte_pktmbuf_is_contiguous(ops[i]->sym->m_src) ||
+				(ops[i]->sym->m_dst != NULL &&
+				!rte_pktmbuf_is_contiguous(
+						ops[i]->sym->m_dst))) {
+			ZUC_LOG_ERR("PMD supports only contiguous mbufs, "
+				"op (%p) provides noncontiguous mbuf as "
+				"source/destination buffer.\n", ops[i]);
+			ops[i]->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+			break;
+		}
+#endif
+
 		src[i] = rte_pktmbuf_mtod(ops[i]->sym->m_src, uint8_t *) +
 				(ops[i]->sym->cipher.data.offset >> 3);
 		dst[i] = ops[i]->sym->m_dst ?
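
As the context lines above show, cipher.data.offset is expressed in bits for the 3GPP algorithms (SNOW 3G, KASUMI, ZUC), so the PMD shifts right by 3 to turn it into a byte offset into the contiguous mbuf data. For illustration only, with hypothetical names m and offset_in_bits, and assuming a byte-aligned offset:

	/* An offset of 16 bits is 16 >> 3 == 2 bytes past the start of
	 * the mbuf's data area; non-byte-aligned offsets would need
	 * additional bit-level handling. */
	uint8_t *src = rte_pktmbuf_mtod(m, uint8_t *) + (offset_in_bits >> 3);
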
return "CPU_AESNI";
case RTE_CRYPTODEV_FF_HW_ACCELERATED:
return "HW_ACCELERATED";
-
+ case RTE_CRYPTODEV_FF_MBUF_SCATTER_GATHER:
+ return "MBUF_SCATTER_GATHER";
default:
return NULL;
}
}
-
int
rte_cryptodev_create_vdev(const char *name, const char *args)
{
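
With the new case added to rte_cryptodev_get_feature_name(), a diagnostic loop can print every advertised flag by walking the 64 bits of feature_flags. A sketch, again assuming a configured dev_id:

	#include <stdio.h>
	#include <rte_cryptodev.h>

	static void
	dump_feature_flags(uint8_t dev_id)
	{
		struct rte_cryptodev_info info;
		uint64_t flag;

		rte_cryptodev_info_get(dev_id, &info);

		/* Test each bit; unknown bits map to NULL and are skipped. */
		for (flag = 1; flag != 0; flag <<= 1) {
			const char *name = rte_cryptodev_get_feature_name(flag);

			if ((info.feature_flags & flag) && name != NULL)
				printf("%s\n", name);
		}
	}
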
 /**< Operations are off-loaded to an external hardware accelerator */
 #define RTE_CRYPTODEV_FF_CPU_AVX512		(1ULL << 8)
 /**< Utilises CPU SIMD AVX512 instructions */
+#define RTE_CRYPTODEV_FF_MBUF_SCATTER_GATHER	(1ULL << 9)
+/**< Scatter-gather mbufs are supported */
 /**