crypto/aesni_gcm: do crypto op in dequeue function
author    Sergio Gonzalez Monroy <sergio.gonzalez.monroy@intel.com>
          Wed, 29 Mar 2017 13:42:53 +0000 (14:42 +0100)
committer Pablo de Lara <pablo.de.lara.guarch@intel.com>
          Wed, 5 Apr 2017 22:17:44 +0000 (00:17 +0200)
There is a bug when more crypto ops are enqueued than dequeued.
The return value is not checked when enqueuing the processed
crypto op onto the internal ring, so when the ring is full,
crypto ops and mbufs are leaked.
The issue is more obvious when different cores do the enqueue
and the dequeue.
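
For illustration, a minimal sketch of the failure mode and of the
check the old code omitted; the helper name and the recovery path
are hypothetical, not part of this patch:

#include <rte_ring.h>
#include <rte_crypto.h>

/* Hypothetical helper: rte_ring_enqueue() returns 0 on success and
 * -ENOBUFS when the ring is full; ignoring that result on a full
 * ring drops the only reference to the op, leaking it and its mbuf.
 */
static void
complete_op(struct rte_ring *processed_pkts, struct rte_crypto_op *op)
{
	if (rte_ring_enqueue(processed_pkts, (void *)op) != 0) {
		/* One possible recovery, shown only for contrast. */
		op->status = RTE_CRYPTO_OP_STATUS_ERROR;
		rte_crypto_op_free(op);
	}
}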

This patch moves the crypto operation to the dequeue function,
which fixes the above issue without having to check the number
of free entries in the ring.
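
A sketch of the caller-side contract after the change; the poll
loop, its names and the burst size are illustrative assumptions,
not code from this patch:

#include <rte_cryptodev.h>

/* Hypothetical application poll loop built on the new behaviour. */
static void
poll_qp(uint8_t dev_id, uint16_t qp_id,
		struct rte_crypto_op **ops, uint16_t nb_ops)
{
	struct rte_crypto_op *deq[32];

	/* enqueue_burst now only buffers ops in the ring; any op not
	 * accepted (index >= sent) stays owned by the caller, so
	 * nothing can leak on a full ring. */
	uint16_t sent = rte_cryptodev_enqueue_burst(dev_id, qp_id,
			ops, nb_ops);

	/* The AES-GCM processing itself now runs at dequeue time. */
	uint16_t done = rte_cryptodev_dequeue_burst(dev_id, qp_id,
			deq, 32);

	(void)sent;
	(void)done; /* a real loop would retry leftovers and free deq[] */
}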

Fixes: eec136f3c54f ("aesni_gcm: add driver for AES-GCM crypto operations")
Signed-off-by: Sergio Gonzalez Monroy <sergio.gonzalez.monroy@intel.com>
Acked-by: Declan Doherty <declan.doherty@intel.com>
drivers/crypto/aesni_gcm/aesni_gcm_pmd.c

index 638a95d51f5a9bf7483f7d67b68672c3fae588b8..ceec765110b53926e470bfa3c93e2aa20ff8f956 100644
@@ -375,55 +375,58 @@ handle_completed_gcm_crypto_op(struct aesni_gcm_qp *qp,
                rte_mempool_put(qp->sess_mp, op->sym->session);
                op->sym->session = NULL;
        }
-
-       rte_ring_enqueue(qp->processed_pkts, (void *)op);
 }
 
 static uint16_t
-aesni_gcm_pmd_enqueue_burst(void *queue_pair,
+aesni_gcm_pmd_dequeue_burst(void *queue_pair,
                struct rte_crypto_op **ops, uint16_t nb_ops)
 {
        struct aesni_gcm_session *sess;
        struct aesni_gcm_qp *qp = queue_pair;
 
-       int i, retval = 0;
+       int retval = 0;
+       unsigned int i, nb_dequeued;
+
+       nb_dequeued = rte_ring_dequeue_burst(qp->processed_pkts,
+                       (void **)ops, nb_ops, NULL);
 
-       for (i = 0; i < nb_ops; i++) {
+       for (i = 0; i < nb_dequeued; i++) {
 
                sess = aesni_gcm_get_session(qp, ops[i]->sym);
                if (unlikely(sess == NULL)) {
                        ops[i]->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
-                       qp->qp_stats.enqueue_err_count++;
+                       qp->qp_stats.dequeue_err_count++;
                        break;
                }
 
                retval = process_gcm_crypto_op(ops[i]->sym, sess);
                if (retval < 0) {
                        ops[i]->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
-                       qp->qp_stats.enqueue_err_count++;
+                       qp->qp_stats.dequeue_err_count++;
                        break;
                }
 
                handle_completed_gcm_crypto_op(qp, ops[i]);
-
-               qp->qp_stats.enqueued_count++;
        }
+
+       qp->qp_stats.dequeued_count += i;
+
        return i;
 }
 
 static uint16_t
-aesni_gcm_pmd_dequeue_burst(void *queue_pair,
+aesni_gcm_pmd_enqueue_burst(void *queue_pair,
                struct rte_crypto_op **ops, uint16_t nb_ops)
 {
        struct aesni_gcm_qp *qp = queue_pair;
 
-       unsigned nb_dequeued;
+       unsigned int nb_enqueued;
 
-       nb_dequeued = rte_ring_dequeue_burst(qp->processed_pkts,
+       nb_enqueued = rte_ring_enqueue_burst(qp->processed_pkts,
                        (void **)ops, nb_ops, NULL);
-       qp->qp_stats.dequeued_count += nb_dequeued;
+       qp->qp_stats.enqueued_count += nb_enqueued;
 
-       return nb_dequeued;
+       return nb_enqueued;
 }
 
 static int aesni_gcm_remove(const char *name);