crypto/scheduler: fix queue pair configuration
author     Fan Zhang <roy.fan.zhang@intel.com>
           Mon, 10 Apr 2017 15:00:54 +0000 (16:00 +0100)
committer  Pablo de Lara <pablo.de.lara.guarch@intel.com>
           Thu, 20 Apr 2017 09:32:45 +0000 (11:32 +0200)
This patch fixes the queue pair configuration of the scheduler PMD.
Each queue pair of a scheduler may be configured with a different
nb_descriptors value, but the driver kept a single value in the shared
scheduler context and applied it to every queue pair. In addition, the
maximum number of objects available in a queue pair is one smaller than
nb_descriptors. This patch stores the limit per queue pair and accounts
for the reduced capacity.
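
The one-object gap matches DPDK's classic ring behaviour: queue pairs in
the software crypto PMDs are ring-backed, and a default rte_ring created
with size N holds at most N - 1 entries, because one slot stays empty to
tell a full ring apart from an empty one. A minimal standalone sketch of
that property (demo code written under that assumption; it is not part
of this patch):

    #include <stdio.h>
    #include <rte_eal.h>
    #include <rte_ring.h>

    int
    main(int argc, char **argv)
    {
            unsigned int i;
            int obj;

            if (rte_eal_init(argc, argv) < 0)
                    return -1;

            /* default flags, power-of-two size: usable slots = 8 - 1 */
            struct rte_ring *r = rte_ring_create("demo", 8,
                            SOCKET_ID_ANY, 0);
            if (r == NULL)
                    return -1;

            for (i = 0; i < 8; i++)
                    if (rte_ring_enqueue(r, &obj) != 0)
                            break;

            /* prints "enqueued 7 of 8": capacity is size - 1 */
            printf("enqueued %u of 8\n", i);

            rte_ring_free(r);
            return 0;
    }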

Fixes: a783aa634410 ("crypto/scheduler: add packet size based mode")

Signed-off-by: Fan Zhang <roy.fan.zhang@intel.com>
Acked-by: Pablo de Lara <pablo.de.lara.guarch@intel.com>
drivers/crypto/scheduler/scheduler_pkt_size_distr.c
drivers/crypto/scheduler/scheduler_pmd_ops.c
drivers/crypto/scheduler/scheduler_pmd_private.h

diff --git a/drivers/crypto/scheduler/scheduler_pkt_size_distr.c b/drivers/crypto/scheduler/scheduler_pkt_size_distr.c
index 94196d9..6b628df 100644
--- a/drivers/crypto/scheduler/scheduler_pkt_size_distr.c
+++ b/drivers/crypto/scheduler/scheduler_pkt_size_distr.c
@@ -52,7 +52,6 @@ struct psd_scheduler_qp_ctx {
        struct scheduler_slave primary_slave;
        struct scheduler_slave secondary_slave;
        uint32_t threshold;
-       uint32_t max_nb_objs;
        uint8_t deq_idx;
 } __rte_cache_aligned;
 
@@ -65,13 +64,13 @@ struct psd_schedule_op {
 static uint16_t
 schedule_enqueue(void *qp, struct rte_crypto_op **ops, uint16_t nb_ops)
 {
-       struct psd_scheduler_qp_ctx *qp_ctx =
-                       ((struct scheduler_qp_ctx *)qp)->private_qp_ctx;
+       struct scheduler_qp_ctx *qp_ctx = qp;
+       struct psd_scheduler_qp_ctx *psd_qp_ctx = qp_ctx->private_qp_ctx;
        struct rte_crypto_op *sched_ops[NB_PKT_SIZE_SLAVES][nb_ops];
        struct scheduler_session *sess;
        uint32_t in_flight_ops[NB_PKT_SIZE_SLAVES] = {
-                       qp_ctx->primary_slave.nb_inflight_cops,
-                       qp_ctx->secondary_slave.nb_inflight_cops
+                       psd_qp_ctx->primary_slave.nb_inflight_cops,
+                       psd_qp_ctx->secondary_slave.nb_inflight_cops
        };
        struct psd_schedule_op enq_ops[NB_PKT_SIZE_SLAVES] = {
                {PRIMARY_SLAVE_IDX, 0}, {SECONDARY_SLAVE_IDX, 0}
@@ -107,7 +106,7 @@ schedule_enqueue(void *qp, struct rte_crypto_op **ops, uint16_t nb_ops)
                job_len += (ops[i]->sym->cipher.data.length == 0) *
                                ops[i]->sym->auth.data.length;
                /* decide the target op based on the job length */
-               p_enq_op = &enq_ops[!(job_len & qp_ctx->threshold)];
+               p_enq_op = &enq_ops[!(job_len & psd_qp_ctx->threshold)];
 
                /* stop schedule cops before the queue is full, this shall
                 * prevent the failed enqueue
@@ -127,7 +126,7 @@ schedule_enqueue(void *qp, struct rte_crypto_op **ops, uint16_t nb_ops)
                job_len = ops[i+1]->sym->cipher.data.length;
                job_len += (ops[i+1]->sym->cipher.data.length == 0) *
                                ops[i+1]->sym->auth.data.length;
-               p_enq_op = &enq_ops[!(job_len & qp_ctx->threshold)];
+               p_enq_op = &enq_ops[!(job_len & psd_qp_ctx->threshold)];
 
                if (p_enq_op->pos + in_flight_ops[p_enq_op->slave_idx] ==
                                qp_ctx->max_nb_objs) {
@@ -144,7 +143,7 @@ schedule_enqueue(void *qp, struct rte_crypto_op **ops, uint16_t nb_ops)
                job_len = ops[i+2]->sym->cipher.data.length;
                job_len += (ops[i+2]->sym->cipher.data.length == 0) *
                                ops[i+2]->sym->auth.data.length;
-               p_enq_op = &enq_ops[!(job_len & qp_ctx->threshold)];
+               p_enq_op = &enq_ops[!(job_len & psd_qp_ctx->threshold)];
 
                if (p_enq_op->pos + in_flight_ops[p_enq_op->slave_idx] ==
                                qp_ctx->max_nb_objs) {
@@ -162,7 +161,7 @@ schedule_enqueue(void *qp, struct rte_crypto_op **ops, uint16_t nb_ops)
                job_len = ops[i+3]->sym->cipher.data.length;
                job_len += (ops[i+3]->sym->cipher.data.length == 0) *
                                ops[i+3]->sym->auth.data.length;
-               p_enq_op = &enq_ops[!(job_len & qp_ctx->threshold)];
+               p_enq_op = &enq_ops[!(job_len & psd_qp_ctx->threshold)];
 
                if (p_enq_op->pos + in_flight_ops[p_enq_op->slave_idx] ==
                                qp_ctx->max_nb_objs) {
@@ -182,7 +181,7 @@ schedule_enqueue(void *qp, struct rte_crypto_op **ops, uint16_t nb_ops)
                job_len = ops[i]->sym->cipher.data.length;
                job_len += (ops[i]->sym->cipher.data.length == 0) *
                                ops[i]->sym->auth.data.length;
-               p_enq_op = &enq_ops[!(job_len & qp_ctx->threshold)];
+               p_enq_op = &enq_ops[!(job_len & psd_qp_ctx->threshold)];
 
                if (p_enq_op->pos + in_flight_ops[p_enq_op->slave_idx] ==
                                qp_ctx->max_nb_objs) {
@@ -196,23 +195,23 @@ schedule_enqueue(void *qp, struct rte_crypto_op **ops, uint16_t nb_ops)
        }
 
        processed_ops_pri = rte_cryptodev_enqueue_burst(
-                       qp_ctx->primary_slave.dev_id,
-                       qp_ctx->primary_slave.qp_id,
+                       psd_qp_ctx->primary_slave.dev_id,
+                       psd_qp_ctx->primary_slave.qp_id,
                        sched_ops[PRIMARY_SLAVE_IDX],
                        enq_ops[PRIMARY_SLAVE_IDX].pos);
        /* enqueue shall not fail as the slave queue is monitored */
        RTE_ASSERT(processed_ops_pri == enq_ops[PRIMARY_SLAVE_IDX].pos);
 
-       qp_ctx->primary_slave.nb_inflight_cops += processed_ops_pri;
+       psd_qp_ctx->primary_slave.nb_inflight_cops += processed_ops_pri;
 
        processed_ops_sec = rte_cryptodev_enqueue_burst(
-                       qp_ctx->secondary_slave.dev_id,
-                       qp_ctx->secondary_slave.qp_id,
+                       psd_qp_ctx->secondary_slave.dev_id,
+                       psd_qp_ctx->secondary_slave.qp_id,
                        sched_ops[SECONDARY_SLAVE_IDX],
                        enq_ops[SECONDARY_SLAVE_IDX].pos);
        RTE_ASSERT(processed_ops_sec == enq_ops[SECONDARY_SLAVE_IDX].pos);
 
-       qp_ctx->secondary_slave.nb_inflight_cops += processed_ops_sec;
+       psd_qp_ctx->secondary_slave.nb_inflight_cops += processed_ops_sec;
 
        return processed_ops_pri + processed_ops_sec;
 }
@@ -325,8 +324,6 @@ scheduler_start(struct rte_cryptodev *dev)
                ps_qp_ctx->secondary_slave.nb_inflight_cops = 0;
 
                ps_qp_ctx->threshold = psd_ctx->threshold;
-
-               ps_qp_ctx->max_nb_objs = sched_ctx->qp_conf.nb_descriptors;
        }
 
        if (sched_ctx->reordering_enabled) {
diff --git a/drivers/crypto/scheduler/scheduler_pmd_ops.c b/drivers/crypto/scheduler/scheduler_pmd_ops.c
index 34e0cc9..725ba9d 100644
--- a/drivers/crypto/scheduler/scheduler_pmd_ops.c
+++ b/drivers/crypto/scheduler/scheduler_pmd_ops.c
@@ -367,14 +367,15 @@ scheduler_pmd_qp_setup(struct rte_cryptodev *dev, uint16_t qp_id,
                        return ret;
        }
 
-       sched_ctx->qp_conf.nb_descriptors = qp_conf->nb_descriptors;
-
        /* Allocate the queue pair data structure. */
        qp_ctx = rte_zmalloc_socket(name, sizeof(*qp_ctx), RTE_CACHE_LINE_SIZE,
                        socket_id);
        if (qp_ctx == NULL)
                return -ENOMEM;
 
+       /* The actual available object number = nb_descriptors - 1 */
+       qp_ctx->max_nb_objs = qp_conf->nb_descriptors - 1;
+
        dev->data->queue_pairs[qp_id] = qp_ctx;
 
        if (*sched_ctx->ops.config_queue_pair) {
diff --git a/drivers/crypto/scheduler/scheduler_pmd_private.h b/drivers/crypto/scheduler/scheduler_pmd_private.h
index 2f4feea..cfffa06 100644
--- a/drivers/crypto/scheduler/scheduler_pmd_private.h
+++ b/drivers/crypto/scheduler/scheduler_pmd_private.h
@@ -84,8 +84,6 @@ struct scheduler_ctx {
 
        uint8_t reordering_enabled;
 
-       struct rte_cryptodev_qp_conf qp_conf;
-
        char name[RTE_CRYPTODEV_SCHEDULER_NAME_MAX_LEN];
        char description[RTE_CRYPTODEV_SCHEDULER_DESC_MAX_LEN];
 } __rte_cache_aligned;
@@ -93,6 +91,8 @@ struct scheduler_ctx {
 struct scheduler_qp_ctx {
        void *private_qp_ctx;
 
+       uint32_t max_nb_objs;
+
        struct rte_ring *order_ring;
        uint32_t seqn;
 } __rte_cache_aligned;
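
Taken together, the fix moves the descriptor limit into the generic
scheduler_qp_ctx and sizes it as nb_descriptors - 1, so each queue pair
carries its own bound. The toy sketch below (hypothetical demo_qp and
demo_admit names, not code from the patch) illustrates the kind of
per-slave admission check schedule_enqueue() performs against that
limit:

    #include <stdint.h>
    #include <stdio.h>

    /* Hypothetical stand-in for the scheduler's per-queue-pair state. */
    struct demo_qp {
            uint32_t max_nb_objs;   /* usable slots = nb_descriptors - 1 */
            uint32_t nb_inflight;   /* ops enqueued but not yet dequeued */
    };

    /* Admit at most as many ops as the queue pair can still absorb,
     * mirroring the guard schedule_enqueue() applies per slave so that
     * the burst enqueue to the slave cannot fail.
     */
    static uint16_t
    demo_admit(struct demo_qp *qp, uint16_t nb_ops)
    {
            uint32_t room = qp->max_nb_objs - qp->nb_inflight;
            uint16_t n = nb_ops < room ? nb_ops : (uint16_t)room;

            qp->nb_inflight += n;
            return n;
    }

    int
    main(void)
    {
            /* nb_descriptors = 8 -> at most 7 ops may be in flight */
            struct demo_qp qp = { .max_nb_objs = 8 - 1, .nb_inflight = 5 };

            /* prints "admitted 2 of 4 ops" */
            printf("admitted %u of 4 ops\n", demo_admit(&qp, 4));
            return 0;
    }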