Patch against dpdk.git, v20.11-rc0.

diff --git a/drivers/crypto/scheduler/scheduler_multicore.c b/drivers/crypto/scheduler/scheduler_multicore.c
index b2ce44c..2d6790b 100644
--- a/drivers/crypto/scheduler/scheduler_multicore.c
+++ b/drivers/crypto/scheduler/scheduler_multicore.c
@@ -21,8 +21,8 @@ struct mc_scheduler_ctx {
        uint32_t num_workers;             /**< Number of workers polling */
        uint32_t stop_signal;
 
-       struct rte_ring *sched_enq_ring[RTE_CRYPTODEV_SCHEDULER_MAX_NB_WORKER_CORES];
-       struct rte_ring *sched_deq_ring[RTE_CRYPTODEV_SCHEDULER_MAX_NB_WORKER_CORES];
+       struct rte_ring *sched_enq_ring[RTE_MAX_LCORE];
+       struct rte_ring *sched_deq_ring[RTE_MAX_LCORE];
 };
 
 struct mc_scheduler_qp_ctx {
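The ring arrays are now sized by RTE_MAX_LCORE rather than a scheduler-private cap, which matches how workers identify themselves: each worker thread resolves its own lcore id to the index it uses into sched_enq_ring[]/sched_deq_ring[]. A simplified sketch of that lookup (wc_pool as the worker-core table in the PMD's scheduler_ctx is an assumption here; treat the field names as illustrative):

    uint32_t core_id = rte_lcore_id();
    int worker_idx = -1;
    uint16_t i;

    /* Worker slots are drawn from the lcore space, so RTE_MAX_LCORE is
     * the natural upper bound for both ring arrays.
     */
    for (i = 0; i < sched_ctx->nb_wc; i++) {
        if (sched_ctx->wc_pool[i] == core_id) {
            worker_idx = i;
            break;
        }
    }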
@@ -110,31 +110,25 @@ static uint16_t
 schedule_dequeue_ordering(void *qp, struct rte_crypto_op **ops,
                uint16_t nb_ops)
 {
-       struct rte_ring *order_ring = ((struct scheduler_qp_ctx *)qp)->order_ring;
+       struct rte_ring *order_ring =
+               ((struct scheduler_qp_ctx *)qp)->order_ring;
        struct rte_crypto_op *op;
-       uint32_t nb_objs = rte_ring_count(order_ring);
-       uint32_t nb_ops_to_deq = 0;
-       uint32_t nb_ops_deqd = 0;
-
-       if (nb_objs > nb_ops)
-               nb_objs = nb_ops;
+       uint32_t nb_objs, nb_ops_to_deq;
 
-       while (nb_ops_to_deq < nb_objs) {
-               SCHEDULER_GET_RING_OBJ(order_ring, nb_ops_to_deq, op);
+       nb_objs = rte_ring_dequeue_burst_start(order_ring, (void **)ops,
+               nb_ops, NULL);
+       if (nb_objs == 0)
+               return 0;
 
+       for (nb_ops_to_deq = 0; nb_ops_to_deq != nb_objs; nb_ops_to_deq++) {
+               op = ops[nb_ops_to_deq];
                if (!(op->status & CRYPTO_OP_STATUS_BIT_COMPLETE))
                        break;
-
                op->status &= ~CRYPTO_OP_STATUS_BIT_COMPLETE;
-               nb_ops_to_deq++;
        }
 
-       if (nb_ops_to_deq) {
-               nb_ops_deqd = rte_ring_sc_dequeue_bulk(order_ring,
-                               (void **)ops, nb_ops_to_deq, NULL);
-       }
-
-       return nb_ops_deqd;
+       rte_ring_dequeue_finish(order_ring, nb_ops_to_deq);
+       return nb_ops_to_deq;
 }
 
 static int
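The rewritten ordering dequeue replaces the count-then-bulk-dequeue sequence with the ring peek API from rte_ring_peek.h: rte_ring_dequeue_burst_start() reads up to nb_ops objects without moving the consumer head, and rte_ring_dequeue_finish() then commits only the prefix that is actually complete, leaving the rest on the ring for the next poll. A minimal sketch of the pattern, assuming a single-consumer ring and a caller-supplied completion predicate (dequeue_completed() and is_done() are hypothetical names):

    #include <rte_ring.h>
    #include <rte_ring_peek.h>

    /* Dequeue at most n objects, committing only the leading run that
     * satisfies is_done(); objects after the first incomplete one stay
     * on the ring.  Requires exclusive (single-consumer) access.
     */
    static unsigned int
    dequeue_completed(struct rte_ring *r, void **objs, unsigned int n,
            int (*is_done)(void *))
    {
        unsigned int avail, i;

        avail = rte_ring_dequeue_burst_start(r, objs, n, NULL);
        for (i = 0; i < avail; i++)
            if (!is_done(objs[i]))
                break;
        /* Committing fewer objects than _start returned is allowed. */
        rte_ring_dequeue_finish(r, i);
        return i;
    }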
@@ -178,7 +172,8 @@ mc_scheduler_worker(struct rte_cryptodev *dev)
                }
        }
        if (worker_idx == -1) {
-               CS_LOG_ERR("worker on core %u:cannot find worker index!\n", core_id);
+               CR_SCHED_LOG(ERR, "worker on core %u: cannot find worker index!",
+                       core_id);
                return -1;
        }
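CR_SCHED_LOG replaces CS_LOG_ERR here, routing messages through the scheduler PMD's own dynamic logtype instead of the shared CRYPTODEV one. Note that the trailing \n disappears from the format string because the macro appends one itself; its definition in scheduler_pmd_private.h is along these lines (approximate, for context only):

    extern int scheduler_logtype_driver;

    #define CR_SCHED_LOG(level, fmt, args...) \
        rte_log(RTE_LOG_ ## level, scheduler_logtype_driver, \
                "SCHEDULER: %s() line %u: " fmt "\n", \
                __func__, __LINE__, ##args)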
 
@@ -313,7 +308,7 @@ scheduler_config_qp(struct rte_cryptodev *dev, uint16_t qp_id)
        mc_qp_ctx = rte_zmalloc_socket(NULL, sizeof(*mc_qp_ctx), 0,
                        rte_socket_id());
        if (!mc_qp_ctx) {
-               CS_LOG_ERR("failed allocate memory for private queue pair");
+               CR_SCHED_LOG(ERR, "failed to allocate memory for private queue pair");
                return -ENOMEM;
        }
 
@@ -328,16 +323,18 @@ static int
 scheduler_create_private_ctx(struct rte_cryptodev *dev)
 {
        struct scheduler_ctx *sched_ctx = dev->data->dev_private;
-       struct mc_scheduler_ctx *mc_ctx;
+       struct mc_scheduler_ctx *mc_ctx = NULL;
        uint16_t i;
 
-       if (sched_ctx->private_ctx)
+       if (sched_ctx->private_ctx) {
                rte_free(sched_ctx->private_ctx);
+               sched_ctx->private_ctx = NULL;
+       }
 
        mc_ctx = rte_zmalloc_socket(NULL, sizeof(struct mc_scheduler_ctx), 0,
                        rte_socket_id());
        if (!mc_ctx) {
-               CS_LOG_ERR("failed allocate memory");
+               CR_SCHED_LOG(ERR, "failed to allocate memory");
                return -ENOMEM;
        }
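Clearing private_ctx after freeing it matters on the failure path: if the allocation below fails, no stale pointer is left behind for later teardown code to free a second time. rte_free(NULL) is a documented no-op, so the idiom can be applied unconditionally, as in this sketch (recreate_ctx() is a hypothetical helper, not part of the patch):

    static int
    recreate_ctx(struct scheduler_ctx *sched_ctx, size_t sz)
    {
        rte_free(sched_ctx->private_ctx);   /* no-op when already NULL */
        sched_ctx->private_ctx = NULL;      /* nothing dangles on failure */

        sched_ctx->private_ctx = rte_zmalloc_socket(NULL, sz, 0,
                rte_socket_id());
        return sched_ctx->private_ctx == NULL ? -ENOMEM : 0;
    }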
 
@@ -345,28 +342,51 @@ scheduler_create_private_ctx(struct rte_cryptodev *dev)
        for (i = 0; i < sched_ctx->nb_wc; i++) {
                char r_name[16];
 
-               snprintf(r_name, sizeof(r_name), MC_SCHED_ENQ_RING_NAME_PREFIX "%u", i);
-               mc_ctx->sched_enq_ring[i] = rte_ring_create(r_name, PER_SLAVE_BUFF_SIZE,
-                                       rte_socket_id(), RING_F_SC_DEQ | RING_F_SP_ENQ);
+               snprintf(r_name, sizeof(r_name), MC_SCHED_ENQ_RING_NAME_PREFIX
+                               "%u_%u", dev->data->dev_id, i);
+               mc_ctx->sched_enq_ring[i] = rte_ring_lookup(r_name);
                if (!mc_ctx->sched_enq_ring[i]) {
-                       CS_LOG_ERR("Cannot create ring for worker %u", i);
-                       return -1;
+                       mc_ctx->sched_enq_ring[i] = rte_ring_create(r_name,
+                                               PER_SLAVE_BUFF_SIZE,
+                                               rte_socket_id(),
+                                               RING_F_SC_DEQ | RING_F_SP_ENQ);
+                       if (!mc_ctx->sched_enq_ring[i]) {
+                               CR_SCHED_LOG(ERR, "Cannot create ring for worker %u",
+                                          i);
+                               goto exit;
+                       }
                }
-               snprintf(r_name, sizeof(r_name), MC_SCHED_DEQ_RING_NAME_PREFIX "%u", i);
-               mc_ctx->sched_deq_ring[i] = rte_ring_create(r_name, PER_SLAVE_BUFF_SIZE,
-                                       rte_socket_id(), RING_F_SC_DEQ | RING_F_SP_ENQ);
+               snprintf(r_name, sizeof(r_name), MC_SCHED_DEQ_RING_NAME_PREFIX
+                               "%u_%u", dev->data->dev_id, i);
+               mc_ctx->sched_deq_ring[i] = rte_ring_lookup(r_name);
                if (!mc_ctx->sched_deq_ring[i]) {
-                       CS_LOG_ERR("Cannot create ring for worker %u", i);
-                       return -1;
+                       mc_ctx->sched_deq_ring[i] = rte_ring_create(r_name,
+                                               PER_SLAVE_BUFF_SIZE,
+                                               rte_socket_id(),
+                                               RING_F_SC_DEQ | RING_F_SP_ENQ);
+                       if (!mc_ctx->sched_deq_ring[i]) {
+                               CR_SCHED_LOG(ERR, "Cannot create ring for worker %u",
+                                          i);
+                               goto exit;
+                       }
                }
        }
 
        sched_ctx->private_ctx = (void *)mc_ctx;
 
        return 0;
+
+exit:
+       for (i = 0; i < sched_ctx->nb_wc; i++) {
+               rte_ring_free(mc_ctx->sched_enq_ring[i]);
+               rte_ring_free(mc_ctx->sched_deq_ring[i]);
+       }
+       rte_free(mc_ctx);
+
+       return -1;
 }
 
-struct rte_cryptodev_scheduler_ops scheduler_mc_ops = {
+static struct rte_cryptodev_scheduler_ops scheduler_mc_ops = {
        slave_attach,
        slave_detach,
        scheduler_start,
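Two fixes are folded into the ring setup above: ring names now embed dev_id as well as the worker index, so two scheduler devices no longer collide in the ring namespace, and rte_ring_lookup() runs before rte_ring_create(), so a re-initialized device reuses its existing rings instead of failing on a name clash. The repeated lookup-or-create logic could be captured in a helper along these lines (hypothetical name, same flags as the patch):

    static struct rte_ring *
    ring_lookup_or_create(const char *name, unsigned int size)
    {
        struct rte_ring *r = rte_ring_lookup(name);

        if (r == NULL)
            r = rte_ring_create(name, size, rte_socket_id(),
                    RING_F_SP_ENQ | RING_F_SC_DEQ);
        return r;   /* NULL only if both lookup and creation failed */
    }

The new exit label also gives the function single-point cleanup: rte_ring_free() tolerates NULL, so looping over all nb_wc slots is safe even when only some rings were created.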
@@ -377,11 +397,11 @@ struct rte_cryptodev_scheduler_ops scheduler_mc_ops = {
        NULL    /* option_get */
 };
 
-struct rte_cryptodev_scheduler mc_scheduler = {
+static struct rte_cryptodev_scheduler mc_scheduler = {
                .name = "multicore-scheduler",
                .description = "scheduler which will run burst across multiple cpu cores",
                .mode = CDEV_SCHED_MODE_MULTICORE,
                .ops = &scheduler_mc_ops
 };
 
-struct rte_cryptodev_scheduler *multicore_scheduler = &mc_scheduler;
+struct rte_cryptodev_scheduler *crypto_scheduler_multicore = &mc_scheduler;
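With scheduler_mc_ops and mc_scheduler made static, the renamed crypto_scheduler_multicore pointer is this file's only exported symbol. Usage is unchanged: loading the scheduler onto a device goes through the public scheduler API, roughly as below (sketch; this is what mode_set does internally for CDEV_SCHED_MODE_MULTICORE):

    #include <rte_cryptodev_scheduler.h>

    extern struct rte_cryptodev_scheduler *crypto_scheduler_multicore;

    /* scheduler_id is the crypto device id of the scheduler PMD. */
    int ret = rte_cryptodev_scheduler_load_user_scheduler(scheduler_id,
            crypto_scheduler_multicore);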