X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=drivers%2Fcrypto%2Fscheduler%2Fscheduler_multicore.c;h=900ab4049d85eb6295e59051882f096ac907848c;hb=fdb67b84a52970ae06b54464a449b6f503f5a7ab;hp=b2ce44ceb5c0090ef4d480b10b86ae35f3111fc7;hpb=5566a3e35866ce9e5eacf886c27b460ebfcd6ee9;p=dpdk.git

diff --git a/drivers/crypto/scheduler/scheduler_multicore.c b/drivers/crypto/scheduler/scheduler_multicore.c
index b2ce44ceb5..900ab4049d 100644
--- a/drivers/crypto/scheduler/scheduler_multicore.c
+++ b/drivers/crypto/scheduler/scheduler_multicore.c
@@ -3,7 +3,7 @@
  */
 #include 
 
-#include 
+#include 
 #include 
 
 #include "rte_cryptodev_scheduler_operations.h"
@@ -21,13 +21,13 @@ struct mc_scheduler_ctx {
 	uint32_t num_workers;	/**< Number of workers polling */
 	uint32_t stop_signal;
 
-	struct rte_ring *sched_enq_ring[RTE_CRYPTODEV_SCHEDULER_MAX_NB_WORKER_CORES];
-	struct rte_ring *sched_deq_ring[RTE_CRYPTODEV_SCHEDULER_MAX_NB_WORKER_CORES];
+	struct rte_ring *sched_enq_ring[RTE_MAX_LCORE];
+	struct rte_ring *sched_deq_ring[RTE_MAX_LCORE];
 };
 
 struct mc_scheduler_qp_ctx {
-	struct scheduler_slave slaves[RTE_CRYPTODEV_SCHEDULER_MAX_NB_SLAVES];
-	uint32_t nb_slaves;
+	struct scheduler_worker workers[RTE_CRYPTODEV_SCHEDULER_MAX_NB_WORKERS];
+	uint32_t nb_workers;
 
 	uint32_t last_enq_worker_idx;
 	uint32_t last_deq_worker_idx;
@@ -110,43 +110,37 @@ static uint16_t
 schedule_dequeue_ordering(void *qp, struct rte_crypto_op **ops,
 		uint16_t nb_ops)
 {
-	struct rte_ring *order_ring = ((struct scheduler_qp_ctx *)qp)->order_ring;
+	struct rte_ring *order_ring =
+		((struct scheduler_qp_ctx *)qp)->order_ring;
 	struct rte_crypto_op *op;
-	uint32_t nb_objs = rte_ring_count(order_ring);
-	uint32_t nb_ops_to_deq = 0;
-	uint32_t nb_ops_deqd = 0;
-
-	if (nb_objs > nb_ops)
-		nb_objs = nb_ops;
+	uint32_t nb_objs, nb_ops_to_deq;
 
-	while (nb_ops_to_deq < nb_objs) {
-		SCHEDULER_GET_RING_OBJ(order_ring, nb_ops_to_deq, op);
+	nb_objs = rte_ring_dequeue_burst_start(order_ring, (void **)ops,
+			nb_ops, NULL);
+	if (nb_objs == 0)
+		return 0;
 
+	for (nb_ops_to_deq = 0; nb_ops_to_deq != nb_objs; nb_ops_to_deq++) {
+		op = ops[nb_ops_to_deq];
 		if (!(op->status & CRYPTO_OP_STATUS_BIT_COMPLETE))
 			break;
-
 		op->status &= ~CRYPTO_OP_STATUS_BIT_COMPLETE;
-		nb_ops_to_deq++;
 	}
 
-	if (nb_ops_to_deq) {
-		nb_ops_deqd = rte_ring_sc_dequeue_bulk(order_ring,
-				(void **)ops, nb_ops_to_deq, NULL);
-	}
-
-	return nb_ops_deqd;
+	rte_ring_dequeue_finish(order_ring, nb_ops_to_deq);
+	return nb_ops_to_deq;
 }
 
 static int
-slave_attach(__rte_unused struct rte_cryptodev *dev,
-		__rte_unused uint8_t slave_id)
+worker_attach(__rte_unused struct rte_cryptodev *dev,
+		__rte_unused uint8_t worker_id)
 {
 	return 0;
 }
 
 static int
-slave_detach(__rte_unused struct rte_cryptodev *dev,
-		__rte_unused uint8_t slave_id)
+worker_detach(__rte_unused struct rte_cryptodev *dev,
+		__rte_unused uint8_t worker_id)
 {
 	return 0;
 }
@@ -160,7 +154,7 @@ mc_scheduler_worker(struct rte_cryptodev *dev)
 	struct rte_ring *deq_ring;
 	uint32_t core_id = rte_lcore_id();
 	int i, worker_idx = -1;
-	struct scheduler_slave *slave;
+	struct scheduler_worker *worker;
 	struct rte_crypto_op *enq_ops[MC_SCHED_BUFFER_SIZE];
 	struct rte_crypto_op *deq_ops[MC_SCHED_BUFFER_SIZE];
 	uint16_t processed_ops;
@@ -178,19 +172,21 @@ mc_scheduler_worker(struct rte_cryptodev *dev)
 		}
 	}
 	if (worker_idx == -1) {
-		CS_LOG_ERR("worker on core %u:cannot find worker index!\n", core_id);
+		CR_SCHED_LOG(ERR, "worker on core %u:cannot find worker index!",
+			core_id);
 		return -1;
 	}
 
-	slave = &sched_ctx->slaves[worker_idx];
+	worker = &sched_ctx->workers[worker_idx];
 	enq_ring = mc_ctx->sched_enq_ring[worker_idx];
 	deq_ring = mc_ctx->sched_deq_ring[worker_idx];
 
 	while (!mc_ctx->stop_signal) {
 		if (pending_enq_ops) {
 			processed_ops =
-				rte_cryptodev_enqueue_burst(slave->dev_id,
-					slave->qp_id, &enq_ops[pending_enq_ops_idx],
+				rte_cryptodev_enqueue_burst(worker->dev_id,
+					worker->qp_id,
+					&enq_ops[pending_enq_ops_idx],
 					pending_enq_ops);
 			pending_enq_ops -= processed_ops;
 			pending_enq_ops_idx += processed_ops;
@@ -200,8 +196,8 @@ mc_scheduler_worker(struct rte_cryptodev *dev)
 					MC_SCHED_BUFFER_SIZE, NULL);
 			if (processed_ops) {
 				pending_enq_ops_idx = rte_cryptodev_enqueue_burst(
-						slave->dev_id, slave->qp_id,
-						enq_ops, processed_ops);
+						worker->dev_id, worker->qp_id,
+						enq_ops, processed_ops);
 				pending_enq_ops = processed_ops - pending_enq_ops_idx;
 				inflight_ops += pending_enq_ops_idx;
 			}
@@ -214,8 +210,9 @@ mc_scheduler_worker(struct rte_cryptodev *dev)
 			pending_deq_ops -= processed_ops;
 			pending_deq_ops_idx += processed_ops;
 		} else if (inflight_ops) {
-			processed_ops = rte_cryptodev_dequeue_burst(slave->dev_id,
-					slave->qp_id, deq_ops, MC_SCHED_BUFFER_SIZE);
+			processed_ops = rte_cryptodev_dequeue_burst(
+					worker->dev_id, worker->qp_id, deq_ops,
+					MC_SCHED_BUFFER_SIZE);
 			if (processed_ops) {
 				inflight_ops -= processed_ops;
 				if (reordering_enabled) {
@@ -269,16 +266,16 @@ scheduler_start(struct rte_cryptodev *dev)
 				qp_ctx->private_qp_ctx;
 		uint32_t j;
 
-		memset(mc_qp_ctx->slaves, 0,
-				RTE_CRYPTODEV_SCHEDULER_MAX_NB_SLAVES *
-				sizeof(struct scheduler_slave));
-		for (j = 0; j < sched_ctx->nb_slaves; j++) {
-			mc_qp_ctx->slaves[j].dev_id =
-					sched_ctx->slaves[j].dev_id;
-			mc_qp_ctx->slaves[j].qp_id = i;
+		memset(mc_qp_ctx->workers, 0,
+				RTE_CRYPTODEV_SCHEDULER_MAX_NB_WORKERS *
+				sizeof(struct scheduler_worker));
+		for (j = 0; j < sched_ctx->nb_workers; j++) {
+			mc_qp_ctx->workers[j].dev_id =
+					sched_ctx->workers[j].dev_id;
+			mc_qp_ctx->workers[j].qp_id = i;
 		}
 
-		mc_qp_ctx->nb_slaves = sched_ctx->nb_slaves;
+		mc_qp_ctx->nb_workers = sched_ctx->nb_workers;
 
 		mc_qp_ctx->last_enq_worker_idx = 0;
 		mc_qp_ctx->last_deq_worker_idx = 0;
@@ -313,7 +310,7 @@ scheduler_config_qp(struct rte_cryptodev *dev, uint16_t qp_id)
 	mc_qp_ctx = rte_zmalloc_socket(NULL, sizeof(*mc_qp_ctx), 0,
 			rte_socket_id());
 	if (!mc_qp_ctx) {
-		CS_LOG_ERR("failed allocate memory for private queue pair");
+		CR_SCHED_LOG(ERR, "failed allocate memory for private queue pair");
 		return -ENOMEM;
 	}
 
@@ -328,16 +325,18 @@ static int
 scheduler_create_private_ctx(struct rte_cryptodev *dev)
 {
 	struct scheduler_ctx *sched_ctx = dev->data->dev_private;
-	struct mc_scheduler_ctx *mc_ctx;
+	struct mc_scheduler_ctx *mc_ctx = NULL;
 	uint16_t i;
 
-	if (sched_ctx->private_ctx)
+	if (sched_ctx->private_ctx) {
 		rte_free(sched_ctx->private_ctx);
+		sched_ctx->private_ctx = NULL;
+	}
 
 	mc_ctx = rte_zmalloc_socket(NULL, sizeof(struct mc_scheduler_ctx), 0,
 			rte_socket_id());
 	if (!mc_ctx) {
-		CS_LOG_ERR("failed allocate memory");
+		CR_SCHED_LOG(ERR, "failed allocate memory");
 		return -ENOMEM;
 	}
 
@@ -345,30 +344,53 @@ scheduler_create_private_ctx(struct rte_cryptodev *dev)
 	for (i = 0; i < sched_ctx->nb_wc; i++) {
 		char r_name[16];
 
-		snprintf(r_name, sizeof(r_name), MC_SCHED_ENQ_RING_NAME_PREFIX "%u", i);
-		mc_ctx->sched_enq_ring[i] = rte_ring_create(r_name, PER_SLAVE_BUFF_SIZE,
-				rte_socket_id(), RING_F_SC_DEQ | RING_F_SP_ENQ);
+		snprintf(r_name, sizeof(r_name), MC_SCHED_ENQ_RING_NAME_PREFIX
+				"%u_%u", dev->data->dev_id, i);
+		mc_ctx->sched_enq_ring[i] = rte_ring_lookup(r_name);
 		if (!mc_ctx->sched_enq_ring[i]) {
-			CS_LOG_ERR("Cannot create ring for worker %u", i);
-			return -1;
+			mc_ctx->sched_enq_ring[i] = rte_ring_create(r_name,
+					PER_WORKER_BUFF_SIZE,
+					rte_socket_id(),
+					RING_F_SC_DEQ | RING_F_SP_ENQ);
+			if (!mc_ctx->sched_enq_ring[i]) {
+				CR_SCHED_LOG(ERR, "Cannot create ring for worker %u",
+					i);
+				goto exit;
+			}
 		}
-		snprintf(r_name, sizeof(r_name), MC_SCHED_DEQ_RING_NAME_PREFIX "%u", i);
-		mc_ctx->sched_deq_ring[i] = rte_ring_create(r_name, PER_SLAVE_BUFF_SIZE,
-				rte_socket_id(), RING_F_SC_DEQ | RING_F_SP_ENQ);
+		snprintf(r_name, sizeof(r_name), MC_SCHED_DEQ_RING_NAME_PREFIX
+				"%u_%u", dev->data->dev_id, i);
+		mc_ctx->sched_deq_ring[i] = rte_ring_lookup(r_name);
 		if (!mc_ctx->sched_deq_ring[i]) {
-			CS_LOG_ERR("Cannot create ring for worker %u", i);
-			return -1;
+			mc_ctx->sched_deq_ring[i] = rte_ring_create(r_name,
+					PER_WORKER_BUFF_SIZE,
+					rte_socket_id(),
+					RING_F_SC_DEQ | RING_F_SP_ENQ);
+			if (!mc_ctx->sched_deq_ring[i]) {
+				CR_SCHED_LOG(ERR, "Cannot create ring for worker %u",
+					i);
+				goto exit;
+			}
 		}
 	}
 
 	sched_ctx->private_ctx = (void *)mc_ctx;
 
 	return 0;
+
+exit:
+	for (i = 0; i < sched_ctx->nb_wc; i++) {
+		rte_ring_free(mc_ctx->sched_enq_ring[i]);
+		rte_ring_free(mc_ctx->sched_deq_ring[i]);
+	}
+	rte_free(mc_ctx);
+
+	return -1;
 }
 
-struct rte_cryptodev_scheduler_ops scheduler_mc_ops = {
-	slave_attach,
-	slave_detach,
+static struct rte_cryptodev_scheduler_ops scheduler_mc_ops = {
+	worker_attach,
+	worker_detach,
 	scheduler_start,
 	scheduler_stop,
 	scheduler_config_qp,
@@ -377,11 +399,11 @@ struct rte_cryptodev_scheduler_ops scheduler_mc_ops = {
 	NULL	/* option_get */
 };
 
-struct rte_cryptodev_scheduler mc_scheduler = {
+static struct rte_cryptodev_scheduler mc_scheduler = {
 		.name = "multicore-scheduler",
 		.description = "scheduler which will run burst across multiple cpu cores",
 		.mode = CDEV_SCHED_MODE_MULTICORE,
 		.ops = &scheduler_mc_ops
 };
 
-struct rte_cryptodev_scheduler *multicore_scheduler = &mc_scheduler;
+struct rte_cryptodev_scheduler *crypto_scheduler_multicore = &mc_scheduler;