X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=drivers%2Fcrypto%2Fscheduler%2Fscheduler_pkt_size_distr.c;h=57e330a7448afef802ec444ec28d36daf8466ee3;hb=a2999c7bfe395708a6a68ebb6f3199af7d25cffe;hp=96bf016147cb2e629495d59db3f450fdfa03ce5d;hpb=5566a3e35866ce9e5eacf886c27b460ebfcd6ee9;p=dpdk.git

diff --git a/drivers/crypto/scheduler/scheduler_pkt_size_distr.c b/drivers/crypto/scheduler/scheduler_pkt_size_distr.c
index 96bf016147..57e330a744 100644
--- a/drivers/crypto/scheduler/scheduler_pkt_size_distr.c
+++ b/drivers/crypto/scheduler/scheduler_pkt_size_distr.c
@@ -9,10 +9,10 @@
 #include "scheduler_pmd_private.h"
 
 #define DEF_PKT_SIZE_THRESHOLD			(0xffffff80)
-#define SLAVE_IDX_SWITCH_MASK			(0x01)
-#define PRIMARY_SLAVE_IDX			0
-#define SECONDARY_SLAVE_IDX			1
-#define NB_PKT_SIZE_SLAVES			2
+#define WORKER_IDX_SWITCH_MASK			(0x01)
+#define PRIMARY_WORKER_IDX			0
+#define SECONDARY_WORKER_IDX			1
+#define NB_PKT_SIZE_WORKERS			2
 
 /** pkt size based scheduler context */
 struct psd_scheduler_ctx {
@@ -21,15 +21,15 @@ struct psd_scheduler_ctx {
 
 /** pkt size based scheduler queue pair context */
 struct psd_scheduler_qp_ctx {
-	struct scheduler_slave primary_slave;
-	struct scheduler_slave secondary_slave;
+	struct scheduler_worker primary_worker;
+	struct scheduler_worker secondary_worker;
 	uint32_t threshold;
 	uint8_t deq_idx;
 } __rte_cache_aligned;
 
 /** scheduling operation variables' wrapping */
 struct psd_schedule_op {
-	uint8_t slave_idx;
+	uint8_t worker_idx;
 	uint16_t pos;
 };
 
@@ -38,13 +38,13 @@ schedule_enqueue(void *qp, struct rte_crypto_op **ops, uint16_t nb_ops)
 {
 	struct scheduler_qp_ctx *qp_ctx = qp;
 	struct psd_scheduler_qp_ctx *psd_qp_ctx = qp_ctx->private_qp_ctx;
-	struct rte_crypto_op *sched_ops[NB_PKT_SIZE_SLAVES][nb_ops];
-	uint32_t in_flight_ops[NB_PKT_SIZE_SLAVES] = {
-		psd_qp_ctx->primary_slave.nb_inflight_cops,
-		psd_qp_ctx->secondary_slave.nb_inflight_cops
+	struct rte_crypto_op *sched_ops[NB_PKT_SIZE_WORKERS][nb_ops];
+	uint32_t in_flight_ops[NB_PKT_SIZE_WORKERS] = {
+		psd_qp_ctx->primary_worker.nb_inflight_cops,
+		psd_qp_ctx->secondary_worker.nb_inflight_cops
 	};
-	struct psd_schedule_op enq_ops[NB_PKT_SIZE_SLAVES] = {
-		{PRIMARY_SLAVE_IDX, 0}, {SECONDARY_SLAVE_IDX, 0}
+	struct psd_schedule_op enq_ops[NB_PKT_SIZE_WORKERS] = {
+		{PRIMARY_WORKER_IDX, 0}, {SECONDARY_WORKER_IDX, 0}
 	};
 	struct psd_schedule_op *p_enq_op;
 	uint16_t i, processed_ops_pri = 0, processed_ops_sec = 0;
@@ -80,13 +80,13 @@ schedule_enqueue(void *qp, struct rte_crypto_op **ops, uint16_t nb_ops)
 		/* stop schedule cops before the queue is full, this shall
 		 * prevent the failed enqueue
 		 */
-		if (p_enq_op->pos + in_flight_ops[p_enq_op->slave_idx] ==
+		if (p_enq_op->pos + in_flight_ops[p_enq_op->worker_idx] ==
 				qp_ctx->max_nb_objs) {
 			i = nb_ops;
 			break;
 		}
 
-		sched_ops[p_enq_op->slave_idx][p_enq_op->pos] = ops[i];
+		sched_ops[p_enq_op->worker_idx][p_enq_op->pos] = ops[i];
 		p_enq_op->pos++;
 
 		job_len = ops[i+1]->sym->cipher.data.length;
@@ -94,13 +94,13 @@ schedule_enqueue(void *qp, struct rte_crypto_op **ops, uint16_t nb_ops)
 				ops[i+1]->sym->auth.data.length;
 		p_enq_op = &enq_ops[!(job_len & psd_qp_ctx->threshold)];
 
-		if (p_enq_op->pos + in_flight_ops[p_enq_op->slave_idx] ==
+		if (p_enq_op->pos + in_flight_ops[p_enq_op->worker_idx] ==
 				qp_ctx->max_nb_objs) {
 			i = nb_ops;
 			break;
 		}
 
-		sched_ops[p_enq_op->slave_idx][p_enq_op->pos] = ops[i+1];
+		sched_ops[p_enq_op->worker_idx][p_enq_op->pos] = ops[i+1];
 		p_enq_op->pos++;
 
 		job_len = ops[i+2]->sym->cipher.data.length;
@@ -108,13 +108,13 @@ schedule_enqueue(void *qp, struct rte_crypto_op **ops, uint16_t nb_ops)
 				ops[i+2]->sym->auth.data.length;
 		p_enq_op = &enq_ops[!(job_len & psd_qp_ctx->threshold)];
 
-		if (p_enq_op->pos + in_flight_ops[p_enq_op->slave_idx] ==
+		if (p_enq_op->pos + in_flight_ops[p_enq_op->worker_idx] ==
 				qp_ctx->max_nb_objs) {
 			i = nb_ops;
 			break;
 		}
 
-		sched_ops[p_enq_op->slave_idx][p_enq_op->pos] = ops[i+2];
+		sched_ops[p_enq_op->worker_idx][p_enq_op->pos] = ops[i+2];
 		p_enq_op->pos++;
 
 		job_len = ops[i+3]->sym->cipher.data.length;
@@ -122,13 +122,13 @@ schedule_enqueue(void *qp, struct rte_crypto_op **ops, uint16_t nb_ops)
 				ops[i+3]->sym->auth.data.length;
 		p_enq_op = &enq_ops[!(job_len & psd_qp_ctx->threshold)];
 
-		if (p_enq_op->pos + in_flight_ops[p_enq_op->slave_idx] ==
+		if (p_enq_op->pos + in_flight_ops[p_enq_op->worker_idx] ==
 				qp_ctx->max_nb_objs) {
 			i = nb_ops;
 			break;
 		}
 
-		sched_ops[p_enq_op->slave_idx][p_enq_op->pos] = ops[i+3];
+		sched_ops[p_enq_op->worker_idx][p_enq_op->pos] = ops[i+3];
 		p_enq_op->pos++;
 	}
 
@@ -138,34 +138,34 @@ schedule_enqueue(void *qp, struct rte_crypto_op **ops, uint16_t nb_ops)
 				ops[i]->sym->auth.data.length;
 		p_enq_op = &enq_ops[!(job_len & psd_qp_ctx->threshold)];
 
-		if (p_enq_op->pos + in_flight_ops[p_enq_op->slave_idx] ==
+		if (p_enq_op->pos + in_flight_ops[p_enq_op->worker_idx] ==
 				qp_ctx->max_nb_objs) {
 			i = nb_ops;
 			break;
 		}
 
-		sched_ops[p_enq_op->slave_idx][p_enq_op->pos] = ops[i];
+		sched_ops[p_enq_op->worker_idx][p_enq_op->pos] = ops[i];
 		p_enq_op->pos++;
 	}
 
 	processed_ops_pri = rte_cryptodev_enqueue_burst(
-			psd_qp_ctx->primary_slave.dev_id,
-			psd_qp_ctx->primary_slave.qp_id,
-			sched_ops[PRIMARY_SLAVE_IDX],
-			enq_ops[PRIMARY_SLAVE_IDX].pos);
-	/* enqueue shall not fail as the slave queue is monitored */
-	RTE_ASSERT(processed_ops_pri == enq_ops[PRIMARY_SLAVE_IDX].pos);
+			psd_qp_ctx->primary_worker.dev_id,
+			psd_qp_ctx->primary_worker.qp_id,
+			sched_ops[PRIMARY_WORKER_IDX],
+			enq_ops[PRIMARY_WORKER_IDX].pos);
+	/* enqueue shall not fail as the worker queue is monitored */
+	RTE_ASSERT(processed_ops_pri == enq_ops[PRIMARY_WORKER_IDX].pos);
 
-	psd_qp_ctx->primary_slave.nb_inflight_cops += processed_ops_pri;
+	psd_qp_ctx->primary_worker.nb_inflight_cops += processed_ops_pri;
 
 	processed_ops_sec = rte_cryptodev_enqueue_burst(
-			psd_qp_ctx->secondary_slave.dev_id,
-			psd_qp_ctx->secondary_slave.qp_id,
-			sched_ops[SECONDARY_SLAVE_IDX],
-			enq_ops[SECONDARY_SLAVE_IDX].pos);
-	RTE_ASSERT(processed_ops_sec == enq_ops[SECONDARY_SLAVE_IDX].pos);
+			psd_qp_ctx->secondary_worker.dev_id,
+			psd_qp_ctx->secondary_worker.qp_id,
+			sched_ops[SECONDARY_WORKER_IDX],
+			enq_ops[SECONDARY_WORKER_IDX].pos);
+	RTE_ASSERT(processed_ops_sec == enq_ops[SECONDARY_WORKER_IDX].pos);
 
-	psd_qp_ctx->secondary_slave.nb_inflight_cops += processed_ops_sec;
+	psd_qp_ctx->secondary_worker.nb_inflight_cops += processed_ops_sec;
 
 	return processed_ops_pri + processed_ops_sec;
 }
@@ -191,33 +191,33 @@ schedule_dequeue(void *qp, struct rte_crypto_op **ops, uint16_t nb_ops)
 {
 	struct psd_scheduler_qp_ctx *qp_ctx =
 			((struct scheduler_qp_ctx *)qp)->private_qp_ctx;
-	struct scheduler_slave *slaves[NB_PKT_SIZE_SLAVES] = {
-			&qp_ctx->primary_slave, &qp_ctx->secondary_slave};
-	struct scheduler_slave *slave = slaves[qp_ctx->deq_idx];
+	struct scheduler_worker *workers[NB_PKT_SIZE_WORKERS] = {
+			&qp_ctx->primary_worker, &qp_ctx->secondary_worker};
+	struct scheduler_worker *worker = workers[qp_ctx->deq_idx];
 	uint16_t nb_deq_ops_pri = 0, nb_deq_ops_sec = 0;
 
-	if (slave->nb_inflight_cops) {
-		nb_deq_ops_pri = rte_cryptodev_dequeue_burst(slave->dev_id,
-			slave->qp_id, ops, nb_ops);
-		slave->nb_inflight_cops -= nb_deq_ops_pri;
+	if (worker->nb_inflight_cops) {
+		nb_deq_ops_pri = rte_cryptodev_dequeue_burst(worker->dev_id,
+			worker->qp_id, ops, nb_ops);
+		worker->nb_inflight_cops -= nb_deq_ops_pri;
 	}
 
-	qp_ctx->deq_idx = (~qp_ctx->deq_idx) & SLAVE_IDX_SWITCH_MASK;
+	qp_ctx->deq_idx = (~qp_ctx->deq_idx) & WORKER_IDX_SWITCH_MASK;
 
 	if (nb_deq_ops_pri == nb_ops)
 		return nb_deq_ops_pri;
 
-	slave = slaves[qp_ctx->deq_idx];
+	worker = workers[qp_ctx->deq_idx];
 
-	if (slave->nb_inflight_cops) {
-		nb_deq_ops_sec = rte_cryptodev_dequeue_burst(slave->dev_id,
-				slave->qp_id, &ops[nb_deq_ops_pri],
+	if (worker->nb_inflight_cops) {
+		nb_deq_ops_sec = rte_cryptodev_dequeue_burst(worker->dev_id,
+				worker->qp_id, &ops[nb_deq_ops_pri],
 				nb_ops - nb_deq_ops_pri);
-		slave->nb_inflight_cops -= nb_deq_ops_sec;
+		worker->nb_inflight_cops -= nb_deq_ops_sec;
 
-		if (!slave->nb_inflight_cops)
+		if (!worker->nb_inflight_cops)
 			qp_ctx->deq_idx = (~qp_ctx->deq_idx) &
-					SLAVE_IDX_SWITCH_MASK;
+					WORKER_IDX_SWITCH_MASK;
 	}
 
 	return nb_deq_ops_pri + nb_deq_ops_sec;
@@ -236,15 +236,15 @@ schedule_dequeue_ordering(void *qp, struct rte_crypto_op **ops,
 }
 
 static int
-slave_attach(__rte_unused struct rte_cryptodev *dev,
-		__rte_unused uint8_t slave_id)
+worker_attach(__rte_unused struct rte_cryptodev *dev,
+		__rte_unused uint8_t worker_id)
 {
 	return 0;
 }
 
 static int
-slave_detach(__rte_unused struct rte_cryptodev *dev,
-		__rte_unused uint8_t slave_id)
+worker_detach(__rte_unused struct rte_cryptodev *dev,
+		__rte_unused uint8_t worker_id)
 {
 	return 0;
 }
@@ -256,9 +256,9 @@ scheduler_start(struct rte_cryptodev *dev)
 	struct psd_scheduler_ctx *psd_ctx = sched_ctx->private_ctx;
 	uint16_t i;
 
-	/* for packet size based scheduler, nb_slaves have to >= 2 */
-	if (sched_ctx->nb_slaves < NB_PKT_SIZE_SLAVES) {
-		CS_LOG_ERR("not enough slaves to start");
+	/* for packet size based scheduler, nb_workers have to >= 2 */
+	if (sched_ctx->nb_workers < NB_PKT_SIZE_WORKERS) {
+		CR_SCHED_LOG(ERR, "not enough workers to start");
 		return -1;
 	}
 
@@ -267,15 +267,15 @@ scheduler_start(struct rte_cryptodev *dev)
 		struct psd_scheduler_qp_ctx *ps_qp_ctx =
 				qp_ctx->private_qp_ctx;
 
-		ps_qp_ctx->primary_slave.dev_id =
-				sched_ctx->slaves[PRIMARY_SLAVE_IDX].dev_id;
-		ps_qp_ctx->primary_slave.qp_id = i;
-		ps_qp_ctx->primary_slave.nb_inflight_cops = 0;
+		ps_qp_ctx->primary_worker.dev_id =
+				sched_ctx->workers[PRIMARY_WORKER_IDX].dev_id;
+		ps_qp_ctx->primary_worker.qp_id = i;
+		ps_qp_ctx->primary_worker.nb_inflight_cops = 0;
 
-		ps_qp_ctx->secondary_slave.dev_id =
-				sched_ctx->slaves[SECONDARY_SLAVE_IDX].dev_id;
-		ps_qp_ctx->secondary_slave.qp_id = i;
-		ps_qp_ctx->secondary_slave.nb_inflight_cops = 0;
+		ps_qp_ctx->secondary_worker.dev_id =
+				sched_ctx->workers[SECONDARY_WORKER_IDX].dev_id;
+		ps_qp_ctx->secondary_worker.qp_id = i;
+		ps_qp_ctx->secondary_worker.nb_inflight_cops = 0;
 
 		ps_qp_ctx->threshold = psd_ctx->threshold;
 	}
@@ -300,9 +300,9 @@ scheduler_stop(struct rte_cryptodev *dev)
 		struct scheduler_qp_ctx *qp_ctx = dev->data->queue_pairs[i];
 		struct psd_scheduler_qp_ctx *ps_qp_ctx = qp_ctx->private_qp_ctx;
 
-		if (ps_qp_ctx->primary_slave.nb_inflight_cops +
-				ps_qp_ctx->secondary_slave.nb_inflight_cops) {
-			CS_LOG_ERR("Some crypto ops left in slave queue");
+		if (ps_qp_ctx->primary_worker.nb_inflight_cops +
+				ps_qp_ctx->secondary_worker.nb_inflight_cops) {
+			CR_SCHED_LOG(ERR, "Some crypto ops left in worker queue");
 			return -1;
 		}
 	}
@@ -319,7 +319,7 @@ scheduler_config_qp(struct rte_cryptodev *dev, uint16_t qp_id)
 	ps_qp_ctx = rte_zmalloc_socket(NULL, sizeof(*ps_qp_ctx), 0,
 			rte_socket_id());
 	if (!ps_qp_ctx) {
-		CS_LOG_ERR("failed allocate memory for private queue pair");
+		CR_SCHED_LOG(ERR, "failed allocate memory for private queue pair");
 		return -ENOMEM;
 	}
 
@@ -334,13 +334,15 @@ scheduler_create_private_ctx(struct rte_cryptodev *dev)
 	struct scheduler_ctx *sched_ctx = dev->data->dev_private;
 	struct psd_scheduler_ctx *psd_ctx;
 
-	if (sched_ctx->private_ctx)
+	if (sched_ctx->private_ctx) {
 		rte_free(sched_ctx->private_ctx);
+		sched_ctx->private_ctx = NULL;
+	}
 
 	psd_ctx = rte_zmalloc_socket(NULL, sizeof(struct psd_scheduler_ctx), 0,
 			rte_socket_id());
 	if (!psd_ctx) {
-		CS_LOG_ERR("failed allocate memory");
+		CR_SCHED_LOG(ERR, "failed allocate memory");
 		return -ENOMEM;
 	}
 
@@ -360,14 +362,14 @@ scheduler_option_set(struct rte_cryptodev *dev, uint32_t option_type,
 
 	if ((enum rte_cryptodev_schedule_option_type)option_type !=
 			CDEV_SCHED_OPTION_THRESHOLD) {
-		CS_LOG_ERR("Option not supported");
+		CR_SCHED_LOG(ERR, "Option not supported");
 		return -EINVAL;
 	}
 
 	threshold = ((struct rte_cryptodev_scheduler_threshold_option *)
 			option)->threshold;
 	if (!rte_is_power_of_2(threshold)) {
-		CS_LOG_ERR("Threshold is not power of 2");
+		CR_SCHED_LOG(ERR, "Threshold is not power of 2");
 		return -EINVAL;
 	}
 
@@ -386,7 +388,7 @@ scheduler_option_get(struct rte_cryptodev *dev, uint32_t option_type,
 
 	if ((enum rte_cryptodev_schedule_option_type)option_type !=
 			CDEV_SCHED_OPTION_THRESHOLD) {
-		CS_LOG_ERR("Option not supported");
+		CR_SCHED_LOG(ERR, "Option not supported");
 		return -EINVAL;
 	}
 
@@ -396,9 +398,9 @@ scheduler_option_get(struct rte_cryptodev *dev, uint32_t option_type,
 	return 0;
 }
 
-struct rte_cryptodev_scheduler_ops scheduler_ps_ops = {
-	slave_attach,
-	slave_detach,
+static struct rte_cryptodev_scheduler_ops scheduler_ps_ops = {
+	worker_attach,
+	worker_detach,
 	scheduler_start,
 	scheduler_stop,
 	scheduler_config_qp,
@@ -407,7 +409,7 @@ struct rte_cryptodev_scheduler_ops scheduler_ps_ops = {
 	scheduler_option_get
 };
 
-struct rte_cryptodev_scheduler psd_scheduler = {
+static struct rte_cryptodev_scheduler psd_scheduler = {
 	.name = "packet-size-based-scheduler",
 	.description = "scheduler which will distribute crypto op "
 			"burst based on the packet size",
@@ -415,4 +417,4 @@ struct rte_cryptodev_scheduler psd_scheduler = {
 	.ops = &scheduler_ps_ops
 };
 
-struct rte_cryptodev_scheduler *pkt_size_based_distr_scheduler = &psd_scheduler;
+struct rte_cryptodev_scheduler *crypto_scheduler_pkt_size_based_distr = &psd_scheduler;
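
Two bit tricks in the code above are easy to miss. The enqueue path classifies each op with enq_ops[!(job_len & psd_qp_ctx->threshold)]: the threshold is held in mask form (the default DEF_PKT_SIZE_THRESHOLD of 0xffffff80 corresponds to a 128-byte boundary), so for a power-of-two threshold t -- the only values scheduler_option_set accepts -- the AND with ~(t - 1) is non-zero exactly when job_len >= t, steering large jobs to the primary worker (index 0) and small ones to the secondary (index 1). The dequeue path alternates between the two workers by toggling deq_idx with (~deq_idx) & WORKER_IDX_SWITCH_MASK. The standalone sketch below demonstrates both: the macro values are copied from the patch, the mask derivation ~(threshold - 1) is an assumption consistent with the default value, and the program itself is purely illustrative, not DPDK code.

/*
 * Standalone sketch (not part of the patch): how the packet-size scheduler
 * splits a burst between its two workers and alternates dequeues.  Macro
 * values mirror the definitions above; everything else is illustrative.
 */
#include <stdint.h>
#include <stdio.h>

#define DEF_PKT_SIZE_THRESHOLD	(0xffffff80)	/* mask form: ~(128 - 1) */
#define WORKER_IDX_SWITCH_MASK	(0x01)
#define PRIMARY_WORKER_IDX	0
#define SECONDARY_WORKER_IDX	1

int main(void)
{
	uint32_t threshold = DEF_PKT_SIZE_THRESHOLD;
	unsigned int job_lens[] = { 64, 127, 128, 1500 };
	unsigned int deq_idx = 0;
	unsigned int i;

	for (i = 0; i < 4; i++) {
		/* Same expression as enq_ops[!(job_len & psd_qp_ctx->threshold)]
		 * in schedule_enqueue(): any bit at or above the threshold bit
		 * makes the AND non-zero, so ! yields 0 (primary) for large
		 * jobs and 1 (secondary) for small ones.
		 */
		unsigned int idx = !(job_lens[i] & threshold);

		printf("job_len %4u -> %s worker\n", job_lens[i],
			idx == PRIMARY_WORKER_IDX ? "primary" : "secondary");
	}

	/* Same toggle as in schedule_dequeue(): complement-and-mask flips
	 * deq_idx between 0 and 1 on every call.
	 */
	deq_idx = (~deq_idx) & WORKER_IDX_SWITCH_MASK;
	printf("next dequeue polls worker %u first\n", deq_idx);

	return 0;
}

Built with a plain cc, the sketch sends lengths 64 and 127 to the secondary worker and 128 and 1500 to the primary, matching the 128-byte default boundary; the power-of-two restriction is what lets a single AND replace a compare per op.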