This patch fixes the queue pair configuration of the scheduler PMD.
The queue pairs of a scheduler's slaves may be configured with
different nb_descriptors sizes, which the original code did not
allow. In addition, the maximum number of objects actually available
in a queue pair is one less than nb_descriptors, since a ring of
size n holds at most n - 1 objects. This patch fixes both issues.
Fixes: a783aa634410 ("crypto/scheduler: add packet size based mode")
Signed-off-by: Fan Zhang <roy.fan.zhang@intel.com>
Acked-by: Pablo de Lara <pablo.de.lara.guarch@intel.com>
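
The off-by-one in the second fix follows from the standard DPDK ring
behaviour: a ring created with size n holds at most n - 1 objects. The
standalone check below is illustrative only and not part of this patch
(the ring name "capacity_demo" is made up); it shows why max_nb_objs
has to be nb_descriptors - 1:

#include <stdio.h>
#include <rte_eal.h>
#include <rte_lcore.h>
#include <rte_ring.h>

int
main(int argc, char **argv)
{
	struct rte_ring *r;
	unsigned int stored = 0;

	if (rte_eal_init(argc, argv) < 0)
		return -1;

	/* 1024 descriptors, default ring flags */
	r = rte_ring_create("capacity_demo", 1024, rte_socket_id(), 0);
	if (r == NULL)
		return -1;

	/* enqueue dummy objects until the ring reports full */
	while (rte_ring_enqueue(r, NULL) == 0)
		stored++;

	/* prints 1023: one object fewer than the requested size */
	printf("usable capacity = %u\n", stored);

	rte_ring_free(r);
	return 0;
}
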
struct scheduler_slave primary_slave;
struct scheduler_slave secondary_slave;
uint32_t threshold;
uint8_t deq_idx;
} __rte_cache_aligned;
static uint16_t
schedule_enqueue(void *qp, struct rte_crypto_op **ops, uint16_t nb_ops)
{
- struct psd_scheduler_qp_ctx *qp_ctx =
- ((struct scheduler_qp_ctx *)qp)->private_qp_ctx;
+ struct scheduler_qp_ctx *qp_ctx = qp;
+ struct psd_scheduler_qp_ctx *psd_qp_ctx = qp_ctx->private_qp_ctx;
struct rte_crypto_op *sched_ops[NB_PKT_SIZE_SLAVES][nb_ops];
struct scheduler_session *sess;
uint32_t in_flight_ops[NB_PKT_SIZE_SLAVES] = {
- qp_ctx->primary_slave.nb_inflight_cops,
- qp_ctx->secondary_slave.nb_inflight_cops
+ psd_qp_ctx->primary_slave.nb_inflight_cops,
+ psd_qp_ctx->secondary_slave.nb_inflight_cops
};
struct psd_schedule_op enq_ops[NB_PKT_SIZE_SLAVES] = {
{PRIMARY_SLAVE_IDX, 0}, {SECONDARY_SLAVE_IDX, 0}
};
job_len += (ops[i]->sym->cipher.data.length == 0) *
ops[i]->sym->auth.data.length;
/* decide the target op based on the job length */
- p_enq_op = &enq_ops[!(job_len & qp_ctx->threshold)];
+ p_enq_op = &enq_ops[!(job_len & psd_qp_ctx->threshold)];
/* stop schedule cops before the queue is full, this shall
* prevent the failed enqueue
job_len = ops[i+1]->sym->cipher.data.length;
job_len += (ops[i+1]->sym->cipher.data.length == 0) *
ops[i+1]->sym->auth.data.length;
- p_enq_op = &enq_ops[!(job_len & qp_ctx->threshold)];
+ p_enq_op = &enq_ops[!(job_len & psd_qp_ctx->threshold)];
if (p_enq_op->pos + in_flight_ops[p_enq_op->slave_idx] ==
qp_ctx->max_nb_objs) {
job_len = ops[i+2]->sym->cipher.data.length;
job_len += (ops[i+2]->sym->cipher.data.length == 0) *
ops[i+2]->sym->auth.data.length;
- p_enq_op = &enq_ops[!(job_len & qp_ctx->threshold)];
+ p_enq_op = &enq_ops[!(job_len & psd_qp_ctx->threshold)];
if (p_enq_op->pos + in_flight_ops[p_enq_op->slave_idx] ==
qp_ctx->max_nb_objs) {
job_len = ops[i+3]->sym->cipher.data.length;
job_len += (ops[i+3]->sym->cipher.data.length == 0) *
ops[i+3]->sym->auth.data.length;
- p_enq_op = &enq_ops[!(job_len & qp_ctx->threshold)];
+ p_enq_op = &enq_ops[!(job_len & psd_qp_ctx->threshold)];
if (p_enq_op->pos + in_flight_ops[p_enq_op->slave_idx] ==
qp_ctx->max_nb_objs) {
job_len = ops[i]->sym->cipher.data.length;
job_len += (ops[i]->sym->cipher.data.length == 0) *
ops[i]->sym->auth.data.length;
- p_enq_op = &enq_ops[!(job_len & qp_ctx->threshold)];
+ p_enq_op = &enq_ops[!(job_len & psd_qp_ctx->threshold)];
if (p_enq_op->pos + in_flight_ops[p_enq_op->slave_idx] ==
qp_ctx->max_nb_objs) {
}
processed_ops_pri = rte_cryptodev_enqueue_burst(
- qp_ctx->primary_slave.dev_id,
- qp_ctx->primary_slave.qp_id,
+ psd_qp_ctx->primary_slave.dev_id,
+ psd_qp_ctx->primary_slave.qp_id,
sched_ops[PRIMARY_SLAVE_IDX],
enq_ops[PRIMARY_SLAVE_IDX].pos);
/* enqueue shall not fail as the slave queue is monitored */
RTE_ASSERT(processed_ops_pri == enq_ops[PRIMARY_SLAVE_IDX].pos);
- qp_ctx->primary_slave.nb_inflight_cops += processed_ops_pri;
+ psd_qp_ctx->primary_slave.nb_inflight_cops += processed_ops_pri;
processed_ops_sec = rte_cryptodev_enqueue_burst(
- qp_ctx->secondary_slave.dev_id,
- qp_ctx->secondary_slave.qp_id,
+ psd_qp_ctx->secondary_slave.dev_id,
+ psd_qp_ctx->secondary_slave.qp_id,
sched_ops[SECONDARY_SLAVE_IDX],
enq_ops[SECONDARY_SLAVE_IDX].pos);
RTE_ASSERT(processed_ops_sec == enq_ops[SECONDARY_SLAVE_IDX].pos);
- qp_ctx->secondary_slave.nb_inflight_cops += processed_ops_sec;
+ psd_qp_ctx->secondary_slave.nb_inflight_cops += processed_ops_sec;
return processed_ops_pri + processed_ops_sec;
}
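
Two details of the hunk above are worth spelling out. First, the slave
pick is branchless: enq_ops[] lists the primary slave at index 0, and
!(job_len & threshold) yields 0 exactly when the job length overlaps
the threshold mask, routing big packets to the primary slave and
everything else to the secondary. Second, scheduling to a slave stops
as soon as its pending plus in-flight ops reach the usable capacity,
which is what makes the RTE_ASSERT() on the burst enqueue safe. A
minimal sketch of both (the helper names are hypothetical):

#include <stdint.h>

/* index 0 = primary slave (big packets), index 1 = secondary slave */
static inline uint8_t
pick_slave_idx(uint32_t job_len, uint32_t threshold)
{
	return !(job_len & threshold);
}

/* true once pending + in-flight ops reach the usable queue capacity
 * (nb_descriptors - 1), the point past which an enqueue could fail */
static inline int
slave_qp_full(uint16_t pos, uint32_t in_flight, uint32_t max_nb_objs)
{
	return pos + in_flight == max_nb_objs;
}
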
ps_qp_ctx->secondary_slave.nb_inflight_cops = 0;
ps_qp_ctx->threshold = psd_ctx->threshold;
-
- ps_qp_ctx->max_nb_objs = sched_ctx->qp_conf.nb_descriptors;
}
if (sched_ctx->reordering_enabled) {
- sched_ctx->qp_conf.nb_descriptors = qp_conf->nb_descriptors;
-
/* Allocate the queue pair data structure. */
qp_ctx = rte_zmalloc_socket(name, sizeof(*qp_ctx), RTE_CACHE_LINE_SIZE,
socket_id);
if (qp_ctx == NULL)
return -ENOMEM;
+ /* The number of objects actually available is nb_descriptors - 1 */
+ qp_ctx->max_nb_objs = qp_conf->nb_descriptors - 1;
+
dev->data->queue_pairs[qp_id] = qp_ctx;
if (*sched_ctx->ops.config_queue_pair) {
uint8_t reordering_enabled;
- struct rte_cryptodev_qp_conf qp_conf;
-
char name[RTE_CRYPTODEV_SCHEDULER_NAME_MAX_LEN];
char description[RTE_CRYPTODEV_SCHEDULER_DESC_MAX_LEN];
} __rte_cache_aligned;
struct scheduler_qp_ctx {
void *private_qp_ctx;
+ uint32_t max_nb_objs;
+
struct rte_ring *order_ring;
uint32_t seqn;
} __rte_cache_aligned;
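
With max_nb_objs promoted into the generic scheduler_qp_ctx, a mode's
enqueue path only needs the single cast introduced in the first hunk
to reach both the shared and the mode-private state. A sketch of that
access pattern (hypothetical helper, assuming the structures above):

static inline uint32_t
scheduler_qp_capacity(void *qp)
{
	/* qp is the generic context; mode-private data such as the
	 * psd slaves hangs off private_qp_ctx, as in schedule_enqueue() */
	struct scheduler_qp_ctx *qp_ctx = qp;

	return qp_ctx->max_nb_objs;
}
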