int lb_mlen;
};
-struct rid {
- /** Request id of a crypto operation */
- uintptr_t rid;
-};
-
/*
* Pending queue structure
*
/** Pending requests count */
uint64_t pending_count;
/** Array of pending requests */
- struct rid *rid_queue;
+ uintptr_t *req_queue;
/** Tail of queue to be used for enqueue */
uint16_t enq_tail;
/** Head of queue to be used for dequeue */
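For reference, the change leaves the pending queue as a bare ring of request pointers. A minimal sketch of the resulting structure, assembled from the fields visible in the hunk above (the deq_head member and the closing brace are implied by the surrounding code rather than shown here):

#include <stdint.h>

/* Pending queue after the change: a ring of raw request pointers,
 * indexed by enq_tail (producer) and deq_head (consumer). */
struct pending_queue {
	/** Pending requests count */
	uint64_t pending_count;
	/** Array of pending requests */
	uintptr_t *req_queue;
	/** Tail of queue to be used for enqueue */
	uint16_t enq_tail;
	/** Head of queue to be used for dequeue */
	uint16_t deq_head;
};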
len = chunks * RTE_ALIGN(sizeof(struct command_chunk), 8);
/* For pending queue */
- len += qlen * RTE_ALIGN(sizeof(struct rid), 8);
+ len += qlen * sizeof(uintptr_t);
/* So that instruction queues start as pg size aligned */
len = RTE_ALIGN(len, pg_sz);
}
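On 64-bit targets the per-entry size is unchanged: struct rid wrapped a single uintptr_t, so RTE_ALIGN(sizeof(struct rid), 8) and sizeof(uintptr_t) both evaluate to 8, and qlen entries still occupy qlen * 8 bytes (on 32-bit builds the old expression padded each 4-byte entry to 8, so the new sizing is merely tighter). A compile-time note of that assumption, not part of the patch:

#include <stdint.h>

/* Sketch only: records the 8-byte-pointer assumption behind the
 * "same reservation size" reasoning above. */
_Static_assert(sizeof(uintptr_t) == 8, "sizing argument assumes LP64");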
/* Pending queue setup */
- cptvf->pqueue.rid_queue = (struct rid *)mem;
+ cptvf->pqueue.req_queue = (uintptr_t *)mem;
cptvf->pqueue.enq_tail = 0;
cptvf->pqueue.deq_head = 0;
cptvf->pqueue.pending_count = 0;
- mem += qlen * RTE_ALIGN(sizeof(struct rid), 8);
- len -= qlen * RTE_ALIGN(sizeof(struct rid), 8);
- dma_addr += qlen * RTE_ALIGN(sizeof(struct rid), 8);
+ mem += qlen * sizeof(uintptr_t);
+ len -= qlen * sizeof(uintptr_t);
+ dma_addr += qlen * sizeof(uintptr_t);
/* Alignment wastage */
used_len = alloc_len - len;
/* Default mode of software queue */
mark_cpt_inst(instance);
- pqueue->rid_queue[pqueue->enq_tail].rid = (uintptr_t)user_req;
+ pqueue->req_queue[pqueue->enq_tail] = (uintptr_t)user_req;
/* We will use soft queue length here to limit requests */
MOD_INC(pqueue->enq_tail, DEFAULT_CMD_QLEN);
struct cpt_instance *instance = (struct cpt_instance *)qptr;
struct cpt_request_info *user_req;
struct cpt_vf *cptvf = (struct cpt_vf *)instance;
- struct rid *rid_e;
uint8_t cc[nb_ops];
int i, count, pcount;
uint8_t ret;
count = (nb_ops > pcount) ? pcount : nb_ops;
for (i = 0; i < count; i++) {
- rid_e = &pqueue->rid_queue[pqueue->deq_head];
- user_req = (struct cpt_request_info *)(rid_e->rid);
+ user_req = (struct cpt_request_info *)
+ pqueue->req_queue[pqueue->deq_head];
- if (likely((i+1) < count))
- rte_prefetch_non_temporal((void *)rid_e[1].rid);
+ if (likely((i+1) < count)) {
+ rte_prefetch_non_temporal(
+ (void *)pqueue->req_queue[pqueue->deq_head + 1]);
+ }
ret = check_nb_command_id(user_req, instance);
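Taken together, the enqueue and dequeue hunks operate a plain pointer ring. A condensed sketch of that discipline, using the driver's MOD_INC wrap-around macro and DEFAULT_CMD_QLEN ring size; the helper names and the pending_count bookkeeping are illustrative, not taken from the patch:

/* Hypothetical helpers showing the ring discipline after the change. */
static inline void
pend_q_push(struct pending_queue *pqueue, struct cpt_request_info *user_req)
{
	/* Store the request as a raw pointer and advance the tail with wrap. */
	pqueue->req_queue[pqueue->enq_tail] = (uintptr_t)user_req;
	MOD_INC(pqueue->enq_tail, DEFAULT_CMD_QLEN);
	pqueue->pending_count++;
}

static inline struct cpt_request_info *
pend_q_pop(struct pending_queue *pqueue)
{
	/* Recover the pointer with a cast and advance the head with wrap. */
	struct cpt_request_info *user_req = (struct cpt_request_info *)
			pqueue->req_queue[pqueue->deq_head];

	MOD_INC(pqueue->deq_head, DEFAULT_CMD_QLEN);
	pqueue->pending_count--;
	return user_req;
}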
size_div40 = (iq_len + 40 - 1) / 40 + 1;
/* For pending queue */
- len = iq_len * RTE_ALIGN(sizeof(struct rid), 8);
+ len = iq_len * sizeof(uintptr_t);
/* Space for instruction group memory */
len += size_div40 * 16;
}
/* Initialize pending queue */
- qp->pend_q.rid_queue = (struct rid *)va;
+ qp->pend_q.req_queue = (uintptr_t *)va;
qp->pend_q.enq_tail = 0;
qp->pend_q.deq_head = 0;
qp->pend_q.pending_count = 0;
- used_len = iq_len * RTE_ALIGN(sizeof(struct rid), 8);
+ used_len = iq_len * sizeof(uintptr_t);
used_len += size_div40 * 16;
used_len = RTE_ALIGN(used_len, pg_sz);
iova += used_len;
lmt_status = otx2_lmt_submit(qp->lf_nq_reg);
} while (lmt_status == 0);
- pend_q->rid_queue[pend_q->enq_tail].rid = (uintptr_t)req;
+ pend_q->req_queue[pend_q->enq_tail] = (uintptr_t)req;
/* We will use soft queue length here to limit requests */
MOD_INC(pend_q->enq_tail, OTX2_CPT_DEFAULT_CMD_QLEN);
struct cpt_request_info *req;
struct rte_crypto_op *cop;
uint8_t cc[nb_ops];
- struct rid *rid;
uintptr_t *rsp;
void *metabuf;
nb_ops = nb_pending;
for (i = 0; i < nb_ops; i++) {
- rid = &pend_q->rid_queue[pend_q->deq_head];
- req = (struct cpt_request_info *)(rid->rid);
+ req = (struct cpt_request_info *)
+ pend_q->req_queue[pend_q->deq_head];
cc[i] = otx2_cpt_compcode_get(req);