#include <rte_cryptodev.h>
-#include <rte_cryptodev_pmd.h>
+#include <cryptodev_pmd.h>
#include <rte_event_crypto_adapter.h>
#include <rte_ip.h>
#include <rte_vect.h>
if (op->sess_type == RTE_CRYPTO_OP_WITH_SESSION) {
asym_op = op->asym;
- sess = get_asym_session_private_data(
- asym_op->session, cn9k_cryptodev_driver_id);
+ sess = (struct cnxk_ae_sess *)
+ asym_op->session->sess_private_data;
ret = cnxk_ae_enqueue(qp, op, infl_req, inst, sess);
inst->w7.u64 = sess->cpt_inst_w7;
} else {
do {
/* Copy CPT command to LMTLINE */
- roc_lmt_mov((void *)lmtline, inst, 2);
+ roc_lmt_mov64((void *)lmtline, inst);
/*
* Make sure compiler does not reorder memcpy and ldeor.
*/
rte_io_wmb();
uint16_t nb_allowed, count = 0;
struct cnxk_cpt_qp *qp = qptr;
struct pending_queue *pend_q;
- uint64_t enq_tail;
+ uint64_t head;
int ret;
- const uint32_t nb_desc = qp->lf.nb_desc;
- const uint64_t lmt_base = qp->lf.lmt_base;
- const uint64_t io_addr = qp->lf.io_addr;
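+ /* Prebuilt "not done" result word (CPT_COMP_NOT_DONE is 0, so which union member builds it is immaterial); lets a slot's result be reset with one 64-bit store */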
+ const union cpt_res_s res = {
+ .cn10k.compcode = CPT_COMP_NOT_DONE,
+ };
pend_q = &qp->pend_q;
+ const uint64_t lmt_base = qp->lf.lmt_base;
+ const uint64_t io_addr = qp->lf.io_addr;
+ const uint64_t pq_mask = pend_q->pq_mask;
+
/* Clear w0, w2, w3 of both inst */
inst[0].w0.u64 = 0;
inst[0].w2.u64 = 0;
inst[0].w3.u64 = 0;
inst[1].w0.u64 = 0;
inst[1].w2.u64 = 0;
inst[1].w3.u64 = 0;
- nb_allowed = qp->lf.nb_desc - pend_q->pending_count;
+ head = pend_q->head;
+ nb_allowed = pending_queue_free_cnt(head, pend_q->tail, pq_mask);
nb_ops = RTE_MIN(nb_ops, nb_allowed);
- enq_tail = pend_q->enq_tail;
-
if (unlikely(nb_ops & 1)) {
op_1 = ops[0];
- infl_req_1 = &pend_q->req_queue[enq_tail];
+ infl_req_1 = &pend_q->req_queue[head];
infl_req_1->op_flags = 0;
ret = cn9k_cpt_inst_prep(qp, op_1, infl_req_1, &inst[0]);
if (unlikely(ret))
return 0;
infl_req_1->cop = op_1;
inst[0].res_addr = (uint64_t)&infl_req_1->res;
cn9k_cpt_inst_submit(&inst[0], lmt_base, io_addr);
- MOD_INC(enq_tail, nb_desc);
+ pending_queue_advance(&head, pq_mask);
count++;
}
while (count < nb_ops) {
op_1 = ops[count];
op_2 = ops[count + 1];
- infl_req_1 = &pend_q->req_queue[enq_tail];
- MOD_INC(enq_tail, nb_desc);
- infl_req_2 = &pend_q->req_queue[enq_tail];
- MOD_INC(enq_tail, nb_desc);
+ infl_req_1 = &pend_q->req_queue[head];
+ pending_queue_advance(&head, pq_mask);
+ infl_req_2 = &pend_q->req_queue[head];
+ pending_queue_advance(&head, pq_mask);
infl_req_1->cop = op_1;
infl_req_2->cop = op_2;
infl_req_1->op_flags = 0;
infl_req_2->op_flags = 0;
- infl_req_1->res.cn9k.compcode = CPT_COMP_NOT_DONE;
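+ /* Reset the whole result word with one relaxed atomic store; the dequeue side polls it with a matching atomic load, so compcode can never be observed torn or stale */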
+ __atomic_store_n(&infl_req_1->res.u64[0], res.u64[0],
+ __ATOMIC_RELAXED);
inst[0].res_addr = (uint64_t)&infl_req_1->res;
- infl_req_2->res.cn9k.compcode = CPT_COMP_NOT_DONE;
+ __atomic_store_n(&infl_req_2->res.u64[0], res.u64[0],
+ __ATOMIC_RELAXED);
inst[1].res_addr = (uint64_t)&infl_req_2->res;
ret = cn9k_cpt_inst_prep(qp, op_1, infl_req_1, &inst[0]);
if (unlikely(ret)) {
plt_dp_err("Could not process op: %p", op_1);
- if (enq_tail == 0)
- enq_tail = nb_desc - 2;
- else if (enq_tail == 1)
- enq_tail = nb_desc - 1;
- else
- enq_tail--;
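+ /* Neither instruction was submitted; give back both reserved slots */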
+ pending_queue_retreat(&head, pq_mask, 2);
break;
}
ret = cn9k_cpt_inst_prep(qp, op_2, infl_req_2, &inst[1]);
if (unlikely(ret)) {
plt_dp_err("Could not process op: %p", op_2);
- if (enq_tail == 0)
- enq_tail = nb_desc - 1;
- else
- enq_tail--;
-
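+ /* inst[0] was prepared successfully; reclaim only the second slot and still submit the first instruction */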
+ pending_queue_retreat(&head, pq_mask, 1);
cn9k_cpt_inst_submit(&inst[0], lmt_base, io_addr);
count++;
break;
}
cn9k_cpt_inst_submit_dual(&inst[0], lmt_base, io_addr);
count += 2;
}
- pend_q->enq_tail = enq_tail;
- pend_q->pending_count += count;
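+ /* Order the request and instruction stores above before publishing the new head; pairs with the acquire fence on the dequeue side */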
+ rte_atomic_thread_fence(__ATOMIC_RELEASE);
+
+ pend_q->head = head;
pend_q->time_out = rte_get_timer_cycles() +
DEFAULT_COMMAND_TIMEOUT * rte_get_timer_hz();
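For reference, the mask-based ring helpers this patch switches to (pending_queue_advance, pending_queue_retreat, pending_queue_infl_cnt, pending_queue_free_cnt) are defined alongside struct pending_queue in the common cnxk driver header. A minimal sketch of their arithmetic, assuming pq_mask == nb_desc - 1 with nb_desc a power of two, using illustrative pq_* names rather than the driver's own definitions:

#include <stdint.h>

/* All helpers assume mask == nb_desc - 1 and nb_desc is a power of two. */
static inline void
pq_advance(uint64_t *index, const uint64_t mask)
{
	*index = (*index + 1) & mask; /* wraps to 0 past the last slot */
}

static inline void
pq_retreat(uint64_t *index, const uint64_t mask, uint64_t nb_entries)
{
	*index = (*index - nb_entries) & mask; /* masked underflow wraps back */
}

static inline uint64_t
pq_infl_cnt(uint64_t head, uint64_t tail, const uint64_t mask)
{
	/* Add nb_desc (mask + 1) so the subtraction stays positive when
	 * head has wrapped around behind tail. */
	return (head + mask + 1 - tail) & mask;
}

static inline uint64_t
pq_free_cnt(uint64_t head, uint64_t tail, const uint64_t mask)
{
	/* One slot is kept unused so a full ring is distinguishable from
	 * an empty one. */
	return mask - pq_infl_cnt(head, tail, mask);
}

Compared to MOD_INC() over nb_desc, the masked form has no branch, and splitting the indices into a producer-owned head and a consumer-owned tail is what lets the shared pending_count counter be dropped.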
static inline void
cn9k_cpt_dequeue_post_process(struct cnxk_cpt_qp *qp, struct rte_crypto_op *cop,
- struct cpt_inflight_req *infl_req)
+ struct cpt_inflight_req *infl_req,
+ struct cpt_cn9k_res_s *res)
{
- struct cpt_cn9k_res_s *res = (struct cpt_cn9k_res_s *)&infl_req->res;
unsigned int sz;
if (likely(res->compcode == CPT_COMP_GOOD)) {
uintptr_t *mdata = infl_req->mdata;
struct cnxk_ae_sess *sess;
- sess = get_asym_session_private_data(
- op->session, cn9k_cryptodev_driver_id);
+ sess = (struct cnxk_ae_sess *)
+ op->session->sess_private_data;
cnxk_ae_post_process(cop, sess, (uint8_t *)mdata[0]);
}
struct cpt_inflight_req *infl_req;
struct rte_crypto_op *cop;
struct cnxk_cpt_qp *qp;
+ union cpt_res_s res;
infl_req = (struct cpt_inflight_req *)(get_work1);
cop = infl_req->cop;
qp = infl_req->qp;
- cn9k_cpt_dequeue_post_process(qp, infl_req->cop, infl_req);
+ res.u64[0] = __atomic_load_n(&infl_req->res.u64[0], __ATOMIC_RELAXED);
+
+ cn9k_cpt_dequeue_post_process(qp, infl_req->cop, infl_req, &res.cn9k);
if (unlikely(infl_req->op_flags & CPT_OP_FLAGS_METABUF))
rte_mempool_put(qp->meta_info.pool, infl_req->mdata);
struct cpt_inflight_req *infl_req;
struct cnxk_cpt_qp *qp = qptr;
struct pending_queue *pend_q;
- struct cpt_cn9k_res_s *res;
+ uint64_t infl_cnt, pq_tail;
struct rte_crypto_op *cop;
- uint32_t pq_deq_head;
+ union cpt_res_s res;
int i;
- const uint32_t nb_desc = qp->lf.nb_desc;
-
pend_q = &qp->pend_q;
- nb_ops = RTE_MIN(nb_ops, pend_q->pending_count);
+ const uint64_t pq_mask = pend_q->pq_mask;
+
+ pq_tail = pend_q->tail;
+ infl_cnt = pending_queue_infl_cnt(pend_q->head, pq_tail, pq_mask);
+ nb_ops = RTE_MIN(nb_ops, infl_cnt);
- pq_deq_head = pend_q->deq_head;
+ /* Pair with the release fence in enqueue: don't read the result words below before the head load that produced infl_cnt */
+ rte_atomic_thread_fence(__ATOMIC_ACQUIRE);
for (i = 0; i < nb_ops; i++) {
- infl_req = &pend_q->req_queue[pq_deq_head];
+ infl_req = &pend_q->req_queue[pq_tail];
- res = (struct cpt_cn9k_res_s *)&infl_req->res;
+ res.u64[0] = __atomic_load_n(&infl_req->res.u64[0],
+ __ATOMIC_RELAXED);
- if (unlikely(res->compcode == CPT_COMP_NOT_DONE)) {
+ if (unlikely(res.cn9k.compcode == CPT_COMP_NOT_DONE)) {
if (unlikely(rte_get_timer_cycles() >
pend_q->time_out)) {
plt_err("Request timed out");
+ cnxk_cpt_dump_on_err(qp);
pend_q->time_out = rte_get_timer_cycles() +
DEFAULT_COMMAND_TIMEOUT *
rte_get_timer_hz();
}
break;
}
- MOD_INC(pq_deq_head, nb_desc);
+ pending_queue_advance(&pq_tail, pq_mask);
cop = infl_req->cop;
ops[i] = cop;
- cn9k_cpt_dequeue_post_process(qp, cop, infl_req);
+ cn9k_cpt_dequeue_post_process(qp, cop, infl_req, &res.cn9k);
if (unlikely(infl_req->op_flags & CPT_OP_FLAGS_METABUF))
rte_mempool_put(qp->meta_info.pool, infl_req->mdata);
}
- pend_q->pending_count -= i;
- pend_q->deq_head = pq_deq_head;
+ pend_q->tail = pq_tail;
return i;
}
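The ordering protocol that replaces pending_count can be seen in isolation: enqueue fills the request slots, issues a release fence, then publishes head; dequeue reads head (via infl_cnt), issues an acquire fence, then reads the slots. Below is a minimal single-producer/single-consumer sketch of the same protocol, with illustrative names (ring, produce, consume) that are not the driver's; rte_atomic_thread_fence() is the DPDK wrapper over the __atomic_thread_fence() used here:

#include <stdint.h>

#define RING_SLOTS 8 /* power of two, standing in for nb_desc */

struct slot { uint64_t payload; };

static struct slot ring[RING_SLOTS];
static uint64_t ring_head; /* written by the producer only */
static uint64_t ring_tail; /* written by the consumer only */

/* Producer: fill the slot, then publish the new head (full-ring check
 * elided for brevity). The release fence keeps the payload store from
 * sinking below the head store, so a consumer that observes the new
 * head also observes the payload. */
static void
produce(uint64_t value)
{
	ring[ring_head & (RING_SLOTS - 1)].payload = value;
	__atomic_thread_fence(__ATOMIC_RELEASE);
	__atomic_store_n(&ring_head, ring_head + 1, __ATOMIC_RELAXED);
}

/* Consumer: read head, fence, then read the slot. The acquire fence
 * keeps the payload load from being hoisted above the head load. */
static int
consume(uint64_t *value)
{
	uint64_t head = __atomic_load_n(&ring_head, __ATOMIC_RELAXED);

	if (head == ring_tail)
		return 0; /* nothing in flight */
	__atomic_thread_fence(__ATOMIC_ACQUIRE);
	*value = ring[ring_tail & (RING_SLOTS - 1)].payload;
	ring_tail++;
	return 1;
}

The CPT result word itself is additionally read and written with relaxed 64-bit atomics (__atomic_load_n/__atomic_store_n on res.u64[0]) because the hardware updates it concurrently: the fences order the surrounding request fields, while the single-word atomic access rules out torn reads of compcode.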