uint64_t w7;
int ret;
+ const union cpt_res_s res = {
+ .cn10k.compcode = CPT_COMP_NOT_DONE,
+ };
+
op = ops[0];
inst[0].w0.u64 = 0;
inst[0].res_addr = (uint64_t)&infl_req->res;
- infl_req->res.cn10k.compcode = CPT_COMP_NOT_DONE;
+ __atomic_store_n(&infl_req->res.u64[0], res.u64[0], __ATOMIC_RELAXED);
infl_req->cop = op;
inst[0].w7.u64 = w7;
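
The enqueue hunks above build the NOT_DONE marker in a const union on the stack and publish it with one relaxed 64-bit atomic store. The sketch below is a simplified standalone model (the union layout, names, and the COMP_NOT_DONE value are placeholders, not the driver's actual cpt_res_s definition) showing the intent: the result word is later overwritten by hardware DMA, so the initial write should be a single untorn 64-bit access rather than a narrow bitfield update the compiler may split into a read-modify-write.

    #include <stdint.h>

    /* Simplified stand-in for the hardware result word; the real
     * union cpt_res_s has more fields with device-defined bit
     * positions.
     */
    union fake_res {
            uint64_t u64[1];
            struct {
                    uint8_t compcode;
                    uint8_t uc_compcode;
            } cn10k;
    };

    #define COMP_NOT_DONE 0xff /* placeholder, not the driver's encoding */

    static inline void
    mark_not_done(union fake_res *hw_res)
    {
            /* Build the marker locally, then publish the whole
             * 64-bit word with one relaxed atomic store. A plain
             * bitfield assignment could be torn or reordered with
             * respect to the device writing the completed result.
             */
            const union fake_res res = {
                    .cn10k.compcode = COMP_NOT_DONE,
            };

            __atomic_store_n(&hw_res->u64[0], res.u64[0], __ATOMIC_RELAXED);
    }
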
static inline void
cn10k_cpt_dequeue_post_process(struct cnxk_cpt_qp *qp,
struct rte_crypto_op *cop,
- struct cpt_inflight_req *infl_req)
+ struct cpt_inflight_req *infl_req,
+ struct cpt_cn10k_res_s *res)
{
- struct cpt_cn10k_res_s *res = (struct cpt_cn10k_res_s *)&infl_req->res;
const uint8_t uc_compcode = res->uc_compcode;
const uint8_t compcode = res->compcode;
unsigned int sz;
struct cpt_inflight_req *infl_req;
struct rte_crypto_op *cop;
struct cnxk_cpt_qp *qp;
+ union cpt_res_s res;
infl_req = (struct cpt_inflight_req *)(get_work1);
cop = infl_req->cop;
qp = infl_req->qp;
- cn10k_cpt_dequeue_post_process(qp, infl_req->cop, infl_req);
+ res.u64[0] = __atomic_load_n(&infl_req->res.u64[0], __ATOMIC_RELAXED);
+
+ cn10k_cpt_dequeue_post_process(qp, infl_req->cop, infl_req, &res.cn10k);
if (unlikely(infl_req->op_flags & CPT_OP_FLAGS_METABUF))
rte_mempool_put(qp->meta_info.pool, infl_req->mdata);
struct cpt_inflight_req *infl_req;
struct cnxk_cpt_qp *qp = qptr;
struct pending_queue *pend_q;
- struct cpt_cn10k_res_s *res;
uint64_t infl_cnt, pq_tail;
struct rte_crypto_op *cop;
+ union cpt_res_s res;
int i;
pend_q = &qp->pend_q;
for (i = 0; i < nb_ops; i++) {
infl_req = &pend_q->req_queue[pq_tail];
- res = (struct cpt_cn10k_res_s *)&infl_req->res;
+ res.u64[0] = __atomic_load_n(&infl_req->res.u64[0],
+ __ATOMIC_RELAXED);
- if (unlikely(res->compcode == CPT_COMP_NOT_DONE)) {
+ if (unlikely(res.cn10k.compcode == CPT_COMP_NOT_DONE)) {
if (unlikely(rte_get_timer_cycles() >
pend_q->time_out)) {
plt_err("Request timed out");
ops[i] = cop;
- cn10k_cpt_dequeue_post_process(qp, cop, infl_req);
+ cn10k_cpt_dequeue_post_process(qp, cop, infl_req, &res.cn10k);
if (unlikely(infl_req->op_flags & CPT_OP_FLAGS_METABUF))
rte_mempool_put(qp->meta_info.pool, infl_req->mdata);
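
The dequeue side mirrors this: the 64-bit result word is snapshotted once per poll with a relaxed atomic load, and every later decision, including the typed res.cn10k view handed to the post-process helper, operates on the local copy. A minimal polling sketch under the same simplified union as above:

    #include <stdbool.h>

    /* Poll the hardware-owned result word once. Returns true when
     * the device has replaced the NOT_DONE marker. Assumes the
     * fake_res union and COMP_NOT_DONE placeholder from the
     * previous sketch.
     */
    static inline bool
    poll_once(union fake_res *hw_res, union fake_res *snapshot)
    {
            /* One untorn 64-bit load; afterwards only the local
             * copy is inspected, so compcode and uc_compcode are
             * guaranteed to come from the same device write.
             */
            snapshot->u64[0] = __atomic_load_n(&hw_res->u64[0],
                                               __ATOMIC_RELAXED);

            return snapshot->cn10k.compcode != COMP_NOT_DONE;
        }

Passing the typed view of the snapshot (&res.cn10k) into the post-process helper, rather than letting the helper cast &infl_req->res itself, keeps the helper from ever touching the live DMA target again.
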
uint64_t head;
int ret;
+ const union cpt_res_s res = {
+ .cn9k.compcode = CPT_COMP_NOT_DONE,
+ };
+
pend_q = &qp->pend_q;
const uint64_t lmt_base = qp->lf.lmt_base;
infl_req_1->op_flags = 0;
infl_req_2->op_flags = 0;
- infl_req_1->res.cn9k.compcode = CPT_COMP_NOT_DONE;
+ __atomic_store_n(&infl_req_1->res.u64[0], res.u64[0],
+ __ATOMIC_RELAXED);
inst[0].res_addr = (uint64_t)&infl_req_1->res;
- infl_req_2->res.cn9k.compcode = CPT_COMP_NOT_DONE;
+ __atomic_store_n(&infl_req_2->res.u64[0], res.u64[0],
+ __ATOMIC_RELAXED);
inst[1].res_addr = (uint64_t)&infl_req_2->res;
ret = cn9k_cpt_inst_prep(qp, op_1, infl_req_1, &inst[0]);
static inline void
cn9k_cpt_dequeue_post_process(struct cnxk_cpt_qp *qp, struct rte_crypto_op *cop,
- struct cpt_inflight_req *infl_req)
+ struct cpt_inflight_req *infl_req,
+ struct cpt_cn9k_res_s *res)
{
- struct cpt_cn9k_res_s *res = (struct cpt_cn9k_res_s *)&infl_req->res;
unsigned int sz;
if (likely(res->compcode == CPT_COMP_GOOD)) {
struct cpt_inflight_req *infl_req;
struct rte_crypto_op *cop;
struct cnxk_cpt_qp *qp;
+ union cpt_res_s res;
infl_req = (struct cpt_inflight_req *)(get_work1);
cop = infl_req->cop;
qp = infl_req->qp;
- cn9k_cpt_dequeue_post_process(qp, infl_req->cop, infl_req);
+ res.u64[0] = __atomic_load_n(&infl_req->res.u64[0], __ATOMIC_RELAXED);
+
+ cn9k_cpt_dequeue_post_process(qp, infl_req->cop, infl_req, &res.cn9k);
if (unlikely(infl_req->op_flags & CPT_OP_FLAGS_METABUF))
rte_mempool_put(qp->meta_info.pool, infl_req->mdata);
struct cpt_inflight_req *infl_req;
struct cnxk_cpt_qp *qp = qptr;
struct pending_queue *pend_q;
- struct cpt_cn9k_res_s *res;
uint64_t infl_cnt, pq_tail;
struct rte_crypto_op *cop;
+ union cpt_res_s res;
int i;
pend_q = &qp->pend_q;
for (i = 0; i < nb_ops; i++) {
infl_req = &pend_q->req_queue[pq_tail];
- res = (struct cpt_cn9k_res_s *)&infl_req->res;
+ res.u64[0] = __atomic_load_n(&infl_req->res.u64[0],
+ __ATOMIC_RELAXED);
- if (unlikely(res->compcode == CPT_COMP_NOT_DONE)) {
+ if (unlikely(res.cn9k.compcode == CPT_COMP_NOT_DONE)) {
if (unlikely(rte_get_timer_cycles() >
pend_q->time_out)) {
plt_err("Request timed out");
ops[i] = cop;
- cn9k_cpt_dequeue_post_process(qp, cop, infl_req);
+ cn9k_cpt_dequeue_post_process(qp, cop, infl_req, &res.cn9k);
if (unlikely(infl_req->op_flags & CPT_OP_FLAGS_METABUF))
rte_mempool_put(qp->meta_info.pool, infl_req->mdata);
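
The cn9k hunks are structurally identical, differing only in using the cn9k view of union cpt_res_s. As a self-contained illustration of the handshake itself, the toy program below stands a CPU thread in for the CPT engine: the consumer publishes the NOT_DONE marker and polls with the same relaxed 64-bit atomics the patch uses. All names and values here are hypothetical, and a CPU thread does not reproduce the device's DMA ordering guarantees, so this is only a model of the access pattern, not of the hardware.

    /* Build with: gcc -pthread handshake.c */
    #include <pthread.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <unistd.h>

    #define COMP_NOT_DONE 0xff /* placeholder, not the driver's encoding */
    #define COMP_GOOD     0x01 /* placeholder */

    union fake_res {
            uint64_t u64[1];
            struct {
                    uint8_t compcode;
                    uint8_t uc_compcode;
            } view;
    };

    static union fake_res hw_res;

    /* Stand-in for the CPT engine completing a request. */
    static void *
    fake_device(void *arg)
    {
            union fake_res done = { .view.compcode = COMP_GOOD };

            usleep(1000); /* pretend the operation takes a while */
            __atomic_store_n(&hw_res.u64[0], done.u64[0], __ATOMIC_RELAXED);
            return NULL;
    }

    int
    main(void)
    {
            const union fake_res init = { .view.compcode = COMP_NOT_DONE };
            union fake_res snap;
            pthread_t dev;

            /* Enqueue side: publish the marker as one 64-bit store. */
            __atomic_store_n(&hw_res.u64[0], init.u64[0], __ATOMIC_RELAXED);
            pthread_create(&dev, NULL, fake_device, NULL);

            /* Dequeue side: snapshot the word, then inspect the copy. */
            do {
                    snap.u64[0] = __atomic_load_n(&hw_res.u64[0],
                                                  __ATOMIC_RELAXED);
            } while (snap.view.compcode == COMP_NOT_DONE);

            printf("completed, compcode=0x%x\n", snap.view.compcode);
            pthread_join(dev, NULL);
            return 0;
    }
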