1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2018 Cavium, Inc
5 #ifndef _CPT_REQUEST_MGR_H_
6 #define _CPT_REQUEST_MGR_H_
8 #include <rte_branch_prediction.h>
9 #include <rte_cycles.h>
11 #include "cpt_common.h"
12 #include "cpt_mcode_defines.h"
14 #if CPT_MODEL == CRYPTO_OCTEONTX
15 #include "../../crypto/octeontx/otx_cryptodev_hw_access.h"
19 * This file defines the agreement between the common layer and the individual
20 * crypto drivers for OCTEON TX series. Datapath code in the otx* directories
21 * includes this file; all of these functions are static inline for performance.
26 * Get the session size
28 * This function is used in the data path.
33 static __rte_always_inline unsigned int
34 cpt_get_session_size(void)
36 unsigned int ctx_len = sizeof(struct cpt_ctx);
37 return (sizeof(struct cpt_sess_misc) + RTE_ALIGN_CEIL(ctx_len, 8));
/*
 * cpt_enqueue_req() - submit one prepared CPT instruction to the hardware
 * queue of @instance and record it in the software pending queue @pqueue so
 * a later dequeue can poll it for completion.
 *
 * NOTE(review): this excerpt elides several lines (the remaining parameter
 * list with the request pointer `req`, branch bodies, and the final return).
 * The caller checks this function's result against -EAGAIN, so the
 * queue-full branch presumably returns -EAGAIN -- confirm against the full
 * file.
 */
40 static __rte_always_inline int32_t __hot
41 cpt_enqueue_req(struct cpt_instance *instance, struct pending_queue *pqueue,
44 struct cpt_request_info *user_req = (struct cpt_request_info *)req;
/* Back-pressure: refuse new work once the software queue already tracks the
 * maximum number of outstanding commands (branch body elided in excerpt). */
50 if (unlikely(pqueue->pending_count >= DEFAULT_CMD_QLEN))
/* Populate the CPT instruction for this request on the instance (opaque
 * helper; presumably writes into the HW command queue -- confirm). */
53 fill_cpt_inst(instance, req);
55 CPT_LOG_DP_DEBUG("req: %p op: %p ", req, user_req->op);
57 /* Fill time_out cycles */
/* Absolute TSC deadline: now + DEFAULT_COMMAND_TIMEOUT seconds, after which
 * the request is treated as timed out by the completion checker. */
58 user_req->time_out = rte_get_timer_cycles() +
59 DEFAULT_COMMAND_TIMEOUT * rte_get_timer_hz();
60 user_req->extra_time = 0;
62 /* Default mode of software queue */
/* Opaque helper; presumably marks/kicks the queued instruction on this
 * instance -- confirm semantics in the driver. */
63 mark_cpt_inst(instance);
/* Track the request at the software queue tail. NOTE(review): the assigned
 * value is elided in this excerpt; presumably the request pointer, since the
 * dequeue path casts rid back to struct cpt_request_info *. */
65 pqueue->rid_queue[pqueue->enq_tail].rid =
67 /* We will use soft queue length here to limit
/* Advance the tail modulo the queue length and account the in-flight
 * request; pending_count is what the queue-full check above reads. */
70 MOD_INC(pqueue->enq_tail, DEFAULT_CMD_QLEN);
71 pqueue->pending_count += 1;
73 CPT_LOG_DP_DEBUG("Submitted NB cmd with request: %p "
74 "op: %p", user_req, user_req->op);
/*
 * cpt_pmd_crypto_operation() - prepare a single rte_crypto_op (resolving or
 * building its session), then hand the prepared instruction to
 * cpt_enqueue_req() for submission on @instance / @pqueue.
 *
 * NOTE(review): this excerpt elides several lines (local declarations of
 * sess_len/cpt_op/ret, error-path branch bodies, and the final return), so
 * the exact error unwinding cannot be verified here.
 */
79 static __rte_always_inline int __hot
80 cpt_pmd_crypto_operation(struct cpt_instance *instance,
81 struct rte_crypto_op *op, struct pending_queue *pqueue,
82 uint8_t cpt_driver_id)
84 struct cpt_sess_misc *sess = NULL;
85 struct rte_crypto_sym_op *sym_op = op->sym;
86 void *prep_req = NULL, *mdata = NULL;
89 struct cpt_vf *cptvf = (struct cpt_vf *)instance;
/* Sessionless op: build a temporary session on the fly. */
91 if (unlikely(op->sess_type == RTE_CRYPTO_OP_SESSIONLESS)) {
94 sess_len = cpt_get_session_size();
/* 8-byte aligned, zeroed allocation for misc block + context. */
96 sess = rte_calloc(__func__, 1, sess_len, 8);
/* DMA address of the context, which sits immediately after the
 * cpt_sess_misc header inside the same allocation. */
100 sess->ctx_dma_addr = rte_malloc_virt2iova(sess) +
101 sizeof(struct cpt_sess_misc);
/* Configure the fresh session from the op's transform chain. */
103 ret = instance_session_cfg(sym_op->xform, (void *)sess);
/* Session-based op: fetch the session configured at attach time. */
107 sess = (struct cpt_sess_misc *)
108 get_sym_session_private_data(sym_op->session,
/* Dispatch on the operation type recorded in the session: cipher (possibly
 * with auth) goes through fill_fc_params, auth-only through
 * fill_digest_params. Both produce prep_req and per-op metadata. */
112 cpt_op = sess->cpt_op;
114 if (likely(cpt_op & CPT_OP_CIPHER_MASK))
115 ret = fill_fc_params(op, sess, &cptvf->meta_info, &mdata,
118 ret = fill_digest_params(op, sess, &cptvf->meta_info,
/* NOTE(review): "cryto" is a typo for "crypto" in this runtime log string;
 * fixing it would change emitted log text, so only flagging it here. */
122 CPT_LOG_DP_ERR("prep cryto req : op %p, cpt_op 0x%x "
123 "ret 0x%x", op, (unsigned int)cpt_op, ret);
127 /* Enqueue prepared instruction to HW */
128 ret = cpt_enqueue_req(instance, pqueue, prep_req);
/* -EAGAIN means the software pending queue was full (see
 * cpt_enqueue_req); other errors are logged below. Branch bodies are
 * elided in this excerpt. */
131 if (unlikely(ret == -EAGAIN))
133 CPT_LOG_DP_ERR("Error enqueing crypto request : error "
/* Error path: release the per-op metadata back to its mempool. */
142 free_op_meta(mdata, cptvf->meta_info.cptvf_meta_pool);
/*
 * cpt_dequeue_burst() - poll the software pending queue for up to @cnt
 * completed requests on @instance. For each completed request, write its
 * completion code into cc[i] and its op handle into resp[i], then retire it
 * from the pending queue. Polling stops at the first request that is still
 * pending, preserving completion order.
 *
 * NOTE(review): this excerpt elides the declarations of rid_e and ret, the
 * loop-exit branch body, and the final return (presumably the number of
 * entries written to resp[]/cc[] -- confirm against the full file).
 */
146 static __rte_always_inline int32_t __hot
147 cpt_dequeue_burst(struct cpt_instance *instance, uint16_t cnt,
148 void *resp[], uint8_t cc[], struct pending_queue *pqueue)
150 struct cpt_request_info *user_req;
152 int i, count, pcount;
/* Never poll more entries than are actually in flight. */
155 pcount = pqueue->pending_count;
156 count = (cnt > pcount) ? pcount : cnt;
158 for (i = 0; i < count; i++) {
/* Oldest outstanding request sits at the dequeue head. */
159 rid_e = &pqueue->rid_queue[pqueue->deq_head];
160 user_req = (struct cpt_request_info *)(rid_e->rid);
/* Prefetch the next entry's request while processing this one.
 * NOTE(review): rid_e[1] assumes the next slot is physically adjacent;
 * at the ring wrap point this prefetches a stale slot, which is harmless
 * for a prefetch -- confirm that is the intent. */
162 if (likely((i+1) < count))
163 rte_prefetch_non_temporal((void *)rid_e[1].rid);
/* Ask the driver whether this request has completed (and with what code). */
165 ret = check_nb_command_id(user_req, instance);
167 if (unlikely(ret == ERR_REQ_PENDING)) {
168 /* Stop checking for completions */
172 /* Return completion code and op handle */
173 cc[i] = (uint8_t)ret;
174 resp[i] = user_req->op;
175 CPT_LOG_DP_DEBUG("Request %p Op %p completed with code %d",
176 user_req, user_req->op, ret);
/* Retire the entry: advance head modulo queue length, drop the count. */
178 MOD_INC(pqueue->deq_head, DEFAULT_CMD_QLEN);
179 pqueue->pending_count -= 1;
185 #endif /* _CPT_REQUEST_MGR_H_ */