*/
#define CRYPTO_OCTEONTX 0x1
+/* Default command timeout in seconds */
+#define DEFAULT_COMMAND_TIMEOUT 4
+
#define CPT_COUNT_THOLD 32
#define CPT_TIMER_THOLD 0x3F
#ifndef _CPT_REQUEST_MGR_H_
#define _CPT_REQUEST_MGR_H_
+#include <rte_branch_prediction.h>
+#include <rte_cycles.h>
+
#include "cpt_common.h"
#include "cpt_mcode_defines.h"
return (sizeof(struct cpt_sess_misc) + RTE_ALIGN_CEIL(ctx_len, 8));
}
+
+static __rte_always_inline int32_t __hot
+cpt_enqueue_req(struct cpt_instance *instance, struct pending_queue *pqueue,
+ void *req)
+{
+ struct cpt_request_info *user_req = (struct cpt_request_info *)req;
+ int32_t ret = 0;
+
+	/* Nothing was prepared for enqueue; treat it as success */
+	if (unlikely(!req))
+		return 0;
+
+ if (unlikely(pqueue->pending_count >= DEFAULT_CMD_QLEN))
+ return -EAGAIN;
+
+ fill_cpt_inst(instance, req);
+
+ CPT_LOG_DP_DEBUG("req: %p op: %p ", req, user_req->op);
+
+	/* Deadline in timer cycles: now + timeout seconds * cycles/sec */
+ user_req->time_out = rte_get_timer_cycles() +
+ DEFAULT_COMMAND_TIMEOUT * rte_get_timer_hz();
+ user_req->extra_time = 0;
+
+	/* Mark the instruction in the software command queue (default mode) */
+ mark_cpt_inst(instance);
+
+ pqueue->rid_queue[pqueue->enq_tail].rid =
+ (uintptr_t)user_req;
+	/* The soft queue length (DEFAULT_CMD_QLEN) bounds the
+	 * number of outstanding requests
+	 */
+ MOD_INC(pqueue->enq_tail, DEFAULT_CMD_QLEN);
+ pqueue->pending_count += 1;
+
+ CPT_LOG_DP_DEBUG("Submitted NB cmd with request: %p "
+ "op: %p", user_req, user_req->op);
+
+ return ret;
+}
+
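Note: MOD_INC() above is the driver's wrap-around ring-index increment,
presumably defined alongside the other CPT helpers; the definition below is
an assumption for readability, not quoted from the tree. With it, enq_tail
always stays in [0, DEFAULT_CMD_QLEN):

    /* Assumed shape of MOD_INC: advance i, wrapping to 0 at l - 1 */
    #define MOD_INC(i, l) ((i) == ((l) - 1) ? ((i) = 0) : ((i)++))
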
static __rte_always_inline int __hot
cpt_pmd_crypto_operation(struct cpt_instance *instance,
struct rte_crypto_op *op, struct pending_queue *pqueue,
int ret = 0;
uint64_t cpt_op;
struct cpt_vf *cptvf = (struct cpt_vf *)instance;
- RTE_SET_USED(pqueue);
if (unlikely(op->sess_type == RTE_CRYPTO_OP_SESSIONLESS)) {
int sess_len;
goto req_fail;
}
+ /* Enqueue prepared instruction to HW */
+ ret = cpt_enqueue_req(instance, pqueue, prep_req);
+
if (unlikely(ret)) {
if (unlikely(ret == -EAGAIN))
goto req_fail;
return vqx_dbell.s.dbell_cnt;
}
+
+static __rte_always_inline void
+otx_cpt_ring_dbell(struct cpt_instance *instance, uint16_t count)
+{
+ struct cpt_vf *cptvf = (struct cpt_vf *)instance;
+ /* Memory barrier to flush pending writes */
+ rte_smp_wmb();
+ otx_cpt_write_vq_doorbell(cptvf, count);
+}
+
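Note: this is the standard barrier-then-doorbell sequence: commit the
command-queue stores to memory, then tell hardware how many new instructions
to fetch. A self-contained illustration of the same pattern using generic
DPDK MMIO helpers (ring_doorbell_sketch and dbell_reg are illustrative
names, not the driver's CSR accessor):

    #include <rte_atomic.h>
    #include <rte_io.h>

    /* Make queue writes visible, then issue the MMIO doorbell store */
    static inline void
    ring_doorbell_sketch(volatile void *dbell_reg, uint16_t count)
    {
            rte_smp_wmb();                 /* flush pending queue writes */
            rte_write64(count, dbell_reg); /* notify HW: 'count' new cmds */
    }
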
static __rte_always_inline void *
get_cpt_inst(struct command_queue *cqueue)
{
queue->idx = 0;
queue->cchunk = cchunk;
}
+
+static __rte_always_inline uint8_t
+check_nb_command_id(struct cpt_request_info *user_req,
+ struct cpt_instance *instance)
+{
+ /* Required for dequeue operation. Adding a dummy routine for now */
+ RTE_SET_USED(user_req);
+ RTE_SET_USED(instance);
+ return 0;
+}
#endif /* _OTX_CRYPTODEV_HW_ACCESS_H_ */
}
}
+
+static uint16_t
+otx_cpt_pkt_enqueue(void *qptr, struct rte_crypto_op **ops, uint16_t nb_ops)
+{
+ struct cpt_instance *instance = (struct cpt_instance *)qptr;
+	uint16_t count;
+ int ret;
+ struct cpt_vf *cptvf = (struct cpt_vf *)instance;
+ struct pending_queue *pqueue = &cptvf->pqueue;
+
+ count = DEFAULT_CMD_QLEN - pqueue->pending_count;
+ if (nb_ops > count)
+ nb_ops = count;
+
+ count = 0;
+ while (likely(count < nb_ops)) {
+ ret = cpt_pmd_crypto_operation(instance, ops[count], pqueue,
+ otx_cryptodev_driver_id);
+ if (unlikely(ret))
+ break;
+ count++;
+ }
+ otx_cpt_ring_dbell(instance, count);
+ return count;
+}
+
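Note: with enqueue_burst wired up below, applications reach
otx_cpt_pkt_enqueue() through the public cryptodev API. A minimal usage
sketch, assuming a device and queue pair that are already configured and
started (submit_ops is an illustrative wrapper, not part of this patch):

    #include <rte_cryptodev.h>

    static uint16_t
    submit_ops(uint8_t dev_id, uint16_t qp_id,
               struct rte_crypto_op **ops, uint16_t nb_ops)
    {
            /* May accept fewer than nb_ops when the PMD's pending
             * queue is close to full; resubmit the remainder later.
             */
            return rte_cryptodev_enqueue_burst(dev_id, qp_id, ops, nb_ops);
    }
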
static struct rte_cryptodev_ops cptvf_ops = {
/* Device related operations */
.dev_configure = otx_cpt_dev_config,
c_dev->dev_ops = &cptvf_ops;
- c_dev->enqueue_burst = NULL;
+ c_dev->enqueue_burst = otx_cpt_pkt_enqueue;
c_dev->dequeue_burst = NULL;
c_dev->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO |