1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright 2017-2018 NXP
10 #include <rte_byteorder.h>
11 #include <rte_common.h>
12 #include <rte_cryptodev_pmd.h>
13 #include <rte_crypto.h>
14 #include <rte_cryptodev.h>
15 #include <rte_bus_vdev.h>
16 #include <rte_malloc.h>
17 #include <rte_security_driver.h>
18 #include <rte_hexdump.h>
20 #include <caam_jr_capabilities.h>
21 #include <caam_jr_config.h>
22 #include <caam_jr_hw_specific.h>
23 #include <caam_jr_pvt.h>
24 #include <caam_jr_desc.h>
25 #include <caam_jr_log.h>
27 /* RTA header files */
28 #include <hw/desc/common.h>
29 #include <hw/desc/algo.h>
30 #include <hw/desc/ipsec.h>
34 #define CRYPTODEV_NAME_CAAM_JR_PMD crypto_caam_jr
35 static uint8_t cryptodev_driver_id;
38 enum rta_sec_era rta_sec_era;
40 /* Lists the states possible for the SEC user space driver. */
41 enum sec_driver_state_e {
42 SEC_DRIVER_STATE_IDLE, /* Driver not initialized */
43 SEC_DRIVER_STATE_STARTED, /* Driver initialized and can be used*/
44 SEC_DRIVER_STATE_RELEASE, /* Driver release is in progress */
47 /* Job rings used for communication with SEC HW */
48 static struct sec_job_ring_t g_job_rings[MAX_SEC_JOB_RINGS];
50 /* The current state of SEC user space driver */
51 static enum sec_driver_state_e g_driver_state = SEC_DRIVER_STATE_IDLE;
53 /* The number of job rings used by SEC user space driver */
54 static int g_job_rings_no;
55 static int g_job_rings_max;
57 struct sec_outring_entry {
58 phys_addr_t desc; /* Pointer to completed descriptor */
59 uint32_t status; /* Status for completed descriptor */
62 /* virtual address conversion when mempool support is available for ctx */
63 static inline phys_addr_t
64 caam_jr_vtop_ctx(struct caam_jr_op_ctx *ctx, void *vaddr)
66 PMD_INIT_FUNC_TRACE();
67 return (size_t)vaddr - ctx->vtop_offset;
71 caam_jr_op_ending(struct caam_jr_op_ctx *ctx)
73 PMD_INIT_FUNC_TRACE();
74 /* report op status to sym->op and then free the ctx memeory */
75 rte_mempool_put(ctx->ctx_pool, (void *)ctx);
/* Allocate and prime a per-operation context from the session's
 * context mempool: clear the SG table via cacheline zeroing and cache
 * the virt-to-IOVA delta for fast later translation.
 */
78 static inline struct caam_jr_op_ctx *
79 caam_jr_alloc_ctx(struct caam_jr_session *ses)
81 struct caam_jr_op_ctx *ctx;
84 PMD_INIT_FUNC_TRACE();
85 ret = rte_mempool_get(ses->ctx_pool, (void **)(&ctx));
/* Pool exhaustion is a data-path event: emit a DP warning only. */
87 CAAM_JR_DP_WARN("Alloc sec descriptor failed!");
91 * Clear SG memory. There are 16 SG entries of 16 Bytes each.
92 * one call to dcbz_64() clear 64 bytes, hence calling it 4 times
93 * to clear all the SG entries. caam_jr_alloc_ctx() is called for
94 * each packet, memset is costlier than dcbz_64().
96 dcbz_64(&ctx->sg[SG_CACHELINE_0]);
97 dcbz_64(&ctx->sg[SG_CACHELINE_1]);
98 dcbz_64(&ctx->sg[SG_CACHELINE_2]);
99 dcbz_64(&ctx->sg[SG_CACHELINE_3]);
101 ctx->ctx_pool = ses->ctx_pool;
/* Cache the delta so caam_jr_vtop_ctx() is a single subtraction. */
102 ctx->vtop_offset = (size_t) ctx - rte_mempool_virt2iova(ctx);
108 is_cipher_only(struct caam_jr_session *ses)
110 PMD_INIT_FUNC_TRACE();
111 return ((ses->cipher_alg != RTE_CRYPTO_CIPHER_NULL) &&
112 (ses->auth_alg == RTE_CRYPTO_AUTH_NULL));
116 is_auth_only(struct caam_jr_session *ses)
118 PMD_INIT_FUNC_TRACE();
119 return ((ses->cipher_alg == RTE_CRYPTO_CIPHER_NULL) &&
120 (ses->auth_alg != RTE_CRYPTO_AUTH_NULL));
124 is_aead(struct caam_jr_session *ses)
126 PMD_INIT_FUNC_TRACE();
127 return ((ses->cipher_alg == 0) &&
128 (ses->auth_alg == 0) &&
129 (ses->aead_alg != 0));
133 is_auth_cipher(struct caam_jr_session *ses)
135 PMD_INIT_FUNC_TRACE();
136 return ((ses->cipher_alg != RTE_CRYPTO_CIPHER_NULL) &&
137 (ses->auth_alg != RTE_CRYPTO_AUTH_NULL));
141 is_encode(struct caam_jr_session *ses)
143 PMD_INIT_FUNC_TRACE();
144 return ses->dir == DIR_ENC;
148 is_decode(struct caam_jr_session *ses)
150 PMD_INIT_FUNC_TRACE();
151 return ses->dir == DIR_DEC;
/* Map the session's authentication algorithm onto SEC RTA alginfo
 * (algtype + HMAC operating mode).  NULL auth produces no digest, so
 * the session digest length is forced to zero.
 */
155 caam_auth_alg(struct caam_jr_session *ses, struct alginfo *alginfo_a)
157 PMD_INIT_FUNC_TRACE();
158 switch (ses->auth_alg) {
159 case RTE_CRYPTO_AUTH_NULL:
160 ses->digest_length = 0;
162 case RTE_CRYPTO_AUTH_MD5_HMAC:
163 alginfo_a->algtype = OP_ALG_ALGSEL_MD5;
164 alginfo_a->algmode = OP_ALG_AAI_HMAC;
166 case RTE_CRYPTO_AUTH_SHA1_HMAC:
167 alginfo_a->algtype = OP_ALG_ALGSEL_SHA1;
168 alginfo_a->algmode = OP_ALG_AAI_HMAC;
170 case RTE_CRYPTO_AUTH_SHA224_HMAC:
171 alginfo_a->algtype = OP_ALG_ALGSEL_SHA224;
172 alginfo_a->algmode = OP_ALG_AAI_HMAC;
174 case RTE_CRYPTO_AUTH_SHA256_HMAC:
175 alginfo_a->algtype = OP_ALG_ALGSEL_SHA256;
176 alginfo_a->algmode = OP_ALG_AAI_HMAC;
178 case RTE_CRYPTO_AUTH_SHA384_HMAC:
179 alginfo_a->algtype = OP_ALG_ALGSEL_SHA384;
180 alginfo_a->algmode = OP_ALG_AAI_HMAC;
182 case RTE_CRYPTO_AUTH_SHA512_HMAC:
183 alginfo_a->algtype = OP_ALG_ALGSEL_SHA512;
184 alginfo_a->algmode = OP_ALG_AAI_HMAC;
/* No mapping found for this algorithm; only log at debug level. */
187 CAAM_JR_DEBUG("unsupported auth alg %u", ses->auth_alg);
/* Map the session's cipher algorithm onto SEC RTA alginfo
 * (algtype + CBC/CTR operating mode).  NULL cipher needs no alginfo.
 */
192 caam_cipher_alg(struct caam_jr_session *ses, struct alginfo *alginfo_c)
194 PMD_INIT_FUNC_TRACE();
195 switch (ses->cipher_alg) {
196 case RTE_CRYPTO_CIPHER_NULL:
198 case RTE_CRYPTO_CIPHER_AES_CBC:
199 alginfo_c->algtype = OP_ALG_ALGSEL_AES;
200 alginfo_c->algmode = OP_ALG_AAI_CBC;
202 case RTE_CRYPTO_CIPHER_3DES_CBC:
203 alginfo_c->algtype = OP_ALG_ALGSEL_3DES;
204 alginfo_c->algmode = OP_ALG_AAI_CBC;
206 case RTE_CRYPTO_CIPHER_AES_CTR:
207 alginfo_c->algtype = OP_ALG_ALGSEL_AES;
208 alginfo_c->algmode = OP_ALG_AAI_CTR;
/* No mapping found for this algorithm; only log at debug level. */
211 CAAM_JR_DEBUG("unsupported cipher alg %d", ses->cipher_alg);
/* Map the session's AEAD algorithm onto SEC RTA alginfo.
 * Only AES-GCM is supported by this PMD.
 */
216 caam_aead_alg(struct caam_jr_session *ses, struct alginfo *alginfo)
218 PMD_INIT_FUNC_TRACE();
219 switch (ses->aead_alg) {
220 case RTE_CRYPTO_AEAD_AES_GCM:
221 alginfo->algtype = OP_ALG_ALGSEL_AES;
222 alginfo->algmode = OP_ALG_AAI_GCM;
/* No mapping found for this algorithm; only log at debug level. */
225 CAAM_JR_DEBUG("unsupported AEAD alg %d", ses->aead_alg);
229 /* prepare command block of the session */
/* Build (or rebuild) the session's command block (CDB): allocate
 * DMA-able memory and construct the SEC shared descriptor according to
 * the session type -- cipher-only, auth-only, AEAD (GCM) or chained
 * cipher+auth.  On success stores the descriptor length in the shared
 * header; returns a negative value if descriptor construction fails.
 */
231 caam_jr_prep_cdb(struct caam_jr_session *ses)
233 struct alginfo alginfo_c = {0}, alginfo_a = {0}, alginfo = {0};
234 int32_t shared_desc_len = 0;
237 #if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
243 PMD_INIT_FUNC_TRACE();
/* Drop any previously built descriptor before re-building. */
245 caam_jr_dma_free(ses->cdb);
247 cdb = caam_jr_dma_mem_alloc(L1_CACHE_BYTES, sizeof(struct sec_cdb));
249 CAAM_JR_ERR("failed to allocate memory for cdb\n");
255 memset(cdb, 0, sizeof(struct sec_cdb));
257 if (is_cipher_only(ses)) {
/* Cipher-only: block-cipher shared descriptor, key passed inline. */
258 caam_cipher_alg(ses, &alginfo_c);
259 if (alginfo_c.algtype == (unsigned int)CAAM_JR_ALG_UNSUPPORT) {
260 CAAM_JR_ERR("not supported cipher alg");
265 alginfo_c.key = (size_t)ses->cipher_key.data;
266 alginfo_c.keylen = ses->cipher_key.length;
267 alginfo_c.key_enc_flags = 0;
268 alginfo_c.key_type = RTA_DATA_IMM;
270 shared_desc_len = cnstr_shdsc_blkcipher(
276 } else if (is_auth_only(ses)) {
/* Auth-only: HMAC shared descriptor, key passed inline. */
277 caam_auth_alg(ses, &alginfo_a);
278 if (alginfo_a.algtype == (unsigned int)CAAM_JR_ALG_UNSUPPORT) {
279 CAAM_JR_ERR("not supported auth alg");
284 alginfo_a.key = (size_t)ses->auth_key.data;
285 alginfo_a.keylen = ses->auth_key.length;
286 alginfo_a.key_enc_flags = 0;
287 alginfo_a.key_type = RTA_DATA_IMM;
289 shared_desc_len = cnstr_shdsc_hmac(cdb->sh_desc, true,
293 } else if (is_aead(ses)) {
/* AEAD (GCM): separate encap/decap descriptor constructors. */
294 caam_aead_alg(ses, &alginfo);
295 if (alginfo.algtype == (unsigned int)CAAM_JR_ALG_UNSUPPORT) {
296 CAAM_JR_ERR("not supported aead alg");
300 alginfo.key = (size_t)ses->aead_key.data;
301 alginfo.keylen = ses->aead_key.length;
302 alginfo.key_enc_flags = 0;
303 alginfo.key_type = RTA_DATA_IMM;
305 if (ses->dir == DIR_ENC)
306 shared_desc_len = cnstr_shdsc_gcm_encap(
307 cdb->sh_desc, true, swap,
312 shared_desc_len = cnstr_shdsc_gcm_decap(
313 cdb->sh_desc, true, swap,
/* Chained cipher+auth: both alginfo structures are needed. */
318 caam_cipher_alg(ses, &alginfo_c);
319 if (alginfo_c.algtype == (unsigned int)CAAM_JR_ALG_UNSUPPORT) {
320 CAAM_JR_ERR("not supported cipher alg");
325 alginfo_c.key = (size_t)ses->cipher_key.data;
326 alginfo_c.keylen = ses->cipher_key.length;
327 alginfo_c.key_enc_flags = 0;
328 alginfo_c.key_type = RTA_DATA_IMM;
330 caam_auth_alg(ses, &alginfo_a);
331 if (alginfo_a.algtype == (unsigned int)CAAM_JR_ALG_UNSUPPORT) {
332 CAAM_JR_ERR("not supported auth alg");
337 alginfo_a.key = (size_t)ses->auth_key.data;
338 alginfo_a.keylen = ses->auth_key.length;
339 alginfo_a.key_enc_flags = 0;
340 alginfo_a.key_type = RTA_DATA_IMM;
/* Ask RTA whether both keys fit inline in the descriptor; the two
 * key lengths are passed in sh_desc[0]/sh_desc[1] and the answer
 * comes back as bit flags in sh_desc[2].
 */
342 cdb->sh_desc[0] = alginfo_c.keylen;
343 cdb->sh_desc[1] = alginfo_a.keylen;
344 err = rta_inline_query(IPSEC_AUTH_VAR_AES_DEC_BASE_DESC_LEN,
346 (unsigned int *)cdb->sh_desc,
347 &cdb->sh_desc[2], 2);
350 CAAM_JR_ERR("Crypto: Incorrect key lengths");
/* Bit 0 set: cipher key can be inlined, else reference it. */
354 if (cdb->sh_desc[2] & 1)
355 alginfo_c.key_type = RTA_DATA_IMM;
357 alginfo_c.key = (size_t)caam_jr_mem_vtop(
358 (void *)(size_t)alginfo_c.key);
359 alginfo_c.key_type = RTA_DATA_PTR;
/* Bit 1 set: auth key can be inlined, else reference it. */
361 if (cdb->sh_desc[2] & (1<<1))
362 alginfo_a.key_type = RTA_DATA_IMM;
364 alginfo_a.key = (size_t)caam_jr_mem_vtop(
365 (void *)(size_t)alginfo_a.key);
366 alginfo_a.key_type = RTA_DATA_PTR;
371 /* Auth_only_len is set as 0 here and it will be
372 * overwritten in fd for each packet.
374 shared_desc_len = cnstr_shdsc_authenc(cdb->sh_desc,
375 true, swap, &alginfo_c, &alginfo_a,
377 ses->digest_length, ses->dir);
380 if (shared_desc_len < 0) {
381 CAAM_JR_ERR("error in preparing command block");
382 return shared_desc_len;
386 SEC_DUMP_DESC(cdb->sh_desc);
/* Record the descriptor length in the shared header. */
389 cdb->sh_hdr.hi.field.idlen = shared_desc_len;
394 /* @brief Poll the HW for already processed jobs in the JR
395 * and silently discard the available jobs or notify them to UA
396 * with indicated error code.
398 * @param [in,out] job_ring The job ring to poll.
399 * @param [in] do_notify Can be #TRUE or #FALSE. Indicates if
400 * descriptors are to be discarded
401 * or notified to UA with given error_code.
402 * @param [out] notified_descs Number of notified descriptors. Can be NULL
403 * if do_notify is #FALSE
/* Drain every finished job from the ring, advancing the consumer
 * index and freeing HW slots without dispatching results; when
 * do_notify is set, report the number of discarded descriptors
 * through notified_descs.
 */
406 hw_flush_job_ring(struct sec_job_ring_t *job_ring,
408 uint32_t *notified_descs)
410 int32_t jobs_no_to_discard = 0;
411 int32_t discarded_descs_no = 0;
413 PMD_INIT_FUNC_TRACE();
414 CAAM_JR_DEBUG("Jr[%p] pi[%d] ci[%d].Flushing jr notify desc=[%d]",
415 job_ring, job_ring->pidx, job_ring->cidx, do_notify);
417 jobs_no_to_discard = hw_get_no_finished_jobs(job_ring);
419 /* Discard all jobs */
420 CAAM_JR_DEBUG("Jr[%p] pi[%d] ci[%d].Discarding %d descs",
421 job_ring, job_ring->pidx, job_ring->cidx,
424 while (jobs_no_to_discard > discarded_descs_no) {
425 discarded_descs_no++;
426 /* Now increment the consumer index for the current job ring,
427 * AFTER saving job in temporary location!
428 * Increment the consumer index for the current job ring
430 job_ring->cidx = SEC_CIRCULAR_COUNTER(job_ring->cidx,
/* Tell the HW this output-ring slot is free again. */
433 hw_remove_entries(job_ring, 1);
436 if (do_notify == true) {
437 ASSERT(notified_descs != NULL);
438 *notified_descs = discarded_descs_no;
442 /* @brief Poll the HW for already processed jobs in the JR
443 * and notify the available jobs to UA.
445 * @param [in] job_ring The job ring to poll.
446 * @param [in] limit The maximum number of jobs to notify.
447 * If set to negative value, all available jobs are
450 * @retval >=0 for No of jobs notified to UA.
451 * @retval -1 for error
/* Harvest up to 'limit' completed descriptors from the job ring:
 * translate HW status words into rte_crypto op status, fix up mbuf
 * lengths for security-session (protocol) operations, and release the
 * per-op contexts.  Returns the number of descriptors processed.
 */
454 hw_poll_job_ring(struct sec_job_ring_t *job_ring,
455 struct rte_crypto_op **ops, int32_t limit,
456 struct caam_jr_qp *jr_qp)
458 int32_t jobs_no_to_notify = 0; /* the number of done jobs to notify*/
459 int32_t number_of_jobs_available = 0;
460 int32_t notified_descs_no = 0;
461 uint32_t sec_error_code = 0;
462 struct job_descriptor *current_desc;
463 phys_addr_t current_desc_addr;
464 phys_addr_t *temp_addr;
465 struct caam_jr_op_ctx *ctx;
467 PMD_INIT_FUNC_TRACE();
468 /* TODO check for ops have memory*/
469 /* check here if any JR error that cannot be written
470 * in the output status word has occurred
472 if (JR_REG_JRINT_JRE_EXTRACT(GET_JR_REG(JRINT, job_ring))) {
473 CAAM_JR_INFO("err received");
474 sec_error_code = JR_REG_JRINT_ERR_TYPE_EXTRACT(
475 GET_JR_REG(JRINT, job_ring));
476 if (unlikely(sec_error_code)) {
477 hw_job_ring_error_print(job_ring, sec_error_code);
481 /* compute the number of jobs available in the job ring based on the
482 * producer and consumer index values.
484 number_of_jobs_available = hw_get_no_finished_jobs(job_ring);
485 /* Compute the number of notifications that need to be raised to UA
486 * If limit > total number of done jobs -> notify all done jobs
487 * If limit = 0 -> error
488 * If limit < total number of done jobs -> notify a number
489 * of done jobs equal with limit
491 jobs_no_to_notify = (limit > number_of_jobs_available) ?
492 number_of_jobs_available : limit;
494 "Jr[%p] pi[%d] ci[%d].limit =%d Available=%d.Jobs to notify=%d",
495 job_ring, job_ring->pidx, job_ring->cidx,
496 limit, number_of_jobs_available, jobs_no_to_notify);
500 while (jobs_no_to_notify > notified_descs_no) {
501 static uint64_t false_alarm;
502 static uint64_t real_poll;
504 /* Get job status here */
505 sec_error_code = job_ring->output_ring[job_ring->cidx].status;
506 /* Get completed descriptor */
507 temp_addr = &(job_ring->output_ring[job_ring->cidx].desc);
508 current_desc_addr = (phys_addr_t)sec_read_addr(temp_addr);
511 /* todo check if it is false alarm no desc present */
/* A NULL completed-descriptor address should never happen once the
 * finished-jobs count said one is present -> treat as fatal.
 */
512 if (!current_desc_addr) {
514 printf("false alarm %" PRIu64 "real %" PRIu64
515 " sec_err =0x%x cidx Index =0%d\n",
516 false_alarm, real_poll,
517 sec_error_code, job_ring->cidx);
518 rte_panic("CAAM JR descriptor NULL");
519 return notified_descs_no;
521 current_desc = (struct job_descriptor *)
522 caam_jr_dma_ptov(current_desc_addr);
523 /* now increment the consumer index for the current job ring,
524 * AFTER saving job in temporary location!
526 job_ring->cidx = SEC_CIRCULAR_COUNTER(job_ring->cidx,
528 /* Signal that the job has been processed and the slot is free*/
529 hw_remove_entries(job_ring, 1);
530 /*TODO for multiple ops, packets*/
/* Recover the op context that embeds this job descriptor. */
531 ctx = container_of(current_desc, struct caam_jr_op_ctx, jobdes);
532 if (unlikely(sec_error_code)) {
533 CAAM_JR_ERR("desc at cidx %d generated error 0x%x\n",
534 job_ring->cidx, sec_error_code);
535 hw_handle_job_ring_error(job_ring, sec_error_code);
536 //todo improve with exact errors
537 ctx->op->status = RTE_CRYPTO_OP_STATUS_ERROR;
540 ctx->op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
542 if (ctx->op->sym->m_dst) {
543 rte_hexdump(stdout, "PROCESSED",
544 rte_pktmbuf_mtod(ctx->op->sym->m_dst, void *),
545 rte_pktmbuf_data_len(ctx->op->sym->m_dst));
547 rte_hexdump(stdout, "PROCESSED",
548 rte_pktmbuf_mtod(ctx->op->sym->m_src, void *),
549 rte_pktmbuf_data_len(ctx->op->sym->m_src));
/* For security (protocol) sessions the HW produced an IP packet;
 * set the mbuf lengths from the IP header total-length field.
 */
553 if (ctx->op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) {
556 if (ctx->op->sym->m_dst) {
557 /*TODO check for ip header or other*/
558 ip4_hdr = (struct ip *)
559 rte_pktmbuf_mtod(ctx->op->sym->m_dst, char*);
560 ctx->op->sym->m_dst->pkt_len =
561 rte_be_to_cpu_16(ip4_hdr->ip_len);
562 ctx->op->sym->m_dst->data_len =
563 rte_be_to_cpu_16(ip4_hdr->ip_len);
565 ip4_hdr = (struct ip *)
566 rte_pktmbuf_mtod(ctx->op->sym->m_src, char*);
567 ctx->op->sym->m_src->pkt_len =
568 rte_be_to_cpu_16(ip4_hdr->ip_len);
569 ctx->op->sym->m_src->data_len =
570 rte_be_to_cpu_16(ip4_hdr->ip_len);
/* Return the per-op context to its mempool. */
574 caam_jr_op_ending(ctx);
578 return notified_descs_no;
/* Cryptodev dequeue callback: poll the queue pair's job ring for up
 * to nb_ops completed operations, then re-enable ring IRQs when the
 * ring operates in NAPI or pure-IRQ notification mode.
 */
582 caam_jr_dequeue_burst(void *qp, struct rte_crypto_op **ops,
585 struct caam_jr_qp *jr_qp = (struct caam_jr_qp *)qp;
586 struct sec_job_ring_t *ring = jr_qp->ring;
590 PMD_INIT_FUNC_TRACE();
591 CAAM_JR_DP_DEBUG("Jr[%p]Polling. limit[%d]", ring, nb_ops);
594 * If nb_ops < 0 -> poll JR until no more notifications are available.
595 * If nb_ops > 0 -> poll JR until limit is reached.
598 /* Run hw poll job ring */
599 num_rx = hw_poll_job_ring(ring, ops, nb_ops, jr_qp);
601 CAAM_JR_ERR("Error polling SEC engine (%d)", num_rx);
605 CAAM_JR_DP_DEBUG("Jr[%p].Jobs notified[%d]. ", ring, num_rx);
/* NAPI mode: switch back to interrupts once the burst drained
 * fewer jobs than requested (ring is empty).
 */
607 if (ring->jr_mode == SEC_NOTIFICATION_TYPE_NAPI) {
608 if (num_rx < nb_ops) {
609 ret = caam_jr_enable_irqs(ring->irq_fd);
610 SEC_ASSERT(ret == 0, ret,
611 "Failed to enable irqs for job ring %p", ring);
613 } else if (ring->jr_mode == SEC_NOTIFICATION_TYPE_IRQ) {
615 /* Always enable IRQ generation when in pure IRQ mode */
616 ret = caam_jr_enable_irqs(ring->irq_fd);
617 SEC_ASSERT(ret == 0, ret,
618 "Failed to enable irqs for job ring %p", ring);
/* Account dequeued packets on the queue pair statistics. */
621 jr_qp->rx_pkts += num_rx;
/* Build a job descriptor for an auth-only operation on a contiguous
 * mbuf.  For verify (decode) the user-supplied digest is copied aside
 * and chained after the data via an SG list so the HW checks the ICV;
 * for generate (encode) the digest is written directly to the user's
 * digest address.
 */
626 static inline struct caam_jr_op_ctx *
627 build_auth_only(struct rte_crypto_op *op, struct caam_jr_session *ses)
629 struct rte_crypto_sym_op *sym = op->sym;
630 struct caam_jr_op_ctx *ctx;
631 struct sec4_sg_entry *sg;
632 rte_iova_t start_addr;
634 uint64_t sdesc_offset;
635 struct sec_job_descriptor_t *jobdescr;
637 PMD_INIT_FUNC_TRACE();
638 ctx = caam_jr_alloc_ctx(ses);
/* Offset of the shared descriptor within the session CDB. */
645 sdesc_offset = (size_t) ((char *)&cdb->sh_desc - (char *)cdb);
647 start_addr = rte_pktmbuf_iova(sym->m_src);
649 jobdescr = (struct sec_job_descriptor_t *) ctx->jobdes.desc;
651 SEC_JD_INIT(jobdescr);
652 SEC_JD_SET_SD(jobdescr,
653 (phys_addr_t)(caam_jr_dma_vtop(cdb)) + sdesc_offset,
654 cdb->sh_hdr.hi.field.idlen);
/* Output: the digest buffer supplied by the application. */
657 SEC_JD_SET_OUT_PTR(jobdescr, (uint64_t)sym->auth.digest.phys_addr,
658 0, ses->digest_length);
661 if (is_decode(ses)) {
663 SEC_JD_SET_IN_PTR(jobdescr,
664 (uint64_t)caam_jr_vtop_ctx(ctx, sg), 0,
665 (sym->auth.data.length + ses->digest_length));
666 /* enabling sg list */
667 (jobdescr)->seq_in.command.word |= 0x01000000;
669 /* hash result or digest, save digest first */
670 rte_memcpy(ctx->digest, sym->auth.digest.data,
672 sg->ptr = cpu_to_caam64(start_addr + sym->auth.data.offset);
673 sg->len = cpu_to_caam32(sym->auth.data.length);
676 rte_hexdump(stdout, "ICV", ctx->digest, ses->digest_length);
678 /* let's check digest by hw */
680 sg->ptr = cpu_to_caam64(caam_jr_vtop_ctx(ctx, ctx->digest));
681 sg->len = cpu_to_caam32(ses->digest_length);
/* Mark the final SG entry. */
683 sg->len |= cpu_to_caam32(SEC4_SG_LEN_FIN);
685 SEC_JD_SET_IN_PTR(jobdescr, (uint64_t)start_addr,
686 sym->auth.data.offset, sym->auth.data.length);
/* Build a job descriptor for a cipher-only operation on a contiguous
 * mbuf.  The IV is prepended to the payload via a two-entry SG list;
 * in-place operation is used when no m_dst is provided.
 */
691 static inline struct caam_jr_op_ctx *
692 build_cipher_only(struct rte_crypto_op *op, struct caam_jr_session *ses)
694 struct rte_crypto_sym_op *sym = op->sym;
695 struct caam_jr_op_ctx *ctx;
696 struct sec4_sg_entry *sg;
697 rte_iova_t src_start_addr, dst_start_addr;
699 uint64_t sdesc_offset;
700 uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
702 struct sec_job_descriptor_t *jobdescr;
704 PMD_INIT_FUNC_TRACE();
705 ctx = caam_jr_alloc_ctx(ses);
/* Offset of the shared descriptor within the session CDB. */
711 sdesc_offset = (size_t) ((char *)&cdb->sh_desc - (char *)cdb);
713 src_start_addr = rte_pktmbuf_iova(sym->m_src);
715 dst_start_addr = rte_pktmbuf_iova(sym->m_dst);
/* No separate destination mbuf: operate in place. */
717 dst_start_addr = src_start_addr;
719 jobdescr = (struct sec_job_descriptor_t *) ctx->jobdes.desc;
721 SEC_JD_INIT(jobdescr);
722 SEC_JD_SET_SD(jobdescr,
723 (phys_addr_t)(caam_jr_dma_vtop(cdb)) + sdesc_offset,
724 cdb->sh_hdr.hi.field.idlen);
727 CAAM_JR_INFO("mbuf offset =%d, cipher offset = %d, length =%d+%d",
728 sym->m_src->data_off, sym->cipher.data.offset,
729 sym->cipher.data.length, ses->iv.length);
/* Output covers IV + payload, written at the cipher offset. */
732 SEC_JD_SET_OUT_PTR(jobdescr, (uint64_t)dst_start_addr,
733 sym->cipher.data.offset,
734 sym->cipher.data.length + ses->iv.length);
738 SEC_JD_SET_IN_PTR(jobdescr, (uint64_t)caam_jr_vtop_ctx(ctx, sg), 0,
739 sym->cipher.data.length + ses->iv.length);
/* Flag the input pointer as an SG list. */
741 (jobdescr)->seq_in.command.word |= 0x01000000;
/* SG[0]: IV taken from the op's private area. */
743 sg->ptr = cpu_to_caam64(caam_jr_dma_vtop(IV_ptr));
744 sg->len = cpu_to_caam32(ses->iv.length);
/* SG[1]: the cipher payload; marked as final entry. */
747 sg->ptr = cpu_to_caam64(src_start_addr + sym->cipher.data.offset);
748 sg->len = cpu_to_caam32(sym->cipher.data.length);
750 sg->len |= cpu_to_caam32(SEC4_SG_LEN_FIN);
/* Build a job descriptor for a chained cipher+auth operation.
 * Input SG list: IV, then the auth region, then (for decode) the saved
 * digest.  Output SG list: the cipher region, plus (for encode) the
 * generated digest.  auth_only_len (auth bytes not ciphered) is 0 in
 * the shared descriptor and patched per packet via DPOVRD.
 */
755 static inline struct caam_jr_op_ctx *
756 build_cipher_auth(struct rte_crypto_op *op, struct caam_jr_session *ses)
758 struct rte_crypto_sym_op *sym = op->sym;
759 struct caam_jr_op_ctx *ctx;
760 struct sec4_sg_entry *sg;
761 rte_iova_t src_start_addr, dst_start_addr;
764 uint64_t sdesc_offset;
765 uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
767 struct sec_job_descriptor_t *jobdescr;
768 uint32_t auth_only_len;
770 PMD_INIT_FUNC_TRACE();
/* Bytes that are authenticated but not ciphered. */
771 auth_only_len = op->sym->auth.data.length -
772 op->sym->cipher.data.length;
774 src_start_addr = rte_pktmbuf_iova(sym->m_src);
776 dst_start_addr = rte_pktmbuf_iova(sym->m_dst);
/* No separate destination mbuf: operate in place. */
778 dst_start_addr = src_start_addr;
780 ctx = caam_jr_alloc_ctx(ses);
/* Offset of the shared descriptor within the session CDB. */
786 sdesc_offset = (size_t) ((char *)&cdb->sh_desc - (char *)cdb);
788 jobdescr = (struct sec_job_descriptor_t *) ctx->jobdes.desc;
790 SEC_JD_INIT(jobdescr);
791 SEC_JD_SET_SD(jobdescr,
792 (phys_addr_t)(caam_jr_dma_vtop(cdb)) + sdesc_offset,
793 cdb->sh_hdr.hi.field.idlen);
/* Build the input SG list (IV + auth data [+ digest for decode]). */
797 if (is_encode(ses)) {
798 sg->ptr = cpu_to_caam64(caam_jr_dma_vtop(IV_ptr));
799 sg->len = cpu_to_caam32(ses->iv.length);
800 length += ses->iv.length;
803 sg->ptr = cpu_to_caam64(src_start_addr + sym->auth.data.offset);
804 sg->len = cpu_to_caam32(sym->auth.data.length);
805 length += sym->auth.data.length;
807 sg->len |= cpu_to_caam32(SEC4_SG_LEN_FIN);
809 sg->ptr = cpu_to_caam64(caam_jr_dma_vtop(IV_ptr));
810 sg->len = cpu_to_caam32(ses->iv.length);
811 length += ses->iv.length;
814 sg->ptr = cpu_to_caam64(src_start_addr + sym->auth.data.offset);
815 sg->len = cpu_to_caam32(sym->auth.data.length);
816 length += sym->auth.data.length;
/* Decode: stash the expected digest so the HW can verify it. */
818 rte_memcpy(ctx->digest, sym->auth.digest.data,
821 sg->ptr = cpu_to_caam64(caam_jr_dma_vtop(ctx->digest));
822 sg->len = cpu_to_caam32(ses->digest_length);
823 length += ses->digest_length;
825 sg->len |= cpu_to_caam32(SEC4_SG_LEN_FIN);
828 SEC_JD_SET_IN_PTR(jobdescr, (uint64_t)caam_jr_dma_vtop(&ctx->sg[0]), 0,
/* Flag the input pointer as an SG list. */
831 (jobdescr)->seq_in.command.word |= 0x01000000;
/* Build the output SG list (cipher data [+ digest for encode]). */
836 sg->ptr = cpu_to_caam64(dst_start_addr + sym->cipher.data.offset);
837 sg->len = cpu_to_caam32(sym->cipher.data.length);
838 length = sym->cipher.data.length;
840 if (is_encode(ses)) {
841 /* set auth output */
843 sg->ptr = cpu_to_caam64(sym->auth.digest.phys_addr);
844 sg->len = cpu_to_caam32(ses->digest_length);
845 length += ses->digest_length;
848 sg->len |= cpu_to_caam32(SEC4_SG_LEN_FIN);
850 SEC_JD_SET_OUT_PTR(jobdescr,
851 (uint64_t)caam_jr_dma_vtop(&ctx->sg[6]), 0, length);
/* Flag the output pointer as an SG list. */
853 (jobdescr)->seq_out.command.word |= 0x01000000;
855 /* Auth_only_len is set as 0 in descriptor and it is
856 * overwritten here in the jd which will update
861 (jobdescr)->dpovrd = 0x80000000 | auth_only_len;
/* Prepare and submit a single crypto op: look up the session, lazily
 * (re)build the CDB when the session migrates to this queue pair,
 * construct the matching job descriptor, convert it to CAAM byte order
 * if needed, and push it onto the HW job ring.
 */
866 caam_jr_enqueue_op(struct rte_crypto_op *op, struct caam_jr_qp *qp)
868 struct sec_job_ring_t *ring = qp->ring;
869 struct caam_jr_session *ses;
870 struct caam_jr_op_ctx *ctx = NULL;
871 struct sec_job_descriptor_t *jobdescr __rte_unused;
873 PMD_INIT_FUNC_TRACE();
874 switch (op->sess_type) {
875 case RTE_CRYPTO_OP_WITH_SESSION:
876 ses = (struct caam_jr_session *)
877 get_sym_session_private_data(op->sym->session,
878 cryptodev_driver_id);
881 CAAM_JR_DP_ERR("sessionless crypto op not supported");
/* Session bound to a different qp (or none): rebuild the CDB. */
886 if (unlikely(!ses->qp || ses->qp != qp)) {
887 CAAM_JR_DP_DEBUG("Old:sess->qp=%p New qp = %p\n", ses->qp, qp);
889 caam_jr_prep_cdb(ses);
/* Dispatch on session type; only contiguous mbufs handled here. */
892 if (rte_pktmbuf_is_contiguous(op->sym->m_src)) {
893 if (is_auth_cipher(ses))
894 ctx = build_cipher_auth(op, ses);
895 else if (is_aead(ses))
897 else if (is_auth_only(ses))
898 ctx = build_auth_only(op, ses);
899 else if (is_cipher_only(ses))
900 ctx = build_cipher_only(op, ses);
906 if (unlikely(!ctx)) {
908 CAAM_JR_ERR("not supported sec op");
913 rte_hexdump(stdout, "DECODE",
914 rte_pktmbuf_mtod(op->sym->m_src, void *),
915 rte_pktmbuf_data_len(op->sym->m_src));
917 rte_hexdump(stdout, "ENCODE",
918 rte_pktmbuf_mtod(op->sym->m_src, void *),
919 rte_pktmbuf_data_len(op->sym->m_src));
921 printf("\n JD before conversion\n");
922 for (int i = 0; i < 12; i++)
923 printf("\n 0x%08x", ctx->jobdes.desc[i]);
926 CAAM_JR_DP_DEBUG("Jr[%p] pi[%d] ci[%d].Before sending desc",
927 ring, ring->pidx, ring->cidx);
929 /* todo - do we want to retry */
/* Ring full: drop the op and release its context. */
930 if (SEC_JOB_RING_IS_FULL(ring->pidx, ring->cidx,
931 SEC_JOB_RING_SIZE, SEC_JOB_RING_SIZE)) {
932 CAAM_JR_DP_DEBUG("Ring FULL Jr[%p] pi[%d] ci[%d].Size = %d",
933 ring, ring->pidx, ring->cidx, SEC_JOB_RING_SIZE);
934 caam_jr_op_ending(ctx);
/* Core and CAAM endianness differ: byte-swap every descriptor
 * word/pointer so the HW reads it correctly.
 */
939 #if CORE_BYTE_ORDER != CAAM_BYTE_ORDER
940 jobdescr = (struct sec_job_descriptor_t *) ctx->jobdes.desc;
942 jobdescr->deschdr.command.word =
943 cpu_to_caam32(jobdescr->deschdr.command.word);
944 jobdescr->sd_ptr = cpu_to_caam64(jobdescr->sd_ptr);
945 jobdescr->seq_out.command.word =
946 cpu_to_caam32(jobdescr->seq_out.command.word);
947 jobdescr->seq_out_ptr = cpu_to_caam64(jobdescr->seq_out_ptr);
948 jobdescr->out_ext_length = cpu_to_caam32(jobdescr->out_ext_length);
949 jobdescr->seq_in.command.word =
950 cpu_to_caam32(jobdescr->seq_in.command.word);
951 jobdescr->seq_in_ptr = cpu_to_caam64(jobdescr->seq_in_ptr);
952 jobdescr->in_ext_length = cpu_to_caam32(jobdescr->in_ext_length);
953 jobdescr->load_dpovrd.command.word =
954 cpu_to_caam32(jobdescr->load_dpovrd.command.word);
955 jobdescr->dpovrd = cpu_to_caam32(jobdescr->dpovrd);
958 /* Set ptr in input ring to current descriptor */
959 sec_write_addr(&ring->input_ring[ring->pidx],
960 (phys_addr_t)caam_jr_vtop_ctx(ctx, ctx->jobdes.desc));
963 /* Notify HW that a new job is enqueued */
964 hw_enqueue_desc_on_job_ring(ring);
966 /* increment the producer index for the current job ring */
967 ring->pidx = SEC_CIRCULAR_COUNTER(ring->pidx, SEC_JOB_RING_SIZE);
/* Cryptodev enqueue callback: submit up to nb_ops operations to the
 * queue pair's job ring and account the transmitted packets.
 */
973 caam_jr_enqueue_burst(void *qp, struct rte_crypto_op **ops,
976 /* Function to transmit the frames to given device and queuepair */
979 struct caam_jr_qp *jr_qp = (struct caam_jr_qp *)qp;
982 PMD_INIT_FUNC_TRACE();
983 /*Prepare each packet which is to be sent*/
984 for (loop = 0; loop < nb_ops; loop++) {
985 ret = caam_jr_enqueue_op(ops[loop], jr_qp);
/* Account enqueued packets on the queue pair statistics. */
990 jr_qp->tx_pkts += num_tx;
995 /* Release queue pair */
/* Release a queue pair: validate the id and detach the qp from the
 * device data (the underlying job ring itself is not torn down here).
 */
997 caam_jr_queue_pair_release(struct rte_cryptodev *dev,
1000 struct sec_job_ring_t *internals;
1001 struct caam_jr_qp *qp = NULL;
1003 PMD_INIT_FUNC_TRACE();
1004 CAAM_JR_DEBUG("dev =%p, queue =%d", dev, qp_id);
1006 internals = dev->data->dev_private;
1007 if (qp_id >= internals->max_nb_queue_pairs) {
1008 CAAM_JR_ERR("Max supported qpid %d",
1009 internals->max_nb_queue_pairs);
1013 qp = &internals->qps[qp_id];
/* Detach from the device; the qp storage lives in 'internals'. */
1015 dev->data->queue_pairs[qp_id] = NULL;
1020 /* Setup a queue pair */
/* Set up a queue pair: validate the id, bind the preallocated qp slot
 * to the device's job ring and register it in the device data.
 */
1022 caam_jr_queue_pair_setup(
1023 struct rte_cryptodev *dev, uint16_t qp_id,
1024 __rte_unused const struct rte_cryptodev_qp_conf *qp_conf,
1025 __rte_unused int socket_id,
1026 __rte_unused struct rte_mempool *session_pool)
1028 struct sec_job_ring_t *internals;
1029 struct caam_jr_qp *qp = NULL;
1031 PMD_INIT_FUNC_TRACE();
1032 CAAM_JR_DEBUG("dev =%p, queue =%d, conf =%p", dev, qp_id, qp_conf);
1034 internals = dev->data->dev_private;
1035 if (qp_id >= internals->max_nb_queue_pairs) {
1036 CAAM_JR_ERR("Max supported qpid %d",
1037 internals->max_nb_queue_pairs);
/* All queue pairs share the device's single job ring. */
1041 qp = &internals->qps[qp_id];
1042 qp->ring = internals;
1043 dev->data->queue_pairs[qp_id] = qp;
1048 /* Return the number of allocated queue pairs */
1050 caam_jr_queue_pair_count(struct rte_cryptodev *dev)
1052 PMD_INIT_FUNC_TRACE();
1054 return dev->data->nb_queue_pairs;
1057 /* Returns the size of the caam_jr session structure */
1059 caam_jr_sym_session_get_size(struct rte_cryptodev *dev __rte_unused)
1061 PMD_INIT_FUNC_TRACE();
1063 return sizeof(struct caam_jr_session);
/* Copy cipher transform parameters (algorithm, IV, key, direction)
 * into the session.  A zero-length key (NULL cipher) is valid, hence
 * the NULL check is conditioned on key.length > 0.
 */
1067 caam_jr_cipher_init(struct rte_cryptodev *dev __rte_unused,
1068 struct rte_crypto_sym_xform *xform,
1069 struct caam_jr_session *session)
1071 PMD_INIT_FUNC_TRACE();
1072 session->cipher_alg = xform->cipher.algo;
1073 session->iv.length = xform->cipher.iv.length;
1074 session->iv.offset = xform->cipher.iv.offset;
1075 session->cipher_key.data = rte_zmalloc(NULL, xform->cipher.key.length,
1076 RTE_CACHE_LINE_SIZE);
1077 if (session->cipher_key.data == NULL && xform->cipher.key.length > 0) {
1078 CAAM_JR_ERR("No Memory for cipher key\n");
1081 session->cipher_key.length = xform->cipher.key.length;
1083 memcpy(session->cipher_key.data, xform->cipher.key.data,
1084 xform->cipher.key.length);
1085 session->dir = (xform->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
/* Copy auth transform parameters (algorithm, key, digest length,
 * direction) into the session.  Zero-length keys (NULL auth) allowed.
 */
1092 caam_jr_auth_init(struct rte_cryptodev *dev __rte_unused,
1093 struct rte_crypto_sym_xform *xform,
1094 struct caam_jr_session *session)
1096 PMD_INIT_FUNC_TRACE();
1097 session->auth_alg = xform->auth.algo;
1098 session->auth_key.data = rte_zmalloc(NULL, xform->auth.key.length,
1099 RTE_CACHE_LINE_SIZE);
1100 if (session->auth_key.data == NULL && xform->auth.key.length > 0) {
1101 CAAM_JR_ERR("No Memory for auth key\n");
1104 session->auth_key.length = xform->auth.key.length;
1105 session->digest_length = xform->auth.digest_length;
1107 memcpy(session->auth_key.data, xform->auth.key.data,
1108 xform->auth.key.length);
1109 session->dir = (xform->auth.op == RTE_CRYPTO_AUTH_OP_GENERATE) ?
/* Copy AEAD transform parameters (algorithm, IV, AAD length as
 * auth_only_len, key, digest length, direction) into the session.
 */
1116 caam_jr_aead_init(struct rte_cryptodev *dev __rte_unused,
1117 struct rte_crypto_sym_xform *xform,
1118 struct caam_jr_session *session)
1120 PMD_INIT_FUNC_TRACE();
1121 session->aead_alg = xform->aead.algo;
1122 session->iv.length = xform->aead.iv.length;
1123 session->iv.offset = xform->aead.iv.offset;
/* AAD bytes are authenticated only, never ciphered. */
1124 session->auth_only_len = xform->aead.aad_length;
1125 session->aead_key.data = rte_zmalloc(NULL, xform->aead.key.length,
1126 RTE_CACHE_LINE_SIZE);
1127 if (session->aead_key.data == NULL && xform->aead.key.length > 0) {
1128 CAAM_JR_ERR("No Memory for aead key\n");
1131 session->aead_key.length = xform->aead.key.length;
1132 session->digest_length = xform->aead.digest_length;
1134 memcpy(session->aead_key.data, xform->aead.key.data,
1135 xform->aead.key.length);
1136 session->dir = (xform->aead.op == RTE_CRYPTO_AEAD_OP_ENCRYPT) ?
/* Parse the transform chain into a session.  Supported chains:
 * cipher-only, auth-only, AEAD, cipher-then-auth (encrypt) and
 * auth-then-cipher (decrypt).  On the error path key buffers are
 * freed and the session zeroed.
 */
1143 caam_jr_set_session_parameters(struct rte_cryptodev *dev,
1144 struct rte_crypto_sym_xform *xform, void *sess)
1146 struct sec_job_ring_t *internals = dev->data->dev_private;
1147 struct caam_jr_session *session = sess;
1149 PMD_INIT_FUNC_TRACE();
1151 if (unlikely(sess == NULL)) {
1152 CAAM_JR_ERR("invalid session struct");
1156 /* Default IV length = 0 */
1157 session->iv.length = 0;
/* Cipher Only */
1160 if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER && xform->next == NULL) {
1161 session->auth_alg = RTE_CRYPTO_AUTH_NULL;
1162 caam_jr_cipher_init(dev, xform, session);
1164 /* Authentication Only */
1165 } else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
1166 xform->next == NULL) {
1167 session->cipher_alg = RTE_CRYPTO_CIPHER_NULL;
1168 caam_jr_auth_init(dev, xform, session);
1170 /* Cipher then Authenticate */
1171 } else if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
1172 xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
1173 if (xform->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) {
1174 caam_jr_cipher_init(dev, xform, session);
1175 caam_jr_auth_init(dev, xform->next, session);
1177 CAAM_JR_ERR("Not supported: Auth then Cipher");
1181 /* Authenticate then Cipher */
1182 } else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
1183 xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
1184 if (xform->next->cipher.op == RTE_CRYPTO_CIPHER_OP_DECRYPT) {
1185 caam_jr_auth_init(dev, xform, session);
1186 caam_jr_cipher_init(dev, xform->next, session);
1188 CAAM_JR_ERR("Not supported: Auth then Cipher");
1192 /* AEAD operation for AES-GCM kind of Algorithms */
1193 } else if (xform->type == RTE_CRYPTO_SYM_XFORM_AEAD &&
1194 xform->next == NULL) {
1195 caam_jr_aead_init(dev, xform, session);
1198 CAAM_JR_ERR("Invalid crypto type");
/* Per-op contexts for this session come from the device pool. */
1201 session->ctx_pool = internals->ctx_pool;
/* Error path: release any allocated key material. */
1206 rte_free(session->cipher_key.data);
1207 rte_free(session->auth_key.data);
1208 memset(session, 0, sizeof(struct caam_jr_session));
/* Cryptodev session-configure callback: obtain private session memory
 * from the mempool, populate it from the xform chain and attach it to
 * the generic session handle; returns the object on failure.
 */
1214 caam_jr_sym_session_configure(struct rte_cryptodev *dev,
1215 struct rte_crypto_sym_xform *xform,
1216 struct rte_cryptodev_sym_session *sess,
1217 struct rte_mempool *mempool)
1219 void *sess_private_data;
1222 PMD_INIT_FUNC_TRACE();
1224 if (rte_mempool_get(mempool, &sess_private_data)) {
1225 CAAM_JR_ERR("Couldn't get object from session mempool");
1229 memset(sess_private_data, 0, sizeof(struct caam_jr_session));
1230 ret = caam_jr_set_session_parameters(dev, xform, sess_private_data);
1232 CAAM_JR_ERR("failed to configure session parameters");
1233 /* Return session to mempool */
1234 rte_mempool_put(mempool, sess_private_data);
1238 set_sym_session_private_data(sess, dev->driver_id, sess_private_data);
1243 /* Clear the memory of session so it doesn't leave key material behind */
/* Cryptodev session-clear callback: free key buffers, zero the private
 * session memory so no key material is left behind, detach it from the
 * generic session and return it to its mempool.
 */
1245 caam_jr_sym_session_clear(struct rte_cryptodev *dev,
1246 struct rte_cryptodev_sym_session *sess)
1248 uint8_t index = dev->driver_id;
1249 void *sess_priv = get_sym_session_private_data(sess, index);
1250 struct caam_jr_session *s = (struct caam_jr_session *)sess_priv;
1252 PMD_INIT_FUNC_TRACE();
1255 struct rte_mempool *sess_mp = rte_mempool_from_obj(sess_priv);
1257 rte_free(s->cipher_key.data);
1258 rte_free(s->auth_key.data);
/* NOTE(review): plain memset may be elided by the compiler for
 * secret wiping in general; here the object stays live (returned
 * to the pool), so the clear is effective.
 */
1259 memset(s, 0, sizeof(struct caam_jr_session));
1260 set_sym_session_private_data(sess, index, NULL);
1261 rte_mempool_put(sess_mp, sess_priv);
1266 caam_jr_dev_configure(struct rte_cryptodev *dev,
1267 struct rte_cryptodev_config *config __rte_unused)
1270 struct sec_job_ring_t *internals;
1272 PMD_INIT_FUNC_TRACE();
1274 internals = dev->data->dev_private;
1275 sprintf(str, "ctx_pool_%d", dev->data->dev_id);
1276 if (!internals->ctx_pool) {
1277 internals->ctx_pool = rte_mempool_create((const char *)str,
1279 sizeof(struct caam_jr_op_ctx),
1280 CTX_POOL_CACHE_SIZE, 0,
1281 NULL, NULL, NULL, NULL,
1283 if (!internals->ctx_pool) {
1284 CAAM_JR_ERR("%s create failed\n", str);
1288 CAAM_JR_INFO("mempool already created for dev_id : %d",
/* dev_start op: no-op — the job ring is already started at init time. */
1295 caam_jr_dev_start(struct rte_cryptodev *dev __rte_unused)
1297 PMD_INIT_FUNC_TRACE();
/* dev_stop op: no-op — teardown happens in dev_close/uninit. */
1302 caam_jr_dev_stop(struct rte_cryptodev *dev __rte_unused)
1304 PMD_INIT_FUNC_TRACE();
/*
 * dev_close op: release the context-object mempool created by
 * caam_jr_dev_configure(). NULL the pointer so a later configure call
 * recreates the pool instead of using a freed one.
 */
1308 caam_jr_dev_close(struct rte_cryptodev *dev)
1310 struct sec_job_ring_t *internals;
1312 PMD_INIT_FUNC_TRACE();
1317 internals = dev->data->dev_private;
1318 rte_mempool_free(internals->ctx_pool);
1319 internals->ctx_pool = NULL;
/*
 * dev_infos_get op: report queue-pair/session limits stored on the job
 * ring, the device feature flags, and this PMD's capability table.
 */
1325 caam_jr_dev_infos_get(struct rte_cryptodev *dev,
1326 struct rte_cryptodev_info *info)
1328 struct sec_job_ring_t *internals = dev->data->dev_private;
1330 PMD_INIT_FUNC_TRACE();
1332 info->max_nb_queue_pairs = internals->max_nb_queue_pairs;
1333 info->feature_flags = dev->feature_flags;
/* Static capability array provided by caam_jr_capabilities.h */
1334 info->capabilities = caam_jr_get_cryptodev_capabilities();
1335 info->sym.max_nb_sessions = internals->max_nb_sessions;
1336 info->driver_id = cryptodev_driver_id;
/* Cryptodev ops table wired into every caam_jr device at init time. */
1340 static struct rte_cryptodev_ops caam_jr_ops = {
1341 .dev_configure = caam_jr_dev_configure,
1342 .dev_start = caam_jr_dev_start,
1343 .dev_stop = caam_jr_dev_stop,
1344 .dev_close = caam_jr_dev_close,
1345 .dev_infos_get = caam_jr_dev_infos_get,
1346 .queue_pair_setup = caam_jr_queue_pair_setup,
1347 .queue_pair_release = caam_jr_queue_pair_release,
1348 .queue_pair_count = caam_jr_queue_pair_count,
1349 .sym_session_get_size = caam_jr_sym_session_get_size,
1350 .sym_session_configure = caam_jr_sym_session_configure,
1351 .sym_session_clear = caam_jr_sym_session_clear
1355 /* @brief Flush job rings of any processed descs.
1356 * The processed descs are silently dropped,
1357 * WITHOUT being notified to UA.
1360 close_job_ring(struct sec_job_ring_t *job_ring)
1362 PMD_INIT_FUNC_TRACE();
/* irq_fd != 0 is used as the "ring is open" marker (see init_job_ring,
 * which scans for irq_fd == 0 to find a free slot).
 */
1363 if (job_ring->irq_fd) {
1364 /* Producer index is frozen. If consumer index is not equal
1365 * with producer index, then we have descs to flush.
1367 while (job_ring->pidx != job_ring->cidx)
1368 hw_flush_job_ring(job_ring, false, NULL);
1370 /* free the uio job ring */
1371 free_job_ring(job_ring->irq_fd);
/* Mark the slot free so init_job_ring() can reuse it */
1372 job_ring->irq_fd = 0;
/* Release the DMA-able input/output ring memory */
1373 caam_jr_dma_free(job_ring->input_ring);
1374 caam_jr_dma_free(job_ring->output_ring);
1379 /** @brief Release the software and hardware resources tied to a job ring.
1380 * @param [in] job_ring The job ring
1382 * @retval 0 for success
1383 * @retval -1 for error
1386 shutdown_job_ring(struct sec_job_ring_t *job_ring)
1390 PMD_INIT_FUNC_TRACE();
1391 ASSERT(job_ring != NULL);
/* Reset/stop the ring in SEC hardware first */
1392 ret = hw_shutdown_job_ring(job_ring);
1393 SEC_ASSERT(ret == 0, ret,
1394 "Failed to shutdown hardware job ring %p",
/* Undo interrupt coalescing if it was enabled in init_job_ring() */
1397 if (job_ring->coalescing_en)
1398 hw_job_ring_disable_coalescing(job_ring);
/* IRQs are only armed outside pure-poll mode; disable them symmetrically */
1400 if (job_ring->jr_mode != SEC_NOTIFICATION_TYPE_POLL) {
1401 ret = caam_jr_disable_irqs(job_ring->irq_fd);
1402 SEC_ASSERT(ret == 0, ret,
1403 "Failed to disable irqs for job ring %p",
1411 * @brief Release the resources used by the SEC user space driver.
1413 * Reset and release SEC's job rings indicated by the User Application at
1414 * init_job_ring() and free any memory allocated internally.
1415 * Call once during application tear down.
1417 * @note In case there are any descriptors in-flight (descriptors received by
1418 * SEC driver for processing and for which no response was yet provided to UA),
1419 * the descriptors are discarded without any notifications to User Application.
1421 * @retval ::0 is returned for a successful execution
1422 * @retval ::-1 is returned if SEC driver release is in progress
1425 caam_jr_dev_uninit(struct rte_cryptodev *dev)
1427 struct sec_job_ring_t *internals;
1429 PMD_INIT_FUNC_TRACE();
1433 internals = dev->data->dev_private;
/* Security context was allocated in caam_jr_dev_init() */
1434 rte_free(dev->security_ctx);
1436 /* If any descriptors in flight , poll and wait
1437 * until all descriptors are received and silently discarded.
/* Hardware teardown first, then software/UIO/DMA teardown */
1440 shutdown_job_ring(internals);
1441 close_job_ring(internals);
1442 rte_mempool_free(internals->ctx_pool);
1445 CAAM_JR_INFO("Closing crypto device %s", dev->data->name);
1447 /* last caam jr instance) */
/* When no rings remain, drop the global driver back to IDLE so a later
 * probe re-runs sec_configure().
 */
1448 if (g_job_rings_no == 0)
1449 g_driver_state = SEC_DRIVER_STATE_IDLE;
1454 /* @brief Initialize the software and hardware resources tied to a job ring.
1455 * @param [in] jr_mode; Model to be used by SEC Driver to receive
1456 * notifications from SEC. Can be either
1457 * of the three: #SEC_NOTIFICATION_TYPE_NAPI
1458 * #SEC_NOTIFICATION_TYPE_IRQ or
1459 * #SEC_NOTIFICATION_TYPE_POLL
1460 * @param [in] NAPI_mode The NAPI work mode to configure a job ring at
1461 * startup. Used only when #SEC_NOTIFICATION_TYPE
1462 * is set to #SEC_NOTIFICATION_TYPE_NAPI.
1463 * @param [in] irq_coalescing_timer This value determines the maximum
1464 * amount of time after processing a
1465 * descriptor before raising an interrupt.
1466 * @param [in] irq_coalescing_count This value determines how many
1467 * descriptors are completed before
1468 * raising an interrupt.
1469 * @param [in] reg_base_addr, The job ring base address register
1470 * @param [in] irq_id The job ring interrupt identification number.
1471 * @retval job_ring_handle for successful job ring configuration
1472 * @retval NULL on error
1476 init_job_ring(void *reg_base_addr, uint32_t irq_id)
1478 struct sec_job_ring_t *job_ring = NULL;
/* This driver hard-codes poll mode and no coalescing for now */
1480 int jr_mode = SEC_NOTIFICATION_TYPE_POLL;
1482 int irq_coalescing_timer = 0;
1483 int irq_coalescing_count = 0;
/* Find a free slot in the global ring table: irq_fd == 0 means free */
1485 for (i = 0; i < MAX_SEC_JOB_RINGS; i++) {
1486 if (g_job_rings[i].irq_fd == 0) {
1487 job_ring = &g_job_rings[i];
1492 if (job_ring == NULL) {
1493 CAAM_JR_ERR("No free job ring\n");
1497 job_ring->register_base_addr = reg_base_addr;
1498 job_ring->jr_mode = jr_mode;
1499 job_ring->napi_mode = 0;
/* Storing irq_id also marks this slot as in-use (see scan above) */
1500 job_ring->irq_fd = irq_id;
1502 /* Allocate mem for input and output ring */
1504 /* Allocate memory for input ring */
1505 job_ring->input_ring = caam_jr_dma_mem_alloc(L1_CACHE_BYTES,
1506 SEC_DMA_MEM_INPUT_RING_SIZE);
1507 memset(job_ring->input_ring, 0, SEC_DMA_MEM_INPUT_RING_SIZE);
1509 /* Allocate memory for output ring */
1510 job_ring->output_ring = caam_jr_dma_mem_alloc(L1_CACHE_BYTES,
1511 SEC_DMA_MEM_OUTPUT_RING_SIZE);
1512 memset(job_ring->output_ring, 0, SEC_DMA_MEM_OUTPUT_RING_SIZE);
1514 /* Reset job ring in SEC hw and configure job ring registers */
1515 ret = hw_reset_job_ring(job_ring);
1517 CAAM_JR_ERR("Failed to reset hardware job ring");
/* Dead code with the hard-coded poll mode above, kept for completeness */
1521 if (jr_mode == SEC_NOTIFICATION_TYPE_NAPI) {
1522 /* When SEC US driver works in NAPI mode, the UA can select
1523 * if the driver starts with IRQs on or off.
1525 if (napi_mode == SEC_STARTUP_INTERRUPT_MODE) {
1526 CAAM_JR_INFO("Enabling DONE IRQ generationon job ring - %p",
1528 ret = caam_jr_enable_irqs(job_ring->irq_fd);
1530 CAAM_JR_ERR("Failed to enable irqs for job ring");
1534 } else if (jr_mode == SEC_NOTIFICATION_TYPE_IRQ) {
1535 /* When SEC US driver works in pure interrupt mode,
1536 * IRQ's are always enabled.
1538 CAAM_JR_INFO("Enabling DONE IRQ generation on job ring - %p",
1540 ret = caam_jr_enable_irqs(job_ring->irq_fd);
1542 CAAM_JR_ERR("Failed to enable irqs for job ring");
/* Also dead with both coalescing knobs fixed at 0 above */
1546 if (irq_coalescing_timer || irq_coalescing_count) {
1547 hw_job_ring_set_coalescing_param(job_ring,
1548 irq_coalescing_timer,
1549 irq_coalescing_count);
1551 hw_job_ring_enable_coalescing(job_ring);
1552 job_ring->coalescing_en = 1;
1555 job_ring->jr_state = SEC_JOB_RING_STATE_STARTED;
1556 job_ring->max_nb_queue_pairs = RTE_CAAM_MAX_NB_SEC_QPS;
1557 job_ring->max_nb_sessions = RTE_CAAM_JR_PMD_MAX_NB_SESSIONS;
/* Error path: release rings in reverse order of allocation */
1561 caam_jr_dma_free(job_ring->output_ring);
1562 caam_jr_dma_free(job_ring->input_ring);
/*
 * Create one caam_jr cryptodev: bring up the global driver state on first
 * use, claim a UIO job ring, create the rte_cryptodev and wire ops, burst
 * functions and feature flags.
 * NOTE(review): error-path labels and returns fall in elided lines of
 * this excerpt.
 */
1568 caam_jr_dev_init(const char *name,
1569 struct rte_vdev_device *vdev,
1570 struct rte_cryptodev_pmd_init_params *init_params)
1572 struct rte_cryptodev *dev;
1573 struct uio_job_ring *job_ring;
1574 char str[RTE_CRYPTODEV_NAME_MAX_LEN];
1576 PMD_INIT_FUNC_TRACE();
1578 /* Validate driver state */
/* First device: probe UIO for available job rings once, globally */
1579 if (g_driver_state == SEC_DRIVER_STATE_IDLE) {
1580 g_job_rings_max = sec_configure();
1581 if (!g_job_rings_max) {
1582 CAAM_JR_ERR("No job ring detected on UIO !!!!");
1585 /* Update driver state */
1586 g_driver_state = SEC_DRIVER_STATE_STARTED;
1589 if (g_job_rings_no >= g_job_rings_max) {
1590 CAAM_JR_ERR("No more job rings available max=%d!!!!",
/* Reserve a UIO job ring for this device */
1595 job_ring = config_job_ring();
1596 if (job_ring == NULL) {
1597 CAAM_JR_ERR("failed to create job ring");
1601 snprintf(str, sizeof(str), "caam_jr%d", job_ring->jr_id);
1603 dev = rte_cryptodev_pmd_create(name, &vdev->device, init_params);
1605 CAAM_JR_ERR("failed to create cryptodev vdev");
1608 /*TODO free it during teardown*/
/* dev_private points at the sec_job_ring_t slot set up by init_job_ring */
1609 dev->data->dev_private = init_job_ring(job_ring->register_base_addr,
1612 if (!dev->data->dev_private) {
1613 CAAM_JR_ERR("Ring memory allocation failed\n");
1617 dev->driver_id = cryptodev_driver_id;
1618 dev->dev_ops = &caam_jr_ops;
1620 /* register rx/tx burst functions for data path */
1621 dev->dequeue_burst = caam_jr_dequeue_burst;
1622 dev->enqueue_burst = caam_jr_enqueue_burst;
1623 dev->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO |
1624 RTE_CRYPTODEV_FF_HW_ACCELERATED |
1625 RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING |
1626 RTE_CRYPTODEV_FF_SECURITY |
1627 RTE_CRYPTODEV_FF_IN_PLACE_SGL |
1628 RTE_CRYPTODEV_FF_OOP_SGL_IN_SGL_OUT |
1629 RTE_CRYPTODEV_FF_OOP_SGL_IN_LB_OUT |
1630 RTE_CRYPTODEV_FF_OOP_LB_IN_SGL_OUT |
1631 RTE_CRYPTODEV_FF_OOP_LB_IN_LB_OUT;
1633 /* For secondary processes, we don't initialise any further as primary
1634 * has already done this work. Only check we don't need a different
1637 if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
1638 CAAM_JR_WARN("Device already init by primary process");
1642 RTE_LOG(INFO, PMD, "%s cryptodev init\n", dev->data->name);
/* Error path: unwind device creation and release the UIO ring */
1647 caam_jr_dev_uninit(dev);
1648 rte_cryptodev_pmd_release_device(dev);
1650 free_job_ring(job_ring->uio_fd);
1652 CAAM_JR_ERR("driver %s: cryptodev_caam_jr_create failed",
1658 /** Initialise CAAM JR crypto device */
1660 cryptodev_caam_jr_probe(struct rte_vdev_device *vdev)
1662 struct rte_cryptodev_pmd_init_params init_params = {
1664 sizeof(struct sec_job_ring_t),
1666 RTE_CRYPTODEV_PMD_DEFAULT_MAX_NB_QUEUE_PAIRS
1669 const char *input_args;
1671 name = rte_vdev_device_name(vdev);
/* Allow "max_nb_queue_pairs=<int>" etc. via vdev args */
1675 input_args = rte_vdev_device_args(vdev);
1676 rte_cryptodev_pmd_parse_input_args(&init_params, input_args);
1678 /* if sec device version is not configured */
/* Derive the SEC era from the device tree when RTA has no era yet */
1679 if (!rta_get_sec_era()) {
1680 const struct device_node *caam_node;
1682 for_each_compatible_node(caam_node, NULL, "fsl,sec-v4.0") {
1683 const uint32_t *prop = of_get_property(caam_node,
/* Property is CAAM-endian in memory; convert before decoding */
1688 INTL_SEC_ERA(cpu_to_caam32(*prop)));
1693 #ifdef RTE_LIBRTE_PMD_CAAM_JR_BE
/* BE-compiled driver only supports sec era <= 8 */
1694 if (rta_get_sec_era() > RTA_SEC_ERA_8) {
1696 "CAAM is compiled in BE mode for device with sec era > 8???\n");
1701 return caam_jr_dev_init(name, vdev, &init_params);
1704 /** Uninitialise CAAM JR crypto device */
1706 cryptodev_caam_jr_remove(struct rte_vdev_device *vdev)
1708 struct rte_cryptodev *cryptodev;
1711 name = rte_vdev_device_name(vdev);
/* Look up the cryptodev created by probe; nothing to do if absent */
1715 cryptodev = rte_cryptodev_pmd_get_named_dev(name);
1716 if (cryptodev == NULL)
/* Release job ring / pools first, then destroy the cryptodev shell */
1719 caam_jr_dev_uninit(cryptodev);
1721 return rte_cryptodev_pmd_destroy(cryptodev);
/* Virtual-device driver hooks and EAL registration boilerplate */
1724 static struct rte_vdev_driver cryptodev_caam_jr_drv = {
1725 .probe = cryptodev_caam_jr_probe,
1726 .remove = cryptodev_caam_jr_remove
/* Per-driver handle used by the crypto framework to assign driver_id */
1729 static struct cryptodev_driver caam_jr_crypto_drv;
1731 RTE_PMD_REGISTER_VDEV(CRYPTODEV_NAME_CAAM_JR_PMD, cryptodev_caam_jr_drv);
1732 RTE_PMD_REGISTER_PARAM_STRING(CRYPTODEV_NAME_CAAM_JR_PMD,
1733 "max_nb_queue_pairs=<int>"
/* Fills cryptodev_driver_id used throughout this file */
1735 RTE_PMD_REGISTER_CRYPTO_DRIVER(caam_jr_crypto_drv, cryptodev_caam_jr_drv.driver,
1736 cryptodev_driver_id);
/* Constructor: register the PMD log type and default it to NOTICE level */
1738 RTE_INIT(caam_jr_init_log)
1740 caam_jr_logtype = rte_log_register("pmd.crypto.caam");
1741 if (caam_jr_logtype >= 0)
1742 rte_log_set_level(caam_jr_logtype, RTE_LOG_NOTICE);