1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright 2017-2019 NXP
10 #include <rte_byteorder.h>
11 #include <rte_common.h>
12 #include <rte_cryptodev_pmd.h>
13 #include <rte_crypto.h>
14 #include <rte_cryptodev.h>
15 #include <rte_bus_vdev.h>
16 #include <rte_malloc.h>
17 #include <rte_security_driver.h>
18 #include <rte_hexdump.h>
20 #include <caam_jr_capabilities.h>
21 #include <caam_jr_config.h>
22 #include <caam_jr_hw_specific.h>
23 #include <caam_jr_pvt.h>
24 #include <caam_jr_desc.h>
25 #include <caam_jr_log.h>
27 /* RTA header files */
28 #include <desc/common.h>
29 #include <desc/algo.h>
31 #ifdef RTE_LIBRTE_PMD_CAAM_JR_DEBUG
36 #define CRYPTODEV_NAME_CAAM_JR_PMD crypto_caam_jr
37 static uint8_t cryptodev_driver_id;
40 enum rta_sec_era rta_sec_era;
42 /* Lists the states possible for the SEC user space driver. */
43 enum sec_driver_state_e {
44 SEC_DRIVER_STATE_IDLE, /* Driver not initialized */
45 SEC_DRIVER_STATE_STARTED, /* Driver initialized and can be used*/
46 SEC_DRIVER_STATE_RELEASE, /* Driver release is in progress */
49 /* Job rings used for communication with SEC HW */
50 static struct sec_job_ring_t g_job_rings[MAX_SEC_JOB_RINGS];
52 /* The current state of SEC user space driver */
53 static enum sec_driver_state_e g_driver_state = SEC_DRIVER_STATE_IDLE;
55 /* The number of job rings used by SEC user space driver */
56 static int g_job_rings_no;
57 static int g_job_rings_max;
59 struct sec_outring_entry {
60 phys_addr_t desc; /* Pointer to completed descriptor */
61 uint32_t status; /* Status for completed descriptor */
64 /* virtual address conversion when mempool support is available for ctx */
65 static inline phys_addr_t
66 caam_jr_vtop_ctx(struct caam_jr_op_ctx *ctx, void *vaddr)
68 return (size_t)vaddr - ctx->vtop_offset;
72 caam_jr_op_ending(struct caam_jr_op_ctx *ctx)
74 /* report op status to sym->op and then free the ctx memory */
75 rte_mempool_put(ctx->ctx_pool, (void *)ctx);
78 static inline struct caam_jr_op_ctx *
79 caam_jr_alloc_ctx(struct caam_jr_session *ses)
81 struct caam_jr_op_ctx *ctx;
84 ret = rte_mempool_get(ses->ctx_pool, (void **)(&ctx));
86 CAAM_JR_DP_WARN("Alloc sec descriptor failed!");
90 * Clear SG memory. There are 16 SG entries of 16 bytes each.
91 * One call to dcbz_64() clears 64 bytes, hence it is called 4 times
92 * to clear all the SG entries. caam_jr_alloc_ctx() is called for
93 * each packet; memset() is costlier than dcbz_64().
95 dcbz_64(&ctx->sg[SG_CACHELINE_0]);
96 dcbz_64(&ctx->sg[SG_CACHELINE_1]);
97 dcbz_64(&ctx->sg[SG_CACHELINE_2]);
98 dcbz_64(&ctx->sg[SG_CACHELINE_3]);
100 ctx->ctx_pool = ses->ctx_pool;
101 ctx->vtop_offset = (size_t) ctx - rte_mempool_virt2iova(ctx);
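/* Caching the virt-to-IOVA offset of this ctx object lets
 * caam_jr_vtop_ctx() translate any address inside the ctx (SG table,
 * job descriptor, digest) with a single subtraction.
 */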
107 void caam_jr_stats_get(struct rte_cryptodev *dev,
108 struct rte_cryptodev_stats *stats)
110 struct caam_jr_qp **qp = (struct caam_jr_qp **)
111 dev->data->queue_pairs;
114 PMD_INIT_FUNC_TRACE();
116 CAAM_JR_ERR("Invalid stats ptr NULL");
119 for (i = 0; i < dev->data->nb_queue_pairs; i++) {
121 CAAM_JR_WARN("Uninitialised queue pair");
125 stats->enqueued_count += qp[i]->tx_pkts;
126 stats->dequeued_count += qp[i]->rx_pkts;
127 stats->enqueue_err_count += qp[i]->tx_errs;
128 stats->dequeue_err_count += qp[i]->rx_errs;
129 CAAM_JR_INFO("extra stats:\n\tRX Poll ERR = %" PRIu64
130 "\n\tTX Ring Full = %" PRIu64,
132 qp[i]->tx_ring_full);
137 void caam_jr_stats_reset(struct rte_cryptodev *dev)
140 struct caam_jr_qp **qp = (struct caam_jr_qp **)
141 (dev->data->queue_pairs);
143 PMD_INIT_FUNC_TRACE();
144 for (i = 0; i < dev->data->nb_queue_pairs; i++) {
146 CAAM_JR_WARN("Uninitialised queue pair");
151 qp[i]->rx_poll_err = 0;
154 qp[i]->tx_ring_full = 0;
159 is_cipher_only(struct caam_jr_session *ses)
161 return ((ses->cipher_alg != RTE_CRYPTO_CIPHER_NULL) &&
162 (ses->auth_alg == RTE_CRYPTO_AUTH_NULL));
166 is_auth_only(struct caam_jr_session *ses)
168 return ((ses->cipher_alg == RTE_CRYPTO_CIPHER_NULL) &&
169 (ses->auth_alg != RTE_CRYPTO_AUTH_NULL));
173 is_aead(struct caam_jr_session *ses)
175 return ((ses->cipher_alg == 0) &&
176 (ses->auth_alg == 0) &&
177 (ses->aead_alg != 0));
181 is_auth_cipher(struct caam_jr_session *ses)
183 return ((ses->cipher_alg != RTE_CRYPTO_CIPHER_NULL) &&
184 (ses->auth_alg != RTE_CRYPTO_AUTH_NULL) &&
185 (ses->proto_alg != RTE_SECURITY_PROTOCOL_IPSEC));
189 is_proto_ipsec(struct caam_jr_session *ses)
191 return (ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC);
195 is_encode(struct caam_jr_session *ses)
197 return ses->dir == DIR_ENC;
201 is_decode(struct caam_jr_session *ses)
203 return ses->dir == DIR_DEC;
207 caam_auth_alg(struct caam_jr_session *ses, struct alginfo *alginfo_a)
209 switch (ses->auth_alg) {
210 case RTE_CRYPTO_AUTH_NULL:
211 ses->digest_length = 0;
213 case RTE_CRYPTO_AUTH_MD5_HMAC:
215 (ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
216 OP_PCL_IPSEC_HMAC_MD5_96 : OP_ALG_ALGSEL_MD5;
217 alginfo_a->algmode = OP_ALG_AAI_HMAC;
219 case RTE_CRYPTO_AUTH_SHA1_HMAC:
221 (ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
222 OP_PCL_IPSEC_HMAC_SHA1_96 : OP_ALG_ALGSEL_SHA1;
223 alginfo_a->algmode = OP_ALG_AAI_HMAC;
225 case RTE_CRYPTO_AUTH_SHA224_HMAC:
227 (ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
228 OP_PCL_IPSEC_HMAC_SHA1_160 : OP_ALG_ALGSEL_SHA224;
229 alginfo_a->algmode = OP_ALG_AAI_HMAC;
231 case RTE_CRYPTO_AUTH_SHA256_HMAC:
233 (ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
234 OP_PCL_IPSEC_HMAC_SHA2_256_128 : OP_ALG_ALGSEL_SHA256;
235 alginfo_a->algmode = OP_ALG_AAI_HMAC;
237 case RTE_CRYPTO_AUTH_SHA384_HMAC:
239 (ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
240 OP_PCL_IPSEC_HMAC_SHA2_384_192 : OP_ALG_ALGSEL_SHA384;
241 alginfo_a->algmode = OP_ALG_AAI_HMAC;
243 case RTE_CRYPTO_AUTH_SHA512_HMAC:
245 (ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
246 OP_PCL_IPSEC_HMAC_SHA2_512_256 : OP_ALG_ALGSEL_SHA512;
247 alginfo_a->algmode = OP_ALG_AAI_HMAC;
250 CAAM_JR_DEBUG("unsupported auth alg %u", ses->auth_alg);
255 caam_cipher_alg(struct caam_jr_session *ses, struct alginfo *alginfo_c)
257 switch (ses->cipher_alg) {
258 case RTE_CRYPTO_CIPHER_NULL:
260 case RTE_CRYPTO_CIPHER_AES_CBC:
262 (ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
263 OP_PCL_IPSEC_AES_CBC : OP_ALG_ALGSEL_AES;
264 alginfo_c->algmode = OP_ALG_AAI_CBC;
266 case RTE_CRYPTO_CIPHER_3DES_CBC:
268 (ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
269 OP_PCL_IPSEC_3DES : OP_ALG_ALGSEL_3DES;
270 alginfo_c->algmode = OP_ALG_AAI_CBC;
272 case RTE_CRYPTO_CIPHER_AES_CTR:
274 (ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
275 OP_PCL_IPSEC_AES_CTR : OP_ALG_ALGSEL_AES;
276 alginfo_c->algmode = OP_ALG_AAI_CTR;
279 CAAM_JR_DEBUG("unsupported cipher alg %d", ses->cipher_alg);
284 caam_aead_alg(struct caam_jr_session *ses, struct alginfo *alginfo)
286 switch (ses->aead_alg) {
287 case RTE_CRYPTO_AEAD_AES_GCM:
288 alginfo->algtype = OP_ALG_ALGSEL_AES;
289 alginfo->algmode = OP_ALG_AAI_GCM;
292 CAAM_JR_DEBUG("unsupported AEAD alg %d", ses->aead_alg);
296 /* Prepare the command block (CDB) of the session */
298 caam_jr_prep_cdb(struct caam_jr_session *ses)
300 struct alginfo alginfo_c = {0}, alginfo_a = {0}, alginfo = {0};
301 int32_t shared_desc_len = 0;
304 #if CAAM_BYTE_ORDER == CORE_BYTE_ORDER
311 caam_jr_dma_free(ses->cdb);
313 cdb = caam_jr_dma_mem_alloc(L1_CACHE_BYTES, sizeof(struct sec_cdb));
315 CAAM_JR_ERR("failed to allocate memory for cdb\n");
321 memset(cdb, 0, sizeof(struct sec_cdb));
323 if (is_cipher_only(ses)) {
324 caam_cipher_alg(ses, &alginfo_c);
325 if (alginfo_c.algtype == (unsigned int)CAAM_JR_ALG_UNSUPPORT) {
326 CAAM_JR_ERR("unsupported cipher alg");
331 alginfo_c.key = (size_t)ses->cipher_key.data;
332 alginfo_c.keylen = ses->cipher_key.length;
333 alginfo_c.key_enc_flags = 0;
334 alginfo_c.key_type = RTA_DATA_IMM;
336 shared_desc_len = cnstr_shdsc_blkcipher(
338 swap, SHR_NEVER, &alginfo_c,
341 } else if (is_auth_only(ses)) {
342 caam_auth_alg(ses, &alginfo_a);
343 if (alginfo_a.algtype == (unsigned int)CAAM_JR_ALG_UNSUPPORT) {
344 CAAM_JR_ERR("unsupported auth alg");
349 alginfo_a.key = (size_t)ses->auth_key.data;
350 alginfo_a.keylen = ses->auth_key.length;
351 alginfo_a.key_enc_flags = 0;
352 alginfo_a.key_type = RTA_DATA_IMM;
354 shared_desc_len = cnstr_shdsc_hmac(cdb->sh_desc, true,
355 swap, SHR_NEVER, &alginfo_a,
358 } else if (is_aead(ses)) {
359 caam_aead_alg(ses, &alginfo);
360 if (alginfo.algtype == (unsigned int)CAAM_JR_ALG_UNSUPPORT) {
361 CAAM_JR_ERR("unsupported aead alg");
365 alginfo.key = (size_t)ses->aead_key.data;
366 alginfo.keylen = ses->aead_key.length;
367 alginfo.key_enc_flags = 0;
368 alginfo.key_type = RTA_DATA_IMM;
370 if (ses->dir == DIR_ENC)
371 shared_desc_len = cnstr_shdsc_gcm_encap(
372 cdb->sh_desc, true, swap,
377 shared_desc_len = cnstr_shdsc_gcm_decap(
378 cdb->sh_desc, true, swap,
383 caam_cipher_alg(ses, &alginfo_c);
384 if (alginfo_c.algtype == (unsigned int)CAAM_JR_ALG_UNSUPPORT) {
385 CAAM_JR_ERR("unsupported cipher alg");
390 alginfo_c.key = (size_t)ses->cipher_key.data;
391 alginfo_c.keylen = ses->cipher_key.length;
392 alginfo_c.key_enc_flags = 0;
393 alginfo_c.key_type = RTA_DATA_IMM;
395 caam_auth_alg(ses, &alginfo_a);
396 if (alginfo_a.algtype == (unsigned int)CAAM_JR_ALG_UNSUPPORT) {
397 CAAM_JR_ERR("unsupported auth alg");
402 alginfo_a.key = (size_t)ses->auth_key.data;
403 alginfo_a.keylen = ses->auth_key.length;
404 alginfo_a.key_enc_flags = 0;
405 alginfo_a.key_type = RTA_DATA_IMM;
407 cdb->sh_desc[0] = alginfo_c.keylen;
408 cdb->sh_desc[1] = alginfo_a.keylen;
409 err = rta_inline_query(IPSEC_AUTH_VAR_AES_DEC_BASE_DESC_LEN,
411 (unsigned int *)cdb->sh_desc,
412 &cdb->sh_desc[2], 2);
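/* rta_inline_query() reports, per key, whether it can be placed inline
 * in the shared descriptor; the result bits left in sh_desc[2] are
 * checked below to choose between an immediate key (RTA_DATA_IMM) and
 * a key referenced by physical address (RTA_DATA_PTR).
 */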
415 CAAM_JR_ERR("Crypto: Incorrect key lengths");
419 if (cdb->sh_desc[2] & 1)
420 alginfo_c.key_type = RTA_DATA_IMM;
422 alginfo_c.key = (size_t)caam_jr_mem_vtop(
423 (void *)(size_t)alginfo_c.key);
424 alginfo_c.key_type = RTA_DATA_PTR;
426 if (cdb->sh_desc[2] & (1<<1))
427 alginfo_a.key_type = RTA_DATA_IMM;
429 alginfo_a.key = (size_t)caam_jr_mem_vtop(
430 (void *)(size_t)alginfo_a.key);
431 alginfo_a.key_type = RTA_DATA_PTR;
436 if (is_proto_ipsec(ses)) {
437 if (ses->dir == DIR_ENC) {
438 shared_desc_len = cnstr_shdsc_ipsec_new_encap(
440 true, swap, SHR_SERIAL,
442 (uint8_t *)&ses->ip4_hdr,
443 &alginfo_c, &alginfo_a);
444 } else if (ses->dir == DIR_DEC) {
445 shared_desc_len = cnstr_shdsc_ipsec_new_decap(
447 true, swap, SHR_SERIAL,
449 &alginfo_c, &alginfo_a);
452 /* Auth_only_len is overwritten in fd for each job */
453 shared_desc_len = cnstr_shdsc_authenc(cdb->sh_desc,
454 true, swap, SHR_SERIAL,
455 &alginfo_c, &alginfo_a,
457 ses->digest_length, ses->dir);
461 if (shared_desc_len < 0) {
462 CAAM_JR_ERR("error in preparing command block");
463 return shared_desc_len;
467 SEC_DUMP_DESC(cdb->sh_desc);
470 cdb->sh_hdr.hi.field.idlen = shared_desc_len;
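/* The shared-descriptor length recorded in the CDB header is what the
 * per-operation job descriptors later pass to SEC_JD_SET_SD() together
 * with the descriptor's physical address.
 */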
475 /* @brief Poll the HW for already processed jobs in the JR
476 * and silently discard the available jobs or notify them to UA
477 * with indicated error code.
479 * @param [in,out] job_ring The job ring to poll.
480 * @param [in] do_notify Can be #TRUE or #FALSE. Indicates if
481 * descriptors are to be discarded
482 * or notified to UA with given error_code.
483 * @param [out] notified_descs Number of notified descriptors. Can be NULL
484 * if do_notify is #FALSE
487 hw_flush_job_ring(struct sec_job_ring_t *job_ring,
489 uint32_t *notified_descs)
491 int32_t jobs_no_to_discard = 0;
492 int32_t discarded_descs_no = 0;
494 CAAM_JR_DEBUG("Jr[%p] pi[%d] ci[%d].Flushing jr notify desc=[%d]",
495 job_ring, job_ring->pidx, job_ring->cidx, do_notify);
497 jobs_no_to_discard = hw_get_no_finished_jobs(job_ring);
499 /* Discard all jobs */
500 CAAM_JR_DEBUG("Jr[%p] pi[%d] ci[%d].Discarding %d descs",
501 job_ring, job_ring->pidx, job_ring->cidx,
504 while (jobs_no_to_discard > discarded_descs_no) {
505 discarded_descs_no++;
506 /* Now increment the consumer index for the current job ring,
507 * AFTER saving the job in a temporary location!
510 job_ring->cidx = SEC_CIRCULAR_COUNTER(job_ring->cidx,
513 hw_remove_entries(job_ring, 1);
516 if (do_notify == true) {
517 ASSERT(notified_descs != NULL);
518 *notified_descs = discarded_descs_no;
522 /* @brief Poll the HW for already processed jobs in the JR
523 * and notify the available jobs to UA.
525 * @param [in] job_ring The job ring to poll.
526 * @param [in] limit The maximum number of jobs to notify.
527 * If set to a negative value, all available jobs are
530 * @retval >=0 for number of jobs notified to UA.
531 * @retval -1 for error
534 hw_poll_job_ring(struct sec_job_ring_t *job_ring,
535 struct rte_crypto_op **ops, int32_t limit,
536 struct caam_jr_qp *jr_qp)
538 int32_t jobs_no_to_notify = 0; /* the number of done jobs to notify*/
539 int32_t number_of_jobs_available = 0;
540 int32_t notified_descs_no = 0;
541 uint32_t sec_error_code = 0;
542 struct job_descriptor *current_desc;
543 phys_addr_t current_desc_addr;
544 phys_addr_t *temp_addr;
545 struct caam_jr_op_ctx *ctx;
547 /* TODO: check that ops have memory */
548 /* check here if any JR error that cannot be written
549 * in the output status word has occurred
551 if (JR_REG_JRINT_JRE_EXTRACT(GET_JR_REG(JRINT, job_ring))) {
552 CAAM_JR_INFO("err received");
553 sec_error_code = JR_REG_JRINT_ERR_TYPE_EXTRACT(
554 GET_JR_REG(JRINT, job_ring));
555 if (unlikely(sec_error_code)) {
556 hw_job_ring_error_print(job_ring, sec_error_code);
560 /* compute the number of jobs available in the job ring based on the
561 * producer and consumer index values.
563 number_of_jobs_available = hw_get_no_finished_jobs(job_ring);
564 /* Compute the number of notifications that need to be raised to UA
565 * If limit > total number of done jobs -> notify all done jobs
566 * If limit = 0 -> error
567 * If limit < total number of done jobs -> notify a number
568 * of done jobs equal to limit
570 jobs_no_to_notify = (limit > number_of_jobs_available) ?
571 number_of_jobs_available : limit;
573 "Jr[%p] pi[%d] ci[%d].limit =%d Available=%d.Jobs to notify=%d",
574 job_ring, job_ring->pidx, job_ring->cidx,
575 limit, number_of_jobs_available, jobs_no_to_notify);
579 while (jobs_no_to_notify > notified_descs_no) {
580 static uint64_t false_alarm;
581 static uint64_t real_poll;
583 /* Get job status here */
584 sec_error_code = job_ring->output_ring[job_ring->cidx].status;
585 /* Get completed descriptor */
586 temp_addr = &(job_ring->output_ring[job_ring->cidx].desc);
587 current_desc_addr = (phys_addr_t)sec_read_addr(temp_addr);
590 /* TODO: check if it is a false alarm - no desc present */
591 if (!current_desc_addr) {
593 printf("false alarm %" PRIu64 " real %" PRIu64
594 " sec_err = 0x%x cidx = %d\n",
595 false_alarm, real_poll,
596 sec_error_code, job_ring->cidx);
597 rte_panic("CAAM JR descriptor NULL");
598 return notified_descs_no;
600 current_desc = (struct job_descriptor *)
601 caam_jr_dma_ptov(current_desc_addr);
602 /* now increment the consumer index for the current job ring,
603 * AFTER saving job in temporary location!
605 job_ring->cidx = SEC_CIRCULAR_COUNTER(job_ring->cidx,
607 /* Signal that the job has been processed and the slot is free*/
608 hw_remove_entries(job_ring, 1);
609 /*TODO for multiple ops, packets*/
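/* The job descriptor handed to the HW is embedded in caam_jr_op_ctx,
 * so the op context (and with it the rte_crypto_op) is recovered from
 * the completed descriptor address via container_of().
 */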
610 ctx = container_of(current_desc, struct caam_jr_op_ctx, jobdes);
611 if (unlikely(sec_error_code)) {
612 CAAM_JR_ERR("desc at cidx %d generated error 0x%x\n",
613 job_ring->cidx, sec_error_code);
614 hw_handle_job_ring_error(job_ring, sec_error_code);
615 /* TODO: improve with exact errors */
616 ctx->op->status = RTE_CRYPTO_OP_STATUS_ERROR;
619 ctx->op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
621 if (ctx->op->sym->m_dst) {
622 rte_hexdump(stdout, "PROCESSED",
623 rte_pktmbuf_mtod(ctx->op->sym->m_dst, void *),
624 rte_pktmbuf_data_len(ctx->op->sym->m_dst));
626 rte_hexdump(stdout, "PROCESSED",
627 rte_pktmbuf_mtod(ctx->op->sym->m_src, void *),
628 rte_pktmbuf_data_len(ctx->op->sym->m_src));
632 if (ctx->op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) {
635 if (ctx->op->sym->m_dst) {
636 /* TODO: check for IP header or other */
637 ip4_hdr = (struct ip *)
638 rte_pktmbuf_mtod(ctx->op->sym->m_dst, char*);
639 ctx->op->sym->m_dst->pkt_len =
640 rte_be_to_cpu_16(ip4_hdr->ip_len);
641 ctx->op->sym->m_dst->data_len =
642 rte_be_to_cpu_16(ip4_hdr->ip_len);
644 ip4_hdr = (struct ip *)
645 rte_pktmbuf_mtod(ctx->op->sym->m_src, char*);
646 ctx->op->sym->m_src->pkt_len =
647 rte_be_to_cpu_16(ip4_hdr->ip_len);
648 ctx->op->sym->m_src->data_len =
649 rte_be_to_cpu_16(ip4_hdr->ip_len);
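/* Note: with IPsec protocol offload the SEC engine has grown or shrunk
 * the packet in place, so the mbuf pkt_len/data_len above are refreshed
 * from the total-length field of the resulting IP header.
 */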
653 caam_jr_op_ending(ctx);
657 return notified_descs_no;
661 caam_jr_dequeue_burst(void *qp, struct rte_crypto_op **ops,
664 struct caam_jr_qp *jr_qp = (struct caam_jr_qp *)qp;
665 struct sec_job_ring_t *ring = jr_qp->ring;
669 CAAM_JR_DP_DEBUG("Jr[%p]Polling. limit[%d]", ring, nb_ops);
672 * If nb_ops < 0 -> poll JR until no more notifications are available.
673 * If nb_ops > 0 -> poll JR until limit is reached.
676 /* Run hw poll job ring */
677 num_rx = hw_poll_job_ring(ring, ops, nb_ops, jr_qp);
679 CAAM_JR_ERR("Error polling SEC engine (%d)", num_rx);
683 CAAM_JR_DP_DEBUG("Jr[%p].Jobs notified[%d]. ", ring, num_rx);
685 if (ring->jr_mode == SEC_NOTIFICATION_TYPE_NAPI) {
686 if (num_rx < nb_ops) {
687 ret = caam_jr_enable_irqs(ring->irq_fd);
688 SEC_ASSERT(ret == 0, ret,
689 "Failed to enable irqs for job ring %p", ring);
691 } else if (ring->jr_mode == SEC_NOTIFICATION_TYPE_IRQ) {
693 /* Always enable IRQ generation when in pure IRQ mode */
694 ret = caam_jr_enable_irqs(ring->irq_fd);
695 SEC_ASSERT(ret == 0, ret,
696 "Failed to enable irqs for job ring %p", ring);
699 jr_qp->rx_pkts += num_rx;
706 * |<----data_len------->|
707 * |ip_header|ah_header|icv|payload|
712 static inline struct caam_jr_op_ctx *
713 build_auth_only_sg(struct rte_crypto_op *op, struct caam_jr_session *ses)
715 struct rte_crypto_sym_op *sym = op->sym;
716 struct rte_mbuf *mbuf = sym->m_src;
717 struct caam_jr_op_ctx *ctx;
718 struct sec4_sg_entry *sg;
721 uint64_t sdesc_offset;
722 struct sec_job_descriptor_t *jobdescr;
730 if ((mbuf->nb_segs + extra_segs) > MAX_SG_ENTRIES) {
731 CAAM_JR_DP_ERR("Auth: Max sec segs supported is %d",
736 ctx = caam_jr_alloc_ctx(ses);
743 sdesc_offset = (size_t) ((char *)&cdb->sh_desc - (char *)cdb);
745 jobdescr = (struct sec_job_descriptor_t *) ctx->jobdes.desc;
747 SEC_JD_INIT(jobdescr);
748 SEC_JD_SET_SD(jobdescr,
749 (phys_addr_t)(caam_jr_dma_vtop(cdb)) + sdesc_offset,
750 cdb->sh_hdr.hi.field.idlen);
753 SEC_JD_SET_OUT_PTR(jobdescr, (uint64_t)sym->auth.digest.phys_addr,
754 0, ses->digest_length);
758 length = sym->auth.data.length;
759 sg->ptr = cpu_to_caam64(rte_pktmbuf_iova(mbuf) + sym->auth.data.offset);
760 sg->len = cpu_to_caam32(mbuf->data_len - sym->auth.data.offset);
762 /* Successive segs */
766 sg->ptr = cpu_to_caam64(rte_pktmbuf_iova(mbuf));
767 sg->len = cpu_to_caam32(mbuf->data_len);
771 if (is_decode(ses)) {
772 /* digest verification case */
774 /* hash result or digest, save digest first */
775 rte_memcpy(ctx->digest, sym->auth.digest.data,
778 rte_hexdump(stdout, "ICV", ctx->digest, ses->digest_length);
780 sg->ptr = cpu_to_caam64(caam_jr_vtop_ctx(ctx, ctx->digest));
781 sg->len = cpu_to_caam32(ses->digest_length);
782 length += ses->digest_length;
784 sg->len -= ses->digest_length;
788 sg->len |= cpu_to_caam32(SEC4_SG_LEN_FIN);
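/* SEC4_SG_LEN_FIN marks the final entry of the scatter/gather table;
 * the same flag is set on the last SG entry in the other builders below.
 */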
790 SEC_JD_SET_IN_PTR(jobdescr,
791 (uint64_t)caam_jr_vtop_ctx(ctx, &ctx->sg[0]), 0, length);
792 /* enabling sg list */
793 (jobdescr)->seq_in.command.word |= 0x01000000;
798 static inline struct caam_jr_op_ctx *
799 build_auth_only(struct rte_crypto_op *op, struct caam_jr_session *ses)
801 struct rte_crypto_sym_op *sym = op->sym;
802 struct caam_jr_op_ctx *ctx;
803 struct sec4_sg_entry *sg;
804 rte_iova_t start_addr;
806 uint64_t sdesc_offset;
807 struct sec_job_descriptor_t *jobdescr;
809 ctx = caam_jr_alloc_ctx(ses);
816 sdesc_offset = (size_t) ((char *)&cdb->sh_desc - (char *)cdb);
818 start_addr = rte_pktmbuf_iova(sym->m_src);
820 jobdescr = (struct sec_job_descriptor_t *) ctx->jobdes.desc;
822 SEC_JD_INIT(jobdescr);
823 SEC_JD_SET_SD(jobdescr,
824 (phys_addr_t)(caam_jr_dma_vtop(cdb)) + sdesc_offset,
825 cdb->sh_hdr.hi.field.idlen);
828 SEC_JD_SET_OUT_PTR(jobdescr, (uint64_t)sym->auth.digest.phys_addr,
829 0, ses->digest_length);
832 if (is_decode(ses)) {
834 SEC_JD_SET_IN_PTR(jobdescr,
835 (uint64_t)caam_jr_vtop_ctx(ctx, sg), 0,
836 (sym->auth.data.length + ses->digest_length));
837 /* enabling sg list */
838 (jobdescr)->seq_in.command.word |= 0x01000000;
840 /* hash result or digest, save digest first */
841 rte_memcpy(ctx->digest, sym->auth.digest.data,
843 sg->ptr = cpu_to_caam64(start_addr + sym->auth.data.offset);
844 sg->len = cpu_to_caam32(sym->auth.data.length);
847 rte_hexdump(stdout, "ICV", ctx->digest, ses->digest_length);
849 /* let the HW verify the digest */
851 sg->ptr = cpu_to_caam64(caam_jr_vtop_ctx(ctx, ctx->digest));
852 sg->len = cpu_to_caam32(ses->digest_length);
854 sg->len |= cpu_to_caam32(SEC4_SG_LEN_FIN);
856 SEC_JD_SET_IN_PTR(jobdescr, (uint64_t)start_addr,
857 sym->auth.data.offset, sym->auth.data.length);
862 static inline struct caam_jr_op_ctx *
863 build_cipher_only_sg(struct rte_crypto_op *op, struct caam_jr_session *ses)
865 struct rte_crypto_sym_op *sym = op->sym;
866 struct rte_mbuf *mbuf = sym->m_src;
867 struct caam_jr_op_ctx *ctx;
868 struct sec4_sg_entry *sg, *in_sg;
871 uint64_t sdesc_offset;
872 uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
874 struct sec_job_descriptor_t *jobdescr;
879 reg_segs = mbuf->nb_segs + sym->m_src->nb_segs + 2;
882 reg_segs = mbuf->nb_segs * 2 + 2;
885 if (reg_segs > MAX_SG_ENTRIES) {
886 CAAM_JR_DP_ERR("Cipher: Max sec segs supported is %d",
891 ctx = caam_jr_alloc_ctx(ses);
897 sdesc_offset = (size_t) ((char *)&cdb->sh_desc - (char *)cdb);
899 jobdescr = (struct sec_job_descriptor_t *) ctx->jobdes.desc;
901 SEC_JD_INIT(jobdescr);
902 SEC_JD_SET_SD(jobdescr,
903 (phys_addr_t)(caam_jr_dma_vtop(cdb)) + sdesc_offset,
904 cdb->sh_hdr.hi.field.idlen);
907 CAAM_JR_INFO("mbuf offset =%d, cipher offset = %d, length =%d+%d",
908 sym->m_src->data_off, sym->cipher.data.offset,
909 sym->cipher.data.length, ses->iv.length);
918 length = sym->cipher.data.length;
920 sg->ptr = cpu_to_caam64(rte_pktmbuf_iova(mbuf)
921 + sym->cipher.data.offset);
922 sg->len = cpu_to_caam32(mbuf->data_len - sym->cipher.data.offset);
924 /* Successive segs */
928 sg->ptr = cpu_to_caam64(rte_pktmbuf_iova(mbuf));
929 sg->len = cpu_to_caam32(mbuf->data_len);
933 sg->len |= cpu_to_caam32(SEC4_SG_LEN_FIN);
935 SEC_JD_SET_OUT_PTR(jobdescr,
936 (uint64_t)caam_jr_vtop_ctx(ctx, &ctx->sg[0]), 0,
939 (jobdescr)->seq_out.command.word |= 0x01000000;
946 length = sym->cipher.data.length + ses->iv.length;
949 sg->ptr = cpu_to_caam64(caam_jr_dma_vtop(IV_ptr));
950 sg->len = cpu_to_caam32(ses->iv.length);
954 sg->ptr = cpu_to_caam64(rte_pktmbuf_iova(mbuf)
955 + sym->cipher.data.offset);
956 sg->len = cpu_to_caam32(mbuf->data_len - sym->cipher.data.offset);
958 /* Successive segs */
962 sg->ptr = cpu_to_caam64(rte_pktmbuf_iova(mbuf));
963 sg->len = cpu_to_caam32(mbuf->data_len);
967 sg->len |= cpu_to_caam32(SEC4_SG_LEN_FIN);
970 SEC_JD_SET_IN_PTR(jobdescr, (uint64_t)caam_jr_vtop_ctx(ctx, in_sg), 0,
973 (jobdescr)->seq_in.command.word |= 0x01000000;
978 static inline struct caam_jr_op_ctx *
979 build_cipher_only(struct rte_crypto_op *op, struct caam_jr_session *ses)
981 struct rte_crypto_sym_op *sym = op->sym;
982 struct caam_jr_op_ctx *ctx;
983 struct sec4_sg_entry *sg;
984 rte_iova_t src_start_addr, dst_start_addr;
986 uint64_t sdesc_offset;
987 uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
989 struct sec_job_descriptor_t *jobdescr;
991 ctx = caam_jr_alloc_ctx(ses);
997 sdesc_offset = (size_t) ((char *)&cdb->sh_desc - (char *)cdb);
999 src_start_addr = rte_pktmbuf_iova(sym->m_src);
1001 dst_start_addr = rte_pktmbuf_iova(sym->m_dst);
1003 dst_start_addr = src_start_addr;
1005 jobdescr = (struct sec_job_descriptor_t *) ctx->jobdes.desc;
1007 SEC_JD_INIT(jobdescr);
1008 SEC_JD_SET_SD(jobdescr,
1009 (phys_addr_t)(caam_jr_dma_vtop(cdb)) + sdesc_offset,
1010 cdb->sh_hdr.hi.field.idlen);
1013 CAAM_JR_INFO("mbuf offset =%d, cipher offset = %d, length =%d+%d",
1014 sym->m_src->data_off, sym->cipher.data.offset,
1015 sym->cipher.data.length, ses->iv.length);
1018 SEC_JD_SET_OUT_PTR(jobdescr, (uint64_t)dst_start_addr,
1019 sym->cipher.data.offset,
1020 sym->cipher.data.length + ses->iv.length);
1024 SEC_JD_SET_IN_PTR(jobdescr, (uint64_t)caam_jr_vtop_ctx(ctx, sg), 0,
1025 sym->cipher.data.length + ses->iv.length);
1026 /*enabling sg bit */
1027 (jobdescr)->seq_in.command.word |= 0x01000000;
1029 sg->ptr = cpu_to_caam64(caam_jr_dma_vtop(IV_ptr));
1030 sg->len = cpu_to_caam32(ses->iv.length);
1033 sg->ptr = cpu_to_caam64(src_start_addr + sym->cipher.data.offset);
1034 sg->len = cpu_to_caam32(sym->cipher.data.length);
1036 sg->len |= cpu_to_caam32(SEC4_SG_LEN_FIN);
1041 /* For decapsulation:
1043 * +----+----------------+--------------------------------+-----+
1044 * | IV | Auth-only data | Authenticated & Encrypted data | ICV |
1045 * +----+----------------+--------------------------------+-----+
1047 * +--------------------------------+
1048 * | Decrypted & authenticated data |
1049 * +--------------------------------+
1052 static inline struct caam_jr_op_ctx *
1053 build_cipher_auth_sg(struct rte_crypto_op *op, struct caam_jr_session *ses)
1055 struct rte_crypto_sym_op *sym = op->sym;
1056 struct caam_jr_op_ctx *ctx;
1057 struct sec4_sg_entry *sg, *out_sg, *in_sg;
1058 struct rte_mbuf *mbuf;
1059 uint32_t length = 0;
1060 struct sec_cdb *cdb;
1061 uint64_t sdesc_offset;
1063 uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
1065 struct sec_job_descriptor_t *jobdescr;
1066 uint16_t auth_hdr_len = sym->cipher.data.offset -
1067 sym->auth.data.offset;
1068 uint16_t auth_tail_len = sym->auth.data.length -
1069 sym->cipher.data.length - auth_hdr_len;
1070 uint32_t auth_only_len = (auth_tail_len << 16) | auth_hdr_len;
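/* auth_only_len packs the authenticate-only byte counts around the
 * cipher region: header bytes (before the cipher offset) in the low
 * 16 bits and trailing bytes in the high 16 bits. It is later written
 * into the DPOVRD word of the job descriptor.
 */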
1074 req_segs = mbuf->nb_segs + sym->m_src->nb_segs + 3;
1077 req_segs = mbuf->nb_segs * 2 + 3;
1080 if (req_segs > MAX_SG_ENTRIES) {
1081 CAAM_JR_DP_ERR("Cipher-Auth: Max sec segs supported is %d",
1086 ctx = caam_jr_alloc_ctx(ses);
1092 sdesc_offset = (size_t) ((char *)&cdb->sh_desc - (char *)cdb);
1094 jobdescr = (struct sec_job_descriptor_t *) ctx->jobdes.desc;
1096 SEC_JD_INIT(jobdescr);
1097 SEC_JD_SET_SD(jobdescr,
1098 (phys_addr_t)(caam_jr_dma_vtop(cdb)) + sdesc_offset,
1099 cdb->sh_hdr.hi.field.idlen);
1107 out_sg = &ctx->sg[0];
1109 length = sym->auth.data.length + ses->digest_length;
1111 length = sym->auth.data.length;
1116 sg->ptr = cpu_to_caam64(rte_pktmbuf_iova(mbuf)
1117 + sym->auth.data.offset);
1118 sg->len = cpu_to_caam32(mbuf->data_len - sym->auth.data.offset);
1120 /* Successive segs */
1124 sg->ptr = cpu_to_caam64(rte_pktmbuf_iova(mbuf));
1125 sg->len = cpu_to_caam32(mbuf->data_len);
1129 if (is_encode(ses)) {
1130 /* set auth output */
1132 sg->ptr = cpu_to_caam64(sym->auth.digest.phys_addr);
1133 sg->len = cpu_to_caam32(ses->digest_length);
1136 sg->len |= cpu_to_caam32(SEC4_SG_LEN_FIN);
1138 SEC_JD_SET_OUT_PTR(jobdescr,
1139 (uint64_t)caam_jr_dma_vtop(out_sg), 0, length);
1141 (jobdescr)->seq_out.command.word |= 0x01000000;
1148 length = ses->iv.length + sym->auth.data.length;
1150 length = ses->iv.length + sym->auth.data.length
1151 + ses->digest_length;
1153 sg->ptr = cpu_to_caam64(caam_jr_dma_vtop(IV_ptr));
1154 sg->len = cpu_to_caam32(ses->iv.length);
1158 sg->ptr = cpu_to_caam64(rte_pktmbuf_iova(mbuf)
1159 + sym->auth.data.offset);
1160 sg->len = cpu_to_caam32(mbuf->data_len - sym->auth.data.offset);
1162 /* Successive segs */
1166 sg->ptr = cpu_to_caam64(rte_pktmbuf_iova(mbuf));
1167 sg->len = cpu_to_caam32(mbuf->data_len);
1171 if (is_decode(ses)) {
1173 rte_memcpy(ctx->digest, sym->auth.digest.data,
1174 ses->digest_length);
1175 sg->ptr = cpu_to_caam64(caam_jr_dma_vtop(ctx->digest));
1176 sg->len = cpu_to_caam32(ses->digest_length);
1179 sg->len |= cpu_to_caam32(SEC4_SG_LEN_FIN);
1181 SEC_JD_SET_IN_PTR(jobdescr, (uint64_t)caam_jr_dma_vtop(in_sg), 0,
1184 (jobdescr)->seq_in.command.word |= 0x01000000;
1185 /* Auth_only_len is set as 0 in descriptor and it is
1186 * overwritten here in the jd which will update
1191 (jobdescr)->dpovrd = 0x80000000 | auth_only_len;
1196 static inline struct caam_jr_op_ctx *
1197 build_cipher_auth(struct rte_crypto_op *op, struct caam_jr_session *ses)
1199 struct rte_crypto_sym_op *sym = op->sym;
1200 struct caam_jr_op_ctx *ctx;
1201 struct sec4_sg_entry *sg;
1202 rte_iova_t src_start_addr, dst_start_addr;
1203 uint32_t length = 0;
1204 struct sec_cdb *cdb;
1205 uint64_t sdesc_offset;
1206 uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
1208 struct sec_job_descriptor_t *jobdescr;
1209 uint16_t auth_hdr_len = sym->cipher.data.offset -
1210 sym->auth.data.offset;
1211 uint16_t auth_tail_len = sym->auth.data.length -
1212 sym->cipher.data.length - auth_hdr_len;
1213 uint32_t auth_only_len = (auth_tail_len << 16) | auth_hdr_len;
1215 src_start_addr = rte_pktmbuf_iova(sym->m_src);
1217 dst_start_addr = rte_pktmbuf_iova(sym->m_dst);
1219 dst_start_addr = src_start_addr;
1221 ctx = caam_jr_alloc_ctx(ses);
1227 sdesc_offset = (size_t) ((char *)&cdb->sh_desc - (char *)cdb);
1229 jobdescr = (struct sec_job_descriptor_t *) ctx->jobdes.desc;
1231 SEC_JD_INIT(jobdescr);
1232 SEC_JD_SET_SD(jobdescr,
1233 (phys_addr_t)(caam_jr_dma_vtop(cdb)) + sdesc_offset,
1234 cdb->sh_hdr.hi.field.idlen);
1238 if (is_encode(ses)) {
1239 sg->ptr = cpu_to_caam64(caam_jr_dma_vtop(IV_ptr));
1240 sg->len = cpu_to_caam32(ses->iv.length);
1241 length += ses->iv.length;
1244 sg->ptr = cpu_to_caam64(src_start_addr + sym->auth.data.offset);
1245 sg->len = cpu_to_caam32(sym->auth.data.length);
1246 length += sym->auth.data.length;
1248 sg->len |= cpu_to_caam32(SEC4_SG_LEN_FIN);
1250 sg->ptr = cpu_to_caam64(caam_jr_dma_vtop(IV_ptr));
1251 sg->len = cpu_to_caam32(ses->iv.length);
1252 length += ses->iv.length;
1255 sg->ptr = cpu_to_caam64(src_start_addr + sym->auth.data.offset);
1256 sg->len = cpu_to_caam32(sym->auth.data.length);
1257 length += sym->auth.data.length;
1259 rte_memcpy(ctx->digest, sym->auth.digest.data,
1260 ses->digest_length);
1262 sg->ptr = cpu_to_caam64(caam_jr_dma_vtop(ctx->digest));
1263 sg->len = cpu_to_caam32(ses->digest_length);
1264 length += ses->digest_length;
1266 sg->len |= cpu_to_caam32(SEC4_SG_LEN_FIN);
1269 SEC_JD_SET_IN_PTR(jobdescr, (uint64_t)caam_jr_dma_vtop(&ctx->sg[0]), 0,
1272 (jobdescr)->seq_in.command.word |= 0x01000000;
1277 sg->ptr = cpu_to_caam64(dst_start_addr + sym->cipher.data.offset);
1278 sg->len = cpu_to_caam32(sym->cipher.data.length);
1279 length = sym->cipher.data.length;
1281 if (is_encode(ses)) {
1282 /* set auth output */
1284 sg->ptr = cpu_to_caam64(sym->auth.digest.phys_addr);
1285 sg->len = cpu_to_caam32(ses->digest_length);
1286 length += ses->digest_length;
1289 sg->len |= cpu_to_caam32(SEC4_SG_LEN_FIN);
1291 SEC_JD_SET_OUT_PTR(jobdescr,
1292 (uint64_t)caam_jr_dma_vtop(&ctx->sg[6]), 0, length);
1294 (jobdescr)->seq_out.command.word |= 0x01000000;
1296 /* Auth_only_len is set as 0 in descriptor and it is
1297 * overwritten here in the jd which will update
1302 (jobdescr)->dpovrd = 0x80000000 | auth_only_len;
1307 static inline struct caam_jr_op_ctx *
1308 build_proto(struct rte_crypto_op *op, struct caam_jr_session *ses)
1310 struct rte_crypto_sym_op *sym = op->sym;
1311 struct caam_jr_op_ctx *ctx = NULL;
1312 phys_addr_t src_start_addr, dst_start_addr;
1313 struct sec_cdb *cdb;
1314 uint64_t sdesc_offset;
1315 struct sec_job_descriptor_t *jobdescr;
1317 ctx = caam_jr_alloc_ctx(ses);
1322 src_start_addr = rte_pktmbuf_iova(sym->m_src);
1324 dst_start_addr = rte_pktmbuf_iova(sym->m_dst);
1326 dst_start_addr = src_start_addr;
1329 sdesc_offset = (size_t) ((char *)&cdb->sh_desc - (char *)cdb);
1331 jobdescr = (struct sec_job_descriptor_t *) ctx->jobdes.desc;
1333 SEC_JD_INIT(jobdescr);
1334 SEC_JD_SET_SD(jobdescr,
1335 (phys_addr_t)(caam_jr_dma_vtop(cdb)) + sdesc_offset,
1336 cdb->sh_hdr.hi.field.idlen);
1339 SEC_JD_SET_OUT_PTR(jobdescr, (uint64_t)dst_start_addr, 0,
1340 sym->m_src->buf_len - sym->m_src->data_off);
1342 SEC_JD_SET_IN_PTR(jobdescr, (uint64_t)src_start_addr, 0,
1343 sym->m_src->pkt_len);
1344 sym->m_src->packet_type &= ~RTE_PTYPE_L4_MASK;
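/* The L4 packet type is cleared here, presumably because protocol
 * offload replaces the payload with its ESP-encapsulated (or
 * decapsulated) form, so any previous L4 classification no longer
 * applies.
 */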
1350 caam_jr_enqueue_op(struct rte_crypto_op *op, struct caam_jr_qp *qp)
1352 struct sec_job_ring_t *ring = qp->ring;
1353 struct caam_jr_session *ses;
1354 struct caam_jr_op_ctx *ctx = NULL;
1355 struct sec_job_descriptor_t *jobdescr __rte_unused;
1357 switch (op->sess_type) {
1358 case RTE_CRYPTO_OP_WITH_SESSION:
1359 ses = (struct caam_jr_session *)
1360 get_sym_session_private_data(op->sym->session,
1361 cryptodev_driver_id);
1363 case RTE_CRYPTO_OP_SECURITY_SESSION:
1364 ses = (struct caam_jr_session *)
1365 get_sec_session_private_data(
1366 op->sym->sec_session);
1369 CAAM_JR_DP_ERR("sessionless crypto op not supported");
1374 if (unlikely(!ses->qp || ses->qp != qp)) {
1375 CAAM_JR_DP_DEBUG("Old:sess->qp=%p New qp = %p\n", ses->qp, qp);
1377 caam_jr_prep_cdb(ses);
1380 if (rte_pktmbuf_is_contiguous(op->sym->m_src)) {
1381 if (is_auth_cipher(ses))
1382 ctx = build_cipher_auth(op, ses);
1383 else if (is_aead(ses))
1385 else if (is_auth_only(ses))
1386 ctx = build_auth_only(op, ses);
1387 else if (is_cipher_only(ses))
1388 ctx = build_cipher_only(op, ses);
1389 else if (is_proto_ipsec(ses))
1390 ctx = build_proto(op, ses);
1392 if (is_auth_cipher(ses))
1393 ctx = build_cipher_auth_sg(op, ses);
1394 else if (is_aead(ses))
1396 else if (is_auth_only(ses))
1397 ctx = build_auth_only_sg(op, ses);
1398 else if (is_cipher_only(ses))
1399 ctx = build_cipher_only_sg(op, ses);
1402 if (unlikely(!ctx)) {
1404 CAAM_JR_ERR("unsupported sec op");
1409 rte_hexdump(stdout, "DECODE",
1410 rte_pktmbuf_mtod(op->sym->m_src, void *),
1411 rte_pktmbuf_data_len(op->sym->m_src));
1413 rte_hexdump(stdout, "ENCODE",
1414 rte_pktmbuf_mtod(op->sym->m_src, void *),
1415 rte_pktmbuf_data_len(op->sym->m_src));
1417 printf("\n JD before conversion\n");
1418 for (int i = 0; i < 12; i++)
1419 printf("\n 0x%08x", ctx->jobdes.desc[i]);
1422 CAAM_JR_DP_DEBUG("Jr[%p] pi[%d] ci[%d].Before sending desc",
1423 ring, ring->pidx, ring->cidx);
1425 /* TODO: do we want to retry? */
1426 if (SEC_JOB_RING_IS_FULL(ring->pidx, ring->cidx,
1427 SEC_JOB_RING_SIZE, SEC_JOB_RING_SIZE)) {
1428 CAAM_JR_DP_DEBUG("Ring FULL Jr[%p] pi[%d] ci[%d].Size = %d",
1429 ring, ring->pidx, ring->cidx, SEC_JOB_RING_SIZE);
1430 caam_jr_op_ending(ctx);
1435 #if CORE_BYTE_ORDER != CAAM_BYTE_ORDER
1436 jobdescr = (struct sec_job_descriptor_t *) ctx->jobdes.desc;
1438 jobdescr->deschdr.command.word =
1439 cpu_to_caam32(jobdescr->deschdr.command.word);
1440 jobdescr->sd_ptr = cpu_to_caam64(jobdescr->sd_ptr);
1441 jobdescr->seq_out.command.word =
1442 cpu_to_caam32(jobdescr->seq_out.command.word);
1443 jobdescr->seq_out_ptr = cpu_to_caam64(jobdescr->seq_out_ptr);
1444 jobdescr->out_ext_length = cpu_to_caam32(jobdescr->out_ext_length);
1445 jobdescr->seq_in.command.word =
1446 cpu_to_caam32(jobdescr->seq_in.command.word);
1447 jobdescr->seq_in_ptr = cpu_to_caam64(jobdescr->seq_in_ptr);
1448 jobdescr->in_ext_length = cpu_to_caam32(jobdescr->in_ext_length);
1449 jobdescr->load_dpovrd.command.word =
1450 cpu_to_caam32(jobdescr->load_dpovrd.command.word);
1451 jobdescr->dpovrd = cpu_to_caam32(jobdescr->dpovrd);
1454 /* Set ptr in input ring to current descriptor */
1455 sec_write_addr(&ring->input_ring[ring->pidx],
1456 (phys_addr_t)caam_jr_vtop_ctx(ctx, ctx->jobdes.desc));
1459 /* Notify HW that a new job is enqueued */
1460 hw_enqueue_desc_on_job_ring(ring);
1462 /* increment the producer index for the current job ring */
1463 ring->pidx = SEC_CIRCULAR_COUNTER(ring->pidx, SEC_JOB_RING_SIZE);
1469 caam_jr_enqueue_burst(void *qp, struct rte_crypto_op **ops,
1472 /* Function to transmit the frames to given device and queue pair */
1475 struct caam_jr_qp *jr_qp = (struct caam_jr_qp *)qp;
1476 uint16_t num_tx = 0;
1477 /* Prepare each packet which is to be sent */
1478 for (loop = 0; loop < nb_ops; loop++) {
1479 ret = caam_jr_enqueue_op(ops[loop], jr_qp);
1484 jr_qp->tx_pkts += num_tx;
1489 /* Release queue pair */
1491 caam_jr_queue_pair_release(struct rte_cryptodev *dev,
1494 struct sec_job_ring_t *internals;
1495 struct caam_jr_qp *qp = NULL;
1497 PMD_INIT_FUNC_TRACE();
1498 CAAM_JR_DEBUG("dev =%p, queue =%d", dev, qp_id);
1500 internals = dev->data->dev_private;
1501 if (qp_id >= internals->max_nb_queue_pairs) {
1502 CAAM_JR_ERR("Max supported qpid %d",
1503 internals->max_nb_queue_pairs);
1507 qp = &internals->qps[qp_id];
1509 dev->data->queue_pairs[qp_id] = NULL;
1514 /* Setup a queue pair */
1516 caam_jr_queue_pair_setup(
1517 struct rte_cryptodev *dev, uint16_t qp_id,
1518 __rte_unused const struct rte_cryptodev_qp_conf *qp_conf,
1519 __rte_unused int socket_id)
1521 struct sec_job_ring_t *internals;
1522 struct caam_jr_qp *qp = NULL;
1524 PMD_INIT_FUNC_TRACE();
1525 CAAM_JR_DEBUG("dev =%p, queue =%d, conf =%p", dev, qp_id, qp_conf);
1527 internals = dev->data->dev_private;
1528 if (qp_id >= internals->max_nb_queue_pairs) {
1529 CAAM_JR_ERR("Max supported qpid %d",
1530 internals->max_nb_queue_pairs);
1534 qp = &internals->qps[qp_id];
1535 qp->ring = internals;
1536 dev->data->queue_pairs[qp_id] = qp;
1541 /* Return the number of allocated queue pairs */
1543 caam_jr_queue_pair_count(struct rte_cryptodev *dev)
1545 PMD_INIT_FUNC_TRACE();
1547 return dev->data->nb_queue_pairs;
1550 /* Returns the size of the caam_jr session structure */
1552 caam_jr_sym_session_get_size(struct rte_cryptodev *dev __rte_unused)
1554 PMD_INIT_FUNC_TRACE();
1556 return sizeof(struct caam_jr_session);
1560 caam_jr_cipher_init(struct rte_cryptodev *dev __rte_unused,
1561 struct rte_crypto_sym_xform *xform,
1562 struct caam_jr_session *session)
1564 session->cipher_alg = xform->cipher.algo;
1565 session->iv.length = xform->cipher.iv.length;
1566 session->iv.offset = xform->cipher.iv.offset;
1567 session->cipher_key.data = rte_zmalloc(NULL, xform->cipher.key.length,
1568 RTE_CACHE_LINE_SIZE);
1569 if (session->cipher_key.data == NULL && xform->cipher.key.length > 0) {
1570 CAAM_JR_ERR("No Memory for cipher key\n");
1573 session->cipher_key.length = xform->cipher.key.length;
1575 memcpy(session->cipher_key.data, xform->cipher.key.data,
1576 xform->cipher.key.length);
1577 session->dir = (xform->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
1584 caam_jr_auth_init(struct rte_cryptodev *dev __rte_unused,
1585 struct rte_crypto_sym_xform *xform,
1586 struct caam_jr_session *session)
1588 session->auth_alg = xform->auth.algo;
1589 session->auth_key.data = rte_zmalloc(NULL, xform->auth.key.length,
1590 RTE_CACHE_LINE_SIZE);
1591 if (session->auth_key.data == NULL && xform->auth.key.length > 0) {
1592 CAAM_JR_ERR("No Memory for auth key\n");
1595 session->auth_key.length = xform->auth.key.length;
1596 session->digest_length = xform->auth.digest_length;
1598 memcpy(session->auth_key.data, xform->auth.key.data,
1599 xform->auth.key.length);
1600 session->dir = (xform->auth.op == RTE_CRYPTO_AUTH_OP_GENERATE) ?
1607 caam_jr_aead_init(struct rte_cryptodev *dev __rte_unused,
1608 struct rte_crypto_sym_xform *xform,
1609 struct caam_jr_session *session)
1611 session->aead_alg = xform->aead.algo;
1612 session->iv.length = xform->aead.iv.length;
1613 session->iv.offset = xform->aead.iv.offset;
1614 session->auth_only_len = xform->aead.aad_length;
1615 session->aead_key.data = rte_zmalloc(NULL, xform->aead.key.length,
1616 RTE_CACHE_LINE_SIZE);
1617 if (session->aead_key.data == NULL && xform->aead.key.length > 0) {
1618 CAAM_JR_ERR("No Memory for aead key\n");
1621 session->aead_key.length = xform->aead.key.length;
1622 session->digest_length = xform->aead.digest_length;
1624 memcpy(session->aead_key.data, xform->aead.key.data,
1625 xform->aead.key.length);
1626 session->dir = (xform->aead.op == RTE_CRYPTO_AEAD_OP_ENCRYPT) ?
1633 caam_jr_set_session_parameters(struct rte_cryptodev *dev,
1634 struct rte_crypto_sym_xform *xform, void *sess)
1636 struct sec_job_ring_t *internals = dev->data->dev_private;
1637 struct caam_jr_session *session = sess;
1639 PMD_INIT_FUNC_TRACE();
1641 if (unlikely(sess == NULL)) {
1642 CAAM_JR_ERR("invalid session struct");
1646 /* Default IV length = 0 */
1647 session->iv.length = 0;
1650 if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER && xform->next == NULL) {
1651 session->auth_alg = RTE_CRYPTO_AUTH_NULL;
1652 caam_jr_cipher_init(dev, xform, session);
1654 /* Authentication Only */
1655 } else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
1656 xform->next == NULL) {
1657 session->cipher_alg = RTE_CRYPTO_CIPHER_NULL;
1658 caam_jr_auth_init(dev, xform, session);
1660 /* Cipher then Authenticate */
1661 } else if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
1662 xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
1663 if (xform->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) {
1664 caam_jr_cipher_init(dev, xform, session);
1665 caam_jr_auth_init(dev, xform->next, session);
1667 CAAM_JR_ERR("Not supported: Auth then Cipher");
1671 /* Authenticate then Cipher */
1672 } else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
1673 xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
1674 if (xform->next->cipher.op == RTE_CRYPTO_CIPHER_OP_DECRYPT) {
1675 caam_jr_auth_init(dev, xform, session);
1676 caam_jr_cipher_init(dev, xform->next, session);
1678 CAAM_JR_ERR("Not supported: Auth then Cipher");
1682 /* AEAD operation for AES-GCM kind of Algorithms */
1683 } else if (xform->type == RTE_CRYPTO_SYM_XFORM_AEAD &&
1684 xform->next == NULL) {
1685 caam_jr_aead_init(dev, xform, session);
1688 CAAM_JR_ERR("Invalid crypto type");
1691 session->ctx_pool = internals->ctx_pool;
1696 rte_free(session->cipher_key.data);
1697 rte_free(session->auth_key.data);
1698 memset(session, 0, sizeof(struct caam_jr_session));
1704 caam_jr_sym_session_configure(struct rte_cryptodev *dev,
1705 struct rte_crypto_sym_xform *xform,
1706 struct rte_cryptodev_sym_session *sess,
1707 struct rte_mempool *mempool)
1709 void *sess_private_data;
1712 PMD_INIT_FUNC_TRACE();
1714 if (rte_mempool_get(mempool, &sess_private_data)) {
1715 CAAM_JR_ERR("Couldn't get object from session mempool");
1719 memset(sess_private_data, 0, sizeof(struct caam_jr_session));
1720 ret = caam_jr_set_session_parameters(dev, xform, sess_private_data);
1722 CAAM_JR_ERR("failed to configure session parameters");
1723 /* Return session to mempool */
1724 rte_mempool_put(mempool, sess_private_data);
1728 set_sym_session_private_data(sess, dev->driver_id, sess_private_data);
1733 /* Clear the memory of session so it doesn't leave key material behind */
1735 caam_jr_sym_session_clear(struct rte_cryptodev *dev,
1736 struct rte_cryptodev_sym_session *sess)
1738 uint8_t index = dev->driver_id;
1739 void *sess_priv = get_sym_session_private_data(sess, index);
1740 struct caam_jr_session *s = (struct caam_jr_session *)sess_priv;
1742 PMD_INIT_FUNC_TRACE();
1745 struct rte_mempool *sess_mp = rte_mempool_from_obj(sess_priv);
1747 rte_free(s->cipher_key.data);
1748 rte_free(s->auth_key.data);
1749 memset(s, 0, sizeof(struct caam_jr_session));
1750 set_sym_session_private_data(sess, index, NULL);
1751 rte_mempool_put(sess_mp, sess_priv);
1756 caam_jr_set_ipsec_session(struct rte_cryptodev *dev,
1757 struct rte_security_session_conf *conf,
1760 struct sec_job_ring_t *internals = dev->data->dev_private;
1761 struct rte_security_ipsec_xform *ipsec_xform = &conf->ipsec;
1762 struct rte_crypto_auth_xform *auth_xform;
1763 struct rte_crypto_cipher_xform *cipher_xform;
1764 struct caam_jr_session *session = (struct caam_jr_session *)sess;
1766 PMD_INIT_FUNC_TRACE();
1768 if (ipsec_xform->direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS) {
1769 cipher_xform = &conf->crypto_xform->cipher;
1770 auth_xform = &conf->crypto_xform->next->auth;
1772 auth_xform = &conf->crypto_xform->auth;
1773 cipher_xform = &conf->crypto_xform->next->cipher;
1775 session->proto_alg = conf->protocol;
1776 session->cipher_key.data = rte_zmalloc(NULL,
1777 cipher_xform->key.length,
1778 RTE_CACHE_LINE_SIZE);
1779 if (session->cipher_key.data == NULL &&
1780 cipher_xform->key.length > 0) {
1781 CAAM_JR_ERR("No Memory for cipher key\n");
1785 session->cipher_key.length = cipher_xform->key.length;
1786 session->auth_key.data = rte_zmalloc(NULL,
1787 auth_xform->key.length,
1788 RTE_CACHE_LINE_SIZE);
1789 if (session->auth_key.data == NULL &&
1790 auth_xform->key.length > 0) {
1791 CAAM_JR_ERR("No Memory for auth key\n");
1792 rte_free(session->cipher_key.data);
1795 session->auth_key.length = auth_xform->key.length;
1796 memcpy(session->cipher_key.data, cipher_xform->key.data,
1797 cipher_xform->key.length);
1798 memcpy(session->auth_key.data, auth_xform->key.data,
1799 auth_xform->key.length);
1801 switch (auth_xform->algo) {
1802 case RTE_CRYPTO_AUTH_SHA1_HMAC:
1803 session->auth_alg = RTE_CRYPTO_AUTH_SHA1_HMAC;
1805 case RTE_CRYPTO_AUTH_MD5_HMAC:
1806 session->auth_alg = RTE_CRYPTO_AUTH_MD5_HMAC;
1808 case RTE_CRYPTO_AUTH_SHA256_HMAC:
1809 session->auth_alg = RTE_CRYPTO_AUTH_SHA256_HMAC;
1811 case RTE_CRYPTO_AUTH_SHA384_HMAC:
1812 session->auth_alg = RTE_CRYPTO_AUTH_SHA384_HMAC;
1814 case RTE_CRYPTO_AUTH_SHA512_HMAC:
1815 session->auth_alg = RTE_CRYPTO_AUTH_SHA512_HMAC;
1817 case RTE_CRYPTO_AUTH_AES_CMAC:
1818 session->auth_alg = RTE_CRYPTO_AUTH_AES_CMAC;
1820 case RTE_CRYPTO_AUTH_NULL:
1821 session->auth_alg = RTE_CRYPTO_AUTH_NULL;
1823 case RTE_CRYPTO_AUTH_SHA224_HMAC:
1824 case RTE_CRYPTO_AUTH_AES_XCBC_MAC:
1825 case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
1826 case RTE_CRYPTO_AUTH_SHA1:
1827 case RTE_CRYPTO_AUTH_SHA256:
1828 case RTE_CRYPTO_AUTH_SHA512:
1829 case RTE_CRYPTO_AUTH_SHA224:
1830 case RTE_CRYPTO_AUTH_SHA384:
1831 case RTE_CRYPTO_AUTH_MD5:
1832 case RTE_CRYPTO_AUTH_AES_GMAC:
1833 case RTE_CRYPTO_AUTH_KASUMI_F9:
1834 case RTE_CRYPTO_AUTH_AES_CBC_MAC:
1835 case RTE_CRYPTO_AUTH_ZUC_EIA3:
1836 CAAM_JR_ERR("Crypto: Unsupported auth alg %u\n",
1840 CAAM_JR_ERR("Crypto: Undefined Auth specified %u\n",
1845 switch (cipher_xform->algo) {
1846 case RTE_CRYPTO_CIPHER_AES_CBC:
1847 session->cipher_alg = RTE_CRYPTO_CIPHER_AES_CBC;
1849 case RTE_CRYPTO_CIPHER_3DES_CBC:
1850 session->cipher_alg = RTE_CRYPTO_CIPHER_3DES_CBC;
1852 case RTE_CRYPTO_CIPHER_AES_CTR:
1853 session->cipher_alg = RTE_CRYPTO_CIPHER_AES_CTR;
1855 case RTE_CRYPTO_CIPHER_NULL:
1856 case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
1857 case RTE_CRYPTO_CIPHER_3DES_ECB:
1858 case RTE_CRYPTO_CIPHER_AES_ECB:
1859 case RTE_CRYPTO_CIPHER_KASUMI_F8:
1860 CAAM_JR_ERR("Crypto: Unsupported Cipher alg %u\n",
1861 cipher_xform->algo);
1864 CAAM_JR_ERR("Crypto: Undefined Cipher specified %u\n",
1865 cipher_xform->algo);
1869 if (ipsec_xform->direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS) {
1870 memset(&session->encap_pdb, 0, sizeof(struct ipsec_encap_pdb) +
1871 sizeof(session->ip4_hdr));
1872 session->ip4_hdr.ip_v = IPVERSION;
1873 session->ip4_hdr.ip_hl = 5;
1874 session->ip4_hdr.ip_len = rte_cpu_to_be_16(
1875 sizeof(session->ip4_hdr));
1876 session->ip4_hdr.ip_tos = ipsec_xform->tunnel.ipv4.dscp;
1877 session->ip4_hdr.ip_id = 0;
1878 session->ip4_hdr.ip_off = 0;
1879 session->ip4_hdr.ip_ttl = ipsec_xform->tunnel.ipv4.ttl;
1880 session->ip4_hdr.ip_p = (ipsec_xform->proto ==
1881 RTE_SECURITY_IPSEC_SA_PROTO_ESP) ? IPPROTO_ESP
1883 session->ip4_hdr.ip_sum = 0;
1884 session->ip4_hdr.ip_src = ipsec_xform->tunnel.ipv4.src_ip;
1885 session->ip4_hdr.ip_dst = ipsec_xform->tunnel.ipv4.dst_ip;
1886 session->ip4_hdr.ip_sum = calc_chksum((uint16_t *)
1887 (void *)&session->ip4_hdr,
1890 session->encap_pdb.options =
1891 (IPVERSION << PDBNH_ESP_ENCAP_SHIFT) |
1892 PDBOPTS_ESP_OIHI_PDB_INL |
1894 PDBHMO_ESP_ENCAP_DTTL;
1895 if (ipsec_xform->options.esn)
1896 session->encap_pdb.options |= PDBOPTS_ESP_ESN;
1897 session->encap_pdb.spi = ipsec_xform->spi;
1898 session->encap_pdb.ip_hdr_len = sizeof(struct ip);
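/* The pre-built outer IPv4 header above is carried inline in the
 * encapsulation PDB (PDBOPTS_ESP_OIHI_PDB_INL), so the SEC engine can
 * prepend it to each packet during encapsulation.
 */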
1900 session->dir = DIR_ENC;
1901 } else if (ipsec_xform->direction ==
1902 RTE_SECURITY_IPSEC_SA_DIR_INGRESS) {
1903 memset(&session->decap_pdb, 0, sizeof(struct ipsec_decap_pdb));
1904 session->decap_pdb.options = sizeof(struct ip) << 16;
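/* For decapsulation the outer IP header length is encoded in the upper
 * 16 bits of the PDB options word, telling the SEC engine how much
 * outer header to strip.
 */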
1905 if (ipsec_xform->options.esn)
1906 session->decap_pdb.options |= PDBOPTS_ESP_ESN;
1907 session->dir = DIR_DEC;
1910 session->ctx_pool = internals->ctx_pool;
1914 rte_free(session->auth_key.data);
1915 rte_free(session->cipher_key.data);
1916 memset(session, 0, sizeof(struct caam_jr_session));
1921 caam_jr_security_session_create(void *dev,
1922 struct rte_security_session_conf *conf,
1923 struct rte_security_session *sess,
1924 struct rte_mempool *mempool)
1926 void *sess_private_data;
1927 struct rte_cryptodev *cdev = (struct rte_cryptodev *)dev;
1930 if (rte_mempool_get(mempool, &sess_private_data)) {
1931 CAAM_JR_ERR("Couldn't get object from session mempool");
1935 switch (conf->protocol) {
1936 case RTE_SECURITY_PROTOCOL_IPSEC:
1937 ret = caam_jr_set_ipsec_session(cdev, conf,
1940 case RTE_SECURITY_PROTOCOL_MACSEC:
1946 CAAM_JR_ERR("failed to configure session parameters");
1947 /* Return session to mempool */
1948 rte_mempool_put(mempool, sess_private_data);
1952 set_sec_session_private_data(sess, sess_private_data);
1957 /* Clear the memory of session so it doesn't leave key material behind */
1959 caam_jr_security_session_destroy(void *dev __rte_unused,
1960 struct rte_security_session *sess)
1962 PMD_INIT_FUNC_TRACE();
1963 void *sess_priv = get_sec_session_private_data(sess);
1965 struct caam_jr_session *s = (struct caam_jr_session *)sess_priv;
1968 struct rte_mempool *sess_mp = rte_mempool_from_obj(sess_priv);
1970 rte_free(s->cipher_key.data);
1971 rte_free(s->auth_key.data);
1972 memset(s, 0, sizeof(struct caam_jr_session));
1973 set_sec_session_private_data(sess, NULL);
1974 rte_mempool_put(sess_mp, sess_priv);
1981 caam_jr_dev_configure(struct rte_cryptodev *dev,
1982 struct rte_cryptodev_config *config __rte_unused)
1985 struct sec_job_ring_t *internals;
1987 PMD_INIT_FUNC_TRACE();
1989 internals = dev->data->dev_private;
1990 snprintf(str, sizeof(str), "ctx_pool_%d", dev->data->dev_id);
1991 if (!internals->ctx_pool) {
1992 internals->ctx_pool = rte_mempool_create((const char *)str,
1994 sizeof(struct caam_jr_op_ctx),
1995 CTX_POOL_CACHE_SIZE, 0,
1996 NULL, NULL, NULL, NULL,
1998 if (!internals->ctx_pool) {
1999 CAAM_JR_ERR("%s create failed\n", str);
2003 CAAM_JR_INFO("mempool already created for dev_id : %d",
2010 caam_jr_dev_start(struct rte_cryptodev *dev __rte_unused)
2012 PMD_INIT_FUNC_TRACE();
2017 caam_jr_dev_stop(struct rte_cryptodev *dev __rte_unused)
2019 PMD_INIT_FUNC_TRACE();
2023 caam_jr_dev_close(struct rte_cryptodev *dev)
2025 struct sec_job_ring_t *internals;
2027 PMD_INIT_FUNC_TRACE();
2032 internals = dev->data->dev_private;
2033 rte_mempool_free(internals->ctx_pool);
2034 internals->ctx_pool = NULL;
2040 caam_jr_dev_infos_get(struct rte_cryptodev *dev,
2041 struct rte_cryptodev_info *info)
2043 struct sec_job_ring_t *internals = dev->data->dev_private;
2045 PMD_INIT_FUNC_TRACE();
2047 info->max_nb_queue_pairs = internals->max_nb_queue_pairs;
2048 info->feature_flags = dev->feature_flags;
2049 info->capabilities = caam_jr_get_cryptodev_capabilities();
2050 info->sym.max_nb_sessions = internals->max_nb_sessions;
2051 info->driver_id = cryptodev_driver_id;
2055 static struct rte_cryptodev_ops caam_jr_ops = {
2056 .dev_configure = caam_jr_dev_configure,
2057 .dev_start = caam_jr_dev_start,
2058 .dev_stop = caam_jr_dev_stop,
2059 .dev_close = caam_jr_dev_close,
2060 .dev_infos_get = caam_jr_dev_infos_get,
2061 .stats_get = caam_jr_stats_get,
2062 .stats_reset = caam_jr_stats_reset,
2063 .queue_pair_setup = caam_jr_queue_pair_setup,
2064 .queue_pair_release = caam_jr_queue_pair_release,
2065 .queue_pair_count = caam_jr_queue_pair_count,
2066 .sym_session_get_size = caam_jr_sym_session_get_size,
2067 .sym_session_configure = caam_jr_sym_session_configure,
2068 .sym_session_clear = caam_jr_sym_session_clear
2071 static struct rte_security_ops caam_jr_security_ops = {
2072 .session_create = caam_jr_security_session_create,
2073 .session_update = NULL,
2074 .session_stats_get = NULL,
2075 .session_destroy = caam_jr_security_session_destroy,
2076 .set_pkt_metadata = NULL,
2077 .capabilities_get = caam_jr_get_security_capabilities
2080 /* @brief Flush job rings of any processed descs.
2081 * The processed descs are silently dropped,
2082 * WITHOUT being notified to UA.
2085 close_job_ring(struct sec_job_ring_t *job_ring)
2087 if (job_ring->irq_fd) {
2088 /* Producer index is frozen. If consumer index is not equal
2089 * to the producer index, then we have descs to flush.
2091 while (job_ring->pidx != job_ring->cidx)
2092 hw_flush_job_ring(job_ring, false, NULL);
2094 /* free the uio job ring */
2095 free_job_ring(job_ring->irq_fd);
2096 job_ring->irq_fd = 0;
2097 caam_jr_dma_free(job_ring->input_ring);
2098 caam_jr_dma_free(job_ring->output_ring);
2103 /** @brief Release the software and hardware resources tied to a job ring.
2104 * @param [in] job_ring The job ring
2106 * @retval 0 for success
2107 * @retval -1 for error
2110 shutdown_job_ring(struct sec_job_ring_t *job_ring)
2114 PMD_INIT_FUNC_TRACE();
2115 ASSERT(job_ring != NULL);
2116 ret = hw_shutdown_job_ring(job_ring);
2117 SEC_ASSERT(ret == 0, ret,
2118 "Failed to shutdown hardware job ring %p",
2121 if (job_ring->coalescing_en)
2122 hw_job_ring_disable_coalescing(job_ring);
2124 if (job_ring->jr_mode != SEC_NOTIFICATION_TYPE_POLL) {
2125 ret = caam_jr_disable_irqs(job_ring->irq_fd);
2126 SEC_ASSERT(ret == 0, ret,
2127 "Failed to disable irqs for job ring %p",
2135 * @brief Release the resources used by the SEC user space driver.
2137 * Reset and release SEC's job rings indicated by the User Application at
2138 * init_job_ring() and free any memory allocated internally.
2139 * Call once during application tear down.
2141 * @note In case there are any descriptors in-flight (descriptors received by
2142 * SEC driver for processing and for which no response was yet provided to UA),
2143 * the descriptors are discarded without any notifications to User Application.
2145 * @retval ::0 is returned for a successful execution
2146 * @retval ::-1 is returned if SEC driver release is in progress
2149 caam_jr_dev_uninit(struct rte_cryptodev *dev)
2151 struct sec_job_ring_t *internals;
2153 PMD_INIT_FUNC_TRACE();
2157 internals = dev->data->dev_private;
2158 rte_free(dev->security_ctx);
2160 /* If any descriptors in flight , poll and wait
2161 * until all descriptors are received and silently discarded.
2164 shutdown_job_ring(internals);
2165 close_job_ring(internals);
2166 rte_mempool_free(internals->ctx_pool);
2169 CAAM_JR_INFO("Closing crypto device %s", dev->data->name);
2171 /* last caam jr instance */
2172 if (g_job_rings_no == 0)
2173 g_driver_state = SEC_DRIVER_STATE_IDLE;
2178 /* @brief Initialize the software and hardware resources tied to a job ring.
2179 * @param [in] jr_mode Mode to be used by SEC Driver to receive
2180 * notifications from SEC. Can be either
2181 * of the three: #SEC_NOTIFICATION_TYPE_NAPI
2182 * #SEC_NOTIFICATION_TYPE_IRQ or
2183 * #SEC_NOTIFICATION_TYPE_POLL
2184 * @param [in] NAPI_mode The NAPI work mode to configure a job ring at
2185 * startup. Used only when #SEC_NOTIFICATION_TYPE
2186 * is set to #SEC_NOTIFICATION_TYPE_NAPI.
2187 * @param [in] irq_coalescing_timer This value determines the maximum
2188 * amount of time after processing a
2189 * descriptor before raising an interrupt.
2190 * @param [in] irq_coalescing_count This value determines how many
2191 * descriptors are completed before
2192 * raising an interrupt.
2193 * @param [in] reg_base_addr The job ring base address register
2194 * @param [in] irq_id The job ring interrupt identification number.
2195 * @retval job_ring_handle for successful job ring configuration
2196 * @retval NULL on error
2200 init_job_ring(void *reg_base_addr, uint32_t irq_id)
2202 struct sec_job_ring_t *job_ring = NULL;
2204 int jr_mode = SEC_NOTIFICATION_TYPE_POLL;
2206 int irq_coalescing_timer = 0;
2207 int irq_coalescing_count = 0;
2209 for (i = 0; i < MAX_SEC_JOB_RINGS; i++) {
2210 if (g_job_rings[i].irq_fd == 0) {
2211 job_ring = &g_job_rings[i];
2216 if (job_ring == NULL) {
2217 CAAM_JR_ERR("No free job ring\n");
2221 job_ring->register_base_addr = reg_base_addr;
2222 job_ring->jr_mode = jr_mode;
2223 job_ring->napi_mode = 0;
2224 job_ring->irq_fd = irq_id;
2226 /* Allocate mem for input and output ring */
2228 /* Allocate memory for input ring */
2229 job_ring->input_ring = caam_jr_dma_mem_alloc(L1_CACHE_BYTES,
2230 SEC_DMA_MEM_INPUT_RING_SIZE);
2231 memset(job_ring->input_ring, 0, SEC_DMA_MEM_INPUT_RING_SIZE);
2233 /* Allocate memory for output ring */
2234 job_ring->output_ring = caam_jr_dma_mem_alloc(L1_CACHE_BYTES,
2235 SEC_DMA_MEM_OUTPUT_RING_SIZE);
2236 memset(job_ring->output_ring, 0, SEC_DMA_MEM_OUTPUT_RING_SIZE);
2238 /* Reset job ring in SEC hw and configure job ring registers */
2239 ret = hw_reset_job_ring(job_ring);
2241 CAAM_JR_ERR("Failed to reset hardware job ring");
2245 if (jr_mode == SEC_NOTIFICATION_TYPE_NAPI) {
2246 /* When SEC US driver works in NAPI mode, the UA can select
2247 * if the driver starts with IRQs on or off.
2249 if (napi_mode == SEC_STARTUP_INTERRUPT_MODE) {
2250 CAAM_JR_INFO("Enabling DONE IRQ generation on job ring - %p",
2252 ret = caam_jr_enable_irqs(job_ring->irq_fd);
2254 CAAM_JR_ERR("Failed to enable irqs for job ring");
2258 } else if (jr_mode == SEC_NOTIFICATION_TYPE_IRQ) {
2259 /* When SEC US driver works in pure interrupt mode,
2260 * IRQ's are always enabled.
2262 CAAM_JR_INFO("Enabling DONE IRQ generation on job ring - %p",
2264 ret = caam_jr_enable_irqs(job_ring->irq_fd);
2266 CAAM_JR_ERR("Failed to enable irqs for job ring");
2270 if (irq_coalescing_timer || irq_coalescing_count) {
2271 hw_job_ring_set_coalescing_param(job_ring,
2272 irq_coalescing_timer,
2273 irq_coalescing_count);
2275 hw_job_ring_enable_coalescing(job_ring);
2276 job_ring->coalescing_en = 1;
2279 job_ring->jr_state = SEC_JOB_RING_STATE_STARTED;
2280 job_ring->max_nb_queue_pairs = RTE_CAAM_MAX_NB_SEC_QPS;
2281 job_ring->max_nb_sessions = RTE_CAAM_JR_PMD_MAX_NB_SESSIONS;
2285 caam_jr_dma_free(job_ring->output_ring);
2286 caam_jr_dma_free(job_ring->input_ring);
2292 caam_jr_dev_init(const char *name,
2293 struct rte_vdev_device *vdev,
2294 struct rte_cryptodev_pmd_init_params *init_params)
2296 struct rte_cryptodev *dev;
2297 struct rte_security_ctx *security_instance;
2298 struct uio_job_ring *job_ring;
2299 char str[RTE_CRYPTODEV_NAME_MAX_LEN];
2301 PMD_INIT_FUNC_TRACE();
2303 /* Validate driver state */
2304 if (g_driver_state == SEC_DRIVER_STATE_IDLE) {
2305 g_job_rings_max = sec_configure();
2306 if (!g_job_rings_max) {
2307 CAAM_JR_ERR("No job ring detected on UIO!");
2310 /* Update driver state */
2311 g_driver_state = SEC_DRIVER_STATE_STARTED;
2314 if (g_job_rings_no >= g_job_rings_max) {
2315 CAAM_JR_ERR("No more job rings available, max=%d!",
2320 job_ring = config_job_ring();
2321 if (job_ring == NULL) {
2322 CAAM_JR_ERR("failed to create job ring");
2326 snprintf(str, sizeof(str), "caam_jr%d", job_ring->jr_id);
2328 dev = rte_cryptodev_pmd_create(name, &vdev->device, init_params);
2330 CAAM_JR_ERR("failed to create cryptodev vdev");
2333 /* TODO: free it during teardown */
2334 dev->data->dev_private = init_job_ring(job_ring->register_base_addr,
2337 if (!dev->data->dev_private) {
2338 CAAM_JR_ERR("Ring memory allocation failed\n");
2342 dev->driver_id = cryptodev_driver_id;
2343 dev->dev_ops = &caam_jr_ops;
2345 /* register rx/tx burst functions for data path */
2346 dev->dequeue_burst = caam_jr_dequeue_burst;
2347 dev->enqueue_burst = caam_jr_enqueue_burst;
2348 dev->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO |
2349 RTE_CRYPTODEV_FF_HW_ACCELERATED |
2350 RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING |
2351 RTE_CRYPTODEV_FF_SECURITY |
2352 RTE_CRYPTODEV_FF_IN_PLACE_SGL |
2353 RTE_CRYPTODEV_FF_OOP_SGL_IN_SGL_OUT |
2354 RTE_CRYPTODEV_FF_OOP_SGL_IN_LB_OUT |
2355 RTE_CRYPTODEV_FF_OOP_LB_IN_SGL_OUT |
2356 RTE_CRYPTODEV_FF_OOP_LB_IN_LB_OUT;
2358 /* For secondary processes, we don't initialise any further as primary
2359 * has already done this work. Only check we don't need a different
2362 if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
2363 CAAM_JR_WARN("Device already init by primary process");
2367 /* TODO: free it during teardown */
2368 security_instance = rte_malloc("caam_jr",
2369 sizeof(struct rte_security_ctx), 0);
2370 if (security_instance == NULL) {
2371 CAAM_JR_ERR("memory allocation failed\n");
2372 /* TODO: error handling */
2376 security_instance->device = (void *)dev;
2377 security_instance->ops = &caam_jr_security_ops;
2378 security_instance->sess_cnt = 0;
2379 dev->security_ctx = security_instance;
2381 RTE_LOG(INFO, PMD, "%s cryptodev init\n", dev->data->name);
2386 caam_jr_dev_uninit(dev);
2387 rte_cryptodev_pmd_release_device(dev);
2389 free_job_ring(job_ring->uio_fd);
2391 CAAM_JR_ERR("driver %s: cryptodev_caam_jr_create failed",
2397 /** Initialise CAAM JR crypto device */
2399 cryptodev_caam_jr_probe(struct rte_vdev_device *vdev)
2401 struct rte_cryptodev_pmd_init_params init_params = {
2403 sizeof(struct sec_job_ring_t),
2405 RTE_CRYPTODEV_PMD_DEFAULT_MAX_NB_QUEUE_PAIRS
2408 const char *input_args;
2410 name = rte_vdev_device_name(vdev);
2414 input_args = rte_vdev_device_args(vdev);
2415 rte_cryptodev_pmd_parse_input_args(&init_params, input_args);
2417 /* if sec device version is not configured */
2418 if (!rta_get_sec_era()) {
2419 const struct device_node *caam_node;
2421 for_each_compatible_node(caam_node, NULL, "fsl,sec-v4.0") {
2422 const uint32_t *prop = of_get_property(caam_node,
2427 INTL_SEC_ERA(cpu_to_caam32(*prop)));
2432 #ifdef RTE_LIBRTE_PMD_CAAM_JR_BE
2433 if (rta_get_sec_era() > RTA_SEC_ERA_8) {
2435 "CAAM is compiled in BE mode for a device with sec era > 8\n");
2440 return caam_jr_dev_init(name, vdev, &init_params);
2443 /** Uninitialise CAAM JR crypto device */
2445 cryptodev_caam_jr_remove(struct rte_vdev_device *vdev)
2447 struct rte_cryptodev *cryptodev;
2450 name = rte_vdev_device_name(vdev);
2454 cryptodev = rte_cryptodev_pmd_get_named_dev(name);
2455 if (cryptodev == NULL)
2458 caam_jr_dev_uninit(cryptodev);
2460 return rte_cryptodev_pmd_destroy(cryptodev);
2463 static struct rte_vdev_driver cryptodev_caam_jr_drv = {
2464 .probe = cryptodev_caam_jr_probe,
2465 .remove = cryptodev_caam_jr_remove
2468 static struct cryptodev_driver caam_jr_crypto_drv;
2470 RTE_PMD_REGISTER_VDEV(CRYPTODEV_NAME_CAAM_JR_PMD, cryptodev_caam_jr_drv);
2471 RTE_PMD_REGISTER_PARAM_STRING(CRYPTODEV_NAME_CAAM_JR_PMD,
2472 "max_nb_queue_pairs=<int>"
2474 RTE_PMD_REGISTER_CRYPTO_DRIVER(caam_jr_crypto_drv, cryptodev_caam_jr_drv.driver,
2475 cryptodev_driver_id);
2477 RTE_INIT(caam_jr_init_log)
2479 caam_jr_logtype = rte_log_register("pmd.crypto.caam");
2480 if (caam_jr_logtype >= 0)
2481 rte_log_set_level(caam_jr_logtype, RTE_LOG_NOTICE);