1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright 2017-2019 NXP
10 #include <rte_byteorder.h>
11 #include <rte_common.h>
12 #include <rte_cryptodev_pmd.h>
13 #include <rte_crypto.h>
14 #include <rte_cryptodev.h>
15 #include <rte_bus_vdev.h>
16 #include <rte_malloc.h>
17 #include <rte_security_driver.h>
18 #include <rte_hexdump.h>
20 #include <caam_jr_capabilities.h>
21 #include <caam_jr_config.h>
22 #include <caam_jr_hw_specific.h>
23 #include <caam_jr_pvt.h>
24 #include <caam_jr_desc.h>
25 #include <caam_jr_log.h>
27 /* RTA header files */
28 #include <desc/common.h>
29 #include <desc/algo.h>
31 #ifdef RTE_LIBRTE_PMD_CAAM_JR_DEBUG
36 #define CRYPTODEV_NAME_CAAM_JR_PMD crypto_caam_jr
37 static uint8_t cryptodev_driver_id;
40 enum rta_sec_era rta_sec_era;
42 /* Lists the states possible for the SEC user space driver. */
43 enum sec_driver_state_e {
44 SEC_DRIVER_STATE_IDLE, /* Driver not initialized */
45 SEC_DRIVER_STATE_STARTED, /* Driver initialized and can be used*/
46 SEC_DRIVER_STATE_RELEASE, /* Driver release is in progress */
49 /* Job rings used for communication with SEC HW */
50 static struct sec_job_ring_t g_job_rings[MAX_SEC_JOB_RINGS];
52 /* The current state of SEC user space driver */
53 static enum sec_driver_state_e g_driver_state = SEC_DRIVER_STATE_IDLE;
55 /* The number of job rings used by SEC user space driver */
56 static int g_job_rings_no;
57 static int g_job_rings_max;
59 struct sec_outring_entry {
60 phys_addr_t desc; /* Pointer to completed descriptor */
61 uint32_t status; /* Status for completed descriptor */
64 /* Virtual-to-physical address conversion for ctx when mempool support is available */
65 static inline phys_addr_t
66 caam_jr_vtop_ctx(struct caam_jr_op_ctx *ctx, void *vaddr)
68 return (size_t)vaddr - ctx->vtop_offset;
72 caam_jr_op_ending(struct caam_jr_op_ctx *ctx)
74 /* report op status to sym->op and then free the ctx memory */
75 rte_mempool_put(ctx->ctx_pool, (void *)ctx);
78 static inline struct caam_jr_op_ctx *
79 caam_jr_alloc_ctx(struct caam_jr_session *ses)
81 struct caam_jr_op_ctx *ctx;
84 ret = rte_mempool_get(ses->ctx_pool, (void **)(&ctx));
86 CAAM_JR_DP_WARN("Alloc sec descriptor failed!");
90 * Clear SG memory. There are 16 SG entries of 16 bytes each.
91 * One call to dcbz_64() clears 64 bytes, hence it is called 4 times
92 * to clear all the SG entries. Since caam_jr_alloc_ctx() is called for
93 * each packet, memset() would be costlier than dcbz_64().
95 dcbz_64(&ctx->sg[SG_CACHELINE_0]);
96 dcbz_64(&ctx->sg[SG_CACHELINE_1]);
97 dcbz_64(&ctx->sg[SG_CACHELINE_2]);
98 dcbz_64(&ctx->sg[SG_CACHELINE_3]);
100 ctx->ctx_pool = ses->ctx_pool;
101 ctx->vtop_offset = (size_t) ctx - rte_mempool_virt2iova(ctx);
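/* vtop_offset caches the virtual-to-IOVA delta of this mempool object;
 * caam_jr_vtop_ctx() can then convert any address inside ctx to a bus
 * address with a single subtraction.
 */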
107 void caam_jr_stats_get(struct rte_cryptodev *dev,
108 struct rte_cryptodev_stats *stats)
110 struct caam_jr_qp **qp = (struct caam_jr_qp **)
111 dev->data->queue_pairs;
114 PMD_INIT_FUNC_TRACE();
116 CAAM_JR_ERR("Invalid stats ptr NULL");
119 for (i = 0; i < dev->data->nb_queue_pairs; i++) {
121 CAAM_JR_WARN("Uninitialised queue pair");
125 stats->enqueued_count += qp[i]->tx_pkts;
126 stats->dequeued_count += qp[i]->rx_pkts;
127 stats->enqueue_err_count += qp[i]->tx_errs;
128 stats->dequeue_err_count += qp[i]->rx_errs;
129 CAAM_JR_INFO("extra stats:\n\tRX Poll ERR = %" PRIu64
130 "\n\tTX Ring Full = %" PRIu64,
132 qp[i]->tx_ring_full);
137 void caam_jr_stats_reset(struct rte_cryptodev *dev)
140 struct caam_jr_qp **qp = (struct caam_jr_qp **)
141 (dev->data->queue_pairs);
143 PMD_INIT_FUNC_TRACE();
144 for (i = 0; i < dev->data->nb_queue_pairs; i++) {
146 CAAM_JR_WARN("Uninitialised queue pair");
151 qp[i]->rx_poll_err = 0;
154 qp[i]->tx_ring_full = 0;
159 is_cipher_only(struct caam_jr_session *ses)
161 return ((ses->cipher_alg != RTE_CRYPTO_CIPHER_NULL) &&
162 (ses->auth_alg == RTE_CRYPTO_AUTH_NULL));
166 is_auth_only(struct caam_jr_session *ses)
168 return ((ses->cipher_alg == RTE_CRYPTO_CIPHER_NULL) &&
169 (ses->auth_alg != RTE_CRYPTO_AUTH_NULL));
173 is_aead(struct caam_jr_session *ses)
175 return ((ses->cipher_alg == 0) &&
176 (ses->auth_alg == 0) &&
177 (ses->aead_alg != 0));
181 is_auth_cipher(struct caam_jr_session *ses)
183 return ((ses->cipher_alg != RTE_CRYPTO_CIPHER_NULL) &&
184 (ses->auth_alg != RTE_CRYPTO_AUTH_NULL) &&
185 (ses->proto_alg != RTE_SECURITY_PROTOCOL_IPSEC));
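/* Note: IPsec sessions also carry cipher and auth algorithms, so protocol
 * sessions are excluded here and dispatched through build_proto() instead.
 */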
189 is_proto_ipsec(struct caam_jr_session *ses)
191 return (ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC);
195 is_encode(struct caam_jr_session *ses)
197 return ses->dir == DIR_ENC;
201 is_decode(struct caam_jr_session *ses)
203 return ses->dir == DIR_DEC;
207 caam_auth_alg(struct caam_jr_session *ses, struct alginfo *alginfo_a)
209 switch (ses->auth_alg) {
210 case RTE_CRYPTO_AUTH_NULL:
211 ses->digest_length = 0;
213 case RTE_CRYPTO_AUTH_MD5_HMAC:
215 (ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
216 OP_PCL_IPSEC_HMAC_MD5_96 : OP_ALG_ALGSEL_MD5;
217 alginfo_a->algmode = OP_ALG_AAI_HMAC;
219 case RTE_CRYPTO_AUTH_SHA1_HMAC:
221 (ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
222 OP_PCL_IPSEC_HMAC_SHA1_96 : OP_ALG_ALGSEL_SHA1;
223 alginfo_a->algmode = OP_ALG_AAI_HMAC;
225 case RTE_CRYPTO_AUTH_SHA224_HMAC:
227 (ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
228 OP_PCL_IPSEC_HMAC_SHA1_160 : OP_ALG_ALGSEL_SHA224;
229 alginfo_a->algmode = OP_ALG_AAI_HMAC;
231 case RTE_CRYPTO_AUTH_SHA256_HMAC:
233 (ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
234 OP_PCL_IPSEC_HMAC_SHA2_256_128 : OP_ALG_ALGSEL_SHA256;
235 alginfo_a->algmode = OP_ALG_AAI_HMAC;
237 case RTE_CRYPTO_AUTH_SHA384_HMAC:
239 (ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
240 OP_PCL_IPSEC_HMAC_SHA2_384_192 : OP_ALG_ALGSEL_SHA384;
241 alginfo_a->algmode = OP_ALG_AAI_HMAC;
243 case RTE_CRYPTO_AUTH_SHA512_HMAC:
245 (ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
246 OP_PCL_IPSEC_HMAC_SHA2_512_256 : OP_ALG_ALGSEL_SHA512;
247 alginfo_a->algmode = OP_ALG_AAI_HMAC;
250 CAAM_JR_DEBUG("unsupported auth alg %u", ses->auth_alg);
255 caam_cipher_alg(struct caam_jr_session *ses, struct alginfo *alginfo_c)
257 switch (ses->cipher_alg) {
258 case RTE_CRYPTO_CIPHER_NULL:
260 case RTE_CRYPTO_CIPHER_AES_CBC:
262 (ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
263 OP_PCL_IPSEC_AES_CBC : OP_ALG_ALGSEL_AES;
264 alginfo_c->algmode = OP_ALG_AAI_CBC;
266 case RTE_CRYPTO_CIPHER_3DES_CBC:
268 (ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
269 OP_PCL_IPSEC_3DES : OP_ALG_ALGSEL_3DES;
270 alginfo_c->algmode = OP_ALG_AAI_CBC;
272 case RTE_CRYPTO_CIPHER_AES_CTR:
274 (ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
275 OP_PCL_IPSEC_AES_CTR : OP_ALG_ALGSEL_AES;
276 alginfo_c->algmode = OP_ALG_AAI_CTR;
279 CAAM_JR_DEBUG("unsupported cipher alg %d", ses->cipher_alg);
284 caam_aead_alg(struct caam_jr_session *ses, struct alginfo *alginfo)
286 switch (ses->aead_alg) {
287 case RTE_CRYPTO_AEAD_AES_GCM:
288 alginfo->algtype = OP_ALG_ALGSEL_AES;
289 alginfo->algmode = OP_ALG_AAI_GCM;
292 CAAM_JR_DEBUG("unsupported AEAD alg %d", ses->aead_alg);
296 /* prepare command block of the session */
298 caam_jr_prep_cdb(struct caam_jr_session *ses)
300 struct alginfo alginfo_c = {0}, alginfo_a = {0}, alginfo = {0};
301 int32_t shared_desc_len = 0;
304 #if CAAM_BYTE_ORDER == CORE_BYTE_ORDER
311 caam_jr_dma_free(ses->cdb);
313 cdb = caam_jr_dma_mem_alloc(L1_CACHE_BYTES, sizeof(struct sec_cdb));
315 CAAM_JR_ERR("failed to allocate memory for cdb\n");
321 memset(cdb, 0, sizeof(struct sec_cdb));
323 if (is_cipher_only(ses)) {
324 caam_cipher_alg(ses, &alginfo_c);
325 if (alginfo_c.algtype == (unsigned int)CAAM_JR_ALG_UNSUPPORT) {
326 CAAM_JR_ERR("unsupported cipher alg");
331 alginfo_c.key = (size_t)ses->cipher_key.data;
332 alginfo_c.keylen = ses->cipher_key.length;
333 alginfo_c.key_enc_flags = 0;
334 alginfo_c.key_type = RTA_DATA_IMM;
336 shared_desc_len = cnstr_shdsc_blkcipher(
338 swap, SHR_NEVER, &alginfo_c,
342 } else if (is_auth_only(ses)) {
343 caam_auth_alg(ses, &alginfo_a);
344 if (alginfo_a.algtype == (unsigned int)CAAM_JR_ALG_UNSUPPORT) {
345 CAAM_JR_ERR("unsupported auth alg");
350 alginfo_a.key = (size_t)ses->auth_key.data;
351 alginfo_a.keylen = ses->auth_key.length;
352 alginfo_a.key_enc_flags = 0;
353 alginfo_a.key_type = RTA_DATA_IMM;
355 shared_desc_len = cnstr_shdsc_hmac(cdb->sh_desc, true,
356 swap, SHR_NEVER, &alginfo_a,
359 } else if (is_aead(ses)) {
360 caam_aead_alg(ses, &alginfo);
361 if (alginfo.algtype == (unsigned int)CAAM_JR_ALG_UNSUPPORT) {
362 CAAM_JR_ERR("unsupported aead alg");
366 alginfo.key = (size_t)ses->aead_key.data;
367 alginfo.keylen = ses->aead_key.length;
368 alginfo.key_enc_flags = 0;
369 alginfo.key_type = RTA_DATA_IMM;
371 if (ses->dir == DIR_ENC)
372 shared_desc_len = cnstr_shdsc_gcm_encap(
373 cdb->sh_desc, true, swap,
378 shared_desc_len = cnstr_shdsc_gcm_decap(
379 cdb->sh_desc, true, swap,
384 caam_cipher_alg(ses, &alginfo_c);
385 if (alginfo_c.algtype == (unsigned int)CAAM_JR_ALG_UNSUPPORT) {
386 CAAM_JR_ERR("unsupported cipher alg");
391 alginfo_c.key = (size_t)ses->cipher_key.data;
392 alginfo_c.keylen = ses->cipher_key.length;
393 alginfo_c.key_enc_flags = 0;
394 alginfo_c.key_type = RTA_DATA_IMM;
396 caam_auth_alg(ses, &alginfo_a);
397 if (alginfo_a.algtype == (unsigned int)CAAM_JR_ALG_UNSUPPORT) {
398 CAAM_JR_ERR("unsupported auth alg");
403 alginfo_a.key = (size_t)ses->auth_key.data;
404 alginfo_a.keylen = ses->auth_key.length;
405 alginfo_a.key_enc_flags = 0;
406 alginfo_a.key_type = RTA_DATA_IMM;
408 cdb->sh_desc[0] = alginfo_c.keylen;
409 cdb->sh_desc[1] = alginfo_a.keylen;
410 err = rta_inline_query(IPSEC_AUTH_VAR_AES_DEC_BASE_DESC_LEN,
412 (unsigned int *)cdb->sh_desc,
413 &cdb->sh_desc[2], 2);
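/* rta_inline_query() is understood to decide, from the two key lengths
 * stashed in sh_desc[0]/sh_desc[1] and the descriptor length budget,
 * which keys fit inline as immediate data; the per-key result bits it
 * returns in sh_desc[2] are checked below to pick RTA_DATA_IMM vs
 * RTA_DATA_PTR.
 */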
416 CAAM_JR_ERR("Crypto: Incorrect key lengths");
420 if (cdb->sh_desc[2] & 1)
421 alginfo_c.key_type = RTA_DATA_IMM;
423 alginfo_c.key = (size_t)caam_jr_mem_vtop(
424 (void *)(size_t)alginfo_c.key);
425 alginfo_c.key_type = RTA_DATA_PTR;
427 if (cdb->sh_desc[2] & (1<<1))
428 alginfo_a.key_type = RTA_DATA_IMM;
430 alginfo_a.key = (size_t)caam_jr_mem_vtop(
431 (void *)(size_t)alginfo_a.key);
432 alginfo_a.key_type = RTA_DATA_PTR;
437 if (is_proto_ipsec(ses)) {
438 if (ses->dir == DIR_ENC) {
439 shared_desc_len = cnstr_shdsc_ipsec_new_encap(
441 true, swap, SHR_SERIAL,
443 (uint8_t *)&ses->ip4_hdr,
444 &alginfo_c, &alginfo_a);
445 } else if (ses->dir == DIR_DEC) {
446 shared_desc_len = cnstr_shdsc_ipsec_new_decap(
448 true, swap, SHR_SERIAL,
450 &alginfo_c, &alginfo_a);
453 /* Auth_only_len is overwritten in fd for each job */
454 shared_desc_len = cnstr_shdsc_authenc(cdb->sh_desc,
455 true, swap, SHR_SERIAL,
456 &alginfo_c, &alginfo_a,
458 ses->digest_length, ses->dir);
462 if (shared_desc_len < 0) {
463 CAAM_JR_ERR("error in preparing command block");
464 return shared_desc_len;
468 SEC_DUMP_DESC(cdb->sh_desc);
471 cdb->sh_hdr.hi.field.idlen = shared_desc_len;
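/* shared_desc_len returned by the RTA constructors is taken to be the
 * descriptor length in 32-bit words; recording it in the shared descriptor
 * header tells the engine how many words to fetch.
 */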
476 /* @brief Poll the HW for already processed jobs in the JR
477 * and silently discard the available jobs or notify them to UA
478 * with indicated error code.
480 * @param [in,out] job_ring The job ring to poll.
481 * @param [in] do_notify Can be #TRUE or #FALSE. Indicates if
482 * descriptors are to be discarded
483 * or notified to UA with given error_code.
484 * @param [out] notified_descs Number of notified descriptors. Can be NULL
485 * if do_notify is #FALSE
488 hw_flush_job_ring(struct sec_job_ring_t *job_ring,
490 uint32_t *notified_descs)
492 int32_t jobs_no_to_discard = 0;
493 int32_t discarded_descs_no = 0;
495 CAAM_JR_DEBUG("Jr[%p] pi[%d] ci[%d].Flushing jr notify desc=[%d]",
496 job_ring, job_ring->pidx, job_ring->cidx, do_notify);
498 jobs_no_to_discard = hw_get_no_finished_jobs(job_ring);
500 /* Discard all jobs */
501 CAAM_JR_DEBUG("Jr[%p] pi[%d] ci[%d].Discarding %d descs",
502 job_ring, job_ring->pidx, job_ring->cidx,
505 while (jobs_no_to_discard > discarded_descs_no) {
506 discarded_descs_no++;
507 /* Now increment the consumer index for the current job ring,
508 * AFTER saving the job in a temporary location!
511 job_ring->cidx = SEC_CIRCULAR_COUNTER(job_ring->cidx,
514 hw_remove_entries(job_ring, 1);
517 if (do_notify == true) {
518 ASSERT(notified_descs != NULL);
519 *notified_descs = discarded_descs_no;
523 /* @brief Poll the HW for already processed jobs in the JR
524 * and notify the available jobs to UA.
526 * @param [in] job_ring The job ring to poll.
527 * @param [in] limit The maximum number of jobs to notify.
528 * If set to negative value, all available jobs are
531 * @retval >=0 for No of jobs notified to UA.
532 * @retval -1 for error
535 hw_poll_job_ring(struct sec_job_ring_t *job_ring,
536 struct rte_crypto_op **ops, int32_t limit,
537 struct caam_jr_qp *jr_qp)
539 int32_t jobs_no_to_notify = 0; /* the number of done jobs to notify*/
540 int32_t number_of_jobs_available = 0;
541 int32_t notified_descs_no = 0;
542 uint32_t sec_error_code = 0;
543 struct job_descriptor *current_desc;
544 phys_addr_t current_desc_addr;
545 phys_addr_t *temp_addr;
546 struct caam_jr_op_ctx *ctx;
548 /* TODO check for ops have memory*/
549 /* check here if any JR error that cannot be written
550 * in the output status word has occurred
552 if (JR_REG_JRINT_JRE_EXTRACT(GET_JR_REG(JRINT, job_ring))) {
553 CAAM_JR_INFO("err received");
554 sec_error_code = JR_REG_JRINT_ERR_TYPE_EXTRACT(
555 GET_JR_REG(JRINT, job_ring));
556 if (unlikely(sec_error_code)) {
557 hw_job_ring_error_print(job_ring, sec_error_code);
561 /* compute the number of jobs available in the job ring based on the
562 * producer and consumer index values.
564 number_of_jobs_available = hw_get_no_finished_jobs(job_ring);
565 /* Compute the number of notifications that need to be raised to UA
566 * If limit > total number of done jobs -> notify all done jobs
567 * If limit = 0 -> error
568 * If limit < total number of done jobs -> notify a number
569 * of done jobs equal to the limit
571 jobs_no_to_notify = (limit > number_of_jobs_available) ?
572 number_of_jobs_available : limit;
574 "Jr[%p] pi[%d] ci[%d].limit =%d Available=%d.Jobs to notify=%d",
575 job_ring, job_ring->pidx, job_ring->cidx,
576 limit, number_of_jobs_available, jobs_no_to_notify);
580 while (jobs_no_to_notify > notified_descs_no) {
581 static uint64_t false_alarm;
582 static uint64_t real_poll;
584 /* Get job status here */
585 sec_error_code = job_ring->output_ring[job_ring->cidx].status;
586 /* Get completed descriptor */
587 temp_addr = &(job_ring->output_ring[job_ring->cidx].desc);
588 current_desc_addr = (phys_addr_t)sec_read_addr(temp_addr);
591 /* TODO: check whether this is a false alarm (no descriptor present) */
592 if (!current_desc_addr) {
594 printf("false alarm %" PRIu64 " real %" PRIu64
595 " sec_err = 0x%x cidx Index = %d\n",
596 false_alarm, real_poll,
597 sec_error_code, job_ring->cidx);
598 rte_panic("CAAM JR descriptor NULL");
599 return notified_descs_no;
601 current_desc = (struct job_descriptor *)
602 caam_jr_dma_ptov(current_desc_addr);
603 /* now increment the consumer index for the current job ring,
604 * AFTER saving job in temporary location!
606 job_ring->cidx = SEC_CIRCULAR_COUNTER(job_ring->cidx,
608 /* Signal that the job has been processed and the slot is free*/
609 hw_remove_entries(job_ring, 1);
610 /*TODO for multiple ops, packets*/
611 ctx = container_of(current_desc, struct caam_jr_op_ctx, jobdes);
612 if (unlikely(sec_error_code)) {
613 CAAM_JR_ERR("desc at cidx %d generated error 0x%x\n",
614 job_ring->cidx, sec_error_code);
615 hw_handle_job_ring_error(job_ring, sec_error_code);
616 //todo improve with exact errors
617 ctx->op->status = RTE_CRYPTO_OP_STATUS_ERROR;
620 ctx->op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
622 if (ctx->op->sym->m_dst) {
623 rte_hexdump(stdout, "PROCESSED",
624 rte_pktmbuf_mtod(ctx->op->sym->m_dst, void *),
625 rte_pktmbuf_data_len(ctx->op->sym->m_dst));
627 rte_hexdump(stdout, "PROCESSED",
628 rte_pktmbuf_mtod(ctx->op->sym->m_src, void *),
629 rte_pktmbuf_data_len(ctx->op->sym->m_src));
633 if (ctx->op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) {
636 if (ctx->op->sym->m_dst) {
637 /*TODO check for ip header or other*/
638 ip4_hdr = (struct ip *)
639 rte_pktmbuf_mtod(ctx->op->sym->m_dst, char*);
640 ctx->op->sym->m_dst->pkt_len =
641 rte_be_to_cpu_16(ip4_hdr->ip_len);
642 ctx->op->sym->m_dst->data_len =
643 rte_be_to_cpu_16(ip4_hdr->ip_len);
645 ip4_hdr = (struct ip *)
646 rte_pktmbuf_mtod(ctx->op->sym->m_src, char*);
647 ctx->op->sym->m_src->pkt_len =
648 rte_be_to_cpu_16(ip4_hdr->ip_len);
649 ctx->op->sym->m_src->data_len =
650 rte_be_to_cpu_16(ip4_hdr->ip_len);
654 caam_jr_op_ending(ctx);
658 return notified_descs_no;
662 caam_jr_dequeue_burst(void *qp, struct rte_crypto_op **ops,
665 struct caam_jr_qp *jr_qp = (struct caam_jr_qp *)qp;
666 struct sec_job_ring_t *ring = jr_qp->ring;
670 CAAM_JR_DP_DEBUG("Jr[%p]Polling. limit[%d]", ring, nb_ops);
673 * If nb_ops < 0 -> poll JR until no more notifications are available.
674 * If nb_ops > 0 -> poll JR until limit is reached.
677 /* Run hw poll job ring */
678 num_rx = hw_poll_job_ring(ring, ops, nb_ops, jr_qp);
680 CAAM_JR_ERR("Error polling SEC engine (%d)", num_rx);
684 CAAM_JR_DP_DEBUG("Jr[%p].Jobs notified[%d]. ", ring, num_rx);
686 if (ring->jr_mode == SEC_NOTIFICATION_TYPE_NAPI) {
687 if (num_rx < nb_ops) {
688 ret = caam_jr_enable_irqs(ring->irq_fd);
689 SEC_ASSERT(ret == 0, ret,
690 "Failed to enable irqs for job ring %p", ring);
692 } else if (ring->jr_mode == SEC_NOTIFICATION_TYPE_IRQ) {
694 /* Always enable IRQ generation when in pure IRQ mode */
695 ret = caam_jr_enable_irqs(ring->irq_fd);
696 SEC_ASSERT(ret == 0, ret,
697 "Failed to enable irqs for job ring %p", ring);
700 jr_qp->rx_pkts += num_rx;
707 * |<----------data_len----------->|
708 * |ip_header|ah_header|icv|payload|
713 static inline struct caam_jr_op_ctx *
714 build_auth_only_sg(struct rte_crypto_op *op, struct caam_jr_session *ses)
716 struct rte_crypto_sym_op *sym = op->sym;
717 struct rte_mbuf *mbuf = sym->m_src;
718 struct caam_jr_op_ctx *ctx;
719 struct sec4_sg_entry *sg;
722 uint64_t sdesc_offset;
723 struct sec_job_descriptor_t *jobdescr;
731 if ((mbuf->nb_segs + extra_segs) > MAX_SG_ENTRIES) {
732 CAAM_JR_DP_ERR("Auth: Max sec segs supported is %d",
737 ctx = caam_jr_alloc_ctx(ses);
744 sdesc_offset = (size_t) ((char *)&cdb->sh_desc - (char *)cdb);
746 jobdescr = (struct sec_job_descriptor_t *) ctx->jobdes.desc;
748 SEC_JD_INIT(jobdescr);
749 SEC_JD_SET_SD(jobdescr,
750 (phys_addr_t)(caam_jr_dma_vtop(cdb)) + sdesc_offset,
751 cdb->sh_hdr.hi.field.idlen);
754 SEC_JD_SET_OUT_PTR(jobdescr, (uint64_t)sym->auth.digest.phys_addr,
755 0, ses->digest_length);
759 length = sym->auth.data.length;
760 sg->ptr = cpu_to_caam64(rte_pktmbuf_iova(mbuf) + sym->auth.data.offset);
761 sg->len = cpu_to_caam32(mbuf->data_len - sym->auth.data.offset);
763 /* Successive segs */
767 sg->ptr = cpu_to_caam64(rte_pktmbuf_iova(mbuf));
768 sg->len = cpu_to_caam32(mbuf->data_len);
772 if (is_decode(ses)) {
773 /* digest verification case */
775 /* hash result or digest, save digest first */
776 rte_memcpy(ctx->digest, sym->auth.digest.data,
779 rte_hexdump(stdout, "ICV", ctx->digest, ses->digest_length);
781 sg->ptr = cpu_to_caam64(caam_jr_vtop_ctx(ctx, ctx->digest));
782 sg->len = cpu_to_caam32(ses->digest_length);
783 length += ses->digest_length;
785 sg->len -= ses->digest_length;
789 sg->len |= cpu_to_caam32(SEC4_SG_LEN_FIN);
791 SEC_JD_SET_IN_PTR(jobdescr,
792 (uint64_t)caam_jr_vtop_ctx(ctx, &ctx->sg[0]), 0, length);
793 /* enabling sg list */
794 (jobdescr)->seq_in.command.word |= 0x01000000;
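/* Bit 24 (0x01000000) of the SEQ IN/OUT PTR command word is taken to be the
 * SGF flag, i.e. the pointer refers to a scatter/gather table rather than a
 * flat buffer.
 */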
799 static inline struct caam_jr_op_ctx *
800 build_auth_only(struct rte_crypto_op *op, struct caam_jr_session *ses)
802 struct rte_crypto_sym_op *sym = op->sym;
803 struct caam_jr_op_ctx *ctx;
804 struct sec4_sg_entry *sg;
805 rte_iova_t start_addr;
807 uint64_t sdesc_offset;
808 struct sec_job_descriptor_t *jobdescr;
810 ctx = caam_jr_alloc_ctx(ses);
817 sdesc_offset = (size_t) ((char *)&cdb->sh_desc - (char *)cdb);
819 start_addr = rte_pktmbuf_iova(sym->m_src);
821 jobdescr = (struct sec_job_descriptor_t *) ctx->jobdes.desc;
823 SEC_JD_INIT(jobdescr);
824 SEC_JD_SET_SD(jobdescr,
825 (phys_addr_t)(caam_jr_dma_vtop(cdb)) + sdesc_offset,
826 cdb->sh_hdr.hi.field.idlen);
829 SEC_JD_SET_OUT_PTR(jobdescr, (uint64_t)sym->auth.digest.phys_addr,
830 0, ses->digest_length);
833 if (is_decode(ses)) {
835 SEC_JD_SET_IN_PTR(jobdescr,
836 (uint64_t)caam_jr_vtop_ctx(ctx, sg), 0,
837 (sym->auth.data.length + ses->digest_length));
838 /* enabling sg list */
839 (jobdescr)->seq_in.command.word |= 0x01000000;
841 /* hash result or digest, save digest first */
842 rte_memcpy(ctx->digest, sym->auth.digest.data,
844 sg->ptr = cpu_to_caam64(start_addr + sym->auth.data.offset);
845 sg->len = cpu_to_caam32(sym->auth.data.length);
848 rte_hexdump(stdout, "ICV", ctx->digest, ses->digest_length);
850 /* let's check digest by hw */
852 sg->ptr = cpu_to_caam64(caam_jr_vtop_ctx(ctx, ctx->digest));
853 sg->len = cpu_to_caam32(ses->digest_length);
855 sg->len |= cpu_to_caam32(SEC4_SG_LEN_FIN);
857 SEC_JD_SET_IN_PTR(jobdescr, (uint64_t)start_addr,
858 sym->auth.data.offset, sym->auth.data.length);
863 static inline struct caam_jr_op_ctx *
864 build_cipher_only_sg(struct rte_crypto_op *op, struct caam_jr_session *ses)
866 struct rte_crypto_sym_op *sym = op->sym;
867 struct rte_mbuf *mbuf = sym->m_src;
868 struct caam_jr_op_ctx *ctx;
869 struct sec4_sg_entry *sg, *in_sg;
872 uint64_t sdesc_offset;
873 uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
875 struct sec_job_descriptor_t *jobdescr;
880 reg_segs = mbuf->nb_segs + sym->m_src->nb_segs + 2;
883 reg_segs = mbuf->nb_segs * 2 + 2;
886 if (reg_segs > MAX_SG_ENTRIES) {
887 CAAM_JR_DP_ERR("Cipher: Max sec segs supported is %d",
892 ctx = caam_jr_alloc_ctx(ses);
898 sdesc_offset = (size_t) ((char *)&cdb->sh_desc - (char *)cdb);
900 jobdescr = (struct sec_job_descriptor_t *) ctx->jobdes.desc;
902 SEC_JD_INIT(jobdescr);
903 SEC_JD_SET_SD(jobdescr,
904 (phys_addr_t)(caam_jr_dma_vtop(cdb)) + sdesc_offset,
905 cdb->sh_hdr.hi.field.idlen);
908 CAAM_JR_INFO("mbuf offset =%d, cipher offset = %d, length =%d+%d",
909 sym->m_src->data_off, sym->cipher.data.offset,
910 sym->cipher.data.length, ses->iv.length);
919 length = sym->cipher.data.length;
921 sg->ptr = cpu_to_caam64(rte_pktmbuf_iova(mbuf)
922 + sym->cipher.data.offset);
923 sg->len = cpu_to_caam32(mbuf->data_len - sym->cipher.data.offset);
925 /* Successive segs */
929 sg->ptr = cpu_to_caam64(rte_pktmbuf_iova(mbuf));
930 sg->len = cpu_to_caam32(mbuf->data_len);
934 sg->len |= cpu_to_caam32(SEC4_SG_LEN_FIN);
936 SEC_JD_SET_OUT_PTR(jobdescr,
937 (uint64_t)caam_jr_vtop_ctx(ctx, &ctx->sg[0]), 0,
940 (jobdescr)->seq_out.command.word |= 0x01000000;
947 length = sym->cipher.data.length + ses->iv.length;
950 sg->ptr = cpu_to_caam64(caam_jr_dma_vtop(IV_ptr));
951 sg->len = cpu_to_caam32(ses->iv.length);
955 sg->ptr = cpu_to_caam64(rte_pktmbuf_iova(mbuf)
956 + sym->cipher.data.offset);
957 sg->len = cpu_to_caam32(mbuf->data_len - sym->cipher.data.offset);
959 /* Successive segs */
963 sg->ptr = cpu_to_caam64(rte_pktmbuf_iova(mbuf));
964 sg->len = cpu_to_caam32(mbuf->data_len);
968 sg->len |= cpu_to_caam32(SEC4_SG_LEN_FIN);
971 SEC_JD_SET_IN_PTR(jobdescr, (uint64_t)caam_jr_vtop_ctx(ctx, in_sg), 0,
974 (jobdescr)->seq_in.command.word |= 0x01000000;
979 static inline struct caam_jr_op_ctx *
980 build_cipher_only(struct rte_crypto_op *op, struct caam_jr_session *ses)
982 struct rte_crypto_sym_op *sym = op->sym;
983 struct caam_jr_op_ctx *ctx;
984 struct sec4_sg_entry *sg;
985 rte_iova_t src_start_addr, dst_start_addr;
987 uint64_t sdesc_offset;
988 uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
990 struct sec_job_descriptor_t *jobdescr;
992 ctx = caam_jr_alloc_ctx(ses);
998 sdesc_offset = (size_t) ((char *)&cdb->sh_desc - (char *)cdb);
1000 src_start_addr = rte_pktmbuf_iova(sym->m_src);
1002 dst_start_addr = rte_pktmbuf_iova(sym->m_dst);
1004 dst_start_addr = src_start_addr;
1006 jobdescr = (struct sec_job_descriptor_t *) ctx->jobdes.desc;
1008 SEC_JD_INIT(jobdescr);
1009 SEC_JD_SET_SD(jobdescr,
1010 (phys_addr_t)(caam_jr_dma_vtop(cdb)) + sdesc_offset,
1011 cdb->sh_hdr.hi.field.idlen);
1014 CAAM_JR_INFO("mbuf offset =%d, cipher offset = %d, length =%d+%d",
1015 sym->m_src->data_off, sym->cipher.data.offset,
1016 sym->cipher.data.length, ses->iv.length);
1019 SEC_JD_SET_OUT_PTR(jobdescr, (uint64_t)dst_start_addr,
1020 sym->cipher.data.offset,
1021 sym->cipher.data.length + ses->iv.length);
1025 SEC_JD_SET_IN_PTR(jobdescr, (uint64_t)caam_jr_vtop_ctx(ctx, sg), 0,
1026 sym->cipher.data.length + ses->iv.length);
1027 /*enabling sg bit */
1028 (jobdescr)->seq_in.command.word |= 0x01000000;
1030 sg->ptr = cpu_to_caam64(caam_jr_dma_vtop(IV_ptr));
1031 sg->len = cpu_to_caam32(ses->iv.length);
1034 sg->ptr = cpu_to_caam64(src_start_addr + sym->cipher.data.offset);
1035 sg->len = cpu_to_caam32(sym->cipher.data.length);
1037 sg->len |= cpu_to_caam32(SEC4_SG_LEN_FIN);
1042 /* For decapsulation:
1044 * +----+----------------+--------------------------------+-----+
1045 * | IV | Auth-only data | Authenticated & Encrypted data | ICV |
1046 * +----+----------------+--------------------------------+-----+
1048 * +--------------------------------+
1049 * | Decrypted & authenticated data |
1050 * +--------------------------------+
1053 static inline struct caam_jr_op_ctx *
1054 build_cipher_auth_sg(struct rte_crypto_op *op, struct caam_jr_session *ses)
1056 struct rte_crypto_sym_op *sym = op->sym;
1057 struct caam_jr_op_ctx *ctx;
1058 struct sec4_sg_entry *sg, *out_sg, *in_sg;
1059 struct rte_mbuf *mbuf;
1060 uint32_t length = 0;
1061 struct sec_cdb *cdb;
1062 uint64_t sdesc_offset;
1064 uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
1066 struct sec_job_descriptor_t *jobdescr;
1067 uint16_t auth_hdr_len = sym->cipher.data.offset -
1068 sym->auth.data.offset;
1069 uint16_t auth_tail_len = sym->auth.data.length -
1070 sym->cipher.data.length - auth_hdr_len;
1071 uint32_t auth_only_len = (auth_tail_len << 16) | auth_hdr_len;
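/* auth_only_len packs the authenticate-only regions around the cipher range:
 * the header bytes before cipher.data.offset in the low 16 bits and the
 * trailing bytes after the cipher range in the high 16 bits. It is written
 * to DPOVRD below, with the top bit set (understood to mark the override as
 * valid).
 */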
1075 req_segs = mbuf->nb_segs + sym->m_src->nb_segs + 3;
1078 req_segs = mbuf->nb_segs * 2 + 3;
1081 if (req_segs > MAX_SG_ENTRIES) {
1082 CAAM_JR_DP_ERR("Cipher-Auth: Max sec segs supported is %d",
1087 ctx = caam_jr_alloc_ctx(ses);
1093 sdesc_offset = (size_t) ((char *)&cdb->sh_desc - (char *)cdb);
1095 jobdescr = (struct sec_job_descriptor_t *) ctx->jobdes.desc;
1097 SEC_JD_INIT(jobdescr);
1098 SEC_JD_SET_SD(jobdescr,
1099 (phys_addr_t)(caam_jr_dma_vtop(cdb)) + sdesc_offset,
1100 cdb->sh_hdr.hi.field.idlen);
1108 out_sg = &ctx->sg[0];
1110 length = sym->auth.data.length + ses->digest_length;
1112 length = sym->auth.data.length;
1117 sg->ptr = cpu_to_caam64(rte_pktmbuf_iova(mbuf)
1118 + sym->auth.data.offset);
1119 sg->len = cpu_to_caam32(mbuf->data_len - sym->auth.data.offset);
1121 /* Successive segs */
1125 sg->ptr = cpu_to_caam64(rte_pktmbuf_iova(mbuf));
1126 sg->len = cpu_to_caam32(mbuf->data_len);
1130 if (is_encode(ses)) {
1131 /* set auth output */
1133 sg->ptr = cpu_to_caam64(sym->auth.digest.phys_addr);
1134 sg->len = cpu_to_caam32(ses->digest_length);
1137 sg->len |= cpu_to_caam32(SEC4_SG_LEN_FIN);
1139 SEC_JD_SET_OUT_PTR(jobdescr,
1140 (uint64_t)caam_jr_dma_vtop(out_sg), 0, length);
1142 (jobdescr)->seq_out.command.word |= 0x01000000;
1149 length = ses->iv.length + sym->auth.data.length;
1151 length = ses->iv.length + sym->auth.data.length
1152 + ses->digest_length;
1154 sg->ptr = cpu_to_caam64(caam_jr_dma_vtop(IV_ptr));
1155 sg->len = cpu_to_caam32(ses->iv.length);
1159 sg->ptr = cpu_to_caam64(rte_pktmbuf_iova(mbuf)
1160 + sym->auth.data.offset);
1161 sg->len = cpu_to_caam32(mbuf->data_len - sym->auth.data.offset);
1163 /* Successive segs */
1167 sg->ptr = cpu_to_caam64(rte_pktmbuf_iova(mbuf));
1168 sg->len = cpu_to_caam32(mbuf->data_len);
1172 if (is_decode(ses)) {
1174 rte_memcpy(ctx->digest, sym->auth.digest.data,
1175 ses->digest_length);
1176 sg->ptr = cpu_to_caam64(caam_jr_dma_vtop(ctx->digest));
1177 sg->len = cpu_to_caam32(ses->digest_length);
1180 sg->len |= cpu_to_caam32(SEC4_SG_LEN_FIN);
1182 SEC_JD_SET_IN_PTR(jobdescr, (uint64_t)caam_jr_dma_vtop(in_sg), 0,
1185 (jobdescr)->seq_in.command.word |= 0x01000000;
1186 /* Auth_only_len is set as 0 in descriptor and it is
1187 * overwritten here in the jd, which updates the DPOVRD register.
1192 (jobdescr)->dpovrd = 0x80000000 | auth_only_len;
1197 static inline struct caam_jr_op_ctx *
1198 build_cipher_auth(struct rte_crypto_op *op, struct caam_jr_session *ses)
1200 struct rte_crypto_sym_op *sym = op->sym;
1201 struct caam_jr_op_ctx *ctx;
1202 struct sec4_sg_entry *sg;
1203 rte_iova_t src_start_addr, dst_start_addr;
1204 uint32_t length = 0;
1205 struct sec_cdb *cdb;
1206 uint64_t sdesc_offset;
1207 uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
1209 struct sec_job_descriptor_t *jobdescr;
1210 uint16_t auth_hdr_len = sym->cipher.data.offset -
1211 sym->auth.data.offset;
1212 uint16_t auth_tail_len = sym->auth.data.length -
1213 sym->cipher.data.length - auth_hdr_len;
1214 uint32_t auth_only_len = (auth_tail_len << 16) | auth_hdr_len;
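/* Same auth-only length packing as in build_cipher_auth_sg() above; it is
 * written to DPOVRD at the end of this function.
 */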
1216 src_start_addr = rte_pktmbuf_iova(sym->m_src);
1218 dst_start_addr = rte_pktmbuf_iova(sym->m_dst);
1220 dst_start_addr = src_start_addr;
1222 ctx = caam_jr_alloc_ctx(ses);
1228 sdesc_offset = (size_t) ((char *)&cdb->sh_desc - (char *)cdb);
1230 jobdescr = (struct sec_job_descriptor_t *) ctx->jobdes.desc;
1232 SEC_JD_INIT(jobdescr);
1233 SEC_JD_SET_SD(jobdescr,
1234 (phys_addr_t)(caam_jr_dma_vtop(cdb)) + sdesc_offset,
1235 cdb->sh_hdr.hi.field.idlen);
1239 if (is_encode(ses)) {
1240 sg->ptr = cpu_to_caam64(caam_jr_dma_vtop(IV_ptr));
1241 sg->len = cpu_to_caam32(ses->iv.length);
1242 length += ses->iv.length;
1245 sg->ptr = cpu_to_caam64(src_start_addr + sym->auth.data.offset);
1246 sg->len = cpu_to_caam32(sym->auth.data.length);
1247 length += sym->auth.data.length;
1249 sg->len |= cpu_to_caam32(SEC4_SG_LEN_FIN);
1251 sg->ptr = cpu_to_caam64(caam_jr_dma_vtop(IV_ptr));
1252 sg->len = cpu_to_caam32(ses->iv.length);
1253 length += ses->iv.length;
1256 sg->ptr = cpu_to_caam64(src_start_addr + sym->auth.data.offset);
1257 sg->len = cpu_to_caam32(sym->auth.data.length);
1258 length += sym->auth.data.length;
1260 rte_memcpy(ctx->digest, sym->auth.digest.data,
1261 ses->digest_length);
1263 sg->ptr = cpu_to_caam64(caam_jr_dma_vtop(ctx->digest));
1264 sg->len = cpu_to_caam32(ses->digest_length);
1265 length += ses->digest_length;
1267 sg->len |= cpu_to_caam32(SEC4_SG_LEN_FIN);
1270 SEC_JD_SET_IN_PTR(jobdescr, (uint64_t)caam_jr_dma_vtop(&ctx->sg[0]), 0,
1273 (jobdescr)->seq_in.command.word |= 0x01000000;
1278 sg->ptr = cpu_to_caam64(dst_start_addr + sym->cipher.data.offset);
1279 sg->len = cpu_to_caam32(sym->cipher.data.length);
1280 length = sym->cipher.data.length;
1282 if (is_encode(ses)) {
1283 /* set auth output */
1285 sg->ptr = cpu_to_caam64(sym->auth.digest.phys_addr);
1286 sg->len = cpu_to_caam32(ses->digest_length);
1287 length += ses->digest_length;
1290 sg->len |= cpu_to_caam32(SEC4_SG_LEN_FIN);
1292 SEC_JD_SET_OUT_PTR(jobdescr,
1293 (uint64_t)caam_jr_dma_vtop(&ctx->sg[6]), 0, length);
1295 (jobdescr)->seq_out.command.word |= 0x01000000;
1297 /* Auth_only_len is set as 0 in descriptor and it is
1298 * overwritten here in the jd, which updates the DPOVRD register.
1303 (jobdescr)->dpovrd = 0x80000000 | auth_only_len;
1308 static inline struct caam_jr_op_ctx *
1309 build_proto(struct rte_crypto_op *op, struct caam_jr_session *ses)
1311 struct rte_crypto_sym_op *sym = op->sym;
1312 struct caam_jr_op_ctx *ctx = NULL;
1313 phys_addr_t src_start_addr, dst_start_addr;
1314 struct sec_cdb *cdb;
1315 uint64_t sdesc_offset;
1316 struct sec_job_descriptor_t *jobdescr;
1318 ctx = caam_jr_alloc_ctx(ses);
1323 src_start_addr = rte_pktmbuf_iova(sym->m_src);
1325 dst_start_addr = rte_pktmbuf_iova(sym->m_dst);
1327 dst_start_addr = src_start_addr;
1330 sdesc_offset = (size_t) ((char *)&cdb->sh_desc - (char *)cdb);
1332 jobdescr = (struct sec_job_descriptor_t *) ctx->jobdes.desc;
1334 SEC_JD_INIT(jobdescr);
1335 SEC_JD_SET_SD(jobdescr,
1336 (phys_addr_t)(caam_jr_dma_vtop(cdb)) + sdesc_offset,
1337 cdb->sh_hdr.hi.field.idlen);
1340 SEC_JD_SET_OUT_PTR(jobdescr, (uint64_t)dst_start_addr, 0,
1341 sym->m_src->buf_len - sym->m_src->data_off);
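/* For protocol (IPsec) offload the whole remaining mbuf buffer is offered as
 * output, since the engine adds or strips the protocol headers itself; the
 * real output length is recovered on dequeue from the IP header (see
 * hw_poll_job_ring()).
 */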
1343 SEC_JD_SET_IN_PTR(jobdescr, (uint64_t)src_start_addr, 0,
1344 sym->m_src->pkt_len);
1345 sym->m_src->packet_type &= ~RTE_PTYPE_L4_MASK;
1351 caam_jr_enqueue_op(struct rte_crypto_op *op, struct caam_jr_qp *qp)
1353 struct sec_job_ring_t *ring = qp->ring;
1354 struct caam_jr_session *ses;
1355 struct caam_jr_op_ctx *ctx = NULL;
1356 struct sec_job_descriptor_t *jobdescr __rte_unused;
1358 switch (op->sess_type) {
1359 case RTE_CRYPTO_OP_WITH_SESSION:
1360 ses = (struct caam_jr_session *)
1361 get_sym_session_private_data(op->sym->session,
1362 cryptodev_driver_id);
1364 case RTE_CRYPTO_OP_SECURITY_SESSION:
1365 ses = (struct caam_jr_session *)
1366 get_sec_session_private_data(
1367 op->sym->sec_session);
1370 CAAM_JR_DP_ERR("sessionless crypto op not supported");
1375 if (unlikely(!ses->qp || ses->qp != qp)) {
1376 CAAM_JR_DP_DEBUG("Old:sess->qp=%p New qp = %p\n", ses->qp, qp);
1378 caam_jr_prep_cdb(ses);
1381 if (rte_pktmbuf_is_contiguous(op->sym->m_src)) {
1382 if (is_auth_cipher(ses))
1383 ctx = build_cipher_auth(op, ses);
1384 else if (is_aead(ses))
1386 else if (is_auth_only(ses))
1387 ctx = build_auth_only(op, ses);
1388 else if (is_cipher_only(ses))
1389 ctx = build_cipher_only(op, ses);
1390 else if (is_proto_ipsec(ses))
1391 ctx = build_proto(op, ses);
1393 if (is_auth_cipher(ses))
1394 ctx = build_cipher_auth_sg(op, ses);
1395 else if (is_aead(ses))
1397 else if (is_auth_only(ses))
1398 ctx = build_auth_only_sg(op, ses);
1399 else if (is_cipher_only(ses))
1400 ctx = build_cipher_only_sg(op, ses);
1403 if (unlikely(!ctx)) {
1405 CAAM_JR_ERR("unsupported sec op");
1410 rte_hexdump(stdout, "DECODE",
1411 rte_pktmbuf_mtod(op->sym->m_src, void *),
1412 rte_pktmbuf_data_len(op->sym->m_src));
1414 rte_hexdump(stdout, "ENCODE",
1415 rte_pktmbuf_mtod(op->sym->m_src, void *),
1416 rte_pktmbuf_data_len(op->sym->m_src));
1418 printf("\n JD before conversion\n");
1419 for (int i = 0; i < 12; i++)
1420 printf("\n 0x%08x", ctx->jobdes.desc[i]);
1423 CAAM_JR_DP_DEBUG("Jr[%p] pi[%d] ci[%d].Before sending desc",
1424 ring, ring->pidx, ring->cidx);
1426 /* todo - do we want to retry */
1427 if (SEC_JOB_RING_IS_FULL(ring->pidx, ring->cidx,
1428 SEC_JOB_RING_SIZE, SEC_JOB_RING_SIZE)) {
1429 CAAM_JR_DP_DEBUG("Ring FULL Jr[%p] pi[%d] ci[%d].Size = %d",
1430 ring, ring->pidx, ring->cidx, SEC_JOB_RING_SIZE);
1431 caam_jr_op_ending(ctx);
1436 #if CORE_BYTE_ORDER != CAAM_BYTE_ORDER
1437 jobdescr = (struct sec_job_descriptor_t *) ctx->jobdes.desc;
1439 jobdescr->deschdr.command.word =
1440 cpu_to_caam32(jobdescr->deschdr.command.word);
1441 jobdescr->sd_ptr = cpu_to_caam64(jobdescr->sd_ptr);
1442 jobdescr->seq_out.command.word =
1443 cpu_to_caam32(jobdescr->seq_out.command.word);
1444 jobdescr->seq_out_ptr = cpu_to_caam64(jobdescr->seq_out_ptr);
1445 jobdescr->out_ext_length = cpu_to_caam32(jobdescr->out_ext_length);
1446 jobdescr->seq_in.command.word =
1447 cpu_to_caam32(jobdescr->seq_in.command.word);
1448 jobdescr->seq_in_ptr = cpu_to_caam64(jobdescr->seq_in_ptr);
1449 jobdescr->in_ext_length = cpu_to_caam32(jobdescr->in_ext_length);
1450 jobdescr->load_dpovrd.command.word =
1451 cpu_to_caam32(jobdescr->load_dpovrd.command.word);
1452 jobdescr->dpovrd = cpu_to_caam32(jobdescr->dpovrd);
1455 /* Set ptr in input ring to current descriptor */
1456 sec_write_addr(&ring->input_ring[ring->pidx],
1457 (phys_addr_t)caam_jr_vtop_ctx(ctx, ctx->jobdes.desc));
1460 /* Notify HW that a new job is enqueued */
1461 hw_enqueue_desc_on_job_ring(ring);
1463 /* increment the producer index for the current job ring */
1464 ring->pidx = SEC_CIRCULAR_COUNTER(ring->pidx, SEC_JOB_RING_SIZE);
1470 caam_jr_enqueue_burst(void *qp, struct rte_crypto_op **ops,
1473 /* Function to transmit the frames to the given device and queue pair */
1476 struct caam_jr_qp *jr_qp = (struct caam_jr_qp *)qp;
1477 uint16_t num_tx = 0;
1478 /*Prepare each packet which is to be sent*/
1479 for (loop = 0; loop < nb_ops; loop++) {
1480 ret = caam_jr_enqueue_op(ops[loop], jr_qp);
1485 jr_qp->tx_pkts += num_tx;
1490 /* Release queue pair */
1492 caam_jr_queue_pair_release(struct rte_cryptodev *dev,
1495 struct sec_job_ring_t *internals;
1496 struct caam_jr_qp *qp = NULL;
1498 PMD_INIT_FUNC_TRACE();
1499 CAAM_JR_DEBUG("dev =%p, queue =%d", dev, qp_id);
1501 internals = dev->data->dev_private;
1502 if (qp_id >= internals->max_nb_queue_pairs) {
1503 CAAM_JR_ERR("Max supported qpid %d",
1504 internals->max_nb_queue_pairs);
1508 qp = &internals->qps[qp_id];
1510 dev->data->queue_pairs[qp_id] = NULL;
1515 /* Setup a queue pair */
1517 caam_jr_queue_pair_setup(
1518 struct rte_cryptodev *dev, uint16_t qp_id,
1519 __rte_unused const struct rte_cryptodev_qp_conf *qp_conf,
1520 __rte_unused int socket_id)
1522 struct sec_job_ring_t *internals;
1523 struct caam_jr_qp *qp = NULL;
1525 PMD_INIT_FUNC_TRACE();
1526 CAAM_JR_DEBUG("dev =%p, queue =%d, conf =%p", dev, qp_id, qp_conf);
1528 internals = dev->data->dev_private;
1529 if (qp_id >= internals->max_nb_queue_pairs) {
1530 CAAM_JR_ERR("Max supported qpid %d",
1531 internals->max_nb_queue_pairs);
1535 qp = &internals->qps[qp_id];
1536 qp->ring = internals;
1537 dev->data->queue_pairs[qp_id] = qp;
1542 /* Return the number of allocated queue pairs */
1544 caam_jr_queue_pair_count(struct rte_cryptodev *dev)
1546 PMD_INIT_FUNC_TRACE();
1548 return dev->data->nb_queue_pairs;
1551 /* Returns the size of the caam_jr session structure */
1553 caam_jr_sym_session_get_size(struct rte_cryptodev *dev __rte_unused)
1555 PMD_INIT_FUNC_TRACE();
1557 return sizeof(struct caam_jr_session);
1561 caam_jr_cipher_init(struct rte_cryptodev *dev __rte_unused,
1562 struct rte_crypto_sym_xform *xform,
1563 struct caam_jr_session *session)
1565 session->cipher_alg = xform->cipher.algo;
1566 session->iv.length = xform->cipher.iv.length;
1567 session->iv.offset = xform->cipher.iv.offset;
1568 session->cipher_key.data = rte_zmalloc(NULL, xform->cipher.key.length,
1569 RTE_CACHE_LINE_SIZE);
1570 if (session->cipher_key.data == NULL && xform->cipher.key.length > 0) {
1571 CAAM_JR_ERR("No Memory for cipher key\n");
1574 session->cipher_key.length = xform->cipher.key.length;
1576 memcpy(session->cipher_key.data, xform->cipher.key.data,
1577 xform->cipher.key.length);
1578 session->dir = (xform->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
1585 caam_jr_auth_init(struct rte_cryptodev *dev __rte_unused,
1586 struct rte_crypto_sym_xform *xform,
1587 struct caam_jr_session *session)
1589 session->auth_alg = xform->auth.algo;
1590 session->auth_key.data = rte_zmalloc(NULL, xform->auth.key.length,
1591 RTE_CACHE_LINE_SIZE);
1592 if (session->auth_key.data == NULL && xform->auth.key.length > 0) {
1593 CAAM_JR_ERR("No Memory for auth key\n");
1596 session->auth_key.length = xform->auth.key.length;
1597 session->digest_length = xform->auth.digest_length;
1599 memcpy(session->auth_key.data, xform->auth.key.data,
1600 xform->auth.key.length);
1601 session->dir = (xform->auth.op == RTE_CRYPTO_AUTH_OP_GENERATE) ?
1608 caam_jr_aead_init(struct rte_cryptodev *dev __rte_unused,
1609 struct rte_crypto_sym_xform *xform,
1610 struct caam_jr_session *session)
1612 session->aead_alg = xform->aead.algo;
1613 session->iv.length = xform->aead.iv.length;
1614 session->iv.offset = xform->aead.iv.offset;
1615 session->auth_only_len = xform->aead.aad_length;
1616 session->aead_key.data = rte_zmalloc(NULL, xform->aead.key.length,
1617 RTE_CACHE_LINE_SIZE);
1618 if (session->aead_key.data == NULL && xform->aead.key.length > 0) {
1619 CAAM_JR_ERR("No Memory for aead key\n");
1622 session->aead_key.length = xform->aead.key.length;
1623 session->digest_length = xform->aead.digest_length;
1625 memcpy(session->aead_key.data, xform->aead.key.data,
1626 xform->aead.key.length);
1627 session->dir = (xform->aead.op == RTE_CRYPTO_AEAD_OP_ENCRYPT) ?
1634 caam_jr_set_session_parameters(struct rte_cryptodev *dev,
1635 struct rte_crypto_sym_xform *xform, void *sess)
1637 struct sec_job_ring_t *internals = dev->data->dev_private;
1638 struct caam_jr_session *session = sess;
1640 PMD_INIT_FUNC_TRACE();
1642 if (unlikely(sess == NULL)) {
1643 CAAM_JR_ERR("invalid session struct");
1647 /* Default IV length = 0 */
1648 session->iv.length = 0;
1651 if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER && xform->next == NULL) {
1652 session->auth_alg = RTE_CRYPTO_AUTH_NULL;
1653 caam_jr_cipher_init(dev, xform, session);
1655 /* Authentication Only */
1656 } else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
1657 xform->next == NULL) {
1658 session->cipher_alg = RTE_CRYPTO_CIPHER_NULL;
1659 caam_jr_auth_init(dev, xform, session);
1661 /* Cipher then Authenticate */
1662 } else if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
1663 xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
1664 if (xform->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) {
1665 caam_jr_cipher_init(dev, xform, session);
1666 caam_jr_auth_init(dev, xform->next, session);
1668 CAAM_JR_ERR("Not supported: Cipher (decrypt) then Auth");
1672 /* Authenticate then Cipher */
1673 } else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
1674 xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
1675 if (xform->next->cipher.op == RTE_CRYPTO_CIPHER_OP_DECRYPT) {
1676 caam_jr_auth_init(dev, xform, session);
1677 caam_jr_cipher_init(dev, xform->next, session);
1679 CAAM_JR_ERR("Not supported: Auth then Cipher (encrypt)");
1683 /* AEAD operation for AES-GCM kind of Algorithms */
1684 } else if (xform->type == RTE_CRYPTO_SYM_XFORM_AEAD &&
1685 xform->next == NULL) {
1686 caam_jr_aead_init(dev, xform, session);
1689 CAAM_JR_ERR("Invalid crypto type");
1692 session->ctx_pool = internals->ctx_pool;
1697 rte_free(session->cipher_key.data);
1698 rte_free(session->auth_key.data);
1699 memset(session, 0, sizeof(struct caam_jr_session));
1705 caam_jr_sym_session_configure(struct rte_cryptodev *dev,
1706 struct rte_crypto_sym_xform *xform,
1707 struct rte_cryptodev_sym_session *sess,
1708 struct rte_mempool *mempool)
1710 void *sess_private_data;
1713 PMD_INIT_FUNC_TRACE();
1715 if (rte_mempool_get(mempool, &sess_private_data)) {
1716 CAAM_JR_ERR("Couldn't get object from session mempool");
1720 memset(sess_private_data, 0, sizeof(struct caam_jr_session));
1721 ret = caam_jr_set_session_parameters(dev, xform, sess_private_data);
1723 CAAM_JR_ERR("failed to configure session parameters");
1724 /* Return session to mempool */
1725 rte_mempool_put(mempool, sess_private_data);
1729 set_sym_session_private_data(sess, dev->driver_id, sess_private_data);
1734 /* Clear the memory of session so it doesn't leave key material behind */
1736 caam_jr_sym_session_clear(struct rte_cryptodev *dev,
1737 struct rte_cryptodev_sym_session *sess)
1739 uint8_t index = dev->driver_id;
1740 void *sess_priv = get_sym_session_private_data(sess, index);
1741 struct caam_jr_session *s = (struct caam_jr_session *)sess_priv;
1743 PMD_INIT_FUNC_TRACE();
1746 struct rte_mempool *sess_mp = rte_mempool_from_obj(sess_priv);
1748 rte_free(s->cipher_key.data);
1749 rte_free(s->auth_key.data);
1750 memset(s, 0, sizeof(struct caam_jr_session));
1751 set_sym_session_private_data(sess, index, NULL);
1752 rte_mempool_put(sess_mp, sess_priv);
1757 caam_jr_set_ipsec_session(struct rte_cryptodev *dev,
1758 struct rte_security_session_conf *conf,
1761 struct sec_job_ring_t *internals = dev->data->dev_private;
1762 struct rte_security_ipsec_xform *ipsec_xform = &conf->ipsec;
1763 struct rte_crypto_auth_xform *auth_xform;
1764 struct rte_crypto_cipher_xform *cipher_xform;
1765 struct caam_jr_session *session = (struct caam_jr_session *)sess;
1767 PMD_INIT_FUNC_TRACE();
1769 if (ipsec_xform->direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS) {
1770 cipher_xform = &conf->crypto_xform->cipher;
1771 auth_xform = &conf->crypto_xform->next->auth;
1773 auth_xform = &conf->crypto_xform->auth;
1774 cipher_xform = &conf->crypto_xform->next->cipher;
1776 session->proto_alg = conf->protocol;
1777 session->cipher_key.data = rte_zmalloc(NULL,
1778 cipher_xform->key.length,
1779 RTE_CACHE_LINE_SIZE);
1780 if (session->cipher_key.data == NULL &&
1781 cipher_xform->key.length > 0) {
1782 CAAM_JR_ERR("No Memory for cipher key\n");
1786 session->cipher_key.length = cipher_xform->key.length;
1787 session->auth_key.data = rte_zmalloc(NULL,
1788 auth_xform->key.length,
1789 RTE_CACHE_LINE_SIZE);
1790 if (session->auth_key.data == NULL &&
1791 auth_xform->key.length > 0) {
1792 CAAM_JR_ERR("No Memory for auth key\n");
1793 rte_free(session->cipher_key.data);
1796 session->auth_key.length = auth_xform->key.length;
1797 memcpy(session->cipher_key.data, cipher_xform->key.data,
1798 cipher_xform->key.length);
1799 memcpy(session->auth_key.data, auth_xform->key.data,
1800 auth_xform->key.length);
1802 switch (auth_xform->algo) {
1803 case RTE_CRYPTO_AUTH_SHA1_HMAC:
1804 session->auth_alg = RTE_CRYPTO_AUTH_SHA1_HMAC;
1806 case RTE_CRYPTO_AUTH_MD5_HMAC:
1807 session->auth_alg = RTE_CRYPTO_AUTH_MD5_HMAC;
1809 case RTE_CRYPTO_AUTH_SHA256_HMAC:
1810 session->auth_alg = RTE_CRYPTO_AUTH_SHA256_HMAC;
1812 case RTE_CRYPTO_AUTH_SHA384_HMAC:
1813 session->auth_alg = RTE_CRYPTO_AUTH_SHA384_HMAC;
1815 case RTE_CRYPTO_AUTH_SHA512_HMAC:
1816 session->auth_alg = RTE_CRYPTO_AUTH_SHA512_HMAC;
1818 case RTE_CRYPTO_AUTH_AES_CMAC:
1819 session->auth_alg = RTE_CRYPTO_AUTH_AES_CMAC;
1821 case RTE_CRYPTO_AUTH_NULL:
1822 session->auth_alg = RTE_CRYPTO_AUTH_NULL;
1824 case RTE_CRYPTO_AUTH_SHA224_HMAC:
1825 case RTE_CRYPTO_AUTH_AES_XCBC_MAC:
1826 case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
1827 case RTE_CRYPTO_AUTH_SHA1:
1828 case RTE_CRYPTO_AUTH_SHA256:
1829 case RTE_CRYPTO_AUTH_SHA512:
1830 case RTE_CRYPTO_AUTH_SHA224:
1831 case RTE_CRYPTO_AUTH_SHA384:
1832 case RTE_CRYPTO_AUTH_MD5:
1833 case RTE_CRYPTO_AUTH_AES_GMAC:
1834 case RTE_CRYPTO_AUTH_KASUMI_F9:
1835 case RTE_CRYPTO_AUTH_AES_CBC_MAC:
1836 case RTE_CRYPTO_AUTH_ZUC_EIA3:
1837 CAAM_JR_ERR("Crypto: Unsupported auth alg %u\n",
1841 CAAM_JR_ERR("Crypto: Undefined Auth specified %u\n",
1846 switch (cipher_xform->algo) {
1847 case RTE_CRYPTO_CIPHER_AES_CBC:
1848 session->cipher_alg = RTE_CRYPTO_CIPHER_AES_CBC;
1850 case RTE_CRYPTO_CIPHER_3DES_CBC:
1851 session->cipher_alg = RTE_CRYPTO_CIPHER_3DES_CBC;
1853 case RTE_CRYPTO_CIPHER_AES_CTR:
1854 session->cipher_alg = RTE_CRYPTO_CIPHER_AES_CTR;
1856 case RTE_CRYPTO_CIPHER_NULL:
1857 case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
1858 case RTE_CRYPTO_CIPHER_3DES_ECB:
1859 case RTE_CRYPTO_CIPHER_AES_ECB:
1860 case RTE_CRYPTO_CIPHER_KASUMI_F8:
1861 CAAM_JR_ERR("Crypto: Unsupported Cipher alg %u\n",
1862 cipher_xform->algo);
1865 CAAM_JR_ERR("Crypto: Undefined Cipher specified %u\n",
1866 cipher_xform->algo);
1870 if (ipsec_xform->direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS) {
1871 memset(&session->encap_pdb, 0, sizeof(struct ipsec_encap_pdb) +
1872 sizeof(session->ip4_hdr));
1873 session->ip4_hdr.ip_v = IPVERSION;
1874 session->ip4_hdr.ip_hl = 5;
1875 session->ip4_hdr.ip_len = rte_cpu_to_be_16(
1876 sizeof(session->ip4_hdr));
1877 session->ip4_hdr.ip_tos = ipsec_xform->tunnel.ipv4.dscp;
1878 session->ip4_hdr.ip_id = 0;
1879 session->ip4_hdr.ip_off = 0;
1880 session->ip4_hdr.ip_ttl = ipsec_xform->tunnel.ipv4.ttl;
1881 session->ip4_hdr.ip_p = (ipsec_xform->proto ==
1882 RTE_SECURITY_IPSEC_SA_PROTO_ESP) ? IPPROTO_ESP
1884 session->ip4_hdr.ip_sum = 0;
1885 session->ip4_hdr.ip_src = ipsec_xform->tunnel.ipv4.src_ip;
1886 session->ip4_hdr.ip_dst = ipsec_xform->tunnel.ipv4.dst_ip;
1887 session->ip4_hdr.ip_sum = calc_chksum((uint16_t *)
1888 (void *)&session->ip4_hdr,
1891 session->encap_pdb.options =
1892 (IPVERSION << PDBNH_ESP_ENCAP_SHIFT) |
1893 PDBOPTS_ESP_OIHI_PDB_INL |
1895 PDBHMO_ESP_ENCAP_DTTL;
1896 if (ipsec_xform->options.esn)
1897 session->encap_pdb.options |= PDBOPTS_ESP_ESN;
1898 session->encap_pdb.spi = ipsec_xform->spi;
1899 session->encap_pdb.ip_hdr_len = sizeof(struct ip);
1901 session->dir = DIR_ENC;
1902 } else if (ipsec_xform->direction ==
1903 RTE_SECURITY_IPSEC_SA_DIR_INGRESS) {
1904 memset(&session->decap_pdb, 0, sizeof(struct ipsec_decap_pdb));
1905 session->decap_pdb.options = sizeof(struct ip) << 16;
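/* The upper halfword of the decap PDB options word is taken to carry the
 * length of the outer IP header to strip; here a plain 20-byte IPv4 header.
 */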
1906 if (ipsec_xform->options.esn)
1907 session->decap_pdb.options |= PDBOPTS_ESP_ESN;
1908 session->dir = DIR_DEC;
1911 session->ctx_pool = internals->ctx_pool;
1915 rte_free(session->auth_key.data);
1916 rte_free(session->cipher_key.data);
1917 memset(session, 0, sizeof(struct caam_jr_session));
1922 caam_jr_security_session_create(void *dev,
1923 struct rte_security_session_conf *conf,
1924 struct rte_security_session *sess,
1925 struct rte_mempool *mempool)
1927 void *sess_private_data;
1928 struct rte_cryptodev *cdev = (struct rte_cryptodev *)dev;
1931 if (rte_mempool_get(mempool, &sess_private_data)) {
1932 CAAM_JR_ERR("Couldn't get object from session mempool");
1936 switch (conf->protocol) {
1937 case RTE_SECURITY_PROTOCOL_IPSEC:
1938 ret = caam_jr_set_ipsec_session(cdev, conf,
1941 case RTE_SECURITY_PROTOCOL_MACSEC:
1947 CAAM_JR_ERR("failed to configure session parameters");
1948 /* Return session to mempool */
1949 rte_mempool_put(mempool, sess_private_data);
1953 set_sec_session_private_data(sess, sess_private_data);
1958 /* Clear the memory of session so it doesn't leave key material behind */
1960 caam_jr_security_session_destroy(void *dev __rte_unused,
1961 struct rte_security_session *sess)
1963 PMD_INIT_FUNC_TRACE();
1964 void *sess_priv = get_sec_session_private_data(sess);
1966 struct caam_jr_session *s = (struct caam_jr_session *)sess_priv;
1969 struct rte_mempool *sess_mp = rte_mempool_from_obj(sess_priv);
1971 rte_free(s->cipher_key.data);
1972 rte_free(s->auth_key.data);
1973 memset(sess, 0, sizeof(struct caam_jr_session));
1974 set_sec_session_private_data(sess, NULL);
1975 rte_mempool_put(sess_mp, sess_priv);
1982 caam_jr_dev_configure(struct rte_cryptodev *dev,
1983 struct rte_cryptodev_config *config __rte_unused)
1986 struct sec_job_ring_t *internals;
1988 PMD_INIT_FUNC_TRACE();
1990 internals = dev->data->dev_private;
1991 snprintf(str, sizeof(str), "ctx_pool_%d", dev->data->dev_id);
1992 if (!internals->ctx_pool) {
1993 internals->ctx_pool = rte_mempool_create((const char *)str,
1995 sizeof(struct caam_jr_op_ctx),
1996 CTX_POOL_CACHE_SIZE, 0,
1997 NULL, NULL, NULL, NULL,
1999 if (!internals->ctx_pool) {
2000 CAAM_JR_ERR("%s create failed\n", str);
2004 CAAM_JR_INFO("mempool already created for dev_id : %d",
2011 caam_jr_dev_start(struct rte_cryptodev *dev __rte_unused)
2013 PMD_INIT_FUNC_TRACE();
2018 caam_jr_dev_stop(struct rte_cryptodev *dev __rte_unused)
2020 PMD_INIT_FUNC_TRACE();
2024 caam_jr_dev_close(struct rte_cryptodev *dev)
2026 struct sec_job_ring_t *internals;
2028 PMD_INIT_FUNC_TRACE();
2033 internals = dev->data->dev_private;
2034 rte_mempool_free(internals->ctx_pool);
2035 internals->ctx_pool = NULL;
2041 caam_jr_dev_infos_get(struct rte_cryptodev *dev,
2042 struct rte_cryptodev_info *info)
2044 struct sec_job_ring_t *internals = dev->data->dev_private;
2046 PMD_INIT_FUNC_TRACE();
2048 info->max_nb_queue_pairs = internals->max_nb_queue_pairs;
2049 info->feature_flags = dev->feature_flags;
2050 info->capabilities = caam_jr_get_cryptodev_capabilities();
2051 info->sym.max_nb_sessions = internals->max_nb_sessions;
2052 info->driver_id = cryptodev_driver_id;
2056 static struct rte_cryptodev_ops caam_jr_ops = {
2057 .dev_configure = caam_jr_dev_configure,
2058 .dev_start = caam_jr_dev_start,
2059 .dev_stop = caam_jr_dev_stop,
2060 .dev_close = caam_jr_dev_close,
2061 .dev_infos_get = caam_jr_dev_infos_get,
2062 .stats_get = caam_jr_stats_get,
2063 .stats_reset = caam_jr_stats_reset,
2064 .queue_pair_setup = caam_jr_queue_pair_setup,
2065 .queue_pair_release = caam_jr_queue_pair_release,
2066 .queue_pair_count = caam_jr_queue_pair_count,
2067 .sym_session_get_size = caam_jr_sym_session_get_size,
2068 .sym_session_configure = caam_jr_sym_session_configure,
2069 .sym_session_clear = caam_jr_sym_session_clear
2072 static struct rte_security_ops caam_jr_security_ops = {
2073 .session_create = caam_jr_security_session_create,
2074 .session_update = NULL,
2075 .session_stats_get = NULL,
2076 .session_destroy = caam_jr_security_session_destroy,
2077 .set_pkt_metadata = NULL,
2078 .capabilities_get = caam_jr_get_security_capabilities
2081 /* @brief Flush job rings of any processed descs.
2082 * The processed descs are silently dropped,
2083 * WITHOUT being notified to UA.
2086 close_job_ring(struct sec_job_ring_t *job_ring)
2088 if (job_ring->irq_fd) {
2089 /* Producer index is frozen. If the consumer index is not equal
2090 * to the producer index, then we have descs to flush.
2092 while (job_ring->pidx != job_ring->cidx)
2093 hw_flush_job_ring(job_ring, false, NULL);
2095 /* free the uio job ring */
2096 free_job_ring(job_ring->irq_fd);
2097 job_ring->irq_fd = 0;
2098 caam_jr_dma_free(job_ring->input_ring);
2099 caam_jr_dma_free(job_ring->output_ring);
2104 /** @brief Release the software and hardware resources tied to a job ring.
2105 * @param [in] job_ring The job ring
2107 * @retval 0 for success
2108 * @retval -1 for error
2111 shutdown_job_ring(struct sec_job_ring_t *job_ring)
2115 PMD_INIT_FUNC_TRACE();
2116 ASSERT(job_ring != NULL);
2117 ret = hw_shutdown_job_ring(job_ring);
2118 SEC_ASSERT(ret == 0, ret,
2119 "Failed to shutdown hardware job ring %p",
2122 if (job_ring->coalescing_en)
2123 hw_job_ring_disable_coalescing(job_ring);
2125 if (job_ring->jr_mode != SEC_NOTIFICATION_TYPE_POLL) {
2126 ret = caam_jr_disable_irqs(job_ring->irq_fd);
2127 SEC_ASSERT(ret == 0, ret,
2128 "Failed to disable irqs for job ring %p",
2136 * @brief Release the resources used by the SEC user space driver.
2138 * Reset and release SEC's job rings indicated by the User Application at
2139 * init_job_ring() and free any memory allocated internally.
2140 * Call once during application tear down.
2142 * @note In case there are any descriptors in-flight (descriptors received by
2143 * SEC driver for processing and for which no response was yet provided to UA),
2144 * the descriptors are discarded without any notifications to User Application.
2146 * @retval ::0 is returned for a successful execution
2147 * @retval ::-1 is returned if SEC driver release is in progress
2150 caam_jr_dev_uninit(struct rte_cryptodev *dev)
2152 struct sec_job_ring_t *internals;
2154 PMD_INIT_FUNC_TRACE();
2158 internals = dev->data->dev_private;
2159 rte_free(dev->security_ctx);
2161 /* If any descriptors are in flight, poll and wait
2162 * until all descriptors are received and silently discarded.
2165 shutdown_job_ring(internals);
2166 close_job_ring(internals);
2167 rte_mempool_free(internals->ctx_pool);
2170 CAAM_JR_INFO("Closing crypto device %s", dev->data->name);
2172 /* last caam jr instance */
2173 if (g_job_rings_no == 0)
2174 g_driver_state = SEC_DRIVER_STATE_IDLE;
2179 /* @brief Initialize the software and hardware resources tied to a job ring.
2180 * @param [in] jr_mode Mode to be used by SEC Driver to receive
2181 * notifications from SEC. Can be either
2182 * of the three: #SEC_NOTIFICATION_TYPE_NAPI
2183 * #SEC_NOTIFICATION_TYPE_IRQ or
2184 * #SEC_NOTIFICATION_TYPE_POLL
2185 * @param [in] NAPI_mode The NAPI work mode to configure a job ring at
2186 * startup. Used only when #SEC_NOTIFICATION_TYPE
2187 * is set to #SEC_NOTIFICATION_TYPE_NAPI.
2188 * @param [in] irq_coalescing_timer This value determines the maximum
2189 * amount of time after processing a
2190 * descriptor before raising an interrupt.
2191 * @param [in] irq_coalescing_count This value determines how many
2192 * descriptors are completed before
2193 * raising an interrupt.
2194 * @param [in] reg_base_addr The job ring base address register
2195 * @param [in] irq_id The job ring interrupt identification number.
2196 * @retval job_ring_handle for successful job ring configuration
2197 * @retval NULL on error
2201 init_job_ring(void *reg_base_addr, uint32_t irq_id)
2203 struct sec_job_ring_t *job_ring = NULL;
2205 int jr_mode = SEC_NOTIFICATION_TYPE_POLL;
2207 int irq_coalescing_timer = 0;
2208 int irq_coalescing_count = 0;
2210 for (i = 0; i < MAX_SEC_JOB_RINGS; i++) {
2211 if (g_job_rings[i].irq_fd == 0) {
2212 job_ring = &g_job_rings[i];
2217 if (job_ring == NULL) {
2218 CAAM_JR_ERR("No free job ring\n");
2222 job_ring->register_base_addr = reg_base_addr;
2223 job_ring->jr_mode = jr_mode;
2224 job_ring->napi_mode = 0;
2225 job_ring->irq_fd = irq_id;
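/* irq_fd doubles as the "slot in use" marker: the free-slot scan above looks
 * for irq_fd == 0 and close_job_ring() clears it back to 0.
 */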
2227 /* Allocate mem for input and output ring */
2229 /* Allocate memory for input ring */
2230 job_ring->input_ring = caam_jr_dma_mem_alloc(L1_CACHE_BYTES,
2231 SEC_DMA_MEM_INPUT_RING_SIZE);
2232 memset(job_ring->input_ring, 0, SEC_DMA_MEM_INPUT_RING_SIZE);
2234 /* Allocate memory for output ring */
2235 job_ring->output_ring = caam_jr_dma_mem_alloc(L1_CACHE_BYTES,
2236 SEC_DMA_MEM_OUTPUT_RING_SIZE);
2237 memset(job_ring->output_ring, 0, SEC_DMA_MEM_OUTPUT_RING_SIZE);
2239 /* Reset job ring in SEC hw and configure job ring registers */
2240 ret = hw_reset_job_ring(job_ring);
2242 CAAM_JR_ERR("Failed to reset hardware job ring");
2246 if (jr_mode == SEC_NOTIFICATION_TYPE_NAPI) {
2247 /* When SEC US driver works in NAPI mode, the UA can select
2248 * if the driver starts with IRQs on or off.
2250 if (napi_mode == SEC_STARTUP_INTERRUPT_MODE) {
2251 CAAM_JR_INFO("Enabling DONE IRQ generation on job ring - %p",
2253 ret = caam_jr_enable_irqs(job_ring->irq_fd);
2255 CAAM_JR_ERR("Failed to enable irqs for job ring");
2259 } else if (jr_mode == SEC_NOTIFICATION_TYPE_IRQ) {
2260 /* When SEC US driver works in pure interrupt mode,
2261 * IRQ's are always enabled.
2263 CAAM_JR_INFO("Enabling DONE IRQ generation on job ring - %p",
2265 ret = caam_jr_enable_irqs(job_ring->irq_fd);
2267 CAAM_JR_ERR("Failed to enable irqs for job ring");
2271 if (irq_coalescing_timer || irq_coalescing_count) {
2272 hw_job_ring_set_coalescing_param(job_ring,
2273 irq_coalescing_timer,
2274 irq_coalescing_count);
2276 hw_job_ring_enable_coalescing(job_ring);
2277 job_ring->coalescing_en = 1;
2280 job_ring->jr_state = SEC_JOB_RING_STATE_STARTED;
2281 job_ring->max_nb_queue_pairs = RTE_CAAM_MAX_NB_SEC_QPS;
2282 job_ring->max_nb_sessions = RTE_CAAM_JR_PMD_MAX_NB_SESSIONS;
2286 caam_jr_dma_free(job_ring->output_ring);
2287 caam_jr_dma_free(job_ring->input_ring);
2293 caam_jr_dev_init(const char *name,
2294 struct rte_vdev_device *vdev,
2295 struct rte_cryptodev_pmd_init_params *init_params)
2297 struct rte_cryptodev *dev;
2298 struct rte_security_ctx *security_instance;
2299 struct uio_job_ring *job_ring;
2300 char str[RTE_CRYPTODEV_NAME_MAX_LEN];
2302 PMD_INIT_FUNC_TRACE();
2304 /* Validate driver state */
2305 if (g_driver_state == SEC_DRIVER_STATE_IDLE) {
2306 g_job_rings_max = sec_configure();
2307 if (!g_job_rings_max) {
2308 CAAM_JR_ERR("No job ring detected on UIO!");
2311 /* Update driver state */
2312 g_driver_state = SEC_DRIVER_STATE_STARTED;
2315 if (g_job_rings_no >= g_job_rings_max) {
2316 CAAM_JR_ERR("No more job rings available, max=%d!",
2321 job_ring = config_job_ring();
2322 if (job_ring == NULL) {
2323 CAAM_JR_ERR("failed to create job ring");
2327 snprintf(str, sizeof(str), "caam_jr%d", job_ring->jr_id);
2329 dev = rte_cryptodev_pmd_create(name, &vdev->device, init_params);
2331 CAAM_JR_ERR("failed to create cryptodev vdev");
2334 /*TODO free it during teardown*/
2335 dev->data->dev_private = init_job_ring(job_ring->register_base_addr,
2338 if (!dev->data->dev_private) {
2339 CAAM_JR_ERR("Ring memory allocation failed\n");
2343 dev->driver_id = cryptodev_driver_id;
2344 dev->dev_ops = &caam_jr_ops;
2346 /* register rx/tx burst functions for data path */
2347 dev->dequeue_burst = caam_jr_dequeue_burst;
2348 dev->enqueue_burst = caam_jr_enqueue_burst;
2349 dev->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO |
2350 RTE_CRYPTODEV_FF_HW_ACCELERATED |
2351 RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING |
2352 RTE_CRYPTODEV_FF_SECURITY |
2353 RTE_CRYPTODEV_FF_IN_PLACE_SGL |
2354 RTE_CRYPTODEV_FF_OOP_SGL_IN_SGL_OUT |
2355 RTE_CRYPTODEV_FF_OOP_SGL_IN_LB_OUT |
2356 RTE_CRYPTODEV_FF_OOP_LB_IN_SGL_OUT |
2357 RTE_CRYPTODEV_FF_OOP_LB_IN_LB_OUT;
2359 /* For secondary processes, we don't initialise any further as primary
2360 * has already done this work. Only check we don't need a different
2363 if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
2364 CAAM_JR_WARN("Device already init by primary process");
2368 /*TODO free it during teardown*/
2369 security_instance = rte_malloc("caam_jr",
2370 sizeof(struct rte_security_ctx), 0);
2371 if (security_instance == NULL) {
2372 CAAM_JR_ERR("memory allocation failed\n");
2373 //todo error handling.
2377 security_instance->device = (void *)dev;
2378 security_instance->ops = &caam_jr_security_ops;
2379 security_instance->sess_cnt = 0;
2380 dev->security_ctx = security_instance;
2382 RTE_LOG(INFO, PMD, "%s cryptodev init\n", dev->data->name);
2387 caam_jr_dev_uninit(dev);
2388 rte_cryptodev_pmd_release_device(dev);
2390 free_job_ring(job_ring->uio_fd);
2392 CAAM_JR_ERR("driver %s: cryptodev_caam_jr_create failed",
2398 /** Initialise CAAM JR crypto device */
2400 cryptodev_caam_jr_probe(struct rte_vdev_device *vdev)
2402 struct rte_cryptodev_pmd_init_params init_params = {
2404 sizeof(struct sec_job_ring_t),
2406 RTE_CRYPTODEV_PMD_DEFAULT_MAX_NB_QUEUE_PAIRS
2409 const char *input_args;
2411 name = rte_vdev_device_name(vdev);
2415 input_args = rte_vdev_device_args(vdev);
2416 rte_cryptodev_pmd_parse_input_args(&init_params, input_args);
2418 /* if sec device version is not configured */
2419 if (!rta_get_sec_era()) {
2420 const struct device_node *caam_node;
2422 for_each_compatible_node(caam_node, NULL, "fsl,sec-v4.0") {
2423 const uint32_t *prop = of_get_property(caam_node,
2428 INTL_SEC_ERA(cpu_to_caam32(*prop)));
2433 #ifdef RTE_LIBRTE_PMD_CAAM_JR_BE
2434 if (rta_get_sec_era() > RTA_SEC_ERA_8) {
2436 "CAAM is compiled in BE mode for device with sec era > 8???\n");
2441 return caam_jr_dev_init(name, vdev, &init_params);
2444 /** Uninitialise CAAM JR crypto device */
2446 cryptodev_caam_jr_remove(struct rte_vdev_device *vdev)
2448 struct rte_cryptodev *cryptodev;
2451 name = rte_vdev_device_name(vdev);
2455 cryptodev = rte_cryptodev_pmd_get_named_dev(name);
2456 if (cryptodev == NULL)
2459 caam_jr_dev_uninit(cryptodev);
2461 return rte_cryptodev_pmd_destroy(cryptodev);
2464 static struct rte_vdev_driver cryptodev_caam_jr_drv = {
2465 .probe = cryptodev_caam_jr_probe,
2466 .remove = cryptodev_caam_jr_remove
2469 static struct cryptodev_driver caam_jr_crypto_drv;
2471 RTE_PMD_REGISTER_VDEV(CRYPTODEV_NAME_CAAM_JR_PMD, cryptodev_caam_jr_drv);
2472 RTE_PMD_REGISTER_PARAM_STRING(CRYPTODEV_NAME_CAAM_JR_PMD,
2473 "max_nb_queue_pairs=<int>"
2475 RTE_PMD_REGISTER_CRYPTO_DRIVER(caam_jr_crypto_drv, cryptodev_caam_jr_drv.driver,
2476 cryptodev_driver_id);
2478 RTE_INIT(caam_jr_init_log)
2480 caam_jr_logtype = rte_log_register("pmd.crypto.caam");
2481 if (caam_jr_logtype >= 0)
2482 rte_log_set_level(caam_jr_logtype, RTE_LOG_NOTICE);